// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic iSCSI HBA Driver
 * Copyright (c)  2003-2013 QLogic Corporation
 */
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/iscsi_boot_sysfs.h>
#include <linux/inet.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>

#include "ql4_def.h"
#include "ql4_version.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
#include "ql4_83xx.h"

/*
 * Driver version
 */
static char qla4xxx_version_str[40];

/*
 * SRB allocation cache
 */
static struct kmem_cache *srb_cachep;

/*
 * Module parameter information and variables
 */
static int ql4xdisablesysfsboot = 1;
module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdisablesysfsboot,
		 " Set to disable exporting boot targets to sysfs.\n"
		 "\t\t 0 - Export boot targets\n"
		 "\t\t 1 - Do not export boot targets (Default)");

int ql4xdontresethba;
module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdontresethba,
		 " Don't reset the HBA for driver recovery.\n"
		 "\t\t 0 - It will reset HBA (Default)\n"
		 "\t\t 1 - It will NOT reset HBA");

int ql4xextended_error_logging;
module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xextended_error_logging,
		 " Option to enable extended error logging.\n"
		 "\t\t 0 - no logging (Default)\n"
		 "\t\t 2 - debug logging");

int ql4xenablemsix = 1;
module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql4xenablemsix,
		 " Set to enable MSI or MSI-X interrupt mechanism.\n"
		 "\t\t 0 = enable INTx interrupt mechanism.\n"
		 "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n"
		 "\t\t 2 = enable MSI interrupt mechanism.");

#define QL4_DEF_QDEPTH 32
static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xmaxqdepth,
		 " Maximum queue depth to report for target devices.\n"
		 "\t\t Default: 32.");

static int ql4xqfulltracking = 1;
module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xqfulltracking,
		 " Enable or disable dynamic tracking and adjustment of\n"
		 "\t\t scsi device queue depth.\n"
		 "\t\t 0 - Disable.\n"
		 "\t\t 1 - Enable. (Default)");
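
/*
 * Example usage: the options above can be supplied at module load time, e.g.
 *
 *	modprobe qla4xxx ql4xextended_error_logging=2 ql4xmaxqdepth=64
 *
 * Parameters declared with S_IWUSR can also be updated at runtime via
 * /sys/module/qla4xxx/parameters/<name>.
 */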

static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
MODULE_PARM_DESC(ql4xsess_recovery_tmo,
		 " Target Session Recovery Timeout.\n"
		 "\t\t Default: 120 sec.");

int ql4xmdcapmask = 0;
module_param(ql4xmdcapmask, int, S_IRUGO);
MODULE_PARM_DESC(ql4xmdcapmask,
		 " Set the Minidump driver capture mask level.\n"
		 "\t\t Default is 0 (firmware default capture mask)\n"
		 "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF");

int ql4xenablemd = 1;
module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xenablemd,
		 " Set to enable minidump.\n"
		 "\t\t 0 - disable minidump\n"
		 "\t\t 1 - enable minidump (Default)");

static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
/*
 * SCSI host template entry points
 */
static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);

/*
 * iSCSI template entry points
 */
static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
				     enum iscsi_param param, char *buf);
static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
				  enum iscsi_param param, char *buf);
static int qla4xxx_host_get_param(struct Scsi_Host *shost,
				  enum iscsi_host_param param, char *buf);
static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data,
				   uint32_t len);
static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
				   enum iscsi_param_type param_type,
				   int param, char *buf);
static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
						 struct sockaddr *dst_addr,
						 int non_blocking);
static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
				enum iscsi_param param, char *buf);
static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
static struct iscsi_cls_conn *
qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
			     struct iscsi_cls_conn *cls_conn,
			     uint64_t transport_fd, int is_leading);
static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
static struct iscsi_cls_session *
qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
		       uint16_t qdepth, uint32_t initial_cmdsn);
static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
static void qla4xxx_task_work(struct work_struct *wdata);
static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
static int qla4xxx_task_xmit(struct iscsi_task *);
static void qla4xxx_task_cleanup(struct iscsi_task *);
static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
				   struct iscsi_stats *stats);
static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
			     uint32_t iface_type, uint32_t payload_size,
			     uint32_t pid, struct sockaddr *dst_addr);
static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
				 uint32_t *num_entries, char *buf);
static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data,
				  int len);
static int
qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len);

/*
 * SCSI host template entry points
 */
static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
static int qla4xxx_slave_alloc(struct scsi_device *device);
static umode_t qla4_attr_is_visible(int param_type, int param);
static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);

/*
 * iSCSI Flash DDB sysfs entry points
 */
static int
qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
			    struct iscsi_bus_flash_conn *fnode_conn,
			    void *data, int len);
static int
qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
			    int param, char *buf);
static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
				 int len);
static int
qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess);
static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
				   struct iscsi_bus_flash_conn *fnode_conn);
static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
				    struct iscsi_bus_flash_conn *fnode_conn);
static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess);

static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
	QLA82XX_LEGACY_INTR_CONFIG;

static const uint32_t qla4_82xx_reg_tbl[] = {
	QLA82XX_PEG_HALT_STATUS1,
	QLA82XX_PEG_HALT_STATUS2,
	QLA82XX_PEG_ALIVE_COUNTER,
	QLA82XX_CRB_DRV_ACTIVE,
	QLA82XX_CRB_DEV_STATE,
	QLA82XX_CRB_DRV_STATE,
	QLA82XX_CRB_DRV_SCRATCH,
	QLA82XX_CRB_DEV_PART_INFO,
	QLA82XX_CRB_DRV_IDC_VERSION,
	QLA82XX_FW_VERSION_MAJOR,
	QLA82XX_FW_VERSION_MINOR,
	QLA82XX_FW_VERSION_SUB,
	CRB_CMDPEG_STATE,
	CRB_TEMP_STATE,
};

static const uint32_t qla4_83xx_reg_tbl[] = {
	QLA83XX_PEG_HALT_STATUS1,
	QLA83XX_PEG_HALT_STATUS2,
	QLA83XX_PEG_ALIVE_COUNTER,
	QLA83XX_CRB_DRV_ACTIVE,
	QLA83XX_CRB_DEV_STATE,
	QLA83XX_CRB_DRV_STATE,
	QLA83XX_CRB_DRV_SCRATCH,
	QLA83XX_CRB_DEV_PART_INFO1,
	QLA83XX_CRB_IDC_VER_MAJOR,
	QLA83XX_FW_VER_MAJOR,
	QLA83XX_FW_VER_MINOR,
	QLA83XX_FW_VER_SUB,
	QLA83XX_CMDPEG_STATE,
	QLA83XX_ASIC_TEMP,
};

static struct scsi_host_template qla4xxx_driver_template = {
	.module = THIS_MODULE,
	.name = DRIVER_NAME,
	.proc_name = DRIVER_NAME,
	.queuecommand = qla4xxx_queuecommand,
	.cmd_size = sizeof(struct qla4xxx_cmd_priv),

	.eh_abort_handler = qla4xxx_eh_abort,
	.eh_device_reset_handler = qla4xxx_eh_device_reset,
	.eh_target_reset_handler = qla4xxx_eh_target_reset,
	.eh_host_reset_handler = qla4xxx_eh_host_reset,
	.eh_timed_out = qla4xxx_eh_cmd_timed_out,

	.slave_alloc = qla4xxx_slave_alloc,
	.change_queue_depth = scsi_change_queue_depth,

	.this_id = -1,
	.cmd_per_lun = 3,
	.sg_tablesize = SG_ALL,

	.max_sectors = 0xFFFF,
	.shost_groups = qla4xxx_host_groups,
	.host_reset = qla4xxx_host_reset,
	.vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
};

static struct iscsi_transport qla4xxx_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRIVER_NAME,
	.caps = CAP_TEXT_NEGO |
		CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
		CAP_DATADGST | CAP_LOGIN_OFFLOAD |
		CAP_MULTI_R2T,
	.attr_is_visible = qla4_attr_is_visible,
	.create_session = qla4xxx_session_create,
	.destroy_session = qla4xxx_session_destroy,
	.start_conn = qla4xxx_conn_start,
	.create_conn = qla4xxx_conn_create,
	.bind_conn = qla4xxx_conn_bind,
	.unbind_conn = iscsi_conn_unbind,
	.stop_conn = iscsi_conn_stop,
	.destroy_conn = qla4xxx_conn_destroy,
	.set_param = iscsi_set_param,
	.get_conn_param = qla4xxx_conn_get_param,
	.get_session_param = qla4xxx_session_get_param,
	.get_ep_param = qla4xxx_get_ep_param,
	.ep_connect = qla4xxx_ep_connect,
	.ep_poll = qla4xxx_ep_poll,
	.ep_disconnect = qla4xxx_ep_disconnect,
	.get_stats = qla4xxx_conn_get_stats,
	.send_pdu = iscsi_conn_send_pdu,
	.xmit_task = qla4xxx_task_xmit,
	.cleanup_task = qla4xxx_task_cleanup,
	.alloc_pdu = qla4xxx_alloc_pdu,

	.get_host_param = qla4xxx_host_get_param,
	.set_iface_param = qla4xxx_iface_set_param,
	.get_iface_param = qla4xxx_get_iface_param,
	.bsg_request = qla4xxx_bsg_request,
	.send_ping = qla4xxx_send_ping,
	.get_chap = qla4xxx_get_chap_list,
	.delete_chap = qla4xxx_delete_chap,
	.set_chap = qla4xxx_set_chap_entry,
	.get_flashnode_param = qla4xxx_sysfs_ddb_get_param,
	.set_flashnode_param = qla4xxx_sysfs_ddb_set_param,
	.new_flashnode = qla4xxx_sysfs_ddb_add,
	.del_flashnode = qla4xxx_sysfs_ddb_delete,
	.login_flashnode = qla4xxx_sysfs_ddb_login,
	.logout_flashnode = qla4xxx_sysfs_ddb_logout,
	.logout_flashnode_sid = qla4xxx_sysfs_ddb_logout_sid,
	.get_host_stats = qla4xxx_get_host_stats,
};

static struct scsi_transport_template *qla4xxx_scsi_transport;
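
/*
 * Detect an unresponsive/disconnected ISP: a host status (or alive counter)
 * read that returns QL4_ISP_REG_DISCONNECT is reported as QLA_ERROR.
 */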
static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha)
{
	u32 reg_val = 0;
	int rval = QLA_SUCCESS;

	if (is_qla8022(ha))
		reg_val = readl(&ha->qla4_82xx_reg->host_status);
	else if (is_qla8032(ha) || is_qla8042(ha))
		reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
	else
		reg_val = readw(&ha->reg->ctrl_status);

	if (reg_val == QL4_ISP_REG_DISCONNECT)
		rval = QLA_ERROR;

	return rval;
}

static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
			     uint32_t iface_type, uint32_t payload_size,
			     uint32_t pid, struct sockaddr *dst_addr)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct sockaddr_in *addr;
	struct sockaddr_in6 *addr6;
	uint32_t options = 0;
	uint8_t ipaddr[IPv6_ADDR_LEN];
	int rval;

	memset(ipaddr, 0, IPv6_ADDR_LEN);
	/* IPv4 to IPv4 */
	if ((iface_type == ISCSI_IFACE_TYPE_IPV4) &&
	    (dst_addr->sa_family == AF_INET)) {
		addr = (struct sockaddr_in *)dst_addr;
		memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN);
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 "
				  "dest: %pI4\n", __func__,
				  &ha->ip_config.ip_address, ipaddr));
		rval = qla4xxx_ping_iocb(ha, options, payload_size, pid,
					 ipaddr);
		if (rval)
			rval = -EINVAL;
	} else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) &&
		   (dst_addr->sa_family == AF_INET6)) {
		/* IPv6 to IPv6 */
		addr6 = (struct sockaddr_in6 *)dst_addr;
		memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN);

		options |= PING_IPV6_PROTOCOL_ENABLE;

		/* Ping using LinkLocal address */
		if ((iface_num == 0) || (iface_num == 1)) {
			DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping "
					  "src: %pI6 dest: %pI6\n", __func__,
					  &ha->ip_config.ipv6_link_local_addr,
					  ipaddr));
			options |= PING_IPV6_LINKLOCAL_ADDR;
			rval = qla4xxx_ping_iocb(ha, options, payload_size,
						 pid, ipaddr);
		} else {
			ql4_printk(KERN_WARNING, ha, "%s: iface num = %d "
				   "not supported\n", __func__, iface_num);
			rval = -ENOSYS;
			goto exit_send_ping;
		}

		/*
		 * If ping using LinkLocal address fails, try ping using
		 * IPv6 address
		 */
		if (rval != QLA_SUCCESS) {
			options &= ~PING_IPV6_LINKLOCAL_ADDR;
			if (iface_num == 0) {
				options |= PING_IPV6_ADDR0;
				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
						  "Ping src: %pI6 "
						  "dest: %pI6\n", __func__,
						  &ha->ip_config.ipv6_addr0,
						  ipaddr));
			} else if (iface_num == 1) {
				options |= PING_IPV6_ADDR1;
				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
						  "Ping src: %pI6 "
						  "dest: %pI6\n", __func__,
						  &ha->ip_config.ipv6_addr1,
						  ipaddr));
			}
			rval = qla4xxx_ping_iocb(ha, options, payload_size,
						 pid, ipaddr);
			if (rval)
				rval = -EINVAL;
		}
	} else
		rval = -ENOSYS;
exit_send_ping:
	return rval;
}
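
/*
 * qla4_attr_is_visible() tells the iscsi_transport class which host, session,
 * net, iface and flash-node parameters this driver exposes; everything listed
 * below is published read-only (S_IRUGO) in sysfs, anything else is hidden.
 */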
static umode_t qla4_attr_is_visible(int param_type, int param)
{
	switch (param_type) {
	case ISCSI_HOST_PARAM:
		switch (param) {
		case ISCSI_HOST_PARAM_HWADDRESS:
		case ISCSI_HOST_PARAM_IPADDRESS:
		case ISCSI_HOST_PARAM_INITIATOR_NAME:
		case ISCSI_HOST_PARAM_PORT_STATE:
		case ISCSI_HOST_PARAM_PORT_SPEED:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_PARAM:
		switch (param) {
		case ISCSI_PARAM_PERSISTENT_ADDRESS:
		case ISCSI_PARAM_PERSISTENT_PORT:
		case ISCSI_PARAM_CONN_ADDRESS:
		case ISCSI_PARAM_CONN_PORT:
		case ISCSI_PARAM_TARGET_NAME:
		case ISCSI_PARAM_TPGT:
		case ISCSI_PARAM_TARGET_ALIAS:
		case ISCSI_PARAM_MAX_BURST:
		case ISCSI_PARAM_MAX_R2T:
		case ISCSI_PARAM_FIRST_BURST:
		case ISCSI_PARAM_MAX_RECV_DLENGTH:
		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		case ISCSI_PARAM_IFACE_NAME:
		case ISCSI_PARAM_CHAP_OUT_IDX:
		case ISCSI_PARAM_CHAP_IN_IDX:
		case ISCSI_PARAM_USERNAME:
		case ISCSI_PARAM_PASSWORD:
		case ISCSI_PARAM_USERNAME_IN:
		case ISCSI_PARAM_PASSWORD_IN:
		case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
		case ISCSI_PARAM_DISCOVERY_SESS:
		case ISCSI_PARAM_PORTAL_TYPE:
		case ISCSI_PARAM_CHAP_AUTH_EN:
		case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
		case ISCSI_PARAM_BIDI_CHAP_EN:
		case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
		case ISCSI_PARAM_DEF_TIME2WAIT:
		case ISCSI_PARAM_DEF_TIME2RETAIN:
		case ISCSI_PARAM_HDRDGST_EN:
		case ISCSI_PARAM_DATADGST_EN:
		case ISCSI_PARAM_INITIAL_R2T_EN:
		case ISCSI_PARAM_IMM_DATA_EN:
		case ISCSI_PARAM_PDU_INORDER_EN:
		case ISCSI_PARAM_DATASEQ_INORDER_EN:
		case ISCSI_PARAM_MAX_SEGMENT_SIZE:
		case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
		case ISCSI_PARAM_TCP_WSF_DISABLE:
		case ISCSI_PARAM_TCP_NAGLE_DISABLE:
		case ISCSI_PARAM_TCP_TIMER_SCALE:
		case ISCSI_PARAM_TCP_TIMESTAMP_EN:
		case ISCSI_PARAM_TCP_XMIT_WSF:
		case ISCSI_PARAM_TCP_RECV_WSF:
		case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
		case ISCSI_PARAM_IPV4_TOS:
		case ISCSI_PARAM_IPV6_TC:
		case ISCSI_PARAM_IPV6_FLOW_LABEL:
		case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
		case ISCSI_PARAM_KEEPALIVE_TMO:
		case ISCSI_PARAM_LOCAL_PORT:
		case ISCSI_PARAM_ISID:
		case ISCSI_PARAM_TSID:
		case ISCSI_PARAM_DEF_TASKMGMT_TMO:
		case ISCSI_PARAM_ERL:
		case ISCSI_PARAM_STATSN:
		case ISCSI_PARAM_EXP_STATSN:
		case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
		case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
		case ISCSI_PARAM_LOCAL_IPADDR:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_NET_PARAM:
		switch (param) {
		case ISCSI_NET_PARAM_IPV4_ADDR:
		case ISCSI_NET_PARAM_IPV4_SUBNET:
		case ISCSI_NET_PARAM_IPV4_GW:
		case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
		case ISCSI_NET_PARAM_IFACE_ENABLE:
		case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
		case ISCSI_NET_PARAM_IPV6_ADDR:
		case ISCSI_NET_PARAM_IPV6_ROUTER:
		case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
		case ISCSI_NET_PARAM_VLAN_ID:
		case ISCSI_NET_PARAM_VLAN_PRIORITY:
		case ISCSI_NET_PARAM_VLAN_ENABLED:
		case ISCSI_NET_PARAM_MTU:
		case ISCSI_NET_PARAM_PORT:
		case ISCSI_NET_PARAM_IPADDR_STATE:
		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE:
		case ISCSI_NET_PARAM_IPV6_ROUTER_STATE:
		case ISCSI_NET_PARAM_DELAYED_ACK_EN:
		case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
		case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
		case ISCSI_NET_PARAM_TCP_WSF:
		case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
		case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
		case ISCSI_NET_PARAM_CACHE_ID:
		case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
		case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
		case ISCSI_NET_PARAM_IPV4_TOS_EN:
		case ISCSI_NET_PARAM_IPV4_TOS:
		case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
		case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
		case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
		case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
		case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
		case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
		case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
		case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
		case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
		case ISCSI_NET_PARAM_REDIRECT_EN:
		case ISCSI_NET_PARAM_IPV4_TTL:
		case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
		case ISCSI_NET_PARAM_IPV6_MLD_EN:
		case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
		case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
		case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
		case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
		case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
		case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
		case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
		case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_IFACE_PARAM:
		switch (param) {
		case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
		case ISCSI_IFACE_PARAM_HDRDGST_EN:
		case ISCSI_IFACE_PARAM_DATADGST_EN:
		case ISCSI_IFACE_PARAM_IMM_DATA_EN:
		case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
		case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
		case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
		case ISCSI_IFACE_PARAM_ERL:
		case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
		case ISCSI_IFACE_PARAM_FIRST_BURST:
		case ISCSI_IFACE_PARAM_MAX_R2T:
		case ISCSI_IFACE_PARAM_MAX_BURST:
		case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
		case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
		case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
		case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
		case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
		case ISCSI_IFACE_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_FLASHNODE_PARAM:
		switch (param) {
		case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
		case ISCSI_FLASHNODE_PORTAL_TYPE:
		case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
		case ISCSI_FLASHNODE_DISCOVERY_SESS:
		case ISCSI_FLASHNODE_ENTRY_EN:
		case ISCSI_FLASHNODE_HDR_DGST_EN:
		case ISCSI_FLASHNODE_DATA_DGST_EN:
		case ISCSI_FLASHNODE_IMM_DATA_EN:
		case ISCSI_FLASHNODE_INITIAL_R2T_EN:
		case ISCSI_FLASHNODE_DATASEQ_INORDER:
		case ISCSI_FLASHNODE_PDU_INORDER:
		case ISCSI_FLASHNODE_CHAP_AUTH_EN:
		case ISCSI_FLASHNODE_SNACK_REQ_EN:
		case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
		case ISCSI_FLASHNODE_BIDI_CHAP_EN:
		case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
		case ISCSI_FLASHNODE_ERL:
		case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
		case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
		case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
		case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
		case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
		case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
		case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
		case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
		case ISCSI_FLASHNODE_FIRST_BURST:
		case ISCSI_FLASHNODE_DEF_TIME2WAIT:
		case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
		case ISCSI_FLASHNODE_MAX_R2T:
		case ISCSI_FLASHNODE_KEEPALIVE_TMO:
		case ISCSI_FLASHNODE_ISID:
		case ISCSI_FLASHNODE_TSID:
		case ISCSI_FLASHNODE_PORT:
		case ISCSI_FLASHNODE_MAX_BURST:
		case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
		case ISCSI_FLASHNODE_IPADDR:
		case ISCSI_FLASHNODE_ALIAS:
		case ISCSI_FLASHNODE_REDIRECT_IPADDR:
		case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
		case ISCSI_FLASHNODE_LOCAL_PORT:
		case ISCSI_FLASHNODE_IPV4_TOS:
		case ISCSI_FLASHNODE_IPV6_TC:
		case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
		case ISCSI_FLASHNODE_NAME:
		case ISCSI_FLASHNODE_TPGT:
		case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
		case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
		case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
		case ISCSI_FLASHNODE_TCP_XMIT_WSF:
		case ISCSI_FLASHNODE_TCP_RECV_WSF:
		case ISCSI_FLASHNODE_CHAP_OUT_IDX:
		case ISCSI_FLASHNODE_USERNAME:
		case ISCSI_FLASHNODE_PASSWORD:
		case ISCSI_FLASHNODE_STATSN:
		case ISCSI_FLASHNODE_EXP_STATSN:
		case ISCSI_FLASHNODE_IS_BOOT_TGT:
			return S_IRUGO;
		default:
			return 0;
		}
	}

	return 0;
}

/**
 * qla4xxx_create_chap_list - Create CHAP list from FLASH
 * @ha: pointer to adapter structure
 *
 * Read flash and make a list of CHAP entries. During login, when a CHAP entry
 * is received it is checked against this list. If the entry exists, the CHAP
 * entry index is set in the DDB. If the CHAP entry does not exist in this
 * list, a new entry is added to the CHAP table in FLASH and the index
 * obtained is used in the DDB.
 **/
static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
{
	int rval = 0;
	uint8_t *chap_flash_data = NULL;
	uint32_t offset;
	dma_addr_t chap_dma;
	uint32_t chap_size = 0;

	if (is_qla40XX(ha))
		chap_size = MAX_CHAP_ENTRIES_40XX *
			    sizeof(struct ql4_chap_table);
	else	/* Single region contains CHAP info for both
		 * ports which is divided into half for each port.
		 */
		chap_size = ha->hw.flt_chap_size / 2;

	chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
					     &chap_dma, GFP_KERNEL);
	if (!chap_flash_data) {
		ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
		return;
	}

	if (is_qla40XX(ha)) {
		offset = FLASH_CHAP_OFFSET;
	} else {
		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
		if (ha->port_num == 1)
			offset += chap_size;
	}

	rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
	if (rval != QLA_SUCCESS)
		goto exit_chap_list;

	if (ha->chap_list == NULL)
		ha->chap_list = vmalloc(chap_size);
	if (ha->chap_list == NULL) {
		ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
		goto exit_chap_list;
	}

	memcpy(ha->chap_list, chap_flash_data, chap_size);

exit_chap_list:
	dma_free_coherent(&ha->pdev->dev, chap_size, chap_flash_data, chap_dma);
}

static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha,
				     int16_t chap_index,
				     struct ql4_chap_table **chap_entry)
{
	int rval = QLA_ERROR;
	int max_chap_entries;

	if (!ha->chap_list) {
		ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
		goto exit_get_chap;
	}

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	if (chap_index > max_chap_entries) {
		ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
		goto exit_get_chap;
	}

	*chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index;
	if ((*chap_entry)->cookie !=
	     cpu_to_le16(CHAP_VALID_COOKIE)) {
		*chap_entry = NULL;
	} else {
		rval = QLA_SUCCESS;
	}

exit_get_chap:
	return rval;
}
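
/*
 * Note: a flash CHAP slot is considered valid only when its cookie field
 * contains CHAP_VALID_COOKIE; free-slot allocation below also skips the
 * reserved indices at or below MAX_RESRV_CHAP_IDX.
 */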

/**
 * qla4xxx_find_free_chap_index - Find the first free chap index
 * @ha: pointer to adapter structure
 * @chap_index: CHAP index to be returned
 *
 * Find the first free chap index available in the chap table
 *
 * Note: Caller should acquire the chap lock before getting here.
 **/
static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha,
					uint16_t *chap_index)
{
	int i, rval;
	int free_index = -1;
	int max_chap_entries = 0;
	struct ql4_chap_table *chap_table;

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	if (!ha->chap_list) {
		ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
		rval = QLA_ERROR;
		goto exit_find_chap;
	}

	for (i = 0; i < max_chap_entries; i++) {
		chap_table = (struct ql4_chap_table *)ha->chap_list + i;

		if ((chap_table->cookie !=
		    cpu_to_le16(CHAP_VALID_COOKIE)) &&
		   (i > MAX_RESRV_CHAP_IDX)) {
			free_index = i;
			break;
		}
	}

	if (free_index != -1) {
		*chap_index = free_index;
		rval = QLA_SUCCESS;
	} else {
		rval = QLA_ERROR;
	}

exit_find_chap:
	return rval;
}

static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
				 uint32_t *num_entries, char *buf)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct ql4_chap_table *chap_table;
	struct iscsi_chap_rec *chap_rec;
	int max_chap_entries = 0;
	int valid_chap_entries = 0;
	int ret = 0, i;

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n",
		   __func__, *num_entries, chap_tbl_idx);

	if (!buf) {
		ret = -ENOMEM;
		goto exit_get_chap_list;
	}

	qla4xxx_create_chap_list(ha);

	chap_rec = (struct iscsi_chap_rec *) buf;
	mutex_lock(&ha->chap_sem);
	for (i = chap_tbl_idx; i < max_chap_entries; i++) {
		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
		if (chap_table->cookie !=
		    cpu_to_le16(CHAP_VALID_COOKIE))
			continue;

		chap_rec->chap_tbl_idx = i;
		strlcpy(chap_rec->username, chap_table->name,
			ISCSI_CHAP_AUTH_NAME_MAX_LEN);
		strlcpy(chap_rec->password, chap_table->secret,
			QL4_CHAP_MAX_SECRET_LEN);
		chap_rec->password_length = chap_table->secret_len;

		if (chap_table->flags & BIT_7) /* local */
			chap_rec->chap_type = CHAP_TYPE_OUT;

		if (chap_table->flags & BIT_6) /* peer */
			chap_rec->chap_type = CHAP_TYPE_IN;

		chap_rec++;

		valid_chap_entries++;
		if (valid_chap_entries == *num_entries)
			break;
	}
	mutex_unlock(&ha->chap_sem);

exit_get_chap_list:
	ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n",
		   __func__, valid_chap_entries);
	*num_entries = valid_chap_entries;
	return ret;
}

static int __qla4xxx_is_chap_active(struct device *dev, void *data)
{
	int ret = 0;
	uint16_t *chap_tbl_idx = (uint16_t *) data;
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;

	if (!iscsi_is_session_dev(dev))
		goto exit_is_chap_active;

	cls_session = iscsi_dev_to_session(dev);
	sess = cls_session->dd_data;
	ddb_entry = sess->dd_data;

	if (iscsi_is_session_online(cls_session))
		goto exit_is_chap_active;

	if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
		ret = 1;

exit_is_chap_active:
	return ret;
}

static int qla4xxx_is_chap_active(struct Scsi_Host *shost,
				  uint16_t chap_tbl_idx)
{
	int ret = 0;

	ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx,
				    __qla4xxx_is_chap_active);

	return ret;
}
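
/*
 * qla4xxx_delete_chap() refuses to remove a CHAP entry that is still
 * referenced by an existing session (-EBUSY); otherwise it invalidates the
 * cookie in flash and refreshes the cached copy in ha->chap_list.
 */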
static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct ql4_chap_table *chap_table;
	dma_addr_t chap_dma;
	int max_chap_entries = 0;
	uint32_t offset = 0;
	uint32_t chap_size;
	int ret = 0;

	chap_table = dma_pool_zalloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
	if (chap_table == NULL)
		return -ENOMEM;

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	if (chap_tbl_idx > max_chap_entries) {
		ret = -EINVAL;
		goto exit_delete_chap;
	}

	/* Check if chap index is in use.
	 * If chap is in use don't delete chap entry */
	ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
	if (ret) {
		ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot "
			   "delete from flash\n", chap_tbl_idx);
		ret = -EBUSY;
		goto exit_delete_chap;
	}

	chap_size = sizeof(struct ql4_chap_table);
	if (is_qla40XX(ha))
		offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
	else {
		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
		/* flt_chap_size is CHAP table size for both ports
		 * so divide it by 2 to calculate the offset for second port
		 */
		if (ha->port_num == 1)
			offset += (ha->hw.flt_chap_size / 2);
		offset += (chap_tbl_idx * chap_size);
	}

	ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
	if (ret != QLA_SUCCESS) {
		ret = -EINVAL;
		goto exit_delete_chap;
	}

	DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
			  __le16_to_cpu(chap_table->cookie)));

	if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
		ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
		goto exit_delete_chap;
	}

	chap_table->cookie = cpu_to_le16(0xFFFF);

	offset = FLASH_CHAP_OFFSET |
		 (chap_tbl_idx * sizeof(struct ql4_chap_table));
	ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
				FLASH_OPT_RMW_COMMIT);
	if (ret == QLA_SUCCESS && ha->chap_list) {
		mutex_lock(&ha->chap_sem);
		/* Update ha chap_list cache */
		memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
		       chap_table, sizeof(struct ql4_chap_table));
		mutex_unlock(&ha->chap_sem);
	}
	if (ret != QLA_SUCCESS)
		ret = -EINVAL;

exit_delete_chap:
	dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
	return ret;
}
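
/*
 * The CHAP credentials handled below arrive as a list of netlink attributes
 * (ISCSI_CHAP_PARAM_*), typically built by user-space tools such as iscsiadm
 * through the iSCSI transport interface.
 */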

/**
 * qla4xxx_set_chap_entry - Make chap entry with given information
 * @shost: pointer to host
 * @data: chap info - credentials, index and type to make chap entry
 * @len: length of data
 *
 * Add or update chap entry with the given information
 **/
static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct iscsi_chap_rec chap_rec;
	struct ql4_chap_table *chap_entry = NULL;
	struct iscsi_param_info *param_info;
	struct nlattr *attr;
	int max_chap_entries = 0;
	int type;
	int rem = len;
	int rc = 0;
	int size;

	memset(&chap_rec, 0, sizeof(chap_rec));

	nla_for_each_attr(attr, data, len, rem) {
		param_info = nla_data(attr);

		switch (param_info->param) {
		case ISCSI_CHAP_PARAM_INDEX:
			chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value;
			break;
		case ISCSI_CHAP_PARAM_CHAP_TYPE:
			chap_rec.chap_type = param_info->value[0];
			break;
		case ISCSI_CHAP_PARAM_USERNAME:
			size = min_t(size_t, sizeof(chap_rec.username),
				     param_info->len);
			memcpy(chap_rec.username, param_info->value, size);
			break;
		case ISCSI_CHAP_PARAM_PASSWORD:
			size = min_t(size_t, sizeof(chap_rec.password),
				     param_info->len);
			memcpy(chap_rec.password, param_info->value, size);
			break;
		case ISCSI_CHAP_PARAM_PASSWORD_LEN:
			chap_rec.password_length = param_info->value[0];
			break;
		default:
			ql4_printk(KERN_ERR, ha,
				   "%s: No such sysfs attribute\n", __func__);
			rc = -ENOSYS;
			goto exit_set_chap;
		}
	}

	if (chap_rec.chap_type == CHAP_TYPE_IN)
		type = BIDI_CHAP;
	else
		type = LOCAL_CHAP;

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	mutex_lock(&ha->chap_sem);
	if (chap_rec.chap_tbl_idx < max_chap_entries) {
		rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx,
					       &chap_entry);
		if (!rc) {
			if (!(type == qla4xxx_get_chap_type(chap_entry))) {
				ql4_printk(KERN_INFO, ha,
					   "Type mismatch for CHAP entry %d\n",
					   chap_rec.chap_tbl_idx);
				rc = -EINVAL;
				goto exit_unlock_chap;
			}

			/* If chap index is in use then don't modify it */
			rc = qla4xxx_is_chap_active(shost,
						    chap_rec.chap_tbl_idx);
			if (rc) {
				ql4_printk(KERN_INFO, ha,
					   "CHAP entry %d is in use\n",
					   chap_rec.chap_tbl_idx);
				rc = -EBUSY;
				goto exit_unlock_chap;
			}
		}
	} else {
		rc = qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx);
		if (rc) {
			ql4_printk(KERN_INFO, ha, "CHAP entry not available\n");
			rc = -EBUSY;
			goto exit_unlock_chap;
		}
	}

	rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password,
			      chap_rec.chap_tbl_idx, type);

exit_unlock_chap:
	mutex_unlock(&ha->chap_sem);

exit_set_chap:
	return rc;
}


static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct iscsi_offload_host_stats *host_stats = NULL;
	int host_stats_size;
	int ret = 0;
	int ddb_idx = 0;
	struct ql_iscsi_stats *ql_iscsi_stats = NULL;
	int stats_size;
	dma_addr_t iscsi_stats_dma;

	DEBUG2(ql4_printk(KERN_INFO, ha, "Func: %s\n", __func__));

	host_stats_size = sizeof(struct iscsi_offload_host_stats);

	if (host_stats_size != len) {
		ql4_printk(KERN_INFO, ha, "%s: host_stats size mismatch expected = %d, is = %d\n",
			   __func__, len, host_stats_size);
		ret = -EINVAL;
		goto exit_host_stats;
	}
	host_stats = (struct iscsi_offload_host_stats *)buf;

	if (!buf) {
		ret = -ENOMEM;
		goto exit_host_stats;
	}

	stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));

	ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
					    &iscsi_stats_dma, GFP_KERNEL);
	if (!ql_iscsi_stats) {
		ql4_printk(KERN_ERR, ha,
			   "Unable to allocate memory for iscsi stats\n");
		ret = -ENOMEM;
		goto exit_host_stats;
	}

	ret = qla4xxx_get_mgmt_data(ha, ddb_idx, stats_size,
				    iscsi_stats_dma);
	if (ret != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha,
			   "Unable to retrieve iscsi stats\n");
		ret = -EIO;
		goto exit_host_stats;
	}
	host_stats->mactx_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_frames);
	host_stats->mactx_bytes = le64_to_cpu(ql_iscsi_stats->mac_tx_bytes);
	host_stats->mactx_multicast_frames =
			le64_to_cpu(ql_iscsi_stats->mac_tx_multicast_frames);
	host_stats->mactx_broadcast_frames =
			le64_to_cpu(ql_iscsi_stats->mac_tx_broadcast_frames);
	host_stats->mactx_pause_frames =
			le64_to_cpu(ql_iscsi_stats->mac_tx_pause_frames);
	host_stats->mactx_control_frames =
			le64_to_cpu(ql_iscsi_stats->mac_tx_control_frames);
	host_stats->mactx_deferral =
			le64_to_cpu(ql_iscsi_stats->mac_tx_deferral);
	host_stats->mactx_excess_deferral =
			le64_to_cpu(ql_iscsi_stats->mac_tx_excess_deferral);
	host_stats->mactx_late_collision =
			le64_to_cpu(ql_iscsi_stats->mac_tx_late_collision);
	host_stats->mactx_abort = le64_to_cpu(ql_iscsi_stats->mac_tx_abort);
	host_stats->mactx_single_collision =
			le64_to_cpu(ql_iscsi_stats->mac_tx_single_collision);
	host_stats->mactx_multiple_collision =
			le64_to_cpu(ql_iscsi_stats->mac_tx_multiple_collision);
	host_stats->mactx_collision =
			le64_to_cpu(ql_iscsi_stats->mac_tx_collision);
	host_stats->mactx_frames_dropped =
			le64_to_cpu(ql_iscsi_stats->mac_tx_frames_dropped);
	host_stats->mactx_jumbo_frames =
			le64_to_cpu(ql_iscsi_stats->mac_tx_jumbo_frames);
	host_stats->macrx_frames = le64_to_cpu(ql_iscsi_stats->mac_rx_frames);
	host_stats->macrx_bytes = le64_to_cpu(ql_iscsi_stats->mac_rx_bytes);
	host_stats->macrx_unknown_control_frames =
		le64_to_cpu(ql_iscsi_stats->mac_rx_unknown_control_frames);
	host_stats->macrx_pause_frames =
			le64_to_cpu(ql_iscsi_stats->mac_rx_pause_frames);
	host_stats->macrx_control_frames =
			le64_to_cpu(ql_iscsi_stats->mac_rx_control_frames);
	host_stats->macrx_dribble =
			le64_to_cpu(ql_iscsi_stats->mac_rx_dribble);
	host_stats->macrx_frame_length_error =
			le64_to_cpu(ql_iscsi_stats->mac_rx_frame_length_error);
	host_stats->macrx_jabber = le64_to_cpu(ql_iscsi_stats->mac_rx_jabber);
	host_stats->macrx_carrier_sense_error =
		le64_to_cpu(ql_iscsi_stats->mac_rx_carrier_sense_error);
	host_stats->macrx_frame_discarded =
			le64_to_cpu(ql_iscsi_stats->mac_rx_frame_discarded);
	host_stats->macrx_frames_dropped =
			le64_to_cpu(ql_iscsi_stats->mac_rx_frames_dropped);
	host_stats->mac_crc_error = le64_to_cpu(ql_iscsi_stats->mac_crc_error);
	host_stats->mac_encoding_error =
			le64_to_cpu(ql_iscsi_stats->mac_encoding_error);
	host_stats->macrx_length_error_large =
			le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_large);
	host_stats->macrx_length_error_small =
			le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_small);
	host_stats->macrx_multicast_frames =
			le64_to_cpu(ql_iscsi_stats->mac_rx_multicast_frames);
	host_stats->macrx_broadcast_frames =
			le64_to_cpu(ql_iscsi_stats->mac_rx_broadcast_frames);
	host_stats->iptx_packets = le64_to_cpu(ql_iscsi_stats->ip_tx_packets);
	host_stats->iptx_bytes = le64_to_cpu(ql_iscsi_stats->ip_tx_bytes);
	host_stats->iptx_fragments =
			le64_to_cpu(ql_iscsi_stats->ip_tx_fragments);
	host_stats->iprx_packets = le64_to_cpu(ql_iscsi_stats->ip_rx_packets);
	host_stats->iprx_bytes = le64_to_cpu(ql_iscsi_stats->ip_rx_bytes);
	host_stats->iprx_fragments =
			le64_to_cpu(ql_iscsi_stats->ip_rx_fragments);
	host_stats->ip_datagram_reassembly =
			le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly);
	host_stats->ip_invalid_address_error =
			le64_to_cpu(ql_iscsi_stats->ip_invalid_address_error);
	host_stats->ip_error_packets =
			le64_to_cpu(ql_iscsi_stats->ip_error_packets);
	host_stats->ip_fragrx_overlap =
			le64_to_cpu(ql_iscsi_stats->ip_fragrx_overlap);
	host_stats->ip_fragrx_outoforder =
			le64_to_cpu(ql_iscsi_stats->ip_fragrx_outoforder);
	host_stats->ip_datagram_reassembly_timeout =
		le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly_timeout);
	host_stats->ipv6tx_packets =
			le64_to_cpu(ql_iscsi_stats->ipv6_tx_packets);
	host_stats->ipv6tx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_tx_bytes);
	host_stats->ipv6tx_fragments =
			le64_to_cpu(ql_iscsi_stats->ipv6_tx_fragments);
	host_stats->ipv6rx_packets =
			le64_to_cpu(ql_iscsi_stats->ipv6_rx_packets);
	host_stats->ipv6rx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_rx_bytes);
	host_stats->ipv6rx_fragments =
			le64_to_cpu(ql_iscsi_stats->ipv6_rx_fragments);
	host_stats->ipv6_datagram_reassembly =
			le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly);
	host_stats->ipv6_invalid_address_error =
		le64_to_cpu(ql_iscsi_stats->ipv6_invalid_address_error);
	host_stats->ipv6_error_packets =
			le64_to_cpu(ql_iscsi_stats->ipv6_error_packets);
	host_stats->ipv6_fragrx_overlap =
			le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_overlap);
	host_stats->ipv6_fragrx_outoforder =
			le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_outoforder);
	host_stats->ipv6_datagram_reassembly_timeout =
		le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly_timeout);
	host_stats->tcptx_segments =
			le64_to_cpu(ql_iscsi_stats->tcp_tx_segments);
	host_stats->tcptx_bytes = le64_to_cpu(ql_iscsi_stats->tcp_tx_bytes);
	host_stats->tcprx_segments =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_segments);
	host_stats->tcprx_byte = le64_to_cpu(ql_iscsi_stats->tcp_rx_byte);
	host_stats->tcp_duplicate_ack_retx =
			le64_to_cpu(ql_iscsi_stats->tcp_duplicate_ack_retx);
	host_stats->tcp_retx_timer_expired =
			le64_to_cpu(ql_iscsi_stats->tcp_retx_timer_expired);
	host_stats->tcprx_duplicate_ack =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_duplicate_ack);
	host_stats->tcprx_pure_ackr =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_pure_ackr);
	host_stats->tcptx_delayed_ack =
			le64_to_cpu(ql_iscsi_stats->tcp_tx_delayed_ack);
	host_stats->tcptx_pure_ack =
			le64_to_cpu(ql_iscsi_stats->tcp_tx_pure_ack);
	host_stats->tcprx_segment_error =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_error);
	host_stats->tcprx_segment_outoforder =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_outoforder);
	host_stats->tcprx_window_probe =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_window_probe);
	host_stats->tcprx_window_update =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_window_update);
	host_stats->tcptx_window_probe_persist =
		le64_to_cpu(ql_iscsi_stats->tcp_tx_window_probe_persist);
	host_stats->ecc_error_correction =
			le64_to_cpu(ql_iscsi_stats->ecc_error_correction);
	host_stats->iscsi_pdu_tx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_tx);
	host_stats->iscsi_data_bytes_tx =
			le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_tx);
	host_stats->iscsi_pdu_rx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_rx);
	host_stats->iscsi_data_bytes_rx =
			le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_rx);
	host_stats->iscsi_io_completed =
			le64_to_cpu(ql_iscsi_stats->iscsi_io_completed);
	host_stats->iscsi_unexpected_io_rx =
			le64_to_cpu(ql_iscsi_stats->iscsi_unexpected_io_rx);
	host_stats->iscsi_format_error =
			le64_to_cpu(ql_iscsi_stats->iscsi_format_error);
	host_stats->iscsi_hdr_digest_error =
			le64_to_cpu(ql_iscsi_stats->iscsi_hdr_digest_error);
	host_stats->iscsi_data_digest_error =
			le64_to_cpu(ql_iscsi_stats->iscsi_data_digest_error);
	host_stats->iscsi_sequence_error =
			le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error);
exit_host_stats:
	if (ql_iscsi_stats)
		dma_free_coherent(&ha->pdev->dev, stats_size,
				  ql_iscsi_stats, iscsi_stats_dma);

	ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n",
		   __func__);
	return ret;
}
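
/*
 * qla4xxx_get_iface_param() backs the per-interface sysfs attributes of the
 * iscsi_transport class: it formats the requested network (ISCSI_NET_PARAM)
 * or iface (ISCSI_IFACE_PARAM) setting from ha->ip_config into 'buf' and
 * returns the number of bytes written, or -ENOSYS for unsupported params.
 */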
static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
				   enum iscsi_param_type param_type,
				   int param, char *buf)
{
	struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
	struct scsi_qla_host *ha = to_qla_host(shost);
	int ival;
	char *pval = NULL;
	int len = -ENOSYS;

	if (param_type == ISCSI_NET_PARAM) {
		switch (param) {
		case ISCSI_NET_PARAM_IPV4_ADDR:
			len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
			break;
		case ISCSI_NET_PARAM_IPV4_SUBNET:
			len = sprintf(buf, "%pI4\n",
				      &ha->ip_config.subnet_mask);
			break;
		case ISCSI_NET_PARAM_IPV4_GW:
			len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
			break;
		case ISCSI_NET_PARAM_IFACE_ENABLE:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
				OP_STATE(ha->ip_config.ipv4_options,
					 IPOPT_IPV4_PROTOCOL_ENABLE, pval);
			} else {
				OP_STATE(ha->ip_config.ipv6_options,
					 IPV6_OPT_IPV6_PROTOCOL_ENABLE, pval);
			}

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
			len = sprintf(buf, "%s\n",
				      (ha->ip_config.tcp_options &
				       TCPOPT_DHCP_ENABLE) ?
				      "dhcp" : "static");
			break;
		case ISCSI_NET_PARAM_IPV6_ADDR:
			if (iface->iface_num == 0)
				len = sprintf(buf, "%pI6\n",
					      &ha->ip_config.ipv6_addr0);
			if (iface->iface_num == 1)
				len = sprintf(buf, "%pI6\n",
					      &ha->ip_config.ipv6_addr1);
			break;
		case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
			len = sprintf(buf, "%pI6\n",
				      &ha->ip_config.ipv6_link_local_addr);
			break;
		case ISCSI_NET_PARAM_IPV6_ROUTER:
			len = sprintf(buf, "%pI6\n",
				      &ha->ip_config.ipv6_default_router_addr);
			break;
		case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
			pval = (ha->ip_config.ipv6_addl_options &
				IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
			       "nd" : "static";

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
			pval = (ha->ip_config.ipv6_addl_options &
				IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
			       "auto" : "static";

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_VLAN_ID:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
				ival = ha->ip_config.ipv4_vlan_tag &
				       ISCSI_MAX_VLAN_ID;
			else
				ival = ha->ip_config.ipv6_vlan_tag &
				       ISCSI_MAX_VLAN_ID;

			len = sprintf(buf, "%d\n", ival);
			break;
		case ISCSI_NET_PARAM_VLAN_PRIORITY:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
				ival = (ha->ip_config.ipv4_vlan_tag >> 13) &
				       ISCSI_MAX_VLAN_PRIORITY;
			else
				ival = (ha->ip_config.ipv6_vlan_tag >> 13) &
				       ISCSI_MAX_VLAN_PRIORITY;

			len = sprintf(buf, "%d\n", ival);
			break;
		case ISCSI_NET_PARAM_VLAN_ENABLED:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
				OP_STATE(ha->ip_config.ipv4_options,
					 IPOPT_VLAN_TAGGING_ENABLE, pval);
			} else {
				OP_STATE(ha->ip_config.ipv6_options,
					 IPV6_OPT_VLAN_TAGGING_ENABLE, pval);
			}
			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_MTU:
			len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
			break;
		case ISCSI_NET_PARAM_PORT:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
				len = sprintf(buf, "%d\n",
					      ha->ip_config.ipv4_port);
			else
				len = sprintf(buf, "%d\n",
					      ha->ip_config.ipv6_port);
			break;
		case ISCSI_NET_PARAM_IPADDR_STATE:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
				pval = iscsi_get_ipaddress_state_name(
						ha->ip_config.ipv4_addr_state);
			} else {
				if (iface->iface_num == 0)
					pval = iscsi_get_ipaddress_state_name(
						ha->ip_config.ipv6_addr0_state);
				else if (iface->iface_num == 1)
					pval = iscsi_get_ipaddress_state_name(
						ha->ip_config.ipv6_addr1_state);
			}

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE:
			pval = iscsi_get_ipaddress_state_name(
					ha->ip_config.ipv6_link_local_state);
			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV6_ROUTER_STATE:
			pval = iscsi_get_router_state_name(
				      ha->ip_config.ipv6_default_router_state);
			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_DELAYED_ACK_EN:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
				OP_STATE(~ha->ip_config.tcp_options,
					 TCPOPT_DELAYED_ACK_DISABLE, pval);
			} else {
				OP_STATE(~ha->ip_config.ipv6_tcp_options,
					 IPV6_TCPOPT_DELAYED_ACK_DISABLE, pval);
			}
			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
				OP_STATE(~ha->ip_config.tcp_options,
					 TCPOPT_NAGLE_ALGO_DISABLE, pval);
			} else {
				OP_STATE(~ha->ip_config.ipv6_tcp_options,
					 IPV6_TCPOPT_NAGLE_ALGO_DISABLE, pval);
			}
			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
				OP_STATE(~ha->ip_config.tcp_options,
					 TCPOPT_WINDOW_SCALE_DISABLE, pval);
			} else {
				OP_STATE(~ha->ip_config.ipv6_tcp_options,
					 IPV6_TCPOPT_WINDOW_SCALE_DISABLE,
					 pval);
			}
			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_TCP_WSF:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
				len = sprintf(buf, "%d\n",
					      ha->ip_config.tcp_wsf);
			else
				len = sprintf(buf, "%d\n",
					      ha->ip_config.ipv6_tcp_wsf);
			break;
		case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
				ival = (ha->ip_config.tcp_options &
					TCPOPT_TIMER_SCALE) >> 1;
			else
				ival = (ha->ip_config.ipv6_tcp_options &
					IPV6_TCPOPT_TIMER_SCALE) >> 1;

			len = sprintf(buf, "%d\n", ival);
			break;
		case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
				OP_STATE(ha->ip_config.tcp_options,
					 TCPOPT_TIMESTAMP_ENABLE, pval);
			} else {
				OP_STATE(ha->ip_config.ipv6_tcp_options,
					 IPV6_TCPOPT_TIMESTAMP_EN, pval);
			}
			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_CACHE_ID:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
				len = sprintf(buf, "%d\n",
					      ha->ip_config.ipv4_cache_id);
			else
				len = sprintf(buf, "%d\n",
					      ha->ip_config.ipv6_cache_id);
			break;
		case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
			OP_STATE(ha->ip_config.tcp_options,
				 TCPOPT_DNS_SERVER_IP_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
			OP_STATE(ha->ip_config.tcp_options,
				 TCPOPT_SLP_DA_INFO_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_TOS_EN:
			OP_STATE(ha->ip_config.ipv4_options,
				 IPOPT_IPV4_TOS_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_TOS:
			len = sprintf(buf, "%d\n", ha->ip_config.ipv4_tos);
			break;
		case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
			OP_STATE(ha->ip_config.ipv4_options,
				 IPOPT_GRAT_ARP_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
			OP_STATE(ha->ip_config.ipv4_options, IPOPT_ALT_CID_EN,
				 pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
			pval = (ha->ip_config.ipv4_alt_cid_len) ?
			       (char *)ha->ip_config.ipv4_alt_cid : "";

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
			OP_STATE(ha->ip_config.ipv4_options,
				 IPOPT_REQ_VID_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
			OP_STATE(ha->ip_config.ipv4_options,
				 IPOPT_USE_VID_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
			pval = (ha->ip_config.ipv4_vid_len) ?
			       (char *)ha->ip_config.ipv4_vid : "";

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
			OP_STATE(ha->ip_config.ipv4_options,
				 IPOPT_LEARN_IQN_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
			OP_STATE(~ha->ip_config.ipv4_options,
				 IPOPT_FRAGMENTATION_DISABLE, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
			OP_STATE(ha->ip_config.ipv4_options,
				 IPOPT_IN_FORWARD_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_REDIRECT_EN:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
				OP_STATE(ha->ip_config.ipv4_options,
					 IPOPT_ARP_REDIRECT_EN, pval);
			} else {
				OP_STATE(ha->ip_config.ipv6_options,
					 IPV6_OPT_REDIRECT_EN, pval);
			}
			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_TTL:
			len = sprintf(buf, "%d\n", ha->ip_config.ipv4_ttl);
			break;
		case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
			OP_STATE(ha->ip_config.ipv6_options,
				 IPV6_OPT_GRAT_NEIGHBOR_ADV_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV6_MLD_EN:
			OP_STATE(ha->ip_config.ipv6_addl_options,
				 IPV6_ADDOPT_MLD_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
			len = sprintf(buf, "%u\n", ha->ip_config.ipv6_flow_lbl);
			break;
		case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
			len = sprintf(buf, "%d\n",
				      ha->ip_config.ipv6_traffic_class);
			break;
		case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
			len = sprintf(buf, "%d\n",
				      ha->ip_config.ipv6_hop_limit);
			break;
		case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
			len = sprintf(buf, "%d\n",
				      ha->ip_config.ipv6_nd_reach_time);
			break;
		case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
			len = sprintf(buf, "%d\n",
				      ha->ip_config.ipv6_nd_rexmit_timer);
			break;
		case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
			len = sprintf(buf, "%d\n",
				      ha->ip_config.ipv6_nd_stale_timeout);
			break;
		case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
			len = sprintf(buf, "%d\n",
				      ha->ip_config.ipv6_dup_addr_detect_count);
			break;
		case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
			len = sprintf(buf, "%d\n",
				      ha->ip_config.ipv6_gw_advrt_mtu);
			break;
		default:
			len = -ENOSYS;
		}
	} else if (param_type == ISCSI_IFACE_PARAM) {
		switch (param) {
		case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
			len = sprintf(buf, "%d\n", ha->ip_config.def_timeout);
			break;
		case ISCSI_IFACE_PARAM_HDRDGST_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_HEADER_DIGEST_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_DATADGST_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_DATA_DIGEST_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_IMM_DATA_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_IMMEDIATE_DATA_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_INITIAL_R2T_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_DATA_SEQ_INORDER_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_DATA_PDU_INORDER_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_ERL:
			len = sprintf(buf, "%d\n",
				      (ha->ip_config.iscsi_options &
				       ISCSIOPTS_ERL));
			break;
		case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
			len = sprintf(buf, "%u\n",
				      ha->ip_config.iscsi_max_pdu_size *
				      BYTE_UNITS);
			break;
		case ISCSI_IFACE_PARAM_FIRST_BURST:
			len = sprintf(buf, "%u\n",
				      ha->ip_config.iscsi_first_burst_len *
				      BYTE_UNITS);
			break;
		case ISCSI_IFACE_PARAM_MAX_R2T:
			len = sprintf(buf, "%d\n",
				      ha->ip_config.iscsi_max_outstnd_r2t);
			break;
		case ISCSI_IFACE_PARAM_MAX_BURST:
			len = sprintf(buf, "%u\n",
				      ha->ip_config.iscsi_max_burst_len *
				      BYTE_UNITS);
			break;
		case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_CHAP_AUTH_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_BIDI_CHAP_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_DISCOVERY_AUTH_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_DISCOVERY_LOGOUT_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_STRICT_LOGIN_COMP_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_INITIATOR_NAME:
			len = sprintf(buf, "%s\n", ha->ip_config.iscsi_name);
			break;
		default:
			len = -ENOSYS;
		}
	}

	return len;
}

static struct iscsi_endpoint *
qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
		   int non_blocking)
{
	int ret;
	struct iscsi_endpoint *ep;
	struct qla_endpoint *qla_ep;
	struct scsi_qla_host *ha;
	struct sockaddr_in *addr;
	struct sockaddr_in6 *addr6;

	if (!shost) {
		ret = -ENXIO;
		pr_err("%s: shost is NULL\n", __func__);
		return ERR_PTR(ret);
	}

	ha = iscsi_host_priv(shost);
	ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
	if (!ep) {
		ret = -ENOMEM;
		return ERR_PTR(ret);
	}

	qla_ep = ep->dd_data;
	memset(qla_ep, 0, sizeof(struct qla_endpoint));
	if (dst_addr->sa_family == AF_INET) {
		memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
		addr = (struct sockaddr_in *)&qla_ep->dst_addr;
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
				  (char *)&addr->sin_addr));
	} else if (dst_addr->sa_family == AF_INET6) {
		memcpy(&qla_ep->dst_addr, dst_addr,
		       sizeof(struct sockaddr_in6));
		addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
				  (char *)&addr6->sin6_addr));
	} else {
		ql4_printk(KERN_WARNING, ha, "%s: Invalid endpoint\n",
			   __func__);
	}

	qla_ep->host = shost;

	return ep;
}
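
/*
 * Connection setup is offloaded to the firmware, so ep_poll does not track a
 * TCP handshake; it simply reports whether the adapter is up and not busy
 * building the DDB list.
 */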
*ep, int timeout_ms) 1750 { 1751 struct qla_endpoint *qla_ep; 1752 struct scsi_qla_host *ha; 1753 int ret = 0; 1754 1755 qla_ep = ep->dd_data; 1756 ha = to_qla_host(qla_ep->host); 1757 DEBUG2(pr_info_ratelimited("%s: host: %ld\n", __func__, ha->host_no)); 1758 1759 if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags)) 1760 ret = 1; 1761 1762 return ret; 1763 } 1764 1765 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep) 1766 { 1767 struct qla_endpoint *qla_ep; 1768 struct scsi_qla_host *ha; 1769 1770 qla_ep = ep->dd_data; 1771 ha = to_qla_host(qla_ep->host); 1772 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, 1773 ha->host_no)); 1774 iscsi_destroy_endpoint(ep); 1775 } 1776 1777 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep, 1778 enum iscsi_param param, 1779 char *buf) 1780 { 1781 struct qla_endpoint *qla_ep = ep->dd_data; 1782 struct sockaddr *dst_addr; 1783 struct scsi_qla_host *ha; 1784 1785 if (!qla_ep) 1786 return -ENOTCONN; 1787 1788 ha = to_qla_host(qla_ep->host); 1789 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, 1790 ha->host_no)); 1791 1792 switch (param) { 1793 case ISCSI_PARAM_CONN_PORT: 1794 case ISCSI_PARAM_CONN_ADDRESS: 1795 dst_addr = (struct sockaddr *)&qla_ep->dst_addr; 1796 if (!dst_addr) 1797 return -ENOTCONN; 1798 1799 return iscsi_conn_get_addr_param((struct sockaddr_storage *) 1800 &qla_ep->dst_addr, param, buf); 1801 default: 1802 return -ENOSYS; 1803 } 1804 } 1805 1806 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn, 1807 struct iscsi_stats *stats) 1808 { 1809 struct iscsi_session *sess; 1810 struct iscsi_cls_session *cls_sess; 1811 struct ddb_entry *ddb_entry; 1812 struct scsi_qla_host *ha; 1813 struct ql_iscsi_stats *ql_iscsi_stats; 1814 int stats_size; 1815 int ret; 1816 dma_addr_t iscsi_stats_dma; 1817 1818 cls_sess = iscsi_conn_to_session(cls_conn); 1819 sess = cls_sess->dd_data; 1820 ddb_entry = sess->dd_data; 1821 ha = ddb_entry->ha; 1822 1823 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, 1824 ha->host_no)); 1825 stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats)); 1826 /* Allocate memory */ 1827 ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size, 1828 &iscsi_stats_dma, GFP_KERNEL); 1829 if (!ql_iscsi_stats) { 1830 ql4_printk(KERN_ERR, ha, 1831 "Unable to allocate memory for iscsi stats\n"); 1832 goto exit_get_stats; 1833 } 1834 1835 ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size, 1836 iscsi_stats_dma); 1837 if (ret != QLA_SUCCESS) { 1838 ql4_printk(KERN_ERR, ha, 1839 "Unable to retrieve iscsi stats\n"); 1840 goto free_stats; 1841 } 1842 1843 /* octets */ 1844 stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets); 1845 stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets); 1846 /* xmit pdus */ 1847 stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus); 1848 stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus); 1849 stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus); 1850 stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus); 1851 stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus); 1852 stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus); 1853 stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus); 1854 stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus); 1855 /* recv pdus */ 1856 stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus); 1857 stats->scsirsp_pdus = 
le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus); 1858 stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus); 1859 stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus); 1860 stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus); 1861 stats->logoutrsp_pdus = 1862 le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus); 1863 stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus); 1864 stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus); 1865 stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus); 1866 1867 free_stats: 1868 dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats, 1869 iscsi_stats_dma); 1870 exit_get_stats: 1871 return; 1872 } 1873 1874 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc) 1875 { 1876 struct iscsi_cls_session *session; 1877 unsigned long flags; 1878 enum blk_eh_timer_return ret = BLK_EH_DONE; 1879 1880 session = starget_to_session(scsi_target(sc->device)); 1881 1882 spin_lock_irqsave(&session->lock, flags); 1883 if (session->state == ISCSI_SESSION_FAILED) 1884 ret = BLK_EH_RESET_TIMER; 1885 spin_unlock_irqrestore(&session->lock, flags); 1886 1887 return ret; 1888 } 1889 1890 static void qla4xxx_set_port_speed(struct Scsi_Host *shost) 1891 { 1892 struct scsi_qla_host *ha = to_qla_host(shost); 1893 struct iscsi_cls_host *ihost = shost->shost_data; 1894 uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN; 1895 1896 qla4xxx_get_firmware_state(ha); 1897 1898 switch (ha->addl_fw_state & 0x0F00) { 1899 case FW_ADDSTATE_LINK_SPEED_10MBPS: 1900 speed = ISCSI_PORT_SPEED_10MBPS; 1901 break; 1902 case FW_ADDSTATE_LINK_SPEED_100MBPS: 1903 speed = ISCSI_PORT_SPEED_100MBPS; 1904 break; 1905 case FW_ADDSTATE_LINK_SPEED_1GBPS: 1906 speed = ISCSI_PORT_SPEED_1GBPS; 1907 break; 1908 case FW_ADDSTATE_LINK_SPEED_10GBPS: 1909 speed = ISCSI_PORT_SPEED_10GBPS; 1910 break; 1911 } 1912 ihost->port_speed = speed; 1913 } 1914 1915 static void qla4xxx_set_port_state(struct Scsi_Host *shost) 1916 { 1917 struct scsi_qla_host *ha = to_qla_host(shost); 1918 struct iscsi_cls_host *ihost = shost->shost_data; 1919 uint32_t state = ISCSI_PORT_STATE_DOWN; 1920 1921 if (test_bit(AF_LINK_UP, &ha->flags)) 1922 state = ISCSI_PORT_STATE_UP; 1923 1924 ihost->port_state = state; 1925 } 1926 1927 static int qla4xxx_host_get_param(struct Scsi_Host *shost, 1928 enum iscsi_host_param param, char *buf) 1929 { 1930 struct scsi_qla_host *ha = to_qla_host(shost); 1931 int len; 1932 1933 switch (param) { 1934 case ISCSI_HOST_PARAM_HWADDRESS: 1935 len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN); 1936 break; 1937 case ISCSI_HOST_PARAM_IPADDRESS: 1938 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address); 1939 break; 1940 case ISCSI_HOST_PARAM_INITIATOR_NAME: 1941 len = sprintf(buf, "%s\n", ha->name_string); 1942 break; 1943 case ISCSI_HOST_PARAM_PORT_STATE: 1944 qla4xxx_set_port_state(shost); 1945 len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost)); 1946 break; 1947 case ISCSI_HOST_PARAM_PORT_SPEED: 1948 qla4xxx_set_port_speed(shost); 1949 len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost)); 1950 break; 1951 default: 1952 return -ENOSYS; 1953 } 1954 1955 return len; 1956 } 1957 1958 static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha) 1959 { 1960 if (ha->iface_ipv4) 1961 return; 1962 1963 /* IPv4 */ 1964 ha->iface_ipv4 = iscsi_create_iface(ha->host, 1965 &qla4xxx_iscsi_transport, 1966 ISCSI_IFACE_TYPE_IPV4, 0, 0); 1967 if (!ha->iface_ipv4) 1968 ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI " 1969 
"iface0.\n"); 1970 } 1971 1972 static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha) 1973 { 1974 if (!ha->iface_ipv6_0) 1975 /* IPv6 iface-0 */ 1976 ha->iface_ipv6_0 = iscsi_create_iface(ha->host, 1977 &qla4xxx_iscsi_transport, 1978 ISCSI_IFACE_TYPE_IPV6, 0, 1979 0); 1980 if (!ha->iface_ipv6_0) 1981 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI " 1982 "iface0.\n"); 1983 1984 if (!ha->iface_ipv6_1) 1985 /* IPv6 iface-1 */ 1986 ha->iface_ipv6_1 = iscsi_create_iface(ha->host, 1987 &qla4xxx_iscsi_transport, 1988 ISCSI_IFACE_TYPE_IPV6, 1, 1989 0); 1990 if (!ha->iface_ipv6_1) 1991 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI " 1992 "iface1.\n"); 1993 } 1994 1995 static void qla4xxx_create_ifaces(struct scsi_qla_host *ha) 1996 { 1997 if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE) 1998 qla4xxx_create_ipv4_iface(ha); 1999 2000 if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE) 2001 qla4xxx_create_ipv6_iface(ha); 2002 } 2003 2004 static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha) 2005 { 2006 if (ha->iface_ipv4) { 2007 iscsi_destroy_iface(ha->iface_ipv4); 2008 ha->iface_ipv4 = NULL; 2009 } 2010 } 2011 2012 static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha) 2013 { 2014 if (ha->iface_ipv6_0) { 2015 iscsi_destroy_iface(ha->iface_ipv6_0); 2016 ha->iface_ipv6_0 = NULL; 2017 } 2018 if (ha->iface_ipv6_1) { 2019 iscsi_destroy_iface(ha->iface_ipv6_1); 2020 ha->iface_ipv6_1 = NULL; 2021 } 2022 } 2023 2024 static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha) 2025 { 2026 qla4xxx_destroy_ipv4_iface(ha); 2027 qla4xxx_destroy_ipv6_iface(ha); 2028 } 2029 2030 static void qla4xxx_set_ipv6(struct scsi_qla_host *ha, 2031 struct iscsi_iface_param_info *iface_param, 2032 struct addr_ctrl_blk *init_fw_cb) 2033 { 2034 /* 2035 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg. 2036 * iface_num 1 is valid only for IPv6 Addr. 
2037 */ 2038 switch (iface_param->param) { 2039 case ISCSI_NET_PARAM_IPV6_ADDR: 2040 if (iface_param->iface_num & 0x1) 2041 /* IPv6 Addr 1 */ 2042 memcpy(init_fw_cb->ipv6_addr1, iface_param->value, 2043 sizeof(init_fw_cb->ipv6_addr1)); 2044 else 2045 /* IPv6 Addr 0 */ 2046 memcpy(init_fw_cb->ipv6_addr0, iface_param->value, 2047 sizeof(init_fw_cb->ipv6_addr0)); 2048 break; 2049 case ISCSI_NET_PARAM_IPV6_LINKLOCAL: 2050 if (iface_param->iface_num & 0x1) 2051 break; 2052 memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8], 2053 sizeof(init_fw_cb->ipv6_if_id)); 2054 break; 2055 case ISCSI_NET_PARAM_IPV6_ROUTER: 2056 if (iface_param->iface_num & 0x1) 2057 break; 2058 memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value, 2059 sizeof(init_fw_cb->ipv6_dflt_rtr_addr)); 2060 break; 2061 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: 2062 /* Autocfg applies to even interface */ 2063 if (iface_param->iface_num & 0x1) 2064 break; 2065 2066 if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE) 2067 init_fw_cb->ipv6_addtl_opts &= 2068 cpu_to_le16( 2069 ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE); 2070 else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE) 2071 init_fw_cb->ipv6_addtl_opts |= 2072 cpu_to_le16( 2073 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE); 2074 else 2075 ql4_printk(KERN_ERR, ha, 2076 "Invalid autocfg setting for IPv6 addr\n"); 2077 break; 2078 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: 2079 /* Autocfg applies to even interface */ 2080 if (iface_param->iface_num & 0x1) 2081 break; 2082 2083 if (iface_param->value[0] == 2084 ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE) 2085 init_fw_cb->ipv6_addtl_opts |= cpu_to_le16( 2086 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR); 2087 else if (iface_param->value[0] == 2088 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE) 2089 init_fw_cb->ipv6_addtl_opts &= cpu_to_le16( 2090 ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR); 2091 else 2092 ql4_printk(KERN_ERR, ha, 2093 "Invalid autocfg setting for IPv6 linklocal addr\n"); 2094 break; 2095 case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG: 2096 /* Autocfg applies to even interface */ 2097 if (iface_param->iface_num & 0x1) 2098 break; 2099 2100 if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE) 2101 memset(init_fw_cb->ipv6_dflt_rtr_addr, 0, 2102 sizeof(init_fw_cb->ipv6_dflt_rtr_addr)); 2103 break; 2104 case ISCSI_NET_PARAM_IFACE_ENABLE: 2105 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) { 2106 init_fw_cb->ipv6_opts |= 2107 cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE); 2108 qla4xxx_create_ipv6_iface(ha); 2109 } else { 2110 init_fw_cb->ipv6_opts &= 2111 cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE & 2112 0xFFFF); 2113 qla4xxx_destroy_ipv6_iface(ha); 2114 } 2115 break; 2116 case ISCSI_NET_PARAM_VLAN_TAG: 2117 if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag)) 2118 break; 2119 init_fw_cb->ipv6_vlan_tag = 2120 cpu_to_be16(*(uint16_t *)iface_param->value); 2121 break; 2122 case ISCSI_NET_PARAM_VLAN_ENABLED: 2123 if (iface_param->value[0] == ISCSI_VLAN_ENABLE) 2124 init_fw_cb->ipv6_opts |= 2125 cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE); 2126 else 2127 init_fw_cb->ipv6_opts &= 2128 cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE); 2129 break; 2130 case ISCSI_NET_PARAM_MTU: 2131 init_fw_cb->eth_mtu_size = 2132 cpu_to_le16(*(uint16_t *)iface_param->value); 2133 break; 2134 case ISCSI_NET_PARAM_PORT: 2135 /* Autocfg applies to even interface */ 2136 if (iface_param->iface_num & 0x1) 2137 break; 2138 2139 init_fw_cb->ipv6_port = 2140 cpu_to_le16(*(uint16_t *)iface_param->value); 2141 break; 2142 case 
ISCSI_NET_PARAM_DELAYED_ACK_EN: 2143 if (iface_param->iface_num & 0x1) 2144 break; 2145 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2146 init_fw_cb->ipv6_tcp_opts |= 2147 cpu_to_le16(IPV6_TCPOPT_DELAYED_ACK_DISABLE); 2148 else 2149 init_fw_cb->ipv6_tcp_opts &= 2150 cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE & 2151 0xFFFF); 2152 break; 2153 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: 2154 if (iface_param->iface_num & 0x1) 2155 break; 2156 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2157 init_fw_cb->ipv6_tcp_opts |= 2158 cpu_to_le16(IPV6_TCPOPT_NAGLE_ALGO_DISABLE); 2159 else 2160 init_fw_cb->ipv6_tcp_opts &= 2161 cpu_to_le16(~IPV6_TCPOPT_NAGLE_ALGO_DISABLE); 2162 break; 2163 case ISCSI_NET_PARAM_TCP_WSF_DISABLE: 2164 if (iface_param->iface_num & 0x1) 2165 break; 2166 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2167 init_fw_cb->ipv6_tcp_opts |= 2168 cpu_to_le16(IPV6_TCPOPT_WINDOW_SCALE_DISABLE); 2169 else 2170 init_fw_cb->ipv6_tcp_opts &= 2171 cpu_to_le16(~IPV6_TCPOPT_WINDOW_SCALE_DISABLE); 2172 break; 2173 case ISCSI_NET_PARAM_TCP_WSF: 2174 if (iface_param->iface_num & 0x1) 2175 break; 2176 init_fw_cb->ipv6_tcp_wsf = iface_param->value[0]; 2177 break; 2178 case ISCSI_NET_PARAM_TCP_TIMER_SCALE: 2179 if (iface_param->iface_num & 0x1) 2180 break; 2181 init_fw_cb->ipv6_tcp_opts &= 2182 cpu_to_le16(~IPV6_TCPOPT_TIMER_SCALE); 2183 init_fw_cb->ipv6_tcp_opts |= 2184 cpu_to_le16((iface_param->value[0] << 1) & 2185 IPV6_TCPOPT_TIMER_SCALE); 2186 break; 2187 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: 2188 if (iface_param->iface_num & 0x1) 2189 break; 2190 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2191 init_fw_cb->ipv6_tcp_opts |= 2192 cpu_to_le16(IPV6_TCPOPT_TIMESTAMP_EN); 2193 else 2194 init_fw_cb->ipv6_tcp_opts &= 2195 cpu_to_le16(~IPV6_TCPOPT_TIMESTAMP_EN); 2196 break; 2197 case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: 2198 if (iface_param->iface_num & 0x1) 2199 break; 2200 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2201 init_fw_cb->ipv6_opts |= 2202 cpu_to_le16(IPV6_OPT_GRAT_NEIGHBOR_ADV_EN); 2203 else 2204 init_fw_cb->ipv6_opts &= 2205 cpu_to_le16(~IPV6_OPT_GRAT_NEIGHBOR_ADV_EN); 2206 break; 2207 case ISCSI_NET_PARAM_REDIRECT_EN: 2208 if (iface_param->iface_num & 0x1) 2209 break; 2210 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2211 init_fw_cb->ipv6_opts |= 2212 cpu_to_le16(IPV6_OPT_REDIRECT_EN); 2213 else 2214 init_fw_cb->ipv6_opts &= 2215 cpu_to_le16(~IPV6_OPT_REDIRECT_EN); 2216 break; 2217 case ISCSI_NET_PARAM_IPV6_MLD_EN: 2218 if (iface_param->iface_num & 0x1) 2219 break; 2220 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2221 init_fw_cb->ipv6_addtl_opts |= 2222 cpu_to_le16(IPV6_ADDOPT_MLD_EN); 2223 else 2224 init_fw_cb->ipv6_addtl_opts &= 2225 cpu_to_le16(~IPV6_ADDOPT_MLD_EN); 2226 break; 2227 case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: 2228 if (iface_param->iface_num & 0x1) 2229 break; 2230 init_fw_cb->ipv6_flow_lbl = 2231 cpu_to_le16(*(uint16_t *)iface_param->value); 2232 break; 2233 case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: 2234 if (iface_param->iface_num & 0x1) 2235 break; 2236 init_fw_cb->ipv6_traffic_class = iface_param->value[0]; 2237 break; 2238 case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: 2239 if (iface_param->iface_num & 0x1) 2240 break; 2241 init_fw_cb->ipv6_hop_limit = iface_param->value[0]; 2242 break; 2243 case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: 2244 if (iface_param->iface_num & 0x1) 2245 break; 2246 init_fw_cb->ipv6_nd_reach_time = 2247 cpu_to_le32(*(uint32_t *)iface_param->value); 2248 break; 2249 case 
ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: 2250 if (iface_param->iface_num & 0x1) 2251 break; 2252 init_fw_cb->ipv6_nd_rexmit_timer = 2253 cpu_to_le32(*(uint32_t *)iface_param->value); 2254 break; 2255 case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: 2256 if (iface_param->iface_num & 0x1) 2257 break; 2258 init_fw_cb->ipv6_nd_stale_timeout = 2259 cpu_to_le32(*(uint32_t *)iface_param->value); 2260 break; 2261 case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: 2262 if (iface_param->iface_num & 0x1) 2263 break; 2264 init_fw_cb->ipv6_dup_addr_detect_count = iface_param->value[0]; 2265 break; 2266 case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: 2267 if (iface_param->iface_num & 0x1) 2268 break; 2269 init_fw_cb->ipv6_gw_advrt_mtu = 2270 cpu_to_le32(*(uint32_t *)iface_param->value); 2271 break; 2272 default: 2273 ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n", 2274 iface_param->param); 2275 break; 2276 } 2277 } 2278 2279 static void qla4xxx_set_ipv4(struct scsi_qla_host *ha, 2280 struct iscsi_iface_param_info *iface_param, 2281 struct addr_ctrl_blk *init_fw_cb) 2282 { 2283 switch (iface_param->param) { 2284 case ISCSI_NET_PARAM_IPV4_ADDR: 2285 memcpy(init_fw_cb->ipv4_addr, iface_param->value, 2286 sizeof(init_fw_cb->ipv4_addr)); 2287 break; 2288 case ISCSI_NET_PARAM_IPV4_SUBNET: 2289 memcpy(init_fw_cb->ipv4_subnet, iface_param->value, 2290 sizeof(init_fw_cb->ipv4_subnet)); 2291 break; 2292 case ISCSI_NET_PARAM_IPV4_GW: 2293 memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value, 2294 sizeof(init_fw_cb->ipv4_gw_addr)); 2295 break; 2296 case ISCSI_NET_PARAM_IPV4_BOOTPROTO: 2297 if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP) 2298 init_fw_cb->ipv4_tcp_opts |= 2299 cpu_to_le16(TCPOPT_DHCP_ENABLE); 2300 else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC) 2301 init_fw_cb->ipv4_tcp_opts &= 2302 cpu_to_le16(~TCPOPT_DHCP_ENABLE); 2303 else 2304 ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n"); 2305 break; 2306 case ISCSI_NET_PARAM_IFACE_ENABLE: 2307 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) { 2308 init_fw_cb->ipv4_ip_opts |= 2309 cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE); 2310 qla4xxx_create_ipv4_iface(ha); 2311 } else { 2312 init_fw_cb->ipv4_ip_opts &= 2313 cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE & 2314 0xFFFF); 2315 qla4xxx_destroy_ipv4_iface(ha); 2316 } 2317 break; 2318 case ISCSI_NET_PARAM_VLAN_TAG: 2319 if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag)) 2320 break; 2321 init_fw_cb->ipv4_vlan_tag = 2322 cpu_to_be16(*(uint16_t *)iface_param->value); 2323 break; 2324 case ISCSI_NET_PARAM_VLAN_ENABLED: 2325 if (iface_param->value[0] == ISCSI_VLAN_ENABLE) 2326 init_fw_cb->ipv4_ip_opts |= 2327 cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE); 2328 else 2329 init_fw_cb->ipv4_ip_opts &= 2330 cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE); 2331 break; 2332 case ISCSI_NET_PARAM_MTU: 2333 init_fw_cb->eth_mtu_size = 2334 cpu_to_le16(*(uint16_t *)iface_param->value); 2335 break; 2336 case ISCSI_NET_PARAM_PORT: 2337 init_fw_cb->ipv4_port = 2338 cpu_to_le16(*(uint16_t *)iface_param->value); 2339 break; 2340 case ISCSI_NET_PARAM_DELAYED_ACK_EN: 2341 if (iface_param->iface_num & 0x1) 2342 break; 2343 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2344 init_fw_cb->ipv4_tcp_opts |= 2345 cpu_to_le16(TCPOPT_DELAYED_ACK_DISABLE); 2346 else 2347 init_fw_cb->ipv4_tcp_opts &= 2348 cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE & 2349 0xFFFF); 2350 break; 2351 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: 2352 if (iface_param->iface_num & 0x1) 2353 break; 2354 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2355 init_fw_cb->ipv4_tcp_opts 
|= 2356 cpu_to_le16(TCPOPT_NAGLE_ALGO_DISABLE); 2357 else 2358 init_fw_cb->ipv4_tcp_opts &= 2359 cpu_to_le16(~TCPOPT_NAGLE_ALGO_DISABLE); 2360 break; 2361 case ISCSI_NET_PARAM_TCP_WSF_DISABLE: 2362 if (iface_param->iface_num & 0x1) 2363 break; 2364 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2365 init_fw_cb->ipv4_tcp_opts |= 2366 cpu_to_le16(TCPOPT_WINDOW_SCALE_DISABLE); 2367 else 2368 init_fw_cb->ipv4_tcp_opts &= 2369 cpu_to_le16(~TCPOPT_WINDOW_SCALE_DISABLE); 2370 break; 2371 case ISCSI_NET_PARAM_TCP_WSF: 2372 if (iface_param->iface_num & 0x1) 2373 break; 2374 init_fw_cb->ipv4_tcp_wsf = iface_param->value[0]; 2375 break; 2376 case ISCSI_NET_PARAM_TCP_TIMER_SCALE: 2377 if (iface_param->iface_num & 0x1) 2378 break; 2379 init_fw_cb->ipv4_tcp_opts &= cpu_to_le16(~TCPOPT_TIMER_SCALE); 2380 init_fw_cb->ipv4_tcp_opts |= 2381 cpu_to_le16((iface_param->value[0] << 1) & 2382 TCPOPT_TIMER_SCALE); 2383 break; 2384 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: 2385 if (iface_param->iface_num & 0x1) 2386 break; 2387 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2388 init_fw_cb->ipv4_tcp_opts |= 2389 cpu_to_le16(TCPOPT_TIMESTAMP_ENABLE); 2390 else 2391 init_fw_cb->ipv4_tcp_opts &= 2392 cpu_to_le16(~TCPOPT_TIMESTAMP_ENABLE); 2393 break; 2394 case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: 2395 if (iface_param->iface_num & 0x1) 2396 break; 2397 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2398 init_fw_cb->ipv4_tcp_opts |= 2399 cpu_to_le16(TCPOPT_DNS_SERVER_IP_EN); 2400 else 2401 init_fw_cb->ipv4_tcp_opts &= 2402 cpu_to_le16(~TCPOPT_DNS_SERVER_IP_EN); 2403 break; 2404 case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: 2405 if (iface_param->iface_num & 0x1) 2406 break; 2407 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2408 init_fw_cb->ipv4_tcp_opts |= 2409 cpu_to_le16(TCPOPT_SLP_DA_INFO_EN); 2410 else 2411 init_fw_cb->ipv4_tcp_opts &= 2412 cpu_to_le16(~TCPOPT_SLP_DA_INFO_EN); 2413 break; 2414 case ISCSI_NET_PARAM_IPV4_TOS_EN: 2415 if (iface_param->iface_num & 0x1) 2416 break; 2417 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2418 init_fw_cb->ipv4_ip_opts |= 2419 cpu_to_le16(IPOPT_IPV4_TOS_EN); 2420 else 2421 init_fw_cb->ipv4_ip_opts &= 2422 cpu_to_le16(~IPOPT_IPV4_TOS_EN); 2423 break; 2424 case ISCSI_NET_PARAM_IPV4_TOS: 2425 if (iface_param->iface_num & 0x1) 2426 break; 2427 init_fw_cb->ipv4_tos = iface_param->value[0]; 2428 break; 2429 case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: 2430 if (iface_param->iface_num & 0x1) 2431 break; 2432 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2433 init_fw_cb->ipv4_ip_opts |= 2434 cpu_to_le16(IPOPT_GRAT_ARP_EN); 2435 else 2436 init_fw_cb->ipv4_ip_opts &= 2437 cpu_to_le16(~IPOPT_GRAT_ARP_EN); 2438 break; 2439 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: 2440 if (iface_param->iface_num & 0x1) 2441 break; 2442 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2443 init_fw_cb->ipv4_ip_opts |= 2444 cpu_to_le16(IPOPT_ALT_CID_EN); 2445 else 2446 init_fw_cb->ipv4_ip_opts &= 2447 cpu_to_le16(~IPOPT_ALT_CID_EN); 2448 break; 2449 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: 2450 if (iface_param->iface_num & 0x1) 2451 break; 2452 memcpy(init_fw_cb->ipv4_dhcp_alt_cid, iface_param->value, 2453 (sizeof(init_fw_cb->ipv4_dhcp_alt_cid) - 1)); 2454 init_fw_cb->ipv4_dhcp_alt_cid_len = 2455 strlen(init_fw_cb->ipv4_dhcp_alt_cid); 2456 break; 2457 case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: 2458 if (iface_param->iface_num & 0x1) 2459 break; 2460 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2461 init_fw_cb->ipv4_ip_opts |= 2462 
cpu_to_le16(IPOPT_REQ_VID_EN); 2463 else 2464 init_fw_cb->ipv4_ip_opts &= 2465 cpu_to_le16(~IPOPT_REQ_VID_EN); 2466 break; 2467 case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: 2468 if (iface_param->iface_num & 0x1) 2469 break; 2470 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2471 init_fw_cb->ipv4_ip_opts |= 2472 cpu_to_le16(IPOPT_USE_VID_EN); 2473 else 2474 init_fw_cb->ipv4_ip_opts &= 2475 cpu_to_le16(~IPOPT_USE_VID_EN); 2476 break; 2477 case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: 2478 if (iface_param->iface_num & 0x1) 2479 break; 2480 memcpy(init_fw_cb->ipv4_dhcp_vid, iface_param->value, 2481 (sizeof(init_fw_cb->ipv4_dhcp_vid) - 1)); 2482 init_fw_cb->ipv4_dhcp_vid_len = 2483 strlen(init_fw_cb->ipv4_dhcp_vid); 2484 break; 2485 case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: 2486 if (iface_param->iface_num & 0x1) 2487 break; 2488 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2489 init_fw_cb->ipv4_ip_opts |= 2490 cpu_to_le16(IPOPT_LEARN_IQN_EN); 2491 else 2492 init_fw_cb->ipv4_ip_opts &= 2493 cpu_to_le16(~IPOPT_LEARN_IQN_EN); 2494 break; 2495 case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: 2496 if (iface_param->iface_num & 0x1) 2497 break; 2498 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2499 init_fw_cb->ipv4_ip_opts |= 2500 cpu_to_le16(IPOPT_FRAGMENTATION_DISABLE); 2501 else 2502 init_fw_cb->ipv4_ip_opts &= 2503 cpu_to_le16(~IPOPT_FRAGMENTATION_DISABLE); 2504 break; 2505 case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: 2506 if (iface_param->iface_num & 0x1) 2507 break; 2508 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2509 init_fw_cb->ipv4_ip_opts |= 2510 cpu_to_le16(IPOPT_IN_FORWARD_EN); 2511 else 2512 init_fw_cb->ipv4_ip_opts &= 2513 cpu_to_le16(~IPOPT_IN_FORWARD_EN); 2514 break; 2515 case ISCSI_NET_PARAM_REDIRECT_EN: 2516 if (iface_param->iface_num & 0x1) 2517 break; 2518 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2519 init_fw_cb->ipv4_ip_opts |= 2520 cpu_to_le16(IPOPT_ARP_REDIRECT_EN); 2521 else 2522 init_fw_cb->ipv4_ip_opts &= 2523 cpu_to_le16(~IPOPT_ARP_REDIRECT_EN); 2524 break; 2525 case ISCSI_NET_PARAM_IPV4_TTL: 2526 if (iface_param->iface_num & 0x1) 2527 break; 2528 init_fw_cb->ipv4_ttl = iface_param->value[0]; 2529 break; 2530 default: 2531 ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n", 2532 iface_param->param); 2533 break; 2534 } 2535 } 2536 2537 static void qla4xxx_set_iscsi_param(struct scsi_qla_host *ha, 2538 struct iscsi_iface_param_info *iface_param, 2539 struct addr_ctrl_blk *init_fw_cb) 2540 { 2541 switch (iface_param->param) { 2542 case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: 2543 if (iface_param->iface_num & 0x1) 2544 break; 2545 init_fw_cb->def_timeout = 2546 cpu_to_le16(*(uint16_t *)iface_param->value); 2547 break; 2548 case ISCSI_IFACE_PARAM_HDRDGST_EN: 2549 if (iface_param->iface_num & 0x1) 2550 break; 2551 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2552 init_fw_cb->iscsi_opts |= 2553 cpu_to_le16(ISCSIOPTS_HEADER_DIGEST_EN); 2554 else 2555 init_fw_cb->iscsi_opts &= 2556 cpu_to_le16(~ISCSIOPTS_HEADER_DIGEST_EN); 2557 break; 2558 case ISCSI_IFACE_PARAM_DATADGST_EN: 2559 if (iface_param->iface_num & 0x1) 2560 break; 2561 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2562 init_fw_cb->iscsi_opts |= 2563 cpu_to_le16(ISCSIOPTS_DATA_DIGEST_EN); 2564 else 2565 init_fw_cb->iscsi_opts &= 2566 cpu_to_le16(~ISCSIOPTS_DATA_DIGEST_EN); 2567 break; 2568 case ISCSI_IFACE_PARAM_IMM_DATA_EN: 2569 if (iface_param->iface_num & 0x1) 2570 break; 2571 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2572 init_fw_cb->iscsi_opts |= 2573 
cpu_to_le16(ISCSIOPTS_IMMEDIATE_DATA_EN); 2574 else 2575 init_fw_cb->iscsi_opts &= 2576 cpu_to_le16(~ISCSIOPTS_IMMEDIATE_DATA_EN); 2577 break; 2578 case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: 2579 if (iface_param->iface_num & 0x1) 2580 break; 2581 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2582 init_fw_cb->iscsi_opts |= 2583 cpu_to_le16(ISCSIOPTS_INITIAL_R2T_EN); 2584 else 2585 init_fw_cb->iscsi_opts &= 2586 cpu_to_le16(~ISCSIOPTS_INITIAL_R2T_EN); 2587 break; 2588 case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: 2589 if (iface_param->iface_num & 0x1) 2590 break; 2591 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2592 init_fw_cb->iscsi_opts |= 2593 cpu_to_le16(ISCSIOPTS_DATA_SEQ_INORDER_EN); 2594 else 2595 init_fw_cb->iscsi_opts &= 2596 cpu_to_le16(~ISCSIOPTS_DATA_SEQ_INORDER_EN); 2597 break; 2598 case ISCSI_IFACE_PARAM_PDU_INORDER_EN: 2599 if (iface_param->iface_num & 0x1) 2600 break; 2601 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2602 init_fw_cb->iscsi_opts |= 2603 cpu_to_le16(ISCSIOPTS_DATA_PDU_INORDER_EN); 2604 else 2605 init_fw_cb->iscsi_opts &= 2606 cpu_to_le16(~ISCSIOPTS_DATA_PDU_INORDER_EN); 2607 break; 2608 case ISCSI_IFACE_PARAM_ERL: 2609 if (iface_param->iface_num & 0x1) 2610 break; 2611 init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_ERL); 2612 init_fw_cb->iscsi_opts |= cpu_to_le16(iface_param->value[0] & 2613 ISCSIOPTS_ERL); 2614 break; 2615 case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: 2616 if (iface_param->iface_num & 0x1) 2617 break; 2618 init_fw_cb->iscsi_max_pdu_size = 2619 cpu_to_le32(*(uint32_t *)iface_param->value) / 2620 BYTE_UNITS; 2621 break; 2622 case ISCSI_IFACE_PARAM_FIRST_BURST: 2623 if (iface_param->iface_num & 0x1) 2624 break; 2625 init_fw_cb->iscsi_fburst_len = 2626 cpu_to_le32(*(uint32_t *)iface_param->value) / 2627 BYTE_UNITS; 2628 break; 2629 case ISCSI_IFACE_PARAM_MAX_R2T: 2630 if (iface_param->iface_num & 0x1) 2631 break; 2632 init_fw_cb->iscsi_max_outstnd_r2t = 2633 cpu_to_le16(*(uint16_t *)iface_param->value); 2634 break; 2635 case ISCSI_IFACE_PARAM_MAX_BURST: 2636 if (iface_param->iface_num & 0x1) 2637 break; 2638 init_fw_cb->iscsi_max_burst_len = 2639 cpu_to_le32(*(uint32_t *)iface_param->value) / 2640 BYTE_UNITS; 2641 break; 2642 case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: 2643 if (iface_param->iface_num & 0x1) 2644 break; 2645 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2646 init_fw_cb->iscsi_opts |= 2647 cpu_to_le16(ISCSIOPTS_CHAP_AUTH_EN); 2648 else 2649 init_fw_cb->iscsi_opts &= 2650 cpu_to_le16(~ISCSIOPTS_CHAP_AUTH_EN); 2651 break; 2652 case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: 2653 if (iface_param->iface_num & 0x1) 2654 break; 2655 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2656 init_fw_cb->iscsi_opts |= 2657 cpu_to_le16(ISCSIOPTS_BIDI_CHAP_EN); 2658 else 2659 init_fw_cb->iscsi_opts &= 2660 cpu_to_le16(~ISCSIOPTS_BIDI_CHAP_EN); 2661 break; 2662 case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: 2663 if (iface_param->iface_num & 0x1) 2664 break; 2665 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2666 init_fw_cb->iscsi_opts |= 2667 cpu_to_le16(ISCSIOPTS_DISCOVERY_AUTH_EN); 2668 else 2669 init_fw_cb->iscsi_opts &= 2670 cpu_to_le16(~ISCSIOPTS_DISCOVERY_AUTH_EN); 2671 break; 2672 case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: 2673 if (iface_param->iface_num & 0x1) 2674 break; 2675 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2676 init_fw_cb->iscsi_opts |= 2677 cpu_to_le16(ISCSIOPTS_DISCOVERY_LOGOUT_EN); 2678 else 2679 init_fw_cb->iscsi_opts &= 2680 cpu_to_le16(~ISCSIOPTS_DISCOVERY_LOGOUT_EN); 2681 break; 2682 case 
ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: 2683 if (iface_param->iface_num & 0x1) 2684 break; 2685 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2686 init_fw_cb->iscsi_opts |= 2687 cpu_to_le16(ISCSIOPTS_STRICT_LOGIN_COMP_EN); 2688 else 2689 init_fw_cb->iscsi_opts &= 2690 cpu_to_le16(~ISCSIOPTS_STRICT_LOGIN_COMP_EN); 2691 break; 2692 default: 2693 ql4_printk(KERN_ERR, ha, "Unknown iscsi param = %d\n", 2694 iface_param->param); 2695 break; 2696 } 2697 } 2698 2699 static void 2700 qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb) 2701 { 2702 struct addr_ctrl_blk_def *acb; 2703 acb = (struct addr_ctrl_blk_def *)init_fw_cb; 2704 memset(acb->reserved1, 0, sizeof(acb->reserved1)); 2705 memset(acb->reserved2, 0, sizeof(acb->reserved2)); 2706 memset(acb->reserved3, 0, sizeof(acb->reserved3)); 2707 memset(acb->reserved4, 0, sizeof(acb->reserved4)); 2708 memset(acb->reserved5, 0, sizeof(acb->reserved5)); 2709 memset(acb->reserved6, 0, sizeof(acb->reserved6)); 2710 memset(acb->reserved7, 0, sizeof(acb->reserved7)); 2711 memset(acb->reserved8, 0, sizeof(acb->reserved8)); 2712 memset(acb->reserved9, 0, sizeof(acb->reserved9)); 2713 memset(acb->reserved10, 0, sizeof(acb->reserved10)); 2714 memset(acb->reserved11, 0, sizeof(acb->reserved11)); 2715 memset(acb->reserved12, 0, sizeof(acb->reserved12)); 2716 memset(acb->reserved13, 0, sizeof(acb->reserved13)); 2717 memset(acb->reserved14, 0, sizeof(acb->reserved14)); 2718 memset(acb->reserved15, 0, sizeof(acb->reserved15)); 2719 } 2720 2721 static int 2722 qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len) 2723 { 2724 struct scsi_qla_host *ha = to_qla_host(shost); 2725 int rval = 0; 2726 struct iscsi_iface_param_info *iface_param = NULL; 2727 struct addr_ctrl_blk *init_fw_cb = NULL; 2728 dma_addr_t init_fw_cb_dma; 2729 uint32_t mbox_cmd[MBOX_REG_COUNT]; 2730 uint32_t mbox_sts[MBOX_REG_COUNT]; 2731 uint32_t rem = len; 2732 struct nlattr *attr; 2733 2734 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, 2735 sizeof(struct addr_ctrl_blk), 2736 &init_fw_cb_dma, GFP_KERNEL); 2737 if (!init_fw_cb) { 2738 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n", 2739 __func__); 2740 return -ENOMEM; 2741 } 2742 2743 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 2744 memset(&mbox_sts, 0, sizeof(mbox_sts)); 2745 2746 if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) { 2747 ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__); 2748 rval = -EIO; 2749 goto exit_init_fw_cb; 2750 } 2751 2752 nla_for_each_attr(attr, data, len, rem) { 2753 iface_param = nla_data(attr); 2754 2755 if (iface_param->param_type == ISCSI_NET_PARAM) { 2756 switch (iface_param->iface_type) { 2757 case ISCSI_IFACE_TYPE_IPV4: 2758 switch (iface_param->iface_num) { 2759 case 0: 2760 qla4xxx_set_ipv4(ha, iface_param, 2761 init_fw_cb); 2762 break; 2763 default: 2764 /* Cannot have more than one IPv4 interface */ 2765 ql4_printk(KERN_ERR, ha, 2766 "Invalid IPv4 iface number = %d\n", 2767 iface_param->iface_num); 2768 break; 2769 } 2770 break; 2771 case ISCSI_IFACE_TYPE_IPV6: 2772 switch (iface_param->iface_num) { 2773 case 0: 2774 case 1: 2775 qla4xxx_set_ipv6(ha, iface_param, 2776 init_fw_cb); 2777 break; 2778 default: 2779 /* Cannot have more than two IPv6 interface */ 2780 ql4_printk(KERN_ERR, ha, 2781 "Invalid IPv6 iface number = %d\n", 2782 iface_param->iface_num); 2783 break; 2784 } 2785 break; 2786 default: 2787 ql4_printk(KERN_ERR, ha, 2788 "Invalid iface type\n"); 2789 break; 2790 } 2791 } else if (iface_param->param_type == ISCSI_IFACE_PARAM) { 
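			/*
			 * Note (descriptive comment): iSCSI-level parameters
			 * (digests, R2T/burst limits, CHAP, ERL, etc.) are not
			 * tied to a particular network interface.
			 * qla4xxx_set_iscsi_param() below applies them to the
			 * shared init_fw_cb and skips any attribute addressed
			 * to an odd iface_num.
			 */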
2792 qla4xxx_set_iscsi_param(ha, iface_param, 2793 init_fw_cb); 2794 } else { 2795 continue; 2796 } 2797 } 2798 2799 init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A); 2800 2801 rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB, 2802 sizeof(struct addr_ctrl_blk), 2803 FLASH_OPT_RMW_COMMIT); 2804 if (rval != QLA_SUCCESS) { 2805 ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n", 2806 __func__); 2807 rval = -EIO; 2808 goto exit_init_fw_cb; 2809 } 2810 2811 rval = qla4xxx_disable_acb(ha); 2812 if (rval != QLA_SUCCESS) { 2813 ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n", 2814 __func__); 2815 rval = -EIO; 2816 goto exit_init_fw_cb; 2817 } 2818 2819 wait_for_completion_timeout(&ha->disable_acb_comp, 2820 DISABLE_ACB_TOV * HZ); 2821 2822 qla4xxx_initcb_to_acb(init_fw_cb); 2823 2824 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma); 2825 if (rval != QLA_SUCCESS) { 2826 ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n", 2827 __func__); 2828 rval = -EIO; 2829 goto exit_init_fw_cb; 2830 } 2831 2832 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk)); 2833 qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb, 2834 init_fw_cb_dma); 2835 2836 exit_init_fw_cb: 2837 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), 2838 init_fw_cb, init_fw_cb_dma); 2839 2840 return rval; 2841 } 2842 2843 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess, 2844 enum iscsi_param param, char *buf) 2845 { 2846 struct iscsi_session *sess = cls_sess->dd_data; 2847 struct ddb_entry *ddb_entry = sess->dd_data; 2848 struct scsi_qla_host *ha = ddb_entry->ha; 2849 struct iscsi_cls_conn *cls_conn = ddb_entry->conn; 2850 struct ql4_chap_table chap_tbl; 2851 int rval, len; 2852 uint16_t idx; 2853 2854 memset(&chap_tbl, 0, sizeof(chap_tbl)); 2855 switch (param) { 2856 case ISCSI_PARAM_CHAP_IN_IDX: 2857 rval = qla4xxx_get_chap_index(ha, sess->username_in, 2858 sess->password_in, BIDI_CHAP, 2859 &idx); 2860 if (rval) 2861 len = sprintf(buf, "\n"); 2862 else 2863 len = sprintf(buf, "%hu\n", idx); 2864 break; 2865 case ISCSI_PARAM_CHAP_OUT_IDX: 2866 if (ddb_entry->ddb_type == FLASH_DDB) { 2867 if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) { 2868 idx = ddb_entry->chap_tbl_idx; 2869 rval = QLA_SUCCESS; 2870 } else { 2871 rval = QLA_ERROR; 2872 } 2873 } else { 2874 rval = qla4xxx_get_chap_index(ha, sess->username, 2875 sess->password, 2876 LOCAL_CHAP, &idx); 2877 } 2878 if (rval) 2879 len = sprintf(buf, "\n"); 2880 else 2881 len = sprintf(buf, "%hu\n", idx); 2882 break; 2883 case ISCSI_PARAM_USERNAME: 2884 case ISCSI_PARAM_PASSWORD: 2885 /* First, populate session username and password for FLASH DDB, 2886 * if not already done. This happens when session login fails 2887 * for a FLASH DDB. 
2888 */ 2889 if (ddb_entry->ddb_type == FLASH_DDB && 2890 ddb_entry->chap_tbl_idx != INVALID_ENTRY && 2891 !sess->username && !sess->password) { 2892 idx = ddb_entry->chap_tbl_idx; 2893 rval = qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, 2894 chap_tbl.secret, 2895 idx); 2896 if (!rval) { 2897 iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, 2898 (char *)chap_tbl.name, 2899 strlen((char *)chap_tbl.name)); 2900 iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, 2901 (char *)chap_tbl.secret, 2902 chap_tbl.secret_len); 2903 } 2904 } 2905 fallthrough; 2906 default: 2907 return iscsi_session_get_param(cls_sess, param, buf); 2908 } 2909 2910 return len; 2911 } 2912 2913 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn, 2914 enum iscsi_param param, char *buf) 2915 { 2916 struct iscsi_conn *conn; 2917 struct qla_conn *qla_conn; 2918 struct sockaddr *dst_addr; 2919 2920 conn = cls_conn->dd_data; 2921 qla_conn = conn->dd_data; 2922 dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr; 2923 2924 switch (param) { 2925 case ISCSI_PARAM_CONN_PORT: 2926 case ISCSI_PARAM_CONN_ADDRESS: 2927 return iscsi_conn_get_addr_param((struct sockaddr_storage *) 2928 dst_addr, param, buf); 2929 default: 2930 return iscsi_conn_get_param(cls_conn, param, buf); 2931 } 2932 } 2933 2934 int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index) 2935 { 2936 uint32_t mbx_sts = 0; 2937 uint16_t tmp_ddb_index; 2938 int ret; 2939 2940 get_ddb_index: 2941 tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES); 2942 2943 if (tmp_ddb_index >= MAX_DDB_ENTRIES) { 2944 DEBUG2(ql4_printk(KERN_INFO, ha, 2945 "Free DDB index not available\n")); 2946 ret = QLA_ERROR; 2947 goto exit_get_ddb_index; 2948 } 2949 2950 if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map)) 2951 goto get_ddb_index; 2952 2953 DEBUG2(ql4_printk(KERN_INFO, ha, 2954 "Found a free DDB index at %d\n", tmp_ddb_index)); 2955 ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts); 2956 if (ret == QLA_ERROR) { 2957 if (mbx_sts == MBOX_STS_COMMAND_ERROR) { 2958 ql4_printk(KERN_INFO, ha, 2959 "DDB index = %d not available trying next\n", 2960 tmp_ddb_index); 2961 goto get_ddb_index; 2962 } 2963 DEBUG2(ql4_printk(KERN_INFO, ha, 2964 "Free FW DDB not available\n")); 2965 } 2966 2967 *ddb_index = tmp_ddb_index; 2968 2969 exit_get_ddb_index: 2970 return ret; 2971 } 2972 2973 static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha, 2974 struct ddb_entry *ddb_entry, 2975 char *existing_ipaddr, 2976 char *user_ipaddr) 2977 { 2978 uint8_t dst_ipaddr[IPv6_ADDR_LEN]; 2979 char formatted_ipaddr[DDB_IPADDR_LEN]; 2980 int status = QLA_SUCCESS, ret = 0; 2981 2982 if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) { 2983 ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr, 2984 '\0', NULL); 2985 if (ret == 0) { 2986 status = QLA_ERROR; 2987 goto out_match; 2988 } 2989 ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr); 2990 } else { 2991 ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr, 2992 '\0', NULL); 2993 if (ret == 0) { 2994 status = QLA_ERROR; 2995 goto out_match; 2996 } 2997 ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr); 2998 } 2999 3000 if (strcmp(existing_ipaddr, formatted_ipaddr)) 3001 status = QLA_ERROR; 3002 3003 out_match: 3004 return status; 3005 } 3006 3007 static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha, 3008 struct iscsi_cls_conn *cls_conn) 3009 { 3010 int idx = 0, max_ddbs, rval; 3011 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn); 3012 struct 
iscsi_session *sess, *existing_sess; 3013 struct iscsi_conn *conn, *existing_conn; 3014 struct ddb_entry *ddb_entry; 3015 3016 sess = cls_sess->dd_data; 3017 conn = cls_conn->dd_data; 3018 3019 if (sess->targetname == NULL || 3020 conn->persistent_address == NULL || 3021 conn->persistent_port == 0) 3022 return QLA_ERROR; 3023 3024 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : 3025 MAX_DEV_DB_ENTRIES; 3026 3027 for (idx = 0; idx < max_ddbs; idx++) { 3028 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 3029 if (ddb_entry == NULL) 3030 continue; 3031 3032 if (ddb_entry->ddb_type != FLASH_DDB) 3033 continue; 3034 3035 existing_sess = ddb_entry->sess->dd_data; 3036 existing_conn = ddb_entry->conn->dd_data; 3037 3038 if (existing_sess->targetname == NULL || 3039 existing_conn->persistent_address == NULL || 3040 existing_conn->persistent_port == 0) 3041 continue; 3042 3043 DEBUG2(ql4_printk(KERN_INFO, ha, 3044 "IQN = %s User IQN = %s\n", 3045 existing_sess->targetname, 3046 sess->targetname)); 3047 3048 DEBUG2(ql4_printk(KERN_INFO, ha, 3049 "IP = %s User IP = %s\n", 3050 existing_conn->persistent_address, 3051 conn->persistent_address)); 3052 3053 DEBUG2(ql4_printk(KERN_INFO, ha, 3054 "Port = %d User Port = %d\n", 3055 existing_conn->persistent_port, 3056 conn->persistent_port)); 3057 3058 if (strcmp(existing_sess->targetname, sess->targetname)) 3059 continue; 3060 rval = qla4xxx_match_ipaddress(ha, ddb_entry, 3061 existing_conn->persistent_address, 3062 conn->persistent_address); 3063 if (rval == QLA_ERROR) 3064 continue; 3065 if (existing_conn->persistent_port != conn->persistent_port) 3066 continue; 3067 break; 3068 } 3069 3070 if (idx == max_ddbs) 3071 return QLA_ERROR; 3072 3073 DEBUG2(ql4_printk(KERN_INFO, ha, 3074 "Match found in fwdb sessions\n")); 3075 return QLA_SUCCESS; 3076 } 3077 3078 static struct iscsi_cls_session * 3079 qla4xxx_session_create(struct iscsi_endpoint *ep, 3080 uint16_t cmds_max, uint16_t qdepth, 3081 uint32_t initial_cmdsn) 3082 { 3083 struct iscsi_cls_session *cls_sess; 3084 struct scsi_qla_host *ha; 3085 struct qla_endpoint *qla_ep; 3086 struct ddb_entry *ddb_entry; 3087 uint16_t ddb_index; 3088 struct iscsi_session *sess; 3089 int ret; 3090 3091 if (!ep) { 3092 printk(KERN_ERR "qla4xxx: missing ep.\n"); 3093 return NULL; 3094 } 3095 3096 qla_ep = ep->dd_data; 3097 ha = to_qla_host(qla_ep->host); 3098 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, 3099 ha->host_no)); 3100 3101 ret = qla4xxx_get_ddb_index(ha, &ddb_index); 3102 if (ret == QLA_ERROR) 3103 return NULL; 3104 3105 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host, 3106 cmds_max, sizeof(struct ddb_entry), 3107 sizeof(struct ql4_task_data), 3108 initial_cmdsn, ddb_index); 3109 if (!cls_sess) 3110 return NULL; 3111 3112 sess = cls_sess->dd_data; 3113 ddb_entry = sess->dd_data; 3114 ddb_entry->fw_ddb_index = ddb_index; 3115 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE; 3116 ddb_entry->ha = ha; 3117 ddb_entry->sess = cls_sess; 3118 ddb_entry->unblock_sess = qla4xxx_unblock_ddb; 3119 ddb_entry->ddb_change = qla4xxx_ddb_change; 3120 clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags); 3121 cls_sess->recovery_tmo = ql4xsess_recovery_tmo; 3122 ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry; 3123 ha->tot_ddbs++; 3124 3125 return cls_sess; 3126 } 3127 3128 static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess) 3129 { 3130 struct iscsi_session *sess; 3131 struct ddb_entry *ddb_entry; 3132 struct scsi_qla_host *ha; 3133 
	unsigned long flags, wtime;
	struct dev_db_entry *fw_ddb_entry = NULL;
	dma_addr_t fw_ddb_entry_dma;
	uint32_t ddb_state;
	int ret;

	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
			  ha->host_no));

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (!fw_ddb_entry) {
		ql4_printk(KERN_ERR, ha,
			   "%s: Unable to allocate dma buffer\n", __func__);
		goto destroy_session;
	}

	wtime = jiffies + (HZ * LOGOUT_TOV);
	do {
		ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
					      fw_ddb_entry, fw_ddb_entry_dma,
					      NULL, NULL, &ddb_state, NULL,
					      NULL, NULL);
		if (ret == QLA_ERROR)
			goto destroy_session;

		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
		    (ddb_state == DDB_DS_SESSION_FAILED))
			goto destroy_session;

		schedule_timeout_uninterruptible(HZ);
	} while ((time_after(wtime, jiffies)));

destroy_session:
	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
	if (test_and_clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags))
		clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qla4xxx_free_ddb(ha, ddb_entry);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	iscsi_session_teardown(cls_sess);

	if (fw_ddb_entry)
		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
				  fw_ddb_entry, fw_ddb_entry_dma);
}

static struct iscsi_cls_conn *
qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
{
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;

	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
				    conn_idx);
	if (!cls_conn) {
		pr_info("%s: Can not create connection for conn_idx = %u\n",
			__func__, conn_idx);
		return NULL;
	}

	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ddb_entry->conn = cls_conn;

	ha = ddb_entry->ha;
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: conn_idx = %u\n", __func__,
			  conn_idx));
	return cls_conn;
}

static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
			     struct iscsi_cls_conn *cls_conn,
			     uint64_t transport_fd, int is_leading)
{
	struct iscsi_conn *conn;
	struct qla_conn *qla_conn;
	struct iscsi_endpoint *ep;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;
	struct iscsi_session *sess;

	sess = cls_session->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__,
			  cls_session->sid, cls_conn->cid));

	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
		return -EINVAL;
	ep = iscsi_lookup_endpoint(transport_fd);
	if (!ep)
		return -EINVAL;
	conn = cls_conn->dd_data;
	qla_conn = conn->dd_data;
	qla_conn->qla_ep = ep->dd_data;
	iscsi_put_endpoint(ep);
	return 0;
}
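/*
 * qla4xxx_conn_start - start the connection on the DDB backing this session
 *
 * Summary of the flow below: if an identical session is already present in
 * the firmware DDB table, the login is refused with -EEXIST (it could
 * otherwise cause the target to log out the existing connection); otherwise
 * the firmware DDB entry is programmed via qla4xxx_set_param_ddbentry() and
 * the connection is opened with qla4xxx_conn_open().  On success the DDB
 * state is advanced from NO_CONNECTION_ACTIVE to LOGIN_IN_PROCESS.
 */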
static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;
	struct dev_db_entry *fw_ddb_entry = NULL;
	dma_addr_t fw_ddb_entry_dma;
	uint32_t mbx_sts = 0;
	int ret = 0;
	int status = QLA_SUCCESS;

	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__,
			  cls_sess->sid, cls_conn->cid));

	/* If a matching FW DDB already exists, do not log in to this target
	 * again; doing so could cause the target to log out the previous
	 * connection.
	 */
	ret = qla4xxx_match_fwdb_session(ha, cls_conn);
	if (ret == QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha,
			   "Session already exists in FW.\n");
		ret = -EEXIST;
		goto exit_conn_start;
	}

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (!fw_ddb_entry) {
		ql4_printk(KERN_ERR, ha,
			   "%s: Unable to allocate dma buffer\n", __func__);
		ret = -ENOMEM;
		goto exit_conn_start;
	}

	ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
	if (ret) {
		/* If iscsid is stopped and restarted, there is no need to
		 * set the params again: the DDB state is already active and
		 * the firmware does not allow a set-ddb on an active
		 * session.
		 */
		if (mbx_sts)
			if (ddb_entry->fw_ddb_device_state ==
						DDB_DS_SESSION_ACTIVE) {
				ddb_entry->unblock_sess(ddb_entry->sess);
				goto exit_set_param;
			}

		ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
			   __func__, ddb_entry->fw_ddb_index);
		goto exit_conn_start;
	}

	status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
	if (status == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
			   sess->targetname);
		ret = -EINVAL;
		goto exit_conn_start;
	}

	if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
		ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;

	DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
		      ddb_entry->fw_ddb_device_state));

exit_set_param:
	ret = 0;

exit_conn_start:
	if (fw_ddb_entry)
		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
				  fw_ddb_entry, fw_ddb_entry_dma);
	return ret;
}

static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
	struct iscsi_session *sess;
	struct scsi_qla_host *ha;
	struct ddb_entry *ddb_entry;
	int options;

	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: cid = %d\n", __func__,
			  cls_conn->cid));

	options = LOGOUT_OPTION_CLOSE_SESSION;
	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
		ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
}

static void qla4xxx_task_work(struct work_struct *wdata)
{
	struct ql4_task_data *task_data;
	struct scsi_qla_host *ha;
	struct passthru_status *sts;
	struct iscsi_task *task;
	struct iscsi_hdr *hdr;
	uint8_t *data;
	uint32_t data_len;
	struct iscsi_conn *conn;
	int hdr_len;
	itt_t itt;

	task_data = container_of(wdata, struct ql4_task_data, task_work);
	ha = task_data->ha;
	task = task_data->task;
	sts = &task_data->sts;
	hdr_len = sizeof(struct iscsi_hdr);

	DEBUG3(printk(KERN_INFO "Status returned\n"));
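	/* When level-3 debugging is enabled, dump the first 64 bytes of the
	 * passthru status block and of the response buffer received from
	 * the firmware.
	 */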
DEBUG3(qla4xxx_dump_buffer(sts, 64)); 3362 DEBUG3(printk(KERN_INFO "Response buffer")); 3363 DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64)); 3364 3365 conn = task->conn; 3366 3367 switch (sts->completionStatus) { 3368 case PASSTHRU_STATUS_COMPLETE: 3369 hdr = (struct iscsi_hdr *)task_data->resp_buffer; 3370 /* Assign back the itt in hdr, until we use the PREASSIGN_TAG */ 3371 itt = sts->handle; 3372 hdr->itt = itt; 3373 data = task_data->resp_buffer + hdr_len; 3374 data_len = task_data->resp_len - hdr_len; 3375 iscsi_complete_pdu(conn, hdr, data, data_len); 3376 break; 3377 default: 3378 ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n", 3379 sts->completionStatus); 3380 break; 3381 } 3382 return; 3383 } 3384 3385 static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode) 3386 { 3387 struct ql4_task_data *task_data; 3388 struct iscsi_session *sess; 3389 struct ddb_entry *ddb_entry; 3390 struct scsi_qla_host *ha; 3391 int hdr_len; 3392 3393 sess = task->conn->session; 3394 ddb_entry = sess->dd_data; 3395 ha = ddb_entry->ha; 3396 task_data = task->dd_data; 3397 memset(task_data, 0, sizeof(struct ql4_task_data)); 3398 3399 if (task->sc) { 3400 ql4_printk(KERN_INFO, ha, 3401 "%s: SCSI Commands not implemented\n", __func__); 3402 return -EINVAL; 3403 } 3404 3405 hdr_len = sizeof(struct iscsi_hdr); 3406 task_data->ha = ha; 3407 task_data->task = task; 3408 3409 if (task->data_count) { 3410 task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data, 3411 task->data_count, 3412 DMA_TO_DEVICE); 3413 } 3414 3415 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n", 3416 __func__, task->conn->max_recv_dlength, hdr_len)); 3417 3418 task_data->resp_len = task->conn->max_recv_dlength + hdr_len; 3419 task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev, 3420 task_data->resp_len, 3421 &task_data->resp_dma, 3422 GFP_ATOMIC); 3423 if (!task_data->resp_buffer) 3424 goto exit_alloc_pdu; 3425 3426 task_data->req_len = task->data_count + hdr_len; 3427 task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev, 3428 task_data->req_len, 3429 &task_data->req_dma, 3430 GFP_ATOMIC); 3431 if (!task_data->req_buffer) 3432 goto exit_alloc_pdu; 3433 3434 task->hdr = task_data->req_buffer; 3435 3436 INIT_WORK(&task_data->task_work, qla4xxx_task_work); 3437 3438 return 0; 3439 3440 exit_alloc_pdu: 3441 if (task_data->resp_buffer) 3442 dma_free_coherent(&ha->pdev->dev, task_data->resp_len, 3443 task_data->resp_buffer, task_data->resp_dma); 3444 3445 if (task_data->req_buffer) 3446 dma_free_coherent(&ha->pdev->dev, task_data->req_len, 3447 task_data->req_buffer, task_data->req_dma); 3448 return -ENOMEM; 3449 } 3450 3451 static void qla4xxx_task_cleanup(struct iscsi_task *task) 3452 { 3453 struct ql4_task_data *task_data; 3454 struct iscsi_session *sess; 3455 struct ddb_entry *ddb_entry; 3456 struct scsi_qla_host *ha; 3457 int hdr_len; 3458 3459 hdr_len = sizeof(struct iscsi_hdr); 3460 sess = task->conn->session; 3461 ddb_entry = sess->dd_data; 3462 ha = ddb_entry->ha; 3463 task_data = task->dd_data; 3464 3465 if (task->data_count) { 3466 dma_unmap_single(&ha->pdev->dev, task_data->data_dma, 3467 task->data_count, DMA_TO_DEVICE); 3468 } 3469 3470 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n", 3471 __func__, task->conn->max_recv_dlength, hdr_len)); 3472 3473 dma_free_coherent(&ha->pdev->dev, task_data->resp_len, 3474 task_data->resp_buffer, task_data->resp_dma); 3475 dma_free_coherent(&ha->pdev->dev, task_data->req_len, 3476 task_data->req_buffer, 
task_data->req_dma); 3477 return; 3478 } 3479 3480 static int qla4xxx_task_xmit(struct iscsi_task *task) 3481 { 3482 struct scsi_cmnd *sc = task->sc; 3483 struct iscsi_session *sess = task->conn->session; 3484 struct ddb_entry *ddb_entry = sess->dd_data; 3485 struct scsi_qla_host *ha = ddb_entry->ha; 3486 3487 if (!sc) 3488 return qla4xxx_send_passthru0(task); 3489 3490 ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n", 3491 __func__); 3492 return -ENOSYS; 3493 } 3494 3495 static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess, 3496 struct iscsi_bus_flash_conn *conn, 3497 struct dev_db_entry *fw_ddb_entry) 3498 { 3499 unsigned long options = 0; 3500 int rc = 0; 3501 3502 options = le16_to_cpu(fw_ddb_entry->options); 3503 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options); 3504 if (test_bit(OPT_IPV6_DEVICE, &options)) { 3505 rc = iscsi_switch_str_param(&sess->portal_type, 3506 PORTAL_TYPE_IPV6); 3507 if (rc) 3508 goto exit_copy; 3509 } else { 3510 rc = iscsi_switch_str_param(&sess->portal_type, 3511 PORTAL_TYPE_IPV4); 3512 if (rc) 3513 goto exit_copy; 3514 } 3515 3516 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE, 3517 &options); 3518 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options); 3519 sess->entry_state = test_bit(OPT_ENTRY_STATE, &options); 3520 3521 options = le16_to_cpu(fw_ddb_entry->iscsi_options); 3522 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options); 3523 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options); 3524 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options); 3525 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options); 3526 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER, 3527 &options); 3528 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options); 3529 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options); 3530 conn->snack_req_en = test_bit(ISCSIOPT_SNACK_REQ_EN, &options); 3531 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN, 3532 &options); 3533 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options); 3534 sess->discovery_auth_optional = 3535 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options); 3536 if (test_bit(ISCSIOPT_ERL1, &options)) 3537 sess->erl |= BIT_1; 3538 if (test_bit(ISCSIOPT_ERL0, &options)) 3539 sess->erl |= BIT_0; 3540 3541 options = le16_to_cpu(fw_ddb_entry->tcp_options); 3542 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options); 3543 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options); 3544 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options); 3545 if (test_bit(TCPOPT_TIMER_SCALE3, &options)) 3546 conn->tcp_timer_scale |= BIT_3; 3547 if (test_bit(TCPOPT_TIMER_SCALE2, &options)) 3548 conn->tcp_timer_scale |= BIT_2; 3549 if (test_bit(TCPOPT_TIMER_SCALE1, &options)) 3550 conn->tcp_timer_scale |= BIT_1; 3551 3552 conn->tcp_timer_scale >>= 1; 3553 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options); 3554 3555 options = le16_to_cpu(fw_ddb_entry->ip_options); 3556 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options); 3557 3558 conn->max_recv_dlength = BYTE_UNITS * 3559 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); 3560 conn->max_xmit_dlength = BYTE_UNITS * 3561 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); 3562 sess->first_burst = BYTE_UNITS * 3563 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); 3564 sess->max_burst = BYTE_UNITS * 3565 
le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); 3566 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); 3567 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); 3568 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); 3569 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); 3570 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss); 3571 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf; 3572 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf; 3573 conn->ipv6_flow_label = le16_to_cpu(fw_ddb_entry->ipv6_flow_lbl); 3574 conn->keepalive_timeout = le16_to_cpu(fw_ddb_entry->ka_timeout); 3575 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port); 3576 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn); 3577 conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn); 3578 sess->discovery_parent_idx = le16_to_cpu(fw_ddb_entry->ddb_link); 3579 sess->discovery_parent_type = le16_to_cpu(fw_ddb_entry->ddb_link); 3580 sess->chap_out_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 3581 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid); 3582 3583 sess->default_taskmgmt_timeout = 3584 le16_to_cpu(fw_ddb_entry->def_timeout); 3585 conn->port = le16_to_cpu(fw_ddb_entry->port); 3586 3587 options = le16_to_cpu(fw_ddb_entry->options); 3588 conn->ipaddress = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL); 3589 if (!conn->ipaddress) { 3590 rc = -ENOMEM; 3591 goto exit_copy; 3592 } 3593 3594 conn->redirect_ipaddr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL); 3595 if (!conn->redirect_ipaddr) { 3596 rc = -ENOMEM; 3597 goto exit_copy; 3598 } 3599 3600 memcpy(conn->ipaddress, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN); 3601 memcpy(conn->redirect_ipaddr, fw_ddb_entry->tgt_addr, IPv6_ADDR_LEN); 3602 3603 if (test_bit(OPT_IPV6_DEVICE, &options)) { 3604 conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos; 3605 3606 conn->link_local_ipv6_addr = kmemdup( 3607 fw_ddb_entry->link_local_ipv6_addr, 3608 IPv6_ADDR_LEN, GFP_KERNEL); 3609 if (!conn->link_local_ipv6_addr) { 3610 rc = -ENOMEM; 3611 goto exit_copy; 3612 } 3613 } else { 3614 conn->ipv4_tos = fw_ddb_entry->ipv4_tos; 3615 } 3616 3617 if (fw_ddb_entry->iscsi_name[0]) { 3618 rc = iscsi_switch_str_param(&sess->targetname, 3619 (char *)fw_ddb_entry->iscsi_name); 3620 if (rc) 3621 goto exit_copy; 3622 } 3623 3624 if (fw_ddb_entry->iscsi_alias[0]) { 3625 rc = iscsi_switch_str_param(&sess->targetalias, 3626 (char *)fw_ddb_entry->iscsi_alias); 3627 if (rc) 3628 goto exit_copy; 3629 } 3630 3631 COPY_ISID(sess->isid, fw_ddb_entry->isid); 3632 3633 exit_copy: 3634 return rc; 3635 } 3636 3637 static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess, 3638 struct iscsi_bus_flash_conn *conn, 3639 struct dev_db_entry *fw_ddb_entry) 3640 { 3641 uint16_t options; 3642 3643 options = le16_to_cpu(fw_ddb_entry->options); 3644 SET_BITVAL(conn->is_fw_assigned_ipv6, options, BIT_11); 3645 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4)) 3646 options |= BIT_8; 3647 else 3648 options &= ~BIT_8; 3649 3650 SET_BITVAL(sess->auto_snd_tgt_disable, options, BIT_6); 3651 SET_BITVAL(sess->discovery_sess, options, BIT_4); 3652 SET_BITVAL(sess->entry_state, options, BIT_3); 3653 fw_ddb_entry->options = cpu_to_le16(options); 3654 3655 options = le16_to_cpu(fw_ddb_entry->iscsi_options); 3656 SET_BITVAL(conn->hdrdgst_en, options, BIT_13); 3657 SET_BITVAL(conn->datadgst_en, options, BIT_12); 3658 SET_BITVAL(sess->imm_data_en, options, BIT_11); 3659 SET_BITVAL(sess->initial_r2t_en, options, BIT_10); 3660 SET_BITVAL(sess->dataseq_inorder_en, options, BIT_9); 3661 
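	/*
	 * Note: SET_BITVAL() is defined in the driver headers (not shown
	 * here); it presumably sets or clears the named bit in 'options'
	 * depending on whether its first argument is non-zero.  Each
	 * SET_BITVAL() call in this function mirrors one of the test_bit()
	 * decodes in qla4xxx_copy_from_fwddb_param(), and the packed word is
	 * written back to the flash DDB entry with cpu_to_le16().
	 */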
SET_BITVAL(sess->pdu_inorder_en, options, BIT_8); 3662 SET_BITVAL(sess->chap_auth_en, options, BIT_7); 3663 SET_BITVAL(conn->snack_req_en, options, BIT_6); 3664 SET_BITVAL(sess->discovery_logout_en, options, BIT_5); 3665 SET_BITVAL(sess->bidi_chap_en, options, BIT_4); 3666 SET_BITVAL(sess->discovery_auth_optional, options, BIT_3); 3667 SET_BITVAL(sess->erl & BIT_1, options, BIT_1); 3668 SET_BITVAL(sess->erl & BIT_0, options, BIT_0); 3669 fw_ddb_entry->iscsi_options = cpu_to_le16(options); 3670 3671 options = le16_to_cpu(fw_ddb_entry->tcp_options); 3672 SET_BITVAL(conn->tcp_timestamp_stat, options, BIT_6); 3673 SET_BITVAL(conn->tcp_nagle_disable, options, BIT_5); 3674 SET_BITVAL(conn->tcp_wsf_disable, options, BIT_4); 3675 SET_BITVAL(conn->tcp_timer_scale & BIT_2, options, BIT_3); 3676 SET_BITVAL(conn->tcp_timer_scale & BIT_1, options, BIT_2); 3677 SET_BITVAL(conn->tcp_timer_scale & BIT_0, options, BIT_1); 3678 SET_BITVAL(conn->tcp_timestamp_en, options, BIT_0); 3679 fw_ddb_entry->tcp_options = cpu_to_le16(options); 3680 3681 options = le16_to_cpu(fw_ddb_entry->ip_options); 3682 SET_BITVAL(conn->fragment_disable, options, BIT_4); 3683 fw_ddb_entry->ip_options = cpu_to_le16(options); 3684 3685 fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t); 3686 fw_ddb_entry->iscsi_max_rcv_data_seg_len = 3687 cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS); 3688 fw_ddb_entry->iscsi_max_snd_data_seg_len = 3689 cpu_to_le16(conn->max_xmit_dlength / BYTE_UNITS); 3690 fw_ddb_entry->iscsi_first_burst_len = 3691 cpu_to_le16(sess->first_burst / BYTE_UNITS); 3692 fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16(sess->max_burst / 3693 BYTE_UNITS); 3694 fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait); 3695 fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain); 3696 fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt); 3697 fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size); 3698 fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf); 3699 fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf); 3700 fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label); 3701 fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout); 3702 fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port); 3703 fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn); 3704 fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn); 3705 fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_idx); 3706 fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx); 3707 fw_ddb_entry->tsid = cpu_to_le16(sess->tsid); 3708 fw_ddb_entry->port = cpu_to_le16(conn->port); 3709 fw_ddb_entry->def_timeout = 3710 cpu_to_le16(sess->default_taskmgmt_timeout); 3711 3712 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4)) 3713 fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class; 3714 else 3715 fw_ddb_entry->ipv4_tos = conn->ipv4_tos; 3716 3717 if (conn->ipaddress) 3718 memcpy(fw_ddb_entry->ip_addr, conn->ipaddress, 3719 sizeof(fw_ddb_entry->ip_addr)); 3720 3721 if (conn->redirect_ipaddr) 3722 memcpy(fw_ddb_entry->tgt_addr, conn->redirect_ipaddr, 3723 sizeof(fw_ddb_entry->tgt_addr)); 3724 3725 if (conn->link_local_ipv6_addr) 3726 memcpy(fw_ddb_entry->link_local_ipv6_addr, 3727 conn->link_local_ipv6_addr, 3728 sizeof(fw_ddb_entry->link_local_ipv6_addr)); 3729 3730 if (sess->targetname) 3731 memcpy(fw_ddb_entry->iscsi_name, sess->targetname, 3732 sizeof(fw_ddb_entry->iscsi_name)); 3733 3734 if (sess->targetalias) 3735 memcpy(fw_ddb_entry->iscsi_alias, sess->targetalias, 
3736 sizeof(fw_ddb_entry->iscsi_alias)); 3737 3738 COPY_ISID(fw_ddb_entry->isid, sess->isid); 3739 3740 return 0; 3741 } 3742 3743 static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn, 3744 struct iscsi_session *sess, 3745 struct dev_db_entry *fw_ddb_entry) 3746 { 3747 unsigned long options = 0; 3748 uint16_t ddb_link; 3749 uint16_t disc_parent; 3750 char ip_addr[DDB_IPADDR_LEN]; 3751 3752 options = le16_to_cpu(fw_ddb_entry->options); 3753 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options); 3754 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE, 3755 &options); 3756 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options); 3757 3758 options = le16_to_cpu(fw_ddb_entry->iscsi_options); 3759 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options); 3760 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options); 3761 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options); 3762 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options); 3763 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER, 3764 &options); 3765 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options); 3766 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options); 3767 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN, 3768 &options); 3769 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options); 3770 sess->discovery_auth_optional = 3771 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options); 3772 if (test_bit(ISCSIOPT_ERL1, &options)) 3773 sess->erl |= BIT_1; 3774 if (test_bit(ISCSIOPT_ERL0, &options)) 3775 sess->erl |= BIT_0; 3776 3777 options = le16_to_cpu(fw_ddb_entry->tcp_options); 3778 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options); 3779 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options); 3780 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options); 3781 if (test_bit(TCPOPT_TIMER_SCALE3, &options)) 3782 conn->tcp_timer_scale |= BIT_3; 3783 if (test_bit(TCPOPT_TIMER_SCALE2, &options)) 3784 conn->tcp_timer_scale |= BIT_2; 3785 if (test_bit(TCPOPT_TIMER_SCALE1, &options)) 3786 conn->tcp_timer_scale |= BIT_1; 3787 3788 conn->tcp_timer_scale >>= 1; 3789 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options); 3790 3791 options = le16_to_cpu(fw_ddb_entry->ip_options); 3792 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options); 3793 3794 conn->max_recv_dlength = BYTE_UNITS * 3795 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); 3796 conn->max_xmit_dlength = BYTE_UNITS * 3797 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); 3798 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); 3799 sess->first_burst = BYTE_UNITS * 3800 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); 3801 sess->max_burst = BYTE_UNITS * 3802 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); 3803 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); 3804 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); 3805 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); 3806 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss); 3807 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf; 3808 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf; 3809 conn->ipv4_tos = fw_ddb_entry->ipv4_tos; 3810 conn->keepalive_tmo = le16_to_cpu(fw_ddb_entry->ka_timeout); 3811 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port); 3812 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn); 3813 conn->exp_statsn = 
le32_to_cpu(fw_ddb_entry->exp_stat_sn); 3814 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid); 3815 COPY_ISID(sess->isid, fw_ddb_entry->isid); 3816 3817 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 3818 if (ddb_link == DDB_ISNS) 3819 disc_parent = ISCSI_DISC_PARENT_ISNS; 3820 else if (ddb_link == DDB_NO_LINK) 3821 disc_parent = ISCSI_DISC_PARENT_UNKNOWN; 3822 else if (ddb_link < MAX_DDB_ENTRIES) 3823 disc_parent = ISCSI_DISC_PARENT_SENDTGT; 3824 else 3825 disc_parent = ISCSI_DISC_PARENT_UNKNOWN; 3826 3827 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 3828 iscsi_get_discovery_parent_name(disc_parent), 0); 3829 3830 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS, 3831 (char *)fw_ddb_entry->iscsi_alias, 0); 3832 3833 options = le16_to_cpu(fw_ddb_entry->options); 3834 if (options & DDB_OPT_IPV6_DEVICE) { 3835 memset(ip_addr, 0, sizeof(ip_addr)); 3836 sprintf(ip_addr, "%pI6", fw_ddb_entry->link_local_ipv6_addr); 3837 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_LOCAL_IPADDR, 3838 (char *)ip_addr, 0); 3839 } 3840 } 3841 3842 static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha, 3843 struct dev_db_entry *fw_ddb_entry, 3844 struct iscsi_cls_session *cls_sess, 3845 struct iscsi_cls_conn *cls_conn) 3846 { 3847 int buflen = 0; 3848 struct iscsi_session *sess; 3849 struct ddb_entry *ddb_entry; 3850 struct ql4_chap_table chap_tbl; 3851 struct iscsi_conn *conn; 3852 char ip_addr[DDB_IPADDR_LEN]; 3853 uint16_t options = 0; 3854 3855 sess = cls_sess->dd_data; 3856 ddb_entry = sess->dd_data; 3857 conn = cls_conn->dd_data; 3858 memset(&chap_tbl, 0, sizeof(chap_tbl)); 3859 3860 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 3861 3862 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry); 3863 3864 sess->def_taskmgmt_tmo = le16_to_cpu(fw_ddb_entry->def_timeout); 3865 conn->persistent_port = le16_to_cpu(fw_ddb_entry->port); 3866 3867 memset(ip_addr, 0, sizeof(ip_addr)); 3868 options = le16_to_cpu(fw_ddb_entry->options); 3869 if (options & DDB_OPT_IPV6_DEVICE) { 3870 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv6", 4); 3871 3872 memset(ip_addr, 0, sizeof(ip_addr)); 3873 sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr); 3874 } else { 3875 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv4", 4); 3876 sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr); 3877 } 3878 3879 iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS, 3880 (char *)ip_addr, buflen); 3881 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME, 3882 (char *)fw_ddb_entry->iscsi_name, buflen); 3883 iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME, 3884 (char *)ha->name_string, buflen); 3885 3886 if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) { 3887 if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, 3888 chap_tbl.secret, 3889 ddb_entry->chap_tbl_idx)) { 3890 iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, 3891 (char *)chap_tbl.name, 3892 strlen((char *)chap_tbl.name)); 3893 iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, 3894 (char *)chap_tbl.secret, 3895 chap_tbl.secret_len); 3896 } 3897 } 3898 } 3899 3900 void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha, 3901 struct ddb_entry *ddb_entry) 3902 { 3903 struct iscsi_cls_session *cls_sess; 3904 struct iscsi_cls_conn *cls_conn; 3905 uint32_t ddb_state; 3906 dma_addr_t fw_ddb_entry_dma; 3907 struct dev_db_entry *fw_ddb_entry; 3908 3909 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3910 &fw_ddb_entry_dma, GFP_KERNEL); 3911 if (!fw_ddb_entry) { 3912 ql4_printk(KERN_ERR, ha, 3913 
"%s: Unable to allocate dma buffer\n", __func__); 3914 goto exit_session_conn_fwddb_param; 3915 } 3916 3917 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, 3918 fw_ddb_entry_dma, NULL, NULL, &ddb_state, 3919 NULL, NULL, NULL) == QLA_ERROR) { 3920 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " 3921 "get_ddb_entry for fw_ddb_index %d\n", 3922 ha->host_no, __func__, 3923 ddb_entry->fw_ddb_index)); 3924 goto exit_session_conn_fwddb_param; 3925 } 3926 3927 cls_sess = ddb_entry->sess; 3928 3929 cls_conn = ddb_entry->conn; 3930 3931 /* Update params */ 3932 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); 3933 3934 exit_session_conn_fwddb_param: 3935 if (fw_ddb_entry) 3936 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3937 fw_ddb_entry, fw_ddb_entry_dma); 3938 } 3939 3940 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha, 3941 struct ddb_entry *ddb_entry) 3942 { 3943 struct iscsi_cls_session *cls_sess; 3944 struct iscsi_cls_conn *cls_conn; 3945 struct iscsi_session *sess; 3946 struct iscsi_conn *conn; 3947 uint32_t ddb_state; 3948 dma_addr_t fw_ddb_entry_dma; 3949 struct dev_db_entry *fw_ddb_entry; 3950 3951 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3952 &fw_ddb_entry_dma, GFP_KERNEL); 3953 if (!fw_ddb_entry) { 3954 ql4_printk(KERN_ERR, ha, 3955 "%s: Unable to allocate dma buffer\n", __func__); 3956 goto exit_session_conn_param; 3957 } 3958 3959 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, 3960 fw_ddb_entry_dma, NULL, NULL, &ddb_state, 3961 NULL, NULL, NULL) == QLA_ERROR) { 3962 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " 3963 "get_ddb_entry for fw_ddb_index %d\n", 3964 ha->host_no, __func__, 3965 ddb_entry->fw_ddb_index)); 3966 goto exit_session_conn_param; 3967 } 3968 3969 cls_sess = ddb_entry->sess; 3970 sess = cls_sess->dd_data; 3971 3972 cls_conn = ddb_entry->conn; 3973 conn = cls_conn->dd_data; 3974 3975 /* Update timers after login */ 3976 ddb_entry->default_relogin_timeout = 3977 (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) && 3978 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ? 
				le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
	ddb_entry->default_time2wait =
				le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);

	/* Update params */
	ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
	qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);

	memcpy(sess->initiatorname, ha->name_string,
	       min(sizeof(ha->name_string), sizeof(sess->initiatorname)));

exit_session_conn_param:
	if (fw_ddb_entry)
		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
				  fw_ddb_entry, fw_ddb_entry_dma);
}

/*
 * Timer routines
 */
static void qla4xxx_timer(struct timer_list *t);

static void qla4xxx_start_timer(struct scsi_qla_host *ha,
				unsigned long interval)
{
	DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
		     __func__, ha->host->host_no));
	timer_setup(&ha->timer, qla4xxx_timer, 0);
	ha->timer.expires = jiffies + interval * HZ;
	add_timer(&ha->timer);
	ha->timer_active = 1;
}

static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
{
	del_timer_sync(&ha->timer);
	ha->timer_active = 0;
}

/**
 * qla4xxx_mark_device_missing - blocks the session
 * @cls_session: Pointer to the session to be blocked
 *
 * This routine marks the device missing by blocking its iSCSI session.
 **/
void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
{
	iscsi_block_session(cls_session);
}

/**
 * qla4xxx_mark_all_devices_missing - mark all devices as missing.
 * @ha: Pointer to host adapter structure.
 *
 * This routine marks every device on the adapter as missing by blocking
 * its session.
 **/
void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
{
	iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
}

static struct srb *qla4xxx_get_new_srb(struct scsi_qla_host *ha,
				       struct ddb_entry *ddb_entry,
				       struct scsi_cmnd *cmd)
{
	struct srb *srb;

	srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
	if (!srb)
		return srb;

	kref_init(&srb->srb_ref);
	srb->ha = ha;
	srb->ddb = ddb_entry;
	srb->cmd = cmd;
	srb->flags = 0;
	qla4xxx_cmd_priv(cmd)->srb = srb;

	return srb;
}

static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;

	if (srb->flags & SRB_DMA_VALID) {
		scsi_dma_unmap(cmd);
		srb->flags &= ~SRB_DMA_VALID;
	}
	qla4xxx_cmd_priv(cmd)->srb = NULL;
}

void qla4xxx_srb_compl(struct kref *ref)
{
	struct srb *srb = container_of(ref, struct srb, srb_ref);
	struct scsi_cmnd *cmd = srb->cmd;
	struct scsi_qla_host *ha = srb->ha;

	qla4xxx_srb_free_dma(ha, srb);

	mempool_free(srb, ha->srb_mempool);

	scsi_done(cmd);
}

/**
 * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
 * @host: scsi host
 * @cmd: Pointer to Linux's SCSI command structure
 *
 * Remarks:
 * This routine is invoked by Linux to send a SCSI command to the driver.
 * The mid-level driver tries to ensure that queuecommand never gets
 * invoked concurrently with itself or the interrupt handler (although
 * the interrupt handler may call this routine as part of request-
 * completion handling). Unfortunately, it sometimes calls the scheduler
 * in interrupt context, which is a big no-no.
 **/
static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct scsi_qla_host *ha = to_qla_host(host);
	struct ddb_entry *ddb_entry = cmd->device->hostdata;
	struct iscsi_cls_session *sess = ddb_entry->sess;
	struct srb *srb;
	int rval;

	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
		if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
			cmd->result = DID_NO_CONNECT << 16;
		else
			cmd->result = DID_REQUEUE << 16;
		goto qc_fail_command;
	}

	if (!sess) {
		cmd->result = DID_IMM_RETRY << 16;
		goto qc_fail_command;
	}

	rval = iscsi_session_chkready(sess);
	if (rval) {
		cmd->result = rval;
		goto qc_fail_command;
	}

	if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
	    test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
	    test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
	    test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
	    !test_bit(AF_ONLINE, &ha->flags) ||
	    !test_bit(AF_LINK_UP, &ha->flags) ||
	    test_bit(AF_LOOPBACK, &ha->flags) ||
	    test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) ||
	    test_bit(DPC_RESTORE_ACB, &ha->dpc_flags) ||
	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
		goto qc_host_busy;

	srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
	if (!srb)
		goto qc_host_busy;

	rval = qla4xxx_send_command_to_isp(ha, srb);
	if (rval != QLA_SUCCESS)
		goto qc_host_busy_free_sp;

	return 0;

qc_host_busy_free_sp:
	qla4xxx_srb_free_dma(ha, srb);
	mempool_free(srb, ha->srb_mempool);

qc_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

qc_fail_command:
	scsi_done(cmd);

	return 0;
}

/**
 * qla4xxx_mem_free - frees memory allocated to adapter
 * @ha: Pointer to host adapter structure.
 *
 * Frees memory previously allocated by qla4xxx_mem_alloc().
 **/
static void qla4xxx_mem_free(struct scsi_qla_host *ha)
{
	if (ha->queues)
		dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
				  ha->queues_dma);

	vfree(ha->fw_dump);

	ha->queues_len = 0;
	ha->queues = NULL;
	ha->queues_dma = 0;
	ha->request_ring = NULL;
	ha->request_dma = 0;
	ha->response_ring = NULL;
	ha->response_dma = 0;
	ha->shadow_regs = NULL;
	ha->shadow_regs_dma = 0;
	ha->fw_dump = NULL;
	ha->fw_dump_size = 0;

	/* Free srb pool.
*/ 4188 mempool_destroy(ha->srb_mempool); 4189 ha->srb_mempool = NULL; 4190 4191 dma_pool_destroy(ha->chap_dma_pool); 4192 4193 vfree(ha->chap_list); 4194 ha->chap_list = NULL; 4195 4196 dma_pool_destroy(ha->fw_ddb_dma_pool); 4197 4198 /* release io space registers */ 4199 if (is_qla8022(ha)) { 4200 if (ha->nx_pcibase) 4201 iounmap( 4202 (struct device_reg_82xx __iomem *)ha->nx_pcibase); 4203 } else if (is_qla8032(ha) || is_qla8042(ha)) { 4204 if (ha->nx_pcibase) 4205 iounmap( 4206 (struct device_reg_83xx __iomem *)ha->nx_pcibase); 4207 } else if (ha->reg) { 4208 iounmap(ha->reg); 4209 } 4210 4211 vfree(ha->reset_tmplt.buff); 4212 4213 pci_release_regions(ha->pdev); 4214 } 4215 4216 /** 4217 * qla4xxx_mem_alloc - allocates memory for use by adapter. 4218 * @ha: Pointer to host adapter structure 4219 * 4220 * Allocates DMA memory for request and response queues. Also allocates memory 4221 * for srbs. 4222 **/ 4223 static int qla4xxx_mem_alloc(struct scsi_qla_host *ha) 4224 { 4225 unsigned long align; 4226 4227 /* Allocate contiguous block of DMA memory for queues. */ 4228 ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) + 4229 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) + 4230 sizeof(struct shadow_regs) + 4231 MEM_ALIGN_VALUE + 4232 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 4233 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len, 4234 &ha->queues_dma, GFP_KERNEL); 4235 if (ha->queues == NULL) { 4236 ql4_printk(KERN_WARNING, ha, 4237 "Memory Allocation failed - queues.\n"); 4238 4239 goto mem_alloc_error_exit; 4240 } 4241 4242 /* 4243 * As per RISC alignment requirements -- the bus-address must be a 4244 * multiple of the request-ring size (in bytes). 4245 */ 4246 align = 0; 4247 if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1)) 4248 align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma & 4249 (MEM_ALIGN_VALUE - 1)); 4250 4251 /* Update request and response queue pointers. */ 4252 ha->request_dma = ha->queues_dma + align; 4253 ha->request_ring = (struct queue_entry *) (ha->queues + align); 4254 ha->response_dma = ha->queues_dma + align + 4255 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE); 4256 ha->response_ring = (struct queue_entry *) (ha->queues + align + 4257 (REQUEST_QUEUE_DEPTH * 4258 QUEUE_SIZE)); 4259 ha->shadow_regs_dma = ha->queues_dma + align + 4260 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) + 4261 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE); 4262 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align + 4263 (REQUEST_QUEUE_DEPTH * 4264 QUEUE_SIZE) + 4265 (RESPONSE_QUEUE_DEPTH * 4266 QUEUE_SIZE)); 4267 4268 /* Allocate memory for srb pool. 
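	 * The pool is backed by the module-wide srb slab cache (srb_cachep)
	 * and keeps at least SRB_MIN_REQ elements in reserve so that I/O can
	 * still make progress under memory pressure.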
*/ 4269 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab, 4270 mempool_free_slab, srb_cachep); 4271 if (ha->srb_mempool == NULL) { 4272 ql4_printk(KERN_WARNING, ha, 4273 "Memory Allocation failed - SRB Pool.\n"); 4274 4275 goto mem_alloc_error_exit; 4276 } 4277 4278 ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev, 4279 CHAP_DMA_BLOCK_SIZE, 8, 0); 4280 4281 if (ha->chap_dma_pool == NULL) { 4282 ql4_printk(KERN_WARNING, ha, 4283 "%s: chap_dma_pool allocation failed..\n", __func__); 4284 goto mem_alloc_error_exit; 4285 } 4286 4287 ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev, 4288 DDB_DMA_BLOCK_SIZE, 8, 0); 4289 4290 if (ha->fw_ddb_dma_pool == NULL) { 4291 ql4_printk(KERN_WARNING, ha, 4292 "%s: fw_ddb_dma_pool allocation failed..\n", 4293 __func__); 4294 goto mem_alloc_error_exit; 4295 } 4296 4297 return QLA_SUCCESS; 4298 4299 mem_alloc_error_exit: 4300 return QLA_ERROR; 4301 } 4302 4303 /** 4304 * qla4_8xxx_check_temp - Check the ISP82XX temperature. 4305 * @ha: adapter block pointer. 4306 * 4307 * Note: The caller should not hold the idc lock. 4308 **/ 4309 static int qla4_8xxx_check_temp(struct scsi_qla_host *ha) 4310 { 4311 uint32_t temp, temp_state, temp_val; 4312 int status = QLA_SUCCESS; 4313 4314 temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE); 4315 4316 temp_state = qla82xx_get_temp_state(temp); 4317 temp_val = qla82xx_get_temp_val(temp); 4318 4319 if (temp_state == QLA82XX_TEMP_PANIC) { 4320 ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C" 4321 " exceeds maximum allowed. Hardware has been shut" 4322 " down.\n", temp_val); 4323 status = QLA_ERROR; 4324 } else if (temp_state == QLA82XX_TEMP_WARN) { 4325 if (ha->temperature == QLA82XX_TEMP_NORMAL) 4326 ql4_printk(KERN_WARNING, ha, "Device temperature %d" 4327 " degrees C exceeds operating range." 4328 " Immediate action needed.\n", temp_val); 4329 } else { 4330 if (ha->temperature == QLA82XX_TEMP_WARN) 4331 ql4_printk(KERN_INFO, ha, "Device temperature is" 4332 " now %d degrees C in normal range.\n", 4333 temp_val); 4334 } 4335 ha->temperature = temp_state; 4336 return status; 4337 } 4338 4339 /** 4340 * qla4_8xxx_check_fw_alive - Check firmware health 4341 * @ha: Pointer to host adapter structure. 
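 *
 * Return: QLA_SUCCESS while the firmware heartbeat (PEG_ALIVE_COUNTER) keeps
 * advancing, or while it reads 0xffffffff because AER/EEH recovery is in
 * progress; QLA_ERROR once the counter has not moved for two consecutive
 * one-second polls.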
4342 * 4343 * Context: Interrupt 4344 **/ 4345 static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha) 4346 { 4347 uint32_t fw_heartbeat_counter; 4348 int status = QLA_SUCCESS; 4349 4350 fw_heartbeat_counter = qla4_8xxx_rd_direct(ha, 4351 QLA8XXX_PEG_ALIVE_COUNTER); 4352 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */ 4353 if (fw_heartbeat_counter == 0xffffffff) { 4354 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen " 4355 "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n", 4356 ha->host_no, __func__)); 4357 return status; 4358 } 4359 4360 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) { 4361 ha->seconds_since_last_heartbeat++; 4362 /* FW not alive after 2 seconds */ 4363 if (ha->seconds_since_last_heartbeat == 2) { 4364 ha->seconds_since_last_heartbeat = 0; 4365 qla4_8xxx_dump_peg_reg(ha); 4366 status = QLA_ERROR; 4367 } 4368 } else 4369 ha->seconds_since_last_heartbeat = 0; 4370 4371 ha->fw_heartbeat_counter = fw_heartbeat_counter; 4372 return status; 4373 } 4374 4375 static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha) 4376 { 4377 uint32_t halt_status; 4378 int halt_status_unrecoverable = 0; 4379 4380 halt_status = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1); 4381 4382 if (is_qla8022(ha)) { 4383 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", 4384 __func__); 4385 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, 4386 CRB_NIU_XG_PAUSE_CTL_P0 | 4387 CRB_NIU_XG_PAUSE_CTL_P1); 4388 4389 if (QLA82XX_FWERROR_CODE(halt_status) == 0x67) 4390 ql4_printk(KERN_ERR, ha, "%s: Firmware aborted with error code 0x00006700. Device is being reset\n", 4391 __func__); 4392 if (halt_status & HALT_STATUS_UNRECOVERABLE) 4393 halt_status_unrecoverable = 1; 4394 } else if (is_qla8032(ha) || is_qla8042(ha)) { 4395 if (halt_status & QLA83XX_HALT_STATUS_FW_RESET) 4396 ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n", 4397 __func__); 4398 else if (halt_status & QLA83XX_HALT_STATUS_UNRECOVERABLE) 4399 halt_status_unrecoverable = 1; 4400 } 4401 4402 /* 4403 * Since we cannot change dev_state in interrupt context, 4404 * set appropriate DPC flag then wakeup DPC 4405 */ 4406 if (halt_status_unrecoverable) { 4407 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags); 4408 } else { 4409 ql4_printk(KERN_INFO, ha, "%s: detect abort needed!\n", 4410 __func__); 4411 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4412 } 4413 qla4xxx_mailbox_premature_completion(ha); 4414 qla4xxx_wake_dpc(ha); 4415 } 4416 4417 /** 4418 * qla4_8xxx_watchdog - Poll dev state 4419 * @ha: Pointer to host adapter structure. 
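 *
 * Called once a second from qla4xxx_timer().  Unless a reset is already in
 * progress, it checks the board temperature, schedules the matching DPC work
 * for the NEED_RESET and NEED_QUIESCENT device states, and otherwise verifies
 * the firmware heartbeat via qla4_8xxx_check_fw_alive().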
4420 * 4421 * Context: Interrupt 4422 **/ 4423 void qla4_8xxx_watchdog(struct scsi_qla_host *ha) 4424 { 4425 uint32_t dev_state; 4426 uint32_t idc_ctrl; 4427 4428 if (is_qla8032(ha) && 4429 (qla4_83xx_is_detached(ha) == QLA_SUCCESS)) 4430 WARN_ONCE(1, "%s: iSCSI function %d marked invisible\n", 4431 __func__, ha->func_num); 4432 4433 /* don't poll if reset is going on */ 4434 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || 4435 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 4436 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) { 4437 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE); 4438 4439 if (qla4_8xxx_check_temp(ha)) { 4440 if (is_qla8022(ha)) { 4441 ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n"); 4442 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, 4443 CRB_NIU_XG_PAUSE_CTL_P0 | 4444 CRB_NIU_XG_PAUSE_CTL_P1); 4445 } 4446 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags); 4447 qla4xxx_wake_dpc(ha); 4448 } else if (dev_state == QLA8XXX_DEV_NEED_RESET && 4449 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 4450 4451 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n", 4452 __func__); 4453 4454 if (is_qla8032(ha) || is_qla8042(ha)) { 4455 idc_ctrl = qla4_83xx_rd_reg(ha, 4456 QLA83XX_IDC_DRV_CTRL); 4457 if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) { 4458 ql4_printk(KERN_INFO, ha, "%s: Graceful reset bit is not set\n", 4459 __func__); 4460 qla4xxx_mailbox_premature_completion( 4461 ha); 4462 } 4463 } 4464 4465 if ((is_qla8032(ha) || is_qla8042(ha)) || 4466 (is_qla8022(ha) && !ql4xdontresethba)) { 4467 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4468 qla4xxx_wake_dpc(ha); 4469 } 4470 } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT && 4471 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { 4472 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n", 4473 __func__); 4474 set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags); 4475 qla4xxx_wake_dpc(ha); 4476 } else { 4477 /* Check firmware health */ 4478 if (qla4_8xxx_check_fw_alive(ha)) 4479 qla4_8xxx_process_fw_error(ha); 4480 } 4481 } 4482 } 4483 4484 static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) 4485 { 4486 struct iscsi_session *sess; 4487 struct ddb_entry *ddb_entry; 4488 struct scsi_qla_host *ha; 4489 4490 sess = cls_sess->dd_data; 4491 ddb_entry = sess->dd_data; 4492 ha = ddb_entry->ha; 4493 4494 if (!(ddb_entry->ddb_type == FLASH_DDB)) 4495 return; 4496 4497 if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) && 4498 !iscsi_is_session_online(cls_sess)) { 4499 if (atomic_read(&ddb_entry->retry_relogin_timer) != 4500 INVALID_ENTRY) { 4501 if (atomic_read(&ddb_entry->retry_relogin_timer) == 4502 0) { 4503 atomic_set(&ddb_entry->retry_relogin_timer, 4504 INVALID_ENTRY); 4505 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); 4506 set_bit(DF_RELOGIN, &ddb_entry->flags); 4507 DEBUG2(ql4_printk(KERN_INFO, ha, 4508 "%s: index [%d] login device\n", 4509 __func__, ddb_entry->fw_ddb_index)); 4510 } else 4511 atomic_dec(&ddb_entry->retry_relogin_timer); 4512 } 4513 } 4514 4515 /* Wait for relogin to timeout */ 4516 if (atomic_read(&ddb_entry->relogin_timer) && 4517 (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) { 4518 /* 4519 * If the relogin times out and the device is 4520 * still NOT ONLINE then try and relogin again. 
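	 * The retry is posted by setting DPC_RELOGIN_DEVICE, and the retry
	 * timer is re-armed to default_time2wait + 4 seconds before the next
	 * attempt.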
4521 */ 4522 if (!iscsi_is_session_online(cls_sess)) { 4523 /* Reset retry relogin timer */ 4524 atomic_inc(&ddb_entry->relogin_retry_count); 4525 DEBUG2(ql4_printk(KERN_INFO, ha, 4526 "%s: index[%d] relogin timed out-retrying" 4527 " relogin (%d), retry (%d)\n", __func__, 4528 ddb_entry->fw_ddb_index, 4529 atomic_read(&ddb_entry->relogin_retry_count), 4530 ddb_entry->default_time2wait + 4)); 4531 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); 4532 atomic_set(&ddb_entry->retry_relogin_timer, 4533 ddb_entry->default_time2wait + 4); 4534 } 4535 } 4536 } 4537 4538 /** 4539 * qla4xxx_timer - checks every second for work to do. 4540 * @t: Context to obtain pointer to host adapter structure. 4541 **/ 4542 static void qla4xxx_timer(struct timer_list *t) 4543 { 4544 struct scsi_qla_host *ha = from_timer(ha, t, timer); 4545 int start_dpc = 0; 4546 uint16_t w; 4547 4548 iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb); 4549 4550 /* If we are in the middle of AER/EEH processing 4551 * skip any processing and reschedule the timer 4552 */ 4553 if (test_bit(AF_EEH_BUSY, &ha->flags)) { 4554 mod_timer(&ha->timer, jiffies + HZ); 4555 return; 4556 } 4557 4558 /* Hardware read to trigger an EEH error during mailbox waits. */ 4559 if (!pci_channel_offline(ha->pdev)) 4560 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 4561 4562 if (is_qla80XX(ha)) 4563 qla4_8xxx_watchdog(ha); 4564 4565 if (is_qla40XX(ha)) { 4566 /* Check for heartbeat interval. */ 4567 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE && 4568 ha->heartbeat_interval != 0) { 4569 ha->seconds_since_last_heartbeat++; 4570 if (ha->seconds_since_last_heartbeat > 4571 ha->heartbeat_interval + 2) 4572 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4573 } 4574 } 4575 4576 /* Process any deferred work. */ 4577 if (!list_empty(&ha->work_list)) 4578 start_dpc++; 4579 4580 /* Wakeup the dpc routine for this adapter, if needed. */ 4581 if (start_dpc || 4582 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 4583 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) || 4584 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) || 4585 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) || 4586 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 4587 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) || 4588 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) || 4589 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) || 4590 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || 4591 test_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags) || 4592 test_bit(DPC_AEN, &ha->dpc_flags)) { 4593 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine" 4594 " - dpc flags = 0x%lx\n", 4595 ha->host_no, __func__, ha->dpc_flags)); 4596 qla4xxx_wake_dpc(ha); 4597 } 4598 4599 /* Reschedule timer thread to call us back in one second */ 4600 mod_timer(&ha->timer, jiffies + HZ); 4601 4602 DEBUG2(ha->seconds_since_last_intr++); 4603 } 4604 4605 /** 4606 * qla4xxx_cmd_wait - waits for all outstanding commands to complete 4607 * @ha: Pointer to host adapter structure. 4608 * 4609 * This routine stalls the driver until all outstanding commands are returned. 4610 * Caller must release the Hardware Lock prior to calling this routine. 
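 *
 * Return: QLA_SUCCESS once no outstanding commands remain; QLA_ERROR if
 * commands are still pending after the timeout (WAIT_CMD_TOV seconds on
 * ISP4xxx, half of nx_reset_timeout on ISP8xxx).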
 **/
static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
{
	uint32_t index = 0;
	unsigned long flags;
	struct scsi_cmnd *cmd;
	unsigned long wtime;
	uint32_t wtmo;

	if (is_qla40XX(ha))
		wtmo = WAIT_CMD_TOV;
	else
		wtmo = ha->nx_reset_timeout / 2;

	wtime = jiffies + (wtmo * HZ);

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Wait up to %u seconds for cmds to complete\n",
			  wtmo));

	while (!time_after_eq(jiffies, wtime)) {
		spin_lock_irqsave(&ha->hardware_lock, flags);
		/* Find a command that hasn't completed. */
		for (index = 0; index < ha->host->can_queue; index++) {
			cmd = scsi_host_find_tag(ha->host, index);
			/*
			 * We cannot just check if the index is valid,
			 * because if we are run from the scsi eh, then
			 * the scsi/block layer is going to prevent
			 * the tag from being released.
			 */
			if (cmd != NULL && qla4xxx_cmd_priv(cmd)->srb)
				break;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		/* If no commands are pending, the wait is complete. */
		if (index == ha->host->can_queue)
			return QLA_SUCCESS;

		msleep(1000);
	}
	/* If we timed out waiting for the commands to come back,
	 * return an error. */
	return QLA_ERROR;
}

int qla4xxx_hw_reset(struct scsi_qla_host *ha)
{
	uint32_t ctrl_status;
	unsigned long flags = 0;

	DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));

	if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
		return QLA_ERROR;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/*
	 * If the SCSI Reset Interrupt bit is set, clear it.
	 * Otherwise, the Soft Reset won't work.
	 */
	ctrl_status = readw(&ha->reg->ctrl_status);
	if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
		writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);

	/* Issue Soft Reset */
	writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
	readl(&ha->reg->ctrl_status);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;
}

/**
 * qla4xxx_soft_reset - performs soft reset.
 * @ha: Pointer to host adapter structure.
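 *
 * Issues the soft reset via qla4xxx_hw_reset(), waits for the network
 * function to clear CSR_NET_RESET_INTR and for the firmware to clear
 * CSR_SOFT_RESET, and falls back to a force soft reset if the controller
 * does not respond within SOFT_RESET_TOV.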
4689 **/ 4690 int qla4xxx_soft_reset(struct scsi_qla_host *ha) 4691 { 4692 uint32_t max_wait_time; 4693 unsigned long flags = 0; 4694 int status; 4695 uint32_t ctrl_status; 4696 4697 status = qla4xxx_hw_reset(ha); 4698 if (status != QLA_SUCCESS) 4699 return status; 4700 4701 status = QLA_ERROR; 4702 /* Wait until the Network Reset Intr bit is cleared */ 4703 max_wait_time = RESET_INTR_TOV; 4704 do { 4705 spin_lock_irqsave(&ha->hardware_lock, flags); 4706 ctrl_status = readw(&ha->reg->ctrl_status); 4707 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4708 4709 if ((ctrl_status & CSR_NET_RESET_INTR) == 0) 4710 break; 4711 4712 msleep(1000); 4713 } while ((--max_wait_time)); 4714 4715 if ((ctrl_status & CSR_NET_RESET_INTR) != 0) { 4716 DEBUG2(printk(KERN_WARNING 4717 "scsi%ld: Network Reset Intr not cleared by " 4718 "Network function, clearing it now!\n", 4719 ha->host_no)); 4720 spin_lock_irqsave(&ha->hardware_lock, flags); 4721 writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status); 4722 readl(&ha->reg->ctrl_status); 4723 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4724 } 4725 4726 /* Wait until the firmware tells us the Soft Reset is done */ 4727 max_wait_time = SOFT_RESET_TOV; 4728 do { 4729 spin_lock_irqsave(&ha->hardware_lock, flags); 4730 ctrl_status = readw(&ha->reg->ctrl_status); 4731 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4732 4733 if ((ctrl_status & CSR_SOFT_RESET) == 0) { 4734 status = QLA_SUCCESS; 4735 break; 4736 } 4737 4738 msleep(1000); 4739 } while ((--max_wait_time)); 4740 4741 /* 4742 * Also, make sure that the SCSI Reset Interrupt bit has been cleared 4743 * after the soft reset has taken place. 4744 */ 4745 spin_lock_irqsave(&ha->hardware_lock, flags); 4746 ctrl_status = readw(&ha->reg->ctrl_status); 4747 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) { 4748 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status); 4749 readl(&ha->reg->ctrl_status); 4750 } 4751 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4752 4753 /* If soft reset fails then most probably the bios on other 4754 * function is also enabled. 4755 * Since the initialization is sequential the other fn 4756 * wont be able to acknowledge the soft reset. 4757 * Issue a force soft reset to workaround this scenario. 4758 */ 4759 if (max_wait_time == 0) { 4760 /* Issue Force Soft Reset */ 4761 spin_lock_irqsave(&ha->hardware_lock, flags); 4762 writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status); 4763 readl(&ha->reg->ctrl_status); 4764 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4765 /* Wait until the firmware tells us the Soft Reset is done */ 4766 max_wait_time = SOFT_RESET_TOV; 4767 do { 4768 spin_lock_irqsave(&ha->hardware_lock, flags); 4769 ctrl_status = readw(&ha->reg->ctrl_status); 4770 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4771 4772 if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) { 4773 status = QLA_SUCCESS; 4774 break; 4775 } 4776 4777 msleep(1000); 4778 } while ((--max_wait_time)); 4779 } 4780 4781 return status; 4782 } 4783 4784 /** 4785 * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S. 4786 * @ha: Pointer to host adapter structure. 4787 * @res: returned scsi status 4788 * 4789 * This routine is called just prior to a HARD RESET to return all 4790 * outstanding commands back to the Operating System. 4791 * Caller should make sure that the following locks are released 4792 * before this calling routine: Hardware lock, and io_request_lock. 
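 *
 * Each srb still on the active array has its command result set to @res and
 * its reference dropped, so qla4xxx_srb_compl() returns the command to the
 * SCSI midlayer.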
4793 **/ 4794 static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res) 4795 { 4796 struct srb *srb; 4797 int i; 4798 unsigned long flags; 4799 4800 spin_lock_irqsave(&ha->hardware_lock, flags); 4801 for (i = 0; i < ha->host->can_queue; i++) { 4802 srb = qla4xxx_del_from_active_array(ha, i); 4803 if (srb != NULL) { 4804 srb->cmd->result = res; 4805 kref_put(&srb->srb_ref, qla4xxx_srb_compl); 4806 } 4807 } 4808 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4809 } 4810 4811 void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha) 4812 { 4813 clear_bit(AF_ONLINE, &ha->flags); 4814 4815 /* Disable the board */ 4816 ql4_printk(KERN_INFO, ha, "Disabling the board\n"); 4817 4818 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); 4819 qla4xxx_mark_all_devices_missing(ha); 4820 clear_bit(AF_INIT_DONE, &ha->flags); 4821 } 4822 4823 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session) 4824 { 4825 struct iscsi_session *sess; 4826 struct ddb_entry *ddb_entry; 4827 4828 sess = cls_session->dd_data; 4829 ddb_entry = sess->dd_data; 4830 ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED; 4831 4832 if (ddb_entry->ddb_type == FLASH_DDB) 4833 iscsi_block_session(ddb_entry->sess); 4834 else 4835 iscsi_session_failure(cls_session->dd_data, 4836 ISCSI_ERR_CONN_FAILED); 4837 } 4838 4839 /** 4840 * qla4xxx_recover_adapter - recovers adapter after a fatal error 4841 * @ha: Pointer to host adapter structure. 4842 **/ 4843 static int qla4xxx_recover_adapter(struct scsi_qla_host *ha) 4844 { 4845 int status = QLA_ERROR; 4846 uint8_t reset_chip = 0; 4847 uint32_t dev_state; 4848 unsigned long wait; 4849 4850 /* Stall incoming I/O until we are done */ 4851 scsi_block_requests(ha->host); 4852 clear_bit(AF_ONLINE, &ha->flags); 4853 clear_bit(AF_LINK_UP, &ha->flags); 4854 4855 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__)); 4856 4857 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 4858 4859 if ((is_qla8032(ha) || is_qla8042(ha)) && 4860 !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) { 4861 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", 4862 __func__); 4863 /* disable pause frame for ISP83xx */ 4864 qla4_83xx_disable_pause(ha); 4865 } 4866 4867 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session); 4868 4869 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) 4870 reset_chip = 1; 4871 4872 /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific) 4873 * do not reset adapter, jump to initialize_adapter */ 4874 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 4875 status = QLA_SUCCESS; 4876 goto recover_ha_init_adapter; 4877 } 4878 4879 /* For the ISP-8xxx adapter, issue a stop_firmware if invoked 4880 * from eh_host_reset or ioctl module */ 4881 if (is_qla80XX(ha) && !reset_chip && 4882 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) { 4883 4884 DEBUG2(ql4_printk(KERN_INFO, ha, 4885 "scsi%ld: %s - Performing stop_firmware...\n", 4886 ha->host_no, __func__)); 4887 status = ha->isp_ops->reset_firmware(ha); 4888 if (status == QLA_SUCCESS) { 4889 ha->isp_ops->disable_intrs(ha); 4890 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 4891 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 4892 } else { 4893 /* If the stop_firmware fails then 4894 * reset the entire chip */ 4895 reset_chip = 1; 4896 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 4897 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4898 } 4899 } 4900 4901 /* Issue full chip reset if recovering from a catastrophic error, 4902 * or if stop_firmware fails for ISP-8xxx. 
 * This is the default case for ISP-4xxx */
	if (is_qla40XX(ha) || reset_chip) {
		if (is_qla40XX(ha))
			goto chip_reset;

		/* Check whether the 8XXX firmware is still alive; we may
		 * have arrived here from NEED_RESET detection only. */
		if (test_bit(AF_FW_RECOVERY, &ha->flags))
			goto chip_reset;

		wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
		while (time_before(jiffies, wait)) {
			if (qla4_8xxx_check_fw_alive(ha)) {
				qla4xxx_mailbox_premature_completion(ha);
				break;
			}

			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(HZ);
		}
chip_reset:
		if (!test_bit(AF_FW_RECOVERY, &ha->flags))
			qla4xxx_cmd_wait(ha);

		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "scsi%ld: %s - Performing chip reset...\n",
				  ha->host_no, __func__));
		status = ha->isp_ops->reset_chip(ha);
		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
	}

	/* Flush any pending ddb changed AENs */
	qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);

recover_ha_init_adapter:
	/* Upon successful firmware/chip reset, re-initialize the adapter */
	if (status == QLA_SUCCESS) {
		/* For ISP-4xxx, force function 1 to always initialize
		 * before function 3 to prevent both functions from
		 * stepping on top of each other. */
		if (is_qla40XX(ha) && (ha->mac_index == 3))
			ssleep(6);

		/* NOTE: AF_ONLINE flag set upon successful completion of
		 * qla4xxx_initialize_adapter */
		status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
		if (is_qla80XX(ha) && (status == QLA_ERROR)) {
			status = qla4_8xxx_check_init_adapter_retry(ha);
			if (status == QLA_ERROR) {
				ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Don't retry recover adapter\n",
					   ha->host_no, __func__);
				qla4xxx_dead_adapter_cleanup(ha);
				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
				clear_bit(DPC_RESET_HA_FW_CONTEXT,
					  &ha->dpc_flags);
				goto exit_recover;
			}
		}
	}

	/* Retry failed adapter initialization, if necessary.
	 * Do not retry initialize_adapter for the RESET_HA_INTR (ISP-4xxx
	 * specific) case, to prevent ping-pong resets between functions. */
	if (!test_bit(AF_ONLINE, &ha->flags) &&
	    !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
		/* Adapter initialization failed, see if we can retry
		 * resetting the ha.
		 * Since we don't want to block the DPC for too long
		 * with multiple resets in the same thread,
		 * utilize DPC to retry */
		if (is_qla80XX(ha)) {
			ha->isp_ops->idc_lock(ha);
			dev_state = qla4_8xxx_rd_direct(ha,
							QLA8XXX_CRB_DEV_STATE);
			ha->isp_ops->idc_unlock(ha);
			if (dev_state == QLA8XXX_DEV_FAILED) {
				ql4_printk(KERN_INFO, ha, "%s: don't retry "
					   "recover adapter. 
H/W is in Failed " 4984 "state\n", __func__); 4985 qla4xxx_dead_adapter_cleanup(ha); 4986 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 4987 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 4988 clear_bit(DPC_RESET_HA_FW_CONTEXT, 4989 &ha->dpc_flags); 4990 status = QLA_ERROR; 4991 4992 goto exit_recover; 4993 } 4994 } 4995 4996 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) { 4997 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES; 4998 DEBUG2(printk("scsi%ld: recover adapter - retrying " 4999 "(%d) more times\n", ha->host_no, 5000 ha->retry_reset_ha_cnt)); 5001 set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 5002 status = QLA_ERROR; 5003 } else { 5004 if (ha->retry_reset_ha_cnt > 0) { 5005 /* Schedule another Reset HA--DPC will retry */ 5006 ha->retry_reset_ha_cnt--; 5007 DEBUG2(printk("scsi%ld: recover adapter - " 5008 "retry remaining %d\n", 5009 ha->host_no, 5010 ha->retry_reset_ha_cnt)); 5011 status = QLA_ERROR; 5012 } 5013 5014 if (ha->retry_reset_ha_cnt == 0) { 5015 /* Recover adapter retries have been exhausted. 5016 * Adapter DEAD */ 5017 DEBUG2(printk("scsi%ld: recover adapter " 5018 "failed - board disabled\n", 5019 ha->host_no)); 5020 qla4xxx_dead_adapter_cleanup(ha); 5021 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 5022 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 5023 clear_bit(DPC_RESET_HA_FW_CONTEXT, 5024 &ha->dpc_flags); 5025 status = QLA_ERROR; 5026 } 5027 } 5028 } else { 5029 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 5030 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 5031 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 5032 } 5033 5034 exit_recover: 5035 ha->adapter_error_count++; 5036 5037 if (test_bit(AF_ONLINE, &ha->flags)) 5038 ha->isp_ops->enable_intrs(ha); 5039 5040 scsi_unblock_requests(ha->host); 5041 5042 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 5043 DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no, 5044 status == QLA_ERROR ? 
"FAILED" : "SUCCEEDED")); 5045 5046 return status; 5047 } 5048 5049 static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session) 5050 { 5051 struct iscsi_session *sess; 5052 struct ddb_entry *ddb_entry; 5053 struct scsi_qla_host *ha; 5054 5055 sess = cls_session->dd_data; 5056 ddb_entry = sess->dd_data; 5057 ha = ddb_entry->ha; 5058 if (!iscsi_is_session_online(cls_session)) { 5059 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { 5060 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5061 " unblock session\n", ha->host_no, __func__, 5062 ddb_entry->fw_ddb_index); 5063 iscsi_unblock_session(ddb_entry->sess); 5064 } else { 5065 /* Trigger relogin */ 5066 if (ddb_entry->ddb_type == FLASH_DDB) { 5067 if (!(test_bit(DF_RELOGIN, &ddb_entry->flags) || 5068 test_bit(DF_DISABLE_RELOGIN, 5069 &ddb_entry->flags))) 5070 qla4xxx_arm_relogin_timer(ddb_entry); 5071 } else 5072 iscsi_session_failure(cls_session->dd_data, 5073 ISCSI_ERR_CONN_FAILED); 5074 } 5075 } 5076 } 5077 5078 int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session) 5079 { 5080 struct iscsi_session *sess; 5081 struct ddb_entry *ddb_entry; 5082 struct scsi_qla_host *ha; 5083 5084 sess = cls_session->dd_data; 5085 ddb_entry = sess->dd_data; 5086 ha = ddb_entry->ha; 5087 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5088 " unblock session\n", ha->host_no, __func__, 5089 ddb_entry->fw_ddb_index); 5090 5091 iscsi_unblock_session(ddb_entry->sess); 5092 5093 /* Start scan target */ 5094 if (test_bit(AF_ONLINE, &ha->flags)) { 5095 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5096 " start scan\n", ha->host_no, __func__, 5097 ddb_entry->fw_ddb_index); 5098 queue_work(ddb_entry->sess->workq, &ddb_entry->sess->scan_work); 5099 } 5100 return QLA_SUCCESS; 5101 } 5102 5103 int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session) 5104 { 5105 struct iscsi_session *sess; 5106 struct ddb_entry *ddb_entry; 5107 struct scsi_qla_host *ha; 5108 int status = QLA_SUCCESS; 5109 5110 sess = cls_session->dd_data; 5111 ddb_entry = sess->dd_data; 5112 ha = ddb_entry->ha; 5113 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5114 " unblock user space session\n", ha->host_no, __func__, 5115 ddb_entry->fw_ddb_index); 5116 5117 if (!iscsi_is_session_online(cls_session)) { 5118 iscsi_conn_start(ddb_entry->conn); 5119 iscsi_conn_login_event(ddb_entry->conn, 5120 ISCSI_CONN_STATE_LOGGED_IN); 5121 } else { 5122 ql4_printk(KERN_INFO, ha, 5123 "scsi%ld: %s: ddb[%d] session [%d] already logged in\n", 5124 ha->host_no, __func__, ddb_entry->fw_ddb_index, 5125 cls_session->sid); 5126 status = QLA_ERROR; 5127 } 5128 5129 return status; 5130 } 5131 5132 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha) 5133 { 5134 iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices); 5135 } 5136 5137 static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) 5138 { 5139 uint16_t relogin_timer; 5140 struct iscsi_session *sess; 5141 struct ddb_entry *ddb_entry; 5142 struct scsi_qla_host *ha; 5143 5144 sess = cls_sess->dd_data; 5145 ddb_entry = sess->dd_data; 5146 ha = ddb_entry->ha; 5147 5148 relogin_timer = max(ddb_entry->default_relogin_timeout, 5149 (uint16_t)RELOGIN_TOV); 5150 atomic_set(&ddb_entry->relogin_timer, relogin_timer); 5151 5152 DEBUG2(ql4_printk(KERN_INFO, ha, 5153 "scsi%ld: Relogin index [%d]. 
TOV=%d\n", ha->host_no, 5154 ddb_entry->fw_ddb_index, relogin_timer)); 5155 5156 qla4xxx_login_flash_ddb(cls_sess); 5157 } 5158 5159 static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess) 5160 { 5161 struct iscsi_session *sess; 5162 struct ddb_entry *ddb_entry; 5163 struct scsi_qla_host *ha; 5164 5165 sess = cls_sess->dd_data; 5166 ddb_entry = sess->dd_data; 5167 ha = ddb_entry->ha; 5168 5169 if (!(ddb_entry->ddb_type == FLASH_DDB)) 5170 return; 5171 5172 if (test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags)) 5173 return; 5174 5175 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) && 5176 !iscsi_is_session_online(cls_sess)) { 5177 DEBUG2(ql4_printk(KERN_INFO, ha, 5178 "relogin issued\n")); 5179 qla4xxx_relogin_flash_ddb(cls_sess); 5180 } 5181 } 5182 5183 void qla4xxx_wake_dpc(struct scsi_qla_host *ha) 5184 { 5185 if (ha->dpc_thread) 5186 queue_work(ha->dpc_thread, &ha->dpc_work); 5187 } 5188 5189 static struct qla4_work_evt * 5190 qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size, 5191 enum qla4_work_type type) 5192 { 5193 struct qla4_work_evt *e; 5194 uint32_t size = sizeof(struct qla4_work_evt) + data_size; 5195 5196 e = kzalloc(size, GFP_ATOMIC); 5197 if (!e) 5198 return NULL; 5199 5200 INIT_LIST_HEAD(&e->list); 5201 e->type = type; 5202 return e; 5203 } 5204 5205 static void qla4xxx_post_work(struct scsi_qla_host *ha, 5206 struct qla4_work_evt *e) 5207 { 5208 unsigned long flags; 5209 5210 spin_lock_irqsave(&ha->work_lock, flags); 5211 list_add_tail(&e->list, &ha->work_list); 5212 spin_unlock_irqrestore(&ha->work_lock, flags); 5213 qla4xxx_wake_dpc(ha); 5214 } 5215 5216 int qla4xxx_post_aen_work(struct scsi_qla_host *ha, 5217 enum iscsi_host_event_code aen_code, 5218 uint32_t data_size, uint8_t *data) 5219 { 5220 struct qla4_work_evt *e; 5221 5222 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN); 5223 if (!e) 5224 return QLA_ERROR; 5225 5226 e->u.aen.code = aen_code; 5227 e->u.aen.data_size = data_size; 5228 memcpy(e->u.aen.data, data, data_size); 5229 5230 qla4xxx_post_work(ha, e); 5231 5232 return QLA_SUCCESS; 5233 } 5234 5235 int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha, 5236 uint32_t status, uint32_t pid, 5237 uint32_t data_size, uint8_t *data) 5238 { 5239 struct qla4_work_evt *e; 5240 5241 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS); 5242 if (!e) 5243 return QLA_ERROR; 5244 5245 e->u.ping.status = status; 5246 e->u.ping.pid = pid; 5247 e->u.ping.data_size = data_size; 5248 memcpy(e->u.ping.data, data, data_size); 5249 5250 qla4xxx_post_work(ha, e); 5251 5252 return QLA_SUCCESS; 5253 } 5254 5255 static void qla4xxx_do_work(struct scsi_qla_host *ha) 5256 { 5257 struct qla4_work_evt *e, *tmp; 5258 unsigned long flags; 5259 LIST_HEAD(work); 5260 5261 spin_lock_irqsave(&ha->work_lock, flags); 5262 list_splice_init(&ha->work_list, &work); 5263 spin_unlock_irqrestore(&ha->work_lock, flags); 5264 5265 list_for_each_entry_safe(e, tmp, &work, list) { 5266 list_del_init(&e->list); 5267 5268 switch (e->type) { 5269 case QLA4_EVENT_AEN: 5270 iscsi_post_host_event(ha->host_no, 5271 &qla4xxx_iscsi_transport, 5272 e->u.aen.code, 5273 e->u.aen.data_size, 5274 e->u.aen.data); 5275 break; 5276 case QLA4_EVENT_PING_STATUS: 5277 iscsi_ping_comp_event(ha->host_no, 5278 &qla4xxx_iscsi_transport, 5279 e->u.ping.status, 5280 e->u.ping.pid, 5281 e->u.ping.data_size, 5282 e->u.ping.data); 5283 break; 5284 default: 5285 ql4_printk(KERN_WARNING, ha, "event type: 0x%x not " 5286 "supported", e->type); 5287 } 5288 kfree(e); 5289 } 5290 } 5291 5292 
/** 5293 * qla4xxx_do_dpc - dpc routine 5294 * @work: Context to obtain pointer to host adapter structure. 5295 * 5296 * This routine is a task that is schedule by the interrupt handler 5297 * to perform the background processing for interrupts. We put it 5298 * on a task queue that is consumed whenever the scheduler runs; that's 5299 * so you can do anything (i.e. put the process to sleep etc). In fact, 5300 * the mid-level tries to sleep when it reaches the driver threshold 5301 * "host->can_queue". This can cause a panic if we were in our interrupt code. 5302 **/ 5303 static void qla4xxx_do_dpc(struct work_struct *work) 5304 { 5305 struct scsi_qla_host *ha = 5306 container_of(work, struct scsi_qla_host, dpc_work); 5307 int status = QLA_ERROR; 5308 5309 DEBUG2(ql4_printk(KERN_INFO, ha, 5310 "scsi%ld: %s: DPC handler waking up. flags = 0x%08lx, dpc_flags = 0x%08lx\n", 5311 ha->host_no, __func__, ha->flags, ha->dpc_flags)); 5312 5313 /* Initialization not yet finished. Don't do anything yet. */ 5314 if (!test_bit(AF_INIT_DONE, &ha->flags)) 5315 return; 5316 5317 if (test_bit(AF_EEH_BUSY, &ha->flags)) { 5318 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n", 5319 ha->host_no, __func__, ha->flags)); 5320 return; 5321 } 5322 5323 /* post events to application */ 5324 qla4xxx_do_work(ha); 5325 5326 if (is_qla80XX(ha)) { 5327 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) { 5328 if (is_qla8032(ha) || is_qla8042(ha)) { 5329 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", 5330 __func__); 5331 /* disable pause frame for ISP83xx */ 5332 qla4_83xx_disable_pause(ha); 5333 } 5334 5335 ha->isp_ops->idc_lock(ha); 5336 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 5337 QLA8XXX_DEV_FAILED); 5338 ha->isp_ops->idc_unlock(ha); 5339 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n"); 5340 qla4_8xxx_device_state_handler(ha); 5341 } 5342 5343 if (test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) { 5344 if (is_qla8042(ha)) { 5345 if (ha->idc_info.info2 & 5346 ENABLE_INTERNAL_LOOPBACK) { 5347 ql4_printk(KERN_INFO, ha, "%s: Disabling ACB\n", 5348 __func__); 5349 status = qla4_84xx_config_acb(ha, 5350 ACB_CONFIG_DISABLE); 5351 if (status != QLA_SUCCESS) { 5352 ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n", 5353 __func__); 5354 } 5355 } 5356 } 5357 qla4_83xx_post_idc_ack(ha); 5358 clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags); 5359 } 5360 5361 if (is_qla8042(ha) && 5362 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) { 5363 ql4_printk(KERN_INFO, ha, "%s: Restoring ACB\n", 5364 __func__); 5365 if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) != 5366 QLA_SUCCESS) { 5367 ql4_printk(KERN_INFO, ha, "%s: ACB config failed ", 5368 __func__); 5369 } 5370 clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags); 5371 } 5372 5373 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { 5374 qla4_8xxx_need_qsnt_handler(ha); 5375 } 5376 } 5377 5378 if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) && 5379 (test_bit(DPC_RESET_HA, &ha->dpc_flags) || 5380 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 5381 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) { 5382 if ((is_qla8022(ha) && ql4xdontresethba) || 5383 ((is_qla8032(ha) || is_qla8042(ha)) && 5384 qla4_83xx_idc_dontreset(ha))) { 5385 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", 5386 ha->host_no, __func__)); 5387 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 5388 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); 5389 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 5390 goto dpc_post_reset_ha; 5391 } 5392 if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) || 
5393 test_bit(DPC_RESET_HA, &ha->dpc_flags)) 5394 qla4xxx_recover_adapter(ha); 5395 5396 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 5397 uint8_t wait_time = RESET_INTR_TOV; 5398 5399 while ((readw(&ha->reg->ctrl_status) & 5400 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) { 5401 if (--wait_time == 0) 5402 break; 5403 msleep(1000); 5404 } 5405 if (wait_time == 0) 5406 DEBUG2(printk("scsi%ld: %s: SR|FSR " 5407 "bit not cleared-- resetting\n", 5408 ha->host_no, __func__)); 5409 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 5410 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) { 5411 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 5412 status = qla4xxx_recover_adapter(ha); 5413 } 5414 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); 5415 if (status == QLA_SUCCESS) 5416 ha->isp_ops->enable_intrs(ha); 5417 } 5418 } 5419 5420 dpc_post_reset_ha: 5421 /* ---- process AEN? --- */ 5422 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) 5423 qla4xxx_process_aen(ha, PROCESS_ALL_AENS); 5424 5425 /* ---- Get DHCP IP Address? --- */ 5426 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags)) 5427 qla4xxx_get_dhcp_ip_address(ha); 5428 5429 /* ---- relogin device? --- */ 5430 if (adapter_up(ha) && 5431 test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) { 5432 iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin); 5433 } 5434 5435 /* ---- link change? --- */ 5436 if (!test_bit(AF_LOOPBACK, &ha->flags) && 5437 test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) { 5438 if (!test_bit(AF_LINK_UP, &ha->flags)) { 5439 /* ---- link down? --- */ 5440 qla4xxx_mark_all_devices_missing(ha); 5441 } else { 5442 /* ---- link up? --- * 5443 * F/W will auto login to all devices ONLY ONCE after 5444 * link up during driver initialization and runtime 5445 * fatal error recovery. Therefore, the driver must 5446 * manually relogin to devices when recovering from 5447 * connection failures, logouts, expired KATO, etc. */ 5448 if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) { 5449 qla4xxx_build_ddb_list(ha, ha->is_reset); 5450 iscsi_host_for_each_session(ha->host, 5451 qla4xxx_login_flash_ddb); 5452 } else 5453 qla4xxx_relogin_all_devices(ha); 5454 } 5455 } 5456 if (test_and_clear_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags)) { 5457 if (qla4xxx_sysfs_ddb_export(ha)) 5458 ql4_printk(KERN_ERR, ha, "%s: Error exporting ddb to sysfs\n", 5459 __func__); 5460 } 5461 } 5462 5463 /** 5464 * qla4xxx_free_adapter - release the adapter 5465 * @ha: pointer to adapter structure 5466 **/ 5467 static void qla4xxx_free_adapter(struct scsi_qla_host *ha) 5468 { 5469 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); 5470 5471 /* Turn-off interrupts on the card. 
*/ 5472 ha->isp_ops->disable_intrs(ha); 5473 5474 if (is_qla40XX(ha)) { 5475 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR), 5476 &ha->reg->ctrl_status); 5477 readl(&ha->reg->ctrl_status); 5478 } else if (is_qla8022(ha)) { 5479 writel(0, &ha->qla4_82xx_reg->host_int); 5480 readl(&ha->qla4_82xx_reg->host_int); 5481 } else if (is_qla8032(ha) || is_qla8042(ha)) { 5482 writel(0, &ha->qla4_83xx_reg->risc_intr); 5483 readl(&ha->qla4_83xx_reg->risc_intr); 5484 } 5485 5486 /* Remove timer thread, if present */ 5487 if (ha->timer_active) 5488 qla4xxx_stop_timer(ha); 5489 5490 /* Kill the kernel thread for this host */ 5491 if (ha->dpc_thread) 5492 destroy_workqueue(ha->dpc_thread); 5493 5494 /* Kill the kernel thread for this host */ 5495 if (ha->task_wq) 5496 destroy_workqueue(ha->task_wq); 5497 5498 /* Put firmware in known state */ 5499 ha->isp_ops->reset_firmware(ha); 5500 5501 if (is_qla80XX(ha)) { 5502 ha->isp_ops->idc_lock(ha); 5503 qla4_8xxx_clear_drv_active(ha); 5504 ha->isp_ops->idc_unlock(ha); 5505 } 5506 5507 /* Detach interrupts */ 5508 qla4xxx_free_irqs(ha); 5509 5510 /* free extra memory */ 5511 qla4xxx_mem_free(ha); 5512 } 5513 5514 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha) 5515 { 5516 int status = 0; 5517 unsigned long mem_base, mem_len; 5518 struct pci_dev *pdev = ha->pdev; 5519 5520 status = pci_request_regions(pdev, DRIVER_NAME); 5521 if (status) { 5522 printk(KERN_WARNING 5523 "scsi(%ld) Failed to reserve PIO regions (%s) " 5524 "status=%d\n", ha->host_no, pci_name(pdev), status); 5525 goto iospace_error_exit; 5526 } 5527 5528 DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n", 5529 __func__, pdev->revision)); 5530 ha->revision_id = pdev->revision; 5531 5532 /* remap phys address */ 5533 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ 5534 mem_len = pci_resource_len(pdev, 0); 5535 DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n", 5536 __func__, mem_base, mem_len)); 5537 5538 /* mapping of pcibase pointer */ 5539 ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len); 5540 if (!ha->nx_pcibase) { 5541 printk(KERN_ERR 5542 "cannot remap MMIO (%s), aborting\n", pci_name(pdev)); 5543 pci_release_regions(ha->pdev); 5544 goto iospace_error_exit; 5545 } 5546 5547 /* Mapping of IO base pointer, door bell read and write pointer */ 5548 5549 /* mapping of IO base pointer */ 5550 if (is_qla8022(ha)) { 5551 ha->qla4_82xx_reg = (struct device_reg_82xx __iomem *) 5552 ((uint8_t *)ha->nx_pcibase + 0xbc000 + 5553 (ha->pdev->devfn << 11)); 5554 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 : 5555 QLA82XX_CAM_RAM_DB2); 5556 } else if (is_qla8032(ha) || is_qla8042(ha)) { 5557 ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *) 5558 ((uint8_t *)ha->nx_pcibase); 5559 } 5560 5561 return 0; 5562 iospace_error_exit: 5563 return -ENOMEM; 5564 } 5565 5566 /*** 5567 * qla4xxx_iospace_config - maps registers 5568 * @ha: pointer to adapter structure 5569 * 5570 * This routines maps HBA's registers from the pci address space 5571 * into the kernel virtual address space for memory mapped i/o. 
5572 **/ 5573 int qla4xxx_iospace_config(struct scsi_qla_host *ha) 5574 { 5575 unsigned long pio, pio_len, pio_flags; 5576 unsigned long mmio, mmio_len, mmio_flags; 5577 5578 pio = pci_resource_start(ha->pdev, 0); 5579 pio_len = pci_resource_len(ha->pdev, 0); 5580 pio_flags = pci_resource_flags(ha->pdev, 0); 5581 if (pio_flags & IORESOURCE_IO) { 5582 if (pio_len < MIN_IOBASE_LEN) { 5583 ql4_printk(KERN_WARNING, ha, 5584 "Invalid PCI I/O region size\n"); 5585 pio = 0; 5586 } 5587 } else { 5588 ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n"); 5589 pio = 0; 5590 } 5591 5592 /* Use MMIO operations for all accesses. */ 5593 mmio = pci_resource_start(ha->pdev, 1); 5594 mmio_len = pci_resource_len(ha->pdev, 1); 5595 mmio_flags = pci_resource_flags(ha->pdev, 1); 5596 5597 if (!(mmio_flags & IORESOURCE_MEM)) { 5598 ql4_printk(KERN_ERR, ha, 5599 "region #0 not an MMIO resource, aborting\n"); 5600 5601 goto iospace_error_exit; 5602 } 5603 5604 if (mmio_len < MIN_IOBASE_LEN) { 5605 ql4_printk(KERN_ERR, ha, 5606 "Invalid PCI mem region size, aborting\n"); 5607 goto iospace_error_exit; 5608 } 5609 5610 if (pci_request_regions(ha->pdev, DRIVER_NAME)) { 5611 ql4_printk(KERN_WARNING, ha, 5612 "Failed to reserve PIO/MMIO regions\n"); 5613 5614 goto iospace_error_exit; 5615 } 5616 5617 ha->pio_address = pio; 5618 ha->pio_length = pio_len; 5619 ha->reg = ioremap(mmio, MIN_IOBASE_LEN); 5620 if (!ha->reg) { 5621 ql4_printk(KERN_ERR, ha, 5622 "cannot remap MMIO, aborting\n"); 5623 5624 goto iospace_error_exit; 5625 } 5626 5627 return 0; 5628 5629 iospace_error_exit: 5630 return -ENOMEM; 5631 } 5632 5633 static struct isp_operations qla4xxx_isp_ops = { 5634 .iospace_config = qla4xxx_iospace_config, 5635 .pci_config = qla4xxx_pci_config, 5636 .disable_intrs = qla4xxx_disable_intrs, 5637 .enable_intrs = qla4xxx_enable_intrs, 5638 .start_firmware = qla4xxx_start_firmware, 5639 .intr_handler = qla4xxx_intr_handler, 5640 .interrupt_service_routine = qla4xxx_interrupt_service_routine, 5641 .reset_chip = qla4xxx_soft_reset, 5642 .reset_firmware = qla4xxx_hw_reset, 5643 .queue_iocb = qla4xxx_queue_iocb, 5644 .complete_iocb = qla4xxx_complete_iocb, 5645 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out, 5646 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in, 5647 .get_sys_info = qla4xxx_get_sys_info, 5648 .queue_mailbox_command = qla4xxx_queue_mbox_cmd, 5649 .process_mailbox_interrupt = qla4xxx_process_mbox_intr, 5650 }; 5651 5652 static struct isp_operations qla4_82xx_isp_ops = { 5653 .iospace_config = qla4_8xxx_iospace_config, 5654 .pci_config = qla4_8xxx_pci_config, 5655 .disable_intrs = qla4_82xx_disable_intrs, 5656 .enable_intrs = qla4_82xx_enable_intrs, 5657 .start_firmware = qla4_8xxx_load_risc, 5658 .restart_firmware = qla4_82xx_try_start_fw, 5659 .intr_handler = qla4_82xx_intr_handler, 5660 .interrupt_service_routine = qla4_82xx_interrupt_service_routine, 5661 .need_reset = qla4_8xxx_need_reset, 5662 .reset_chip = qla4_82xx_isp_reset, 5663 .reset_firmware = qla4_8xxx_stop_firmware, 5664 .queue_iocb = qla4_82xx_queue_iocb, 5665 .complete_iocb = qla4_82xx_complete_iocb, 5666 .rd_shdw_req_q_out = qla4_82xx_rd_shdw_req_q_out, 5667 .rd_shdw_rsp_q_in = qla4_82xx_rd_shdw_rsp_q_in, 5668 .get_sys_info = qla4_8xxx_get_sys_info, 5669 .rd_reg_direct = qla4_82xx_rd_32, 5670 .wr_reg_direct = qla4_82xx_wr_32, 5671 .rd_reg_indirect = qla4_82xx_md_rd_32, 5672 .wr_reg_indirect = qla4_82xx_md_wr_32, 5673 .idc_lock = qla4_82xx_idc_lock, 5674 .idc_unlock = qla4_82xx_idc_unlock, 5675 .rom_lock_recovery = 
qla4_82xx_rom_lock_recovery, 5676 .queue_mailbox_command = qla4_82xx_queue_mbox_cmd, 5677 .process_mailbox_interrupt = qla4_82xx_process_mbox_intr, 5678 }; 5679 5680 static struct isp_operations qla4_83xx_isp_ops = { 5681 .iospace_config = qla4_8xxx_iospace_config, 5682 .pci_config = qla4_8xxx_pci_config, 5683 .disable_intrs = qla4_83xx_disable_intrs, 5684 .enable_intrs = qla4_83xx_enable_intrs, 5685 .start_firmware = qla4_8xxx_load_risc, 5686 .restart_firmware = qla4_83xx_start_firmware, 5687 .intr_handler = qla4_83xx_intr_handler, 5688 .interrupt_service_routine = qla4_83xx_interrupt_service_routine, 5689 .need_reset = qla4_8xxx_need_reset, 5690 .reset_chip = qla4_83xx_isp_reset, 5691 .reset_firmware = qla4_8xxx_stop_firmware, 5692 .queue_iocb = qla4_83xx_queue_iocb, 5693 .complete_iocb = qla4_83xx_complete_iocb, 5694 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out, 5695 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in, 5696 .get_sys_info = qla4_8xxx_get_sys_info, 5697 .rd_reg_direct = qla4_83xx_rd_reg, 5698 .wr_reg_direct = qla4_83xx_wr_reg, 5699 .rd_reg_indirect = qla4_83xx_rd_reg_indirect, 5700 .wr_reg_indirect = qla4_83xx_wr_reg_indirect, 5701 .idc_lock = qla4_83xx_drv_lock, 5702 .idc_unlock = qla4_83xx_drv_unlock, 5703 .rom_lock_recovery = qla4_83xx_rom_lock_recovery, 5704 .queue_mailbox_command = qla4_83xx_queue_mbox_cmd, 5705 .process_mailbox_interrupt = qla4_83xx_process_mbox_intr, 5706 }; 5707 5708 uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha) 5709 { 5710 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out); 5711 } 5712 5713 uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha) 5714 { 5715 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out)); 5716 } 5717 5718 uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) 5719 { 5720 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in); 5721 } 5722 5723 uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) 5724 { 5725 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in)); 5726 } 5727 5728 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf) 5729 { 5730 struct scsi_qla_host *ha = data; 5731 char *str = buf; 5732 int rc; 5733 5734 switch (type) { 5735 case ISCSI_BOOT_ETH_FLAGS: 5736 rc = sprintf(str, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT); 5737 break; 5738 case ISCSI_BOOT_ETH_INDEX: 5739 rc = sprintf(str, "0\n"); 5740 break; 5741 case ISCSI_BOOT_ETH_MAC: 5742 rc = sysfs_format_mac(str, ha->my_mac, 5743 MAC_ADDR_LEN); 5744 break; 5745 default: 5746 rc = -ENOSYS; 5747 break; 5748 } 5749 return rc; 5750 } 5751 5752 static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type) 5753 { 5754 int rc; 5755 5756 switch (type) { 5757 case ISCSI_BOOT_ETH_FLAGS: 5758 case ISCSI_BOOT_ETH_MAC: 5759 case ISCSI_BOOT_ETH_INDEX: 5760 rc = S_IRUGO; 5761 break; 5762 default: 5763 rc = 0; 5764 break; 5765 } 5766 return rc; 5767 } 5768 5769 static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf) 5770 { 5771 struct scsi_qla_host *ha = data; 5772 char *str = buf; 5773 int rc; 5774 5775 switch (type) { 5776 case ISCSI_BOOT_INI_INITIATOR_NAME: 5777 rc = sprintf(str, "%s\n", ha->name_string); 5778 break; 5779 default: 5780 rc = -ENOSYS; 5781 break; 5782 } 5783 return rc; 5784 } 5785 5786 static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type) 5787 { 5788 int rc; 5789 5790 switch (type) { 5791 case ISCSI_BOOT_INI_INITIATOR_NAME: 5792 rc = S_IRUGO; 5793 break; 5794 default: 5795 rc = 0; 5796 break; 5797 } 5798 return rc; 5799 } 5800 5801 static 
ssize_t 5802 qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type, 5803 char *buf) 5804 { 5805 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0]; 5806 char *str = buf; 5807 int rc; 5808 5809 switch (type) { 5810 case ISCSI_BOOT_TGT_NAME: 5811 rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name); 5812 break; 5813 case ISCSI_BOOT_TGT_IP_ADDR: 5814 if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1) 5815 rc = sprintf(buf, "%pI4\n", 5816 &boot_conn->dest_ipaddr.ip_address); 5817 else 5818 rc = sprintf(str, "%pI6\n", 5819 &boot_conn->dest_ipaddr.ip_address); 5820 break; 5821 case ISCSI_BOOT_TGT_PORT: 5822 rc = sprintf(str, "%d\n", boot_conn->dest_port); 5823 break; 5824 case ISCSI_BOOT_TGT_CHAP_NAME: 5825 rc = sprintf(str, "%.*s\n", 5826 boot_conn->chap.target_chap_name_length, 5827 (char *)&boot_conn->chap.target_chap_name); 5828 break; 5829 case ISCSI_BOOT_TGT_CHAP_SECRET: 5830 rc = sprintf(str, "%.*s\n", 5831 boot_conn->chap.target_secret_length, 5832 (char *)&boot_conn->chap.target_secret); 5833 break; 5834 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 5835 rc = sprintf(str, "%.*s\n", 5836 boot_conn->chap.intr_chap_name_length, 5837 (char *)&boot_conn->chap.intr_chap_name); 5838 break; 5839 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 5840 rc = sprintf(str, "%.*s\n", 5841 boot_conn->chap.intr_secret_length, 5842 (char *)&boot_conn->chap.intr_secret); 5843 break; 5844 case ISCSI_BOOT_TGT_FLAGS: 5845 rc = sprintf(str, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT); 5846 break; 5847 case ISCSI_BOOT_TGT_NIC_ASSOC: 5848 rc = sprintf(str, "0\n"); 5849 break; 5850 default: 5851 rc = -ENOSYS; 5852 break; 5853 } 5854 return rc; 5855 } 5856 5857 static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf) 5858 { 5859 struct scsi_qla_host *ha = data; 5860 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess); 5861 5862 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf); 5863 } 5864 5865 static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf) 5866 { 5867 struct scsi_qla_host *ha = data; 5868 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess); 5869 5870 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf); 5871 } 5872 5873 static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type) 5874 { 5875 int rc; 5876 5877 switch (type) { 5878 case ISCSI_BOOT_TGT_NAME: 5879 case ISCSI_BOOT_TGT_IP_ADDR: 5880 case ISCSI_BOOT_TGT_PORT: 5881 case ISCSI_BOOT_TGT_CHAP_NAME: 5882 case ISCSI_BOOT_TGT_CHAP_SECRET: 5883 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 5884 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 5885 case ISCSI_BOOT_TGT_NIC_ASSOC: 5886 case ISCSI_BOOT_TGT_FLAGS: 5887 rc = S_IRUGO; 5888 break; 5889 default: 5890 rc = 0; 5891 break; 5892 } 5893 return rc; 5894 } 5895 5896 static void qla4xxx_boot_release(void *data) 5897 { 5898 struct scsi_qla_host *ha = data; 5899 5900 scsi_host_put(ha->host); 5901 } 5902 5903 static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[]) 5904 { 5905 dma_addr_t buf_dma; 5906 uint32_t addr, pri_addr, sec_addr; 5907 uint32_t offset; 5908 uint16_t func_num; 5909 uint8_t val; 5910 uint8_t *buf = NULL; 5911 size_t size = 13 * sizeof(uint8_t); 5912 int ret = QLA_SUCCESS; 5913 5914 func_num = PCI_FUNC(ha->pdev->devfn); 5915 5916 ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n", 5917 __func__, ha->pdev->device, func_num); 5918 5919 if (is_qla40XX(ha)) { 5920 if (func_num == 1) { 5921 addr = NVRAM_PORT0_BOOT_MODE; 5922 pri_addr = NVRAM_PORT0_BOOT_PRI_TGT; 5923 
sec_addr = NVRAM_PORT0_BOOT_SEC_TGT; 5924 } else if (func_num == 3) { 5925 addr = NVRAM_PORT1_BOOT_MODE; 5926 pri_addr = NVRAM_PORT1_BOOT_PRI_TGT; 5927 sec_addr = NVRAM_PORT1_BOOT_SEC_TGT; 5928 } else { 5929 ret = QLA_ERROR; 5930 goto exit_boot_info; 5931 } 5932 5933 /* Check Boot Mode */ 5934 val = rd_nvram_byte(ha, addr); 5935 if (!(val & 0x07)) { 5936 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot " 5937 "options : 0x%x\n", __func__, val)); 5938 ret = QLA_ERROR; 5939 goto exit_boot_info; 5940 } 5941 5942 /* get primary valid target index */ 5943 val = rd_nvram_byte(ha, pri_addr); 5944 if (val & BIT_7) 5945 ddb_index[0] = (val & 0x7f); 5946 5947 /* get secondary valid target index */ 5948 val = rd_nvram_byte(ha, sec_addr); 5949 if (val & BIT_7) 5950 ddb_index[1] = (val & 0x7f); 5951 goto exit_boot_info; 5952 } else if (is_qla80XX(ha)) { 5953 buf = dma_alloc_coherent(&ha->pdev->dev, size, 5954 &buf_dma, GFP_KERNEL); 5955 if (!buf) { 5956 DEBUG2(ql4_printk(KERN_ERR, ha, 5957 "%s: Unable to allocate dma buffer\n", 5958 __func__)); 5959 ret = QLA_ERROR; 5960 goto exit_boot_info; 5961 } 5962 5963 if (ha->port_num == 0) 5964 offset = BOOT_PARAM_OFFSET_PORT0; 5965 else if (ha->port_num == 1) 5966 offset = BOOT_PARAM_OFFSET_PORT1; 5967 else { 5968 ret = QLA_ERROR; 5969 goto exit_boot_info_free; 5970 } 5971 addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) + 5972 offset; 5973 if (qla4xxx_get_flash(ha, buf_dma, addr, 5974 13 * sizeof(uint8_t)) != QLA_SUCCESS) { 5975 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash" 5976 " failed\n", ha->host_no, __func__)); 5977 ret = QLA_ERROR; 5978 goto exit_boot_info_free; 5979 } 5980 /* Check Boot Mode */ 5981 if (!(buf[1] & 0x07)) { 5982 DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options" 5983 " : 0x%x\n", buf[1])); 5984 ret = QLA_ERROR; 5985 goto exit_boot_info_free; 5986 } 5987 5988 /* get primary valid target index */ 5989 if (buf[2] & BIT_7) 5990 ddb_index[0] = buf[2] & 0x7f; 5991 5992 /* get secondary valid target index */ 5993 if (buf[11] & BIT_7) 5994 ddb_index[1] = buf[11] & 0x7f; 5995 } else { 5996 ret = QLA_ERROR; 5997 goto exit_boot_info; 5998 } 5999 6000 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary" 6001 " target ID %d\n", __func__, ddb_index[0], 6002 ddb_index[1])); 6003 6004 exit_boot_info_free: 6005 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma); 6006 exit_boot_info: 6007 ha->pri_ddb_idx = ddb_index[0]; 6008 ha->sec_ddb_idx = ddb_index[1]; 6009 return ret; 6010 } 6011 6012 /** 6013 * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password 6014 * @ha: pointer to adapter structure 6015 * @username: CHAP username to be returned 6016 * @password: CHAP password to be returned 6017 * 6018 * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP 6019 * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/. 6020 * So from the CHAP cache find the first BIDI CHAP entry and set it 6021 * to the boot record in sysfs. 
6022 **/ 6023 static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username, 6024 char *password) 6025 { 6026 int i, ret = -EINVAL; 6027 int max_chap_entries = 0; 6028 struct ql4_chap_table *chap_table; 6029 6030 if (is_qla80XX(ha)) 6031 max_chap_entries = (ha->hw.flt_chap_size / 2) / 6032 sizeof(struct ql4_chap_table); 6033 else 6034 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 6035 6036 if (!ha->chap_list) { 6037 ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n"); 6038 return ret; 6039 } 6040 6041 mutex_lock(&ha->chap_sem); 6042 for (i = 0; i < max_chap_entries; i++) { 6043 chap_table = (struct ql4_chap_table *)ha->chap_list + i; 6044 if (chap_table->cookie != 6045 cpu_to_le16(CHAP_VALID_COOKIE)) { 6046 continue; 6047 } 6048 6049 if (chap_table->flags & BIT_7) /* local */ 6050 continue; 6051 6052 if (!(chap_table->flags & BIT_6)) /* Not BIDI */ 6053 continue; 6054 6055 strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); 6056 strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); 6057 ret = 0; 6058 break; 6059 } 6060 mutex_unlock(&ha->chap_sem); 6061 6062 return ret; 6063 } 6064 6065 6066 static int qla4xxx_get_boot_target(struct scsi_qla_host *ha, 6067 struct ql4_boot_session_info *boot_sess, 6068 uint16_t ddb_index) 6069 { 6070 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0]; 6071 struct dev_db_entry *fw_ddb_entry; 6072 dma_addr_t fw_ddb_entry_dma; 6073 uint16_t idx; 6074 uint16_t options; 6075 int ret = QLA_SUCCESS; 6076 6077 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 6078 &fw_ddb_entry_dma, GFP_KERNEL); 6079 if (!fw_ddb_entry) { 6080 DEBUG2(ql4_printk(KERN_ERR, ha, 6081 "%s: Unable to allocate dma buffer.\n", 6082 __func__)); 6083 ret = QLA_ERROR; 6084 return ret; 6085 } 6086 6087 if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry, 6088 fw_ddb_entry_dma, ddb_index)) { 6089 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at " 6090 "index [%d]\n", __func__, ddb_index)); 6091 ret = QLA_ERROR; 6092 goto exit_boot_target; 6093 } 6094 6095 /* Update target name and IP from DDB */ 6096 memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name, 6097 min(sizeof(boot_sess->target_name), 6098 sizeof(fw_ddb_entry->iscsi_name))); 6099 6100 options = le16_to_cpu(fw_ddb_entry->options); 6101 if (options & DDB_OPT_IPV6_DEVICE) { 6102 memcpy(&boot_conn->dest_ipaddr.ip_address, 6103 &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN); 6104 } else { 6105 boot_conn->dest_ipaddr.ip_type = 0x1; 6106 memcpy(&boot_conn->dest_ipaddr.ip_address, 6107 &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN); 6108 } 6109 6110 boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port); 6111 6112 /* update chap information */ 6113 idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 6114 6115 if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) { 6116 6117 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n")); 6118 6119 ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap. 
6120 target_chap_name, 6121 (char *)&boot_conn->chap.target_secret, 6122 idx); 6123 if (ret) { 6124 ql4_printk(KERN_ERR, ha, "Failed to set chap\n"); 6125 ret = QLA_ERROR; 6126 goto exit_boot_target; 6127 } 6128 6129 boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN; 6130 boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN; 6131 } 6132 6133 if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) { 6134 6135 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n")); 6136 6137 ret = qla4xxx_get_bidi_chap(ha, 6138 (char *)&boot_conn->chap.intr_chap_name, 6139 (char *)&boot_conn->chap.intr_secret); 6140 6141 if (ret) { 6142 ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n"); 6143 ret = QLA_ERROR; 6144 goto exit_boot_target; 6145 } 6146 6147 boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN; 6148 boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN; 6149 } 6150 6151 exit_boot_target: 6152 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 6153 fw_ddb_entry, fw_ddb_entry_dma); 6154 return ret; 6155 } 6156 6157 static int qla4xxx_get_boot_info(struct scsi_qla_host *ha) 6158 { 6159 uint16_t ddb_index[2]; 6160 int ret = QLA_ERROR; 6161 int rval; 6162 6163 memset(ddb_index, 0, sizeof(ddb_index)); 6164 ddb_index[0] = 0xffff; 6165 ddb_index[1] = 0xffff; 6166 ret = get_fw_boot_info(ha, ddb_index); 6167 if (ret != QLA_SUCCESS) { 6168 DEBUG2(ql4_printk(KERN_INFO, ha, 6169 "%s: No boot target configured.\n", __func__)); 6170 return ret; 6171 } 6172 6173 if (ql4xdisablesysfsboot) 6174 return QLA_SUCCESS; 6175 6176 if (ddb_index[0] == 0xffff) 6177 goto sec_target; 6178 6179 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess), 6180 ddb_index[0]); 6181 if (rval != QLA_SUCCESS) { 6182 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not " 6183 "configured\n", __func__)); 6184 } else 6185 ret = QLA_SUCCESS; 6186 6187 sec_target: 6188 if (ddb_index[1] == 0xffff) 6189 goto exit_get_boot_info; 6190 6191 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess), 6192 ddb_index[1]); 6193 if (rval != QLA_SUCCESS) { 6194 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not" 6195 " configured\n", __func__)); 6196 } else 6197 ret = QLA_SUCCESS; 6198 6199 exit_get_boot_info: 6200 return ret; 6201 } 6202 6203 static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha) 6204 { 6205 struct iscsi_boot_kobj *boot_kobj; 6206 6207 if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS) 6208 return QLA_ERROR; 6209 6210 if (ql4xdisablesysfsboot) { 6211 ql4_printk(KERN_INFO, ha, 6212 "%s: syfsboot disabled - driver will trigger login " 6213 "and publish session for discovery .\n", __func__); 6214 return QLA_SUCCESS; 6215 } 6216 6217 6218 ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no); 6219 if (!ha->boot_kset) 6220 goto kset_free; 6221 6222 if (!scsi_host_get(ha->host)) 6223 goto kset_free; 6224 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha, 6225 qla4xxx_show_boot_tgt_pri_info, 6226 qla4xxx_tgt_get_attr_visibility, 6227 qla4xxx_boot_release); 6228 if (!boot_kobj) 6229 goto put_host; 6230 6231 if (!scsi_host_get(ha->host)) 6232 goto kset_free; 6233 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha, 6234 qla4xxx_show_boot_tgt_sec_info, 6235 qla4xxx_tgt_get_attr_visibility, 6236 qla4xxx_boot_release); 6237 if (!boot_kobj) 6238 goto put_host; 6239 6240 if (!scsi_host_get(ha->host)) 6241 goto kset_free; 6242 boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha, 6243 qla4xxx_show_boot_ini_info, 6244 
qla4xxx_ini_get_attr_visibility, 6245 qla4xxx_boot_release); 6246 if (!boot_kobj) 6247 goto put_host; 6248 6249 if (!scsi_host_get(ha->host)) 6250 goto kset_free; 6251 boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha, 6252 qla4xxx_show_boot_eth_info, 6253 qla4xxx_eth_get_attr_visibility, 6254 qla4xxx_boot_release); 6255 if (!boot_kobj) 6256 goto put_host; 6257 6258 return QLA_SUCCESS; 6259 6260 put_host: 6261 scsi_host_put(ha->host); 6262 kset_free: 6263 iscsi_boot_destroy_kset(ha->boot_kset); 6264 return -ENOMEM; 6265 } 6266 6267 6268 static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry, 6269 struct ql4_tuple_ddb *tddb) 6270 { 6271 struct iscsi_cls_session *cls_sess; 6272 struct iscsi_cls_conn *cls_conn; 6273 struct iscsi_session *sess; 6274 struct iscsi_conn *conn; 6275 6276 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); 6277 cls_sess = ddb_entry->sess; 6278 sess = cls_sess->dd_data; 6279 cls_conn = ddb_entry->conn; 6280 conn = cls_conn->dd_data; 6281 6282 tddb->tpgt = sess->tpgt; 6283 tddb->port = conn->persistent_port; 6284 strlcpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE); 6285 strlcpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN); 6286 } 6287 6288 static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry, 6289 struct ql4_tuple_ddb *tddb, 6290 uint8_t *flash_isid) 6291 { 6292 uint16_t options = 0; 6293 6294 tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); 6295 memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0], 6296 min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name))); 6297 6298 options = le16_to_cpu(fw_ddb_entry->options); 6299 if (options & DDB_OPT_IPV6_DEVICE) 6300 sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr); 6301 else 6302 sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr); 6303 6304 tddb->port = le16_to_cpu(fw_ddb_entry->port); 6305 6306 if (flash_isid == NULL) 6307 memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0], 6308 sizeof(tddb->isid)); 6309 else 6310 memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid)); 6311 } 6312 6313 static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha, 6314 struct ql4_tuple_ddb *old_tddb, 6315 struct ql4_tuple_ddb *new_tddb, 6316 uint8_t is_isid_compare) 6317 { 6318 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name)) 6319 return QLA_ERROR; 6320 6321 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr)) 6322 return QLA_ERROR; 6323 6324 if (old_tddb->port != new_tddb->port) 6325 return QLA_ERROR; 6326 6327 /* For multi sessions, driver generates the ISID, so do not compare 6328 * ISID in reset path since it would be a comparison between the 6329 * driver generated ISID and firmware generated ISID. This could 6330 * lead to adding duplicated DDBs in the list as driver generated 6331 * ISID would not match firmware generated ISID. 
6332 */ 6333 if (is_isid_compare) { 6334 DEBUG2(ql4_printk(KERN_INFO, ha, 6335 "%s: old ISID [%pmR] New ISID [%pmR]\n", 6336 __func__, old_tddb->isid, new_tddb->isid)); 6337 6338 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0], 6339 sizeof(old_tddb->isid))) 6340 return QLA_ERROR; 6341 } 6342 6343 DEBUG2(ql4_printk(KERN_INFO, ha, 6344 "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]", 6345 old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr, 6346 old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt, 6347 new_tddb->ip_addr, new_tddb->iscsi_name)); 6348 6349 return QLA_SUCCESS; 6350 } 6351 6352 static int qla4xxx_is_session_exists(struct scsi_qla_host *ha, 6353 struct dev_db_entry *fw_ddb_entry, 6354 uint32_t *index) 6355 { 6356 struct ddb_entry *ddb_entry; 6357 struct ql4_tuple_ddb *fw_tddb = NULL; 6358 struct ql4_tuple_ddb *tmp_tddb = NULL; 6359 int idx; 6360 int ret = QLA_ERROR; 6361 6362 fw_tddb = vzalloc(sizeof(*fw_tddb)); 6363 if (!fw_tddb) { 6364 DEBUG2(ql4_printk(KERN_WARNING, ha, 6365 "Memory Allocation failed.\n")); 6366 ret = QLA_SUCCESS; 6367 goto exit_check; 6368 } 6369 6370 tmp_tddb = vzalloc(sizeof(*tmp_tddb)); 6371 if (!tmp_tddb) { 6372 DEBUG2(ql4_printk(KERN_WARNING, ha, 6373 "Memory Allocation failed.\n")); 6374 ret = QLA_SUCCESS; 6375 goto exit_check; 6376 } 6377 6378 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL); 6379 6380 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { 6381 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 6382 if (ddb_entry == NULL) 6383 continue; 6384 6385 qla4xxx_get_param_ddb(ddb_entry, tmp_tddb); 6386 if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) { 6387 ret = QLA_SUCCESS; /* found */ 6388 if (index != NULL) 6389 *index = idx; 6390 goto exit_check; 6391 } 6392 } 6393 6394 exit_check: 6395 vfree(fw_tddb); 6396 vfree(tmp_tddb); 6397 return ret; 6398 } 6399 6400 /** 6401 * qla4xxx_check_existing_isid - check if target with same isid exist 6402 * in target list 6403 * @list_nt: list of target 6404 * @isid: isid to check 6405 * 6406 * This routine return QLA_SUCCESS if target with same isid exist 6407 **/ 6408 static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid) 6409 { 6410 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; 6411 struct dev_db_entry *fw_ddb_entry; 6412 6413 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 6414 fw_ddb_entry = &nt_ddb_idx->fw_ddb; 6415 6416 if (memcmp(&fw_ddb_entry->isid[0], &isid[0], 6417 sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) { 6418 return QLA_SUCCESS; 6419 } 6420 } 6421 return QLA_ERROR; 6422 } 6423 6424 /** 6425 * qla4xxx_update_isid - compare ddbs and updated isid 6426 * @ha: Pointer to host adapter structure. 6427 * @list_nt: list of nt target 6428 * @fw_ddb_entry: firmware ddb entry 6429 * 6430 * This routine update isid if ddbs have same iqn, same isid and 6431 * different IP addr. 6432 * Return QLA_SUCCESS if isid is updated. 
6433 **/ 6434 static int qla4xxx_update_isid(struct scsi_qla_host *ha, 6435 struct list_head *list_nt, 6436 struct dev_db_entry *fw_ddb_entry) 6437 { 6438 uint8_t base_value, i; 6439 6440 base_value = fw_ddb_entry->isid[1] & 0x1f; 6441 for (i = 0; i < 8; i++) { 6442 fw_ddb_entry->isid[1] = (base_value | (i << 5)); 6443 if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid)) 6444 break; 6445 } 6446 6447 if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid)) 6448 return QLA_ERROR; 6449 6450 return QLA_SUCCESS; 6451 } 6452 6453 /** 6454 * qla4xxx_should_update_isid - check if the isid needs to be updated 6455 * @ha: Pointer to host adapter structure. 6456 * @old_tddb: ddb tuple 6457 * @new_tddb: ddb tuple 6458 * 6459 * Return QLA_SUCCESS if the iqn and isid match but the IP address or 6460 * port differs 6461 **/ 6462 static int qla4xxx_should_update_isid(struct scsi_qla_host *ha, 6463 struct ql4_tuple_ddb *old_tddb, 6464 struct ql4_tuple_ddb *new_tddb) 6465 { 6466 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) { 6467 /* Same ip */ 6468 if (old_tddb->port == new_tddb->port) 6469 return QLA_ERROR; 6470 } 6471 6472 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name)) 6473 /* different iqn */ 6474 return QLA_ERROR; 6475 6476 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0], 6477 sizeof(old_tddb->isid))) 6478 /* different isid */ 6479 return QLA_ERROR; 6480 6481 return QLA_SUCCESS; 6482 } 6483 6484 /** 6485 * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt 6486 * @ha: Pointer to host adapter structure. 6487 * @list_nt: list of NT targets. 6488 * @fw_ddb_entry: firmware ddb entry. 6489 * 6490 * This routine checks whether fw_ddb_entry already exists in list_nt to avoid 6491 * a duplicate ddb in list_nt. 6492 * Return QLA_SUCCESS if a duplicate ddb exists in list_nt. 6493 * Note: This function also updates the isid of the DDB if required. 
6494 **/ 6495 6496 static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha, 6497 struct list_head *list_nt, 6498 struct dev_db_entry *fw_ddb_entry) 6499 { 6500 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; 6501 struct ql4_tuple_ddb *fw_tddb = NULL; 6502 struct ql4_tuple_ddb *tmp_tddb = NULL; 6503 int rval, ret = QLA_ERROR; 6504 6505 fw_tddb = vzalloc(sizeof(*fw_tddb)); 6506 if (!fw_tddb) { 6507 DEBUG2(ql4_printk(KERN_WARNING, ha, 6508 "Memory Allocation failed.\n")); 6509 ret = QLA_SUCCESS; 6510 goto exit_check; 6511 } 6512 6513 tmp_tddb = vzalloc(sizeof(*tmp_tddb)); 6514 if (!tmp_tddb) { 6515 DEBUG2(ql4_printk(KERN_WARNING, ha, 6516 "Memory Allocation failed.\n")); 6517 ret = QLA_SUCCESS; 6518 goto exit_check; 6519 } 6520 6521 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL); 6522 6523 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 6524 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, 6525 nt_ddb_idx->flash_isid); 6526 ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true); 6527 /* found duplicate ddb */ 6528 if (ret == QLA_SUCCESS) 6529 goto exit_check; 6530 } 6531 6532 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 6533 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL); 6534 6535 ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb); 6536 if (ret == QLA_SUCCESS) { 6537 rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry); 6538 if (rval == QLA_SUCCESS) 6539 ret = QLA_ERROR; 6540 else 6541 ret = QLA_SUCCESS; 6542 6543 goto exit_check; 6544 } 6545 } 6546 6547 exit_check: 6548 vfree(fw_tddb); 6549 vfree(tmp_tddb); 6550 return ret; 6551 } 6552 6553 static void qla4xxx_free_ddb_list(struct list_head *list_ddb) 6554 { 6555 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 6556 6557 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { 6558 list_del_init(&ddb_idx->list); 6559 vfree(ddb_idx); 6560 } 6561 } 6562 6563 static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha, 6564 struct dev_db_entry *fw_ddb_entry) 6565 { 6566 struct iscsi_endpoint *ep; 6567 struct sockaddr_in *addr; 6568 struct sockaddr_in6 *addr6; 6569 struct sockaddr *t_addr; 6570 struct sockaddr_storage *dst_addr; 6571 char *ip; 6572 6573 /* TODO: need to destroy on unload iscsi_endpoint*/ 6574 dst_addr = vmalloc(sizeof(*dst_addr)); 6575 if (!dst_addr) 6576 return NULL; 6577 6578 if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) { 6579 t_addr = (struct sockaddr *)dst_addr; 6580 t_addr->sa_family = AF_INET6; 6581 addr6 = (struct sockaddr_in6 *)dst_addr; 6582 ip = (char *)&addr6->sin6_addr; 6583 memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN); 6584 addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port)); 6585 6586 } else { 6587 t_addr = (struct sockaddr *)dst_addr; 6588 t_addr->sa_family = AF_INET; 6589 addr = (struct sockaddr_in *)dst_addr; 6590 ip = (char *)&addr->sin_addr; 6591 memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN); 6592 addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port)); 6593 } 6594 6595 ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0); 6596 vfree(dst_addr); 6597 return ep; 6598 } 6599 6600 static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx) 6601 { 6602 if (ql4xdisablesysfsboot) 6603 return QLA_SUCCESS; 6604 if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx) 6605 return QLA_ERROR; 6606 return QLA_SUCCESS; 6607 } 6608 6609 static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha, 6610 struct ddb_entry *ddb_entry, 6611 uint16_t idx) 6612 { 
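	/*
	 * A DDB built from a flash entry starts with no firmware index and no
	 * active connection, with its relogin counters cleared.  The
	 * firmware-supplied def_timeout is used as the relogin timeout only
	 * when it lies between LOGIN_TOV and LOGIN_TOV * 10; otherwise
	 * LOGIN_TOV is used.  Boot targets are flagged when sysfs boot
	 * export is disabled.
	 */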
6613 uint16_t def_timeout; 6614 6615 ddb_entry->ddb_type = FLASH_DDB; 6616 ddb_entry->fw_ddb_index = INVALID_ENTRY; 6617 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE; 6618 ddb_entry->ha = ha; 6619 ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb; 6620 ddb_entry->ddb_change = qla4xxx_flash_ddb_change; 6621 ddb_entry->chap_tbl_idx = INVALID_ENTRY; 6622 6623 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); 6624 atomic_set(&ddb_entry->relogin_timer, 0); 6625 atomic_set(&ddb_entry->relogin_retry_count, 0); 6626 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout); 6627 ddb_entry->default_relogin_timeout = 6628 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ? 6629 def_timeout : LOGIN_TOV; 6630 ddb_entry->default_time2wait = 6631 le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait); 6632 6633 if (ql4xdisablesysfsboot && 6634 (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)) 6635 set_bit(DF_BOOT_TGT, &ddb_entry->flags); 6636 } 6637 6638 static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha) 6639 { 6640 uint32_t idx = 0; 6641 uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */ 6642 uint32_t sts[MBOX_REG_COUNT]; 6643 uint32_t ip_state; 6644 unsigned long wtime; 6645 int ret; 6646 6647 wtime = jiffies + (HZ * IP_CONFIG_TOV); 6648 do { 6649 for (idx = 0; idx < IP_ADDR_COUNT; idx++) { 6650 if (ip_idx[idx] == -1) 6651 continue; 6652 6653 ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts); 6654 6655 if (ret == QLA_ERROR) { 6656 ip_idx[idx] = -1; 6657 continue; 6658 } 6659 6660 ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT; 6661 6662 DEBUG2(ql4_printk(KERN_INFO, ha, 6663 "Waiting for IP state for idx = %d, state = 0x%x\n", 6664 ip_idx[idx], ip_state)); 6665 if (ip_state == IP_ADDRSTATE_UNCONFIGURED || 6666 ip_state == IP_ADDRSTATE_INVALID || 6667 ip_state == IP_ADDRSTATE_PREFERRED || 6668 ip_state == IP_ADDRSTATE_DEPRICATED || 6669 ip_state == IP_ADDRSTATE_DISABLING) 6670 ip_idx[idx] = -1; 6671 } 6672 6673 /* Break if all IP states checked */ 6674 if ((ip_idx[0] == -1) && 6675 (ip_idx[1] == -1) && 6676 (ip_idx[2] == -1) && 6677 (ip_idx[3] == -1)) 6678 break; 6679 schedule_timeout_uninterruptible(HZ); 6680 } while (time_after(wtime, jiffies)); 6681 } 6682 6683 static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry, 6684 struct dev_db_entry *flash_ddb_entry) 6685 { 6686 uint16_t options = 0; 6687 size_t ip_len = IP_ADDR_LEN; 6688 6689 options = le16_to_cpu(fw_ddb_entry->options); 6690 if (options & DDB_OPT_IPV6_DEVICE) 6691 ip_len = IPv6_ADDR_LEN; 6692 6693 if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len)) 6694 return QLA_ERROR; 6695 6696 if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0], 6697 sizeof(fw_ddb_entry->isid))) 6698 return QLA_ERROR; 6699 6700 if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port, 6701 sizeof(fw_ddb_entry->port))) 6702 return QLA_ERROR; 6703 6704 return QLA_SUCCESS; 6705 } 6706 6707 static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha, 6708 struct dev_db_entry *fw_ddb_entry, 6709 uint32_t fw_idx, uint32_t *flash_index) 6710 { 6711 struct dev_db_entry *flash_ddb_entry; 6712 dma_addr_t flash_ddb_entry_dma; 6713 uint32_t idx = 0; 6714 int max_ddbs; 6715 int ret = QLA_ERROR, status; 6716 6717 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 6718 MAX_DEV_DB_ENTRIES; 6719 6720 flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 6721 &flash_ddb_entry_dma); 6722 if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) { 6723 ql4_printk(KERN_ERR, ha, "Out of memory\n"); 6724 goto exit_find_st_idx; 6725 } 6726 6727 status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, 6728 flash_ddb_entry_dma, fw_idx); 6729 if (status == QLA_SUCCESS) { 6730 status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); 6731 if (status == QLA_SUCCESS) { 6732 *flash_index = fw_idx; 6733 ret = QLA_SUCCESS; 6734 goto exit_find_st_idx; 6735 } 6736 } 6737 6738 for (idx = 0; idx < max_ddbs; idx++) { 6739 status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, 6740 flash_ddb_entry_dma, idx); 6741 if (status == QLA_ERROR) 6742 continue; 6743 6744 status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); 6745 if (status == QLA_SUCCESS) { 6746 *flash_index = idx; 6747 ret = QLA_SUCCESS; 6748 goto exit_find_st_idx; 6749 } 6750 } 6751 6752 if (idx == max_ddbs) 6753 ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n", 6754 fw_idx); 6755 6756 exit_find_st_idx: 6757 if (flash_ddb_entry) 6758 dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry, 6759 flash_ddb_entry_dma); 6760 6761 return ret; 6762 } 6763 6764 static void qla4xxx_build_st_list(struct scsi_qla_host *ha, 6765 struct list_head *list_st) 6766 { 6767 struct qla_ddb_index *st_ddb_idx; 6768 int max_ddbs; 6769 int fw_idx_size; 6770 struct dev_db_entry *fw_ddb_entry; 6771 dma_addr_t fw_ddb_dma; 6772 int ret; 6773 uint32_t idx = 0, next_idx = 0; 6774 uint32_t state = 0, conn_err = 0; 6775 uint32_t flash_index = -1; 6776 uint16_t conn_id = 0; 6777 6778 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 6779 &fw_ddb_dma); 6780 if (fw_ddb_entry == NULL) { 6781 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 6782 goto exit_st_list; 6783 } 6784 6785 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 6786 MAX_DEV_DB_ENTRIES; 6787 fw_idx_size = sizeof(struct qla_ddb_index); 6788 6789 for (idx = 0; idx < max_ddbs; idx = next_idx) { 6790 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, 6791 NULL, &next_idx, &state, 6792 &conn_err, NULL, &conn_id); 6793 if (ret == QLA_ERROR) 6794 break; 6795 6796 /* Ignore DDB if invalid state (unassigned) */ 6797 if (state == DDB_DS_UNASSIGNED) 6798 goto continue_next_st; 6799 6800 /* Check if ST, add to the list_st */ 6801 if (strlen((char *) fw_ddb_entry->iscsi_name) != 0) 6802 goto continue_next_st; 6803 6804 st_ddb_idx = vzalloc(fw_idx_size); 6805 if (!st_ddb_idx) 6806 break; 6807 6808 ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx, 6809 &flash_index); 6810 if (ret == QLA_ERROR) { 6811 ql4_printk(KERN_ERR, ha, 6812 "No flash entry for ST at idx [%d]\n", idx); 6813 st_ddb_idx->flash_ddb_idx = idx; 6814 } else { 6815 ql4_printk(KERN_INFO, ha, 6816 "ST at idx [%d] is stored at flash [%d]\n", 6817 idx, flash_index); 6818 st_ddb_idx->flash_ddb_idx = flash_index; 6819 } 6820 6821 st_ddb_idx->fw_ddb_idx = idx; 6822 6823 list_add_tail(&st_ddb_idx->list, list_st); 6824 continue_next_st: 6825 if (next_idx == 0) 6826 break; 6827 } 6828 6829 exit_st_list: 6830 if (fw_ddb_entry) 6831 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 6832 } 6833 6834 /** 6835 * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list 6836 * @ha: pointer to adapter structure 6837 * @list_ddb: List from which failed ddb to be removed 6838 * 6839 * Iterate over the list of DDBs and find and remove DDBs that are either in 6840 * no connection active state or failed state 6841 **/ 6842 static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha, 6843 struct list_head *list_ddb) 6844 { 6845 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 6846 uint32_t next_idx = 0; 6847 uint32_t state = 0, conn_err = 0; 6848 int ret; 6849 6850 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { 6851 ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx, 6852 NULL, 0, NULL, &next_idx, &state, 6853 &conn_err, NULL, NULL); 6854 if (ret == QLA_ERROR) 6855 continue; 6856 6857 if (state == DDB_DS_NO_CONNECTION_ACTIVE || 6858 state == DDB_DS_SESSION_FAILED) { 6859 list_del_init(&ddb_idx->list); 6860 vfree(ddb_idx); 6861 } 6862 } 6863 } 6864 6865 static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha, 6866 struct ddb_entry *ddb_entry, 6867 struct dev_db_entry *fw_ddb_entry) 6868 { 6869 struct iscsi_cls_session *cls_sess; 6870 struct iscsi_session *sess; 6871 uint32_t max_ddbs = 0; 6872 uint16_t ddb_link = -1; 6873 6874 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 6875 MAX_DEV_DB_ENTRIES; 6876 6877 cls_sess = ddb_entry->sess; 6878 sess = cls_sess->dd_data; 6879 6880 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 6881 if (ddb_link < max_ddbs) 6882 sess->discovery_parent_idx = ddb_link; 6883 else 6884 sess->discovery_parent_idx = DDB_NO_LINK; 6885 } 6886 6887 static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha, 6888 struct dev_db_entry *fw_ddb_entry, 6889 int is_reset, uint16_t idx) 6890 { 6891 struct iscsi_cls_session *cls_sess; 6892 struct iscsi_session *sess; 6893 struct iscsi_cls_conn *cls_conn; 6894 struct iscsi_endpoint *ep; 6895 uint16_t cmds_max = 32; 6896 uint16_t conn_id = 0; 6897 uint32_t initial_cmdsn = 0; 6898 int ret = QLA_SUCCESS; 6899 6900 struct ddb_entry *ddb_entry = NULL; 6901 6902 /* Create the session object with INVALID_ENTRY as the target id; 6903 * the target_id gets set when we issue the login. 6904 */ 6905 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host, 6906 cmds_max, sizeof(struct ddb_entry), 6907 sizeof(struct ql4_task_data), 6908 initial_cmdsn, INVALID_ENTRY); 6909 if (!cls_sess) { 6910 ret = QLA_ERROR; 6911 goto exit_setup; 6912 } 6913 6914 /* 6915 * iscsi_session_setup() takes a reference on the transport module owner, 6916 * which would prevent the driver from being unloaded, so call 6917 * module_put() here to drop that reference. */ 6918 module_put(qla4xxx_iscsi_transport.owner); 6919 sess = cls_sess->dd_data; 6920 ddb_entry = sess->dd_data; 6921 ddb_entry->sess = cls_sess; 6922 6923 cls_sess->recovery_tmo = ql4xsess_recovery_tmo; 6924 memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry, 6925 sizeof(struct dev_db_entry)); 6926 6927 qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx); 6928 6929 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id); 6930 6931 if (!cls_conn) { 6932 ret = QLA_ERROR; 6933 goto exit_setup; 6934 } 6935 6936 ddb_entry->conn = cls_conn; 6937 6938 /* Set up ep for displaying attributes in sysfs */ 6939 ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry); 6940 if (ep) { 6941 ep->conn = cls_conn; 6942 cls_conn->ep = ep; 6943 } else { 6944 DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n")); 6945 ret = QLA_ERROR; 6946 goto exit_setup; 6947 } 6948 6949 /* Update sess/conn params */ 6950 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); 6951 qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry); 6952 6953 if (is_reset == RESET_ADAPTER) { 6954 iscsi_block_session(cls_sess); 6955 /* Use the relogin path to discover new devices 6956 * by short-circuiting the logic of setting 6957 * timer to relogin - instead set the flags 6958 * to initiate login right away. 
6959 */ 6960 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); 6961 set_bit(DF_RELOGIN, &ddb_entry->flags); 6962 } 6963 6964 exit_setup: 6965 return ret; 6966 } 6967 6968 static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha, 6969 struct list_head *list_ddb, 6970 struct dev_db_entry *fw_ddb_entry) 6971 { 6972 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 6973 uint16_t ddb_link; 6974 6975 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 6976 6977 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { 6978 if (ddb_idx->fw_ddb_idx == ddb_link) { 6979 DEBUG2(ql4_printk(KERN_INFO, ha, 6980 "Updating NT parent idx from [%d] to [%d]\n", 6981 ddb_link, ddb_idx->flash_ddb_idx)); 6982 fw_ddb_entry->ddb_link = 6983 cpu_to_le16(ddb_idx->flash_ddb_idx); 6984 return; 6985 } 6986 } 6987 } 6988 6989 static void qla4xxx_build_nt_list(struct scsi_qla_host *ha, 6990 struct list_head *list_nt, 6991 struct list_head *list_st, 6992 int is_reset) 6993 { 6994 struct dev_db_entry *fw_ddb_entry; 6995 struct ddb_entry *ddb_entry = NULL; 6996 dma_addr_t fw_ddb_dma; 6997 int max_ddbs; 6998 int fw_idx_size; 6999 int ret; 7000 uint32_t idx = 0, next_idx = 0; 7001 uint32_t state = 0, conn_err = 0; 7002 uint32_t ddb_idx = -1; 7003 uint16_t conn_id = 0; 7004 uint16_t ddb_link = -1; 7005 struct qla_ddb_index *nt_ddb_idx; 7006 7007 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 7008 &fw_ddb_dma); 7009 if (fw_ddb_entry == NULL) { 7010 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 7011 goto exit_nt_list; 7012 } 7013 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : 7014 MAX_DEV_DB_ENTRIES; 7015 fw_idx_size = sizeof(struct qla_ddb_index); 7016 7017 for (idx = 0; idx < max_ddbs; idx = next_idx) { 7018 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, 7019 NULL, &next_idx, &state, 7020 &conn_err, NULL, &conn_id); 7021 if (ret == QLA_ERROR) 7022 break; 7023 7024 if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS) 7025 goto continue_next_nt; 7026 7027 /* Check if NT, then add to list it */ 7028 if (strlen((char *) fw_ddb_entry->iscsi_name) == 0) 7029 goto continue_next_nt; 7030 7031 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 7032 if (ddb_link < max_ddbs) 7033 qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry); 7034 7035 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE || 7036 state == DDB_DS_SESSION_FAILED) && 7037 (is_reset == INIT_ADAPTER)) 7038 goto continue_next_nt; 7039 7040 DEBUG2(ql4_printk(KERN_INFO, ha, 7041 "Adding DDB to session = 0x%x\n", idx)); 7042 7043 if (is_reset == INIT_ADAPTER) { 7044 nt_ddb_idx = vmalloc(fw_idx_size); 7045 if (!nt_ddb_idx) 7046 break; 7047 7048 nt_ddb_idx->fw_ddb_idx = idx; 7049 7050 /* Copy original isid as it may get updated in function 7051 * qla4xxx_update_isid(). 
We need original isid in 7052 * function qla4xxx_compare_tuple_ddb to find duplicate 7053 * target */ 7054 memcpy(&nt_ddb_idx->flash_isid[0], 7055 &fw_ddb_entry->isid[0], 7056 sizeof(nt_ddb_idx->flash_isid)); 7057 7058 ret = qla4xxx_is_flash_ddb_exists(ha, list_nt, 7059 fw_ddb_entry); 7060 if (ret == QLA_SUCCESS) { 7061 /* free nt_ddb_idx and do not add to list_nt */ 7062 vfree(nt_ddb_idx); 7063 goto continue_next_nt; 7064 } 7065 7066 /* Copy updated isid */ 7067 memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry, 7068 sizeof(struct dev_db_entry)); 7069 7070 list_add_tail(&nt_ddb_idx->list, list_nt); 7071 } else if (is_reset == RESET_ADAPTER) { 7072 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, 7073 &ddb_idx); 7074 if (ret == QLA_SUCCESS) { 7075 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, 7076 ddb_idx); 7077 if (ddb_entry != NULL) 7078 qla4xxx_update_sess_disc_idx(ha, 7079 ddb_entry, 7080 fw_ddb_entry); 7081 goto continue_next_nt; 7082 } 7083 } 7084 7085 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx); 7086 if (ret == QLA_ERROR) 7087 goto exit_nt_list; 7088 7089 continue_next_nt: 7090 if (next_idx == 0) 7091 break; 7092 } 7093 7094 exit_nt_list: 7095 if (fw_ddb_entry) 7096 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 7097 } 7098 7099 static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha, 7100 struct list_head *list_nt, 7101 uint16_t target_id) 7102 { 7103 struct dev_db_entry *fw_ddb_entry; 7104 dma_addr_t fw_ddb_dma; 7105 int max_ddbs; 7106 int fw_idx_size; 7107 int ret; 7108 uint32_t idx = 0, next_idx = 0; 7109 uint32_t state = 0, conn_err = 0; 7110 uint16_t conn_id = 0; 7111 struct qla_ddb_index *nt_ddb_idx; 7112 7113 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 7114 &fw_ddb_dma); 7115 if (fw_ddb_entry == NULL) { 7116 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 7117 goto exit_new_nt_list; 7118 } 7119 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 7120 MAX_DEV_DB_ENTRIES; 7121 fw_idx_size = sizeof(struct qla_ddb_index); 7122 7123 for (idx = 0; idx < max_ddbs; idx = next_idx) { 7124 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, 7125 NULL, &next_idx, &state, 7126 &conn_err, NULL, &conn_id); 7127 if (ret == QLA_ERROR) 7128 break; 7129 7130 /* Check if NT, then add it to list */ 7131 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0) 7132 goto continue_next_new_nt; 7133 7134 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE)) 7135 goto continue_next_new_nt; 7136 7137 DEBUG2(ql4_printk(KERN_INFO, ha, 7138 "Adding DDB to session = 0x%x\n", idx)); 7139 7140 nt_ddb_idx = vmalloc(fw_idx_size); 7141 if (!nt_ddb_idx) 7142 break; 7143 7144 nt_ddb_idx->fw_ddb_idx = idx; 7145 7146 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL); 7147 if (ret == QLA_SUCCESS) { 7148 /* free nt_ddb_idx and do not add to list_nt */ 7149 vfree(nt_ddb_idx); 7150 goto continue_next_new_nt; 7151 } 7152 7153 if (target_id < max_ddbs) 7154 fw_ddb_entry->ddb_link = cpu_to_le16(target_id); 7155 7156 list_add_tail(&nt_ddb_idx->list, list_nt); 7157 7158 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, 7159 idx); 7160 if (ret == QLA_ERROR) 7161 goto exit_new_nt_list; 7162 7163 continue_next_new_nt: 7164 if (next_idx == 0) 7165 break; 7166 } 7167 7168 exit_new_nt_list: 7169 if (fw_ddb_entry) 7170 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 7171 } 7172 7173 /** 7174 * qla4xxx_sysfs_ddb_is_non_persistent - check for non-persistence of ddb entry 7175 * @dev: dev associated with the sysfs entry 7176 * @data: pointer to flashnode session object 7177 * 7178 * Returns: 7179 * 1: if flashnode entry is non-persistent 7180 * 0: if flashnode entry is persistent 7181 **/ 7182 static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data) 7183 { 7184 struct iscsi_bus_flash_session *fnode_sess; 7185 7186 if (!iscsi_flashnode_bus_match(dev, NULL)) 7187 return 0; 7188 7189 fnode_sess = iscsi_dev_to_flash_session(dev); 7190 7191 return (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT); 7192 } 7193 7194 /** 7195 * qla4xxx_sysfs_ddb_tgt_create - Create sysfs entry for target 7196 * @ha: pointer to host 7197 * @fw_ddb_entry: flash ddb data 7198 * @idx: target index 7199 * @user: if set, this call is made from userland, else from the kernel 7200 * 7201 * Returns: 7202 * On success: QLA_SUCCESS 7203 * On failure: QLA_ERROR 7204 * 7205 * This creates separate sysfs entries for session and connection attributes of 7206 * the given fw ddb entry. 7207 * If this is invoked as a result of a userspace call then the entry is marked 7208 * as nonpersistent using the flash_state field. 
7209 **/ 7210 static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, 7211 struct dev_db_entry *fw_ddb_entry, 7212 uint16_t *idx, int user) 7213 { 7214 struct iscsi_bus_flash_session *fnode_sess = NULL; 7215 struct iscsi_bus_flash_conn *fnode_conn = NULL; 7216 int rc = QLA_ERROR; 7217 7218 fnode_sess = iscsi_create_flashnode_sess(ha->host, *idx, 7219 &qla4xxx_iscsi_transport, 0); 7220 if (!fnode_sess) { 7221 ql4_printk(KERN_ERR, ha, 7222 "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n", 7223 __func__, *idx, ha->host_no); 7224 goto exit_tgt_create; 7225 } 7226 7227 fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess, 7228 &qla4xxx_iscsi_transport, 0); 7229 if (!fnode_conn) { 7230 ql4_printk(KERN_ERR, ha, 7231 "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n", 7232 __func__, *idx, ha->host_no); 7233 goto free_sess; 7234 } 7235 7236 if (user) { 7237 fnode_sess->flash_state = DEV_DB_NON_PERSISTENT; 7238 } else { 7239 fnode_sess->flash_state = DEV_DB_PERSISTENT; 7240 7241 if (*idx == ha->pri_ddb_idx || *idx == ha->sec_ddb_idx) 7242 fnode_sess->is_boot_target = 1; 7243 else 7244 fnode_sess->is_boot_target = 0; 7245 } 7246 7247 rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn, 7248 fw_ddb_entry); 7249 if (rc) 7250 goto free_sess; 7251 7252 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", 7253 __func__, fnode_sess->dev.kobj.name); 7254 7255 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", 7256 __func__, fnode_conn->dev.kobj.name); 7257 7258 return QLA_SUCCESS; 7259 7260 free_sess: 7261 iscsi_destroy_flashnode_sess(fnode_sess); 7262 7263 exit_tgt_create: 7264 return QLA_ERROR; 7265 } 7266 7267 /** 7268 * qla4xxx_sysfs_ddb_add - Add new ddb entry in flash 7269 * @shost: pointer to host 7270 * @buf: type of ddb entry (ipv4/ipv6) 7271 * @len: length of buf 7272 * 7273 * This creates new ddb entry in the flash by finding first free index and 7274 * storing default ddb there. And then create sysfs entry for the new ddb entry. 7275 **/ 7276 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf, 7277 int len) 7278 { 7279 struct scsi_qla_host *ha = to_qla_host(shost); 7280 struct dev_db_entry *fw_ddb_entry = NULL; 7281 dma_addr_t fw_ddb_entry_dma; 7282 struct device *dev; 7283 uint16_t idx = 0; 7284 uint16_t max_ddbs = 0; 7285 uint32_t options = 0; 7286 uint32_t rval = QLA_ERROR; 7287 7288 if (strncasecmp(PORTAL_TYPE_IPV4, buf, 4) && 7289 strncasecmp(PORTAL_TYPE_IPV6, buf, 4)) { 7290 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid portal type\n", 7291 __func__)); 7292 goto exit_ddb_add; 7293 } 7294 7295 max_ddbs = is_qla40XX(ha) ? 
MAX_PRST_DEV_DB_ENTRIES : 7296 MAX_DEV_DB_ENTRIES; 7297 7298 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7299 &fw_ddb_entry_dma, GFP_KERNEL); 7300 if (!fw_ddb_entry) { 7301 DEBUG2(ql4_printk(KERN_ERR, ha, 7302 "%s: Unable to allocate dma buffer\n", 7303 __func__)); 7304 goto exit_ddb_add; 7305 } 7306 7307 dev = iscsi_find_flashnode_sess(ha->host, NULL, 7308 qla4xxx_sysfs_ddb_is_non_persistent); 7309 if (dev) { 7310 ql4_printk(KERN_ERR, ha, 7311 "%s: A non-persistent entry %s found\n", 7312 __func__, dev->kobj.name); 7313 put_device(dev); 7314 goto exit_ddb_add; 7315 } 7316 7317 /* Index 0 and 1 are reserved for boot target entries */ 7318 for (idx = 2; idx < max_ddbs; idx++) { 7319 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, 7320 fw_ddb_entry_dma, idx)) 7321 break; 7322 } 7323 7324 if (idx == max_ddbs) 7325 goto exit_ddb_add; 7326 7327 if (!strncasecmp("ipv6", buf, 4)) 7328 options |= IPV6_DEFAULT_DDB_ENTRY; 7329 7330 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); 7331 if (rval == QLA_ERROR) 7332 goto exit_ddb_add; 7333 7334 rval = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 1); 7335 7336 exit_ddb_add: 7337 if (fw_ddb_entry) 7338 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7339 fw_ddb_entry, fw_ddb_entry_dma); 7340 if (rval == QLA_SUCCESS) 7341 return idx; 7342 else 7343 return -EIO; 7344 } 7345 7346 /** 7347 * qla4xxx_sysfs_ddb_apply - write the target ddb contents to Flash 7348 * @fnode_sess: pointer to session attrs of flash ddb entry 7349 * @fnode_conn: pointer to connection attrs of flash ddb entry 7350 * 7351 * This writes the contents of target ddb buffer to Flash with a valid cookie 7352 * value in order to make the ddb entry persistent. 7353 **/ 7354 static int qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess, 7355 struct iscsi_bus_flash_conn *fnode_conn) 7356 { 7357 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7358 struct scsi_qla_host *ha = to_qla_host(shost); 7359 uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO; 7360 struct dev_db_entry *fw_ddb_entry = NULL; 7361 dma_addr_t fw_ddb_entry_dma; 7362 uint32_t options = 0; 7363 int rval = 0; 7364 7365 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7366 &fw_ddb_entry_dma, GFP_KERNEL); 7367 if (!fw_ddb_entry) { 7368 DEBUG2(ql4_printk(KERN_ERR, ha, 7369 "%s: Unable to allocate dma buffer\n", 7370 __func__)); 7371 rval = -ENOMEM; 7372 goto exit_ddb_apply; 7373 } 7374 7375 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7376 options |= IPV6_DEFAULT_DDB_ENTRY; 7377 7378 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); 7379 if (rval == QLA_ERROR) 7380 goto exit_ddb_apply; 7381 7382 dev_db_start_offset += (fnode_sess->target_id * 7383 sizeof(*fw_ddb_entry)); 7384 7385 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); 7386 fw_ddb_entry->cookie = DDB_VALID_COOKIE; 7387 7388 rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, 7389 sizeof(*fw_ddb_entry), FLASH_OPT_RMW_COMMIT); 7390 7391 if (rval == QLA_SUCCESS) { 7392 fnode_sess->flash_state = DEV_DB_PERSISTENT; 7393 ql4_printk(KERN_INFO, ha, 7394 "%s: flash node %u of host %lu written to flash\n", 7395 __func__, fnode_sess->target_id, ha->host_no); 7396 } else { 7397 rval = -EIO; 7398 ql4_printk(KERN_ERR, ha, 7399 "%s: Error while writing flash node %u of host %lu to flash\n", 7400 __func__, fnode_sess->target_id, ha->host_no); 7401 } 7402 7403 exit_ddb_apply: 7404 if (fw_ddb_entry) 7405 
dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7406 fw_ddb_entry, fw_ddb_entry_dma); 7407 return rval; 7408 } 7409 7410 static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha, 7411 struct dev_db_entry *fw_ddb_entry, 7412 uint16_t idx) 7413 { 7414 struct dev_db_entry *ddb_entry = NULL; 7415 dma_addr_t ddb_entry_dma; 7416 unsigned long wtime; 7417 uint32_t mbx_sts = 0; 7418 uint32_t state = 0, conn_err = 0; 7419 uint16_t tmo = 0; 7420 int ret = 0; 7421 7422 ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ddb_entry), 7423 &ddb_entry_dma, GFP_KERNEL); 7424 if (!ddb_entry) { 7425 DEBUG2(ql4_printk(KERN_ERR, ha, 7426 "%s: Unable to allocate dma buffer\n", 7427 __func__)); 7428 return QLA_ERROR; 7429 } 7430 7431 memcpy(ddb_entry, fw_ddb_entry, sizeof(*ddb_entry)); 7432 7433 ret = qla4xxx_set_ddb_entry(ha, idx, ddb_entry_dma, &mbx_sts); 7434 if (ret != QLA_SUCCESS) { 7435 DEBUG2(ql4_printk(KERN_ERR, ha, 7436 "%s: Unable to set ddb entry for index %d\n", 7437 __func__, idx)); 7438 goto exit_ddb_conn_open; 7439 } 7440 7441 qla4xxx_conn_open(ha, idx); 7442 7443 /* To ensure that sendtargets is done, wait for at least 12 secs */ 7444 tmo = ((ha->def_timeout > LOGIN_TOV) && 7445 (ha->def_timeout < LOGIN_TOV * 10) ? 7446 ha->def_timeout : LOGIN_TOV); 7447 7448 DEBUG2(ql4_printk(KERN_INFO, ha, 7449 "Default time to wait for login to ddb %d\n", tmo)); 7450 7451 wtime = jiffies + (HZ * tmo); 7452 do { 7453 ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL, 7454 NULL, &state, &conn_err, NULL, 7455 NULL); 7456 if (ret == QLA_ERROR) 7457 continue; 7458 7459 if (state == DDB_DS_NO_CONNECTION_ACTIVE || 7460 state == DDB_DS_SESSION_FAILED) 7461 break; 7462 7463 schedule_timeout_uninterruptible(HZ / 10); 7464 } while (time_after(wtime, jiffies)); 7465 7466 exit_ddb_conn_open: 7467 if (ddb_entry) 7468 dma_free_coherent(&ha->pdev->dev, sizeof(*ddb_entry), 7469 ddb_entry, ddb_entry_dma); 7470 return ret; 7471 } 7472 7473 static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha, 7474 struct dev_db_entry *fw_ddb_entry, 7475 uint16_t target_id) 7476 { 7477 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 7478 struct list_head list_nt; 7479 uint16_t ddb_index; 7480 int ret = 0; 7481 7482 if (test_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags)) { 7483 ql4_printk(KERN_WARNING, ha, 7484 "%s: A discovery already in progress!\n", __func__); 7485 return QLA_ERROR; 7486 } 7487 7488 INIT_LIST_HEAD(&list_nt); 7489 7490 set_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags); 7491 7492 ret = qla4xxx_get_ddb_index(ha, &ddb_index); 7493 if (ret == QLA_ERROR) 7494 goto exit_login_st_clr_bit; 7495 7496 ret = qla4xxx_sysfs_ddb_conn_open(ha, fw_ddb_entry, ddb_index); 7497 if (ret == QLA_ERROR) 7498 goto exit_login_st; 7499 7500 qla4xxx_build_new_nt_list(ha, &list_nt, target_id); 7501 7502 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) { 7503 list_del_init(&ddb_idx->list); 7504 qla4xxx_clear_ddb_entry(ha, ddb_idx->fw_ddb_idx); 7505 vfree(ddb_idx); 7506 } 7507 7508 exit_login_st: 7509 if (qla4xxx_clear_ddb_entry(ha, ddb_index) == QLA_ERROR) { 7510 ql4_printk(KERN_ERR, ha, 7511 "Unable to clear DDB index = 0x%x\n", ddb_index); 7512 } 7513 7514 clear_bit(ddb_index, ha->ddb_idx_map); 7515 7516 exit_login_st_clr_bit: 7517 clear_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags); 7518 return ret; 7519 } 7520 7521 static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha, 7522 struct dev_db_entry *fw_ddb_entry, 7523 uint16_t idx) 7524 { 7525 int ret = QLA_ERROR; 7526 7527 ret = qla4xxx_is_session_exists(ha, 
fw_ddb_entry, NULL); 7528 if (ret != QLA_SUCCESS) 7529 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, 7530 idx); 7531 else 7532 ret = -EPERM; 7533 7534 return ret; 7535 } 7536 7537 /** 7538 * qla4xxx_sysfs_ddb_login - Login to the specified target 7539 * @fnode_sess: pointer to session attrs of flash ddb entry 7540 * @fnode_conn: pointer to connection attrs of flash ddb entry 7541 * 7542 * This logs in to the specified target 7543 **/ 7544 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess, 7545 struct iscsi_bus_flash_conn *fnode_conn) 7546 { 7547 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7548 struct scsi_qla_host *ha = to_qla_host(shost); 7549 struct dev_db_entry *fw_ddb_entry = NULL; 7550 dma_addr_t fw_ddb_entry_dma; 7551 uint32_t options = 0; 7552 int ret = 0; 7553 7554 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) { 7555 ql4_printk(KERN_ERR, ha, 7556 "%s: Target info is not persistent\n", __func__); 7557 ret = -EIO; 7558 goto exit_ddb_login; 7559 } 7560 7561 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7562 &fw_ddb_entry_dma, GFP_KERNEL); 7563 if (!fw_ddb_entry) { 7564 DEBUG2(ql4_printk(KERN_ERR, ha, 7565 "%s: Unable to allocate dma buffer\n", 7566 __func__)); 7567 ret = -ENOMEM; 7568 goto exit_ddb_login; 7569 } 7570 7571 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7572 options |= IPV6_DEFAULT_DDB_ENTRY; 7573 7574 ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); 7575 if (ret == QLA_ERROR) 7576 goto exit_ddb_login; 7577 7578 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); 7579 fw_ddb_entry->cookie = DDB_VALID_COOKIE; 7580 7581 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0) 7582 ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry, 7583 fnode_sess->target_id); 7584 else 7585 ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry, 7586 fnode_sess->target_id); 7587 7588 if (ret > 0) 7589 ret = -EIO; 7590 7591 exit_ddb_login: 7592 if (fw_ddb_entry) 7593 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7594 fw_ddb_entry, fw_ddb_entry_dma); 7595 return ret; 7596 } 7597 7598 /** 7599 * qla4xxx_sysfs_ddb_logout_sid - Logout session for the specified target 7600 * @cls_sess: pointer to session to be logged out 7601 * 7602 * This performs session log out from the specified target 7603 **/ 7604 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess) 7605 { 7606 struct iscsi_session *sess; 7607 struct ddb_entry *ddb_entry = NULL; 7608 struct scsi_qla_host *ha; 7609 struct dev_db_entry *fw_ddb_entry = NULL; 7610 dma_addr_t fw_ddb_entry_dma; 7611 unsigned long flags; 7612 unsigned long wtime; 7613 uint32_t ddb_state; 7614 int options; 7615 int ret = 0; 7616 7617 sess = cls_sess->dd_data; 7618 ddb_entry = sess->dd_data; 7619 ha = ddb_entry->ha; 7620 7621 if (ddb_entry->ddb_type != FLASH_DDB) { 7622 ql4_printk(KERN_ERR, ha, "%s: Not a flash node session\n", 7623 __func__); 7624 ret = -ENXIO; 7625 goto exit_ddb_logout; 7626 } 7627 7628 if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) { 7629 ql4_printk(KERN_ERR, ha, 7630 "%s: Logout from boot target entry is not permitted.\n", 7631 __func__); 7632 ret = -EPERM; 7633 goto exit_ddb_logout; 7634 } 7635 7636 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7637 &fw_ddb_entry_dma, GFP_KERNEL); 7638 if (!fw_ddb_entry) { 7639 ql4_printk(KERN_ERR, ha, 7640 "%s: Unable to allocate dma buffer\n", __func__); 7641 ret = -ENOMEM; 7642 goto exit_ddb_logout; 7643 } 7644 7645 if 
(test_and_set_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags)) 7646 goto ddb_logout_init; 7647 7648 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 7649 fw_ddb_entry, fw_ddb_entry_dma, 7650 NULL, NULL, &ddb_state, NULL, 7651 NULL, NULL); 7652 if (ret == QLA_ERROR) 7653 goto ddb_logout_init; 7654 7655 if (ddb_state == DDB_DS_SESSION_ACTIVE) 7656 goto ddb_logout_init; 7657 7658 /* wait until next relogin is triggered using DF_RELOGIN and 7659 * clear DF_RELOGIN to avoid invocation of further relogin 7660 */ 7661 wtime = jiffies + (HZ * RELOGIN_TOV); 7662 do { 7663 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags)) 7664 goto ddb_logout_init; 7665 7666 schedule_timeout_uninterruptible(HZ); 7667 } while ((time_after(wtime, jiffies))); 7668 7669 ddb_logout_init: 7670 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); 7671 atomic_set(&ddb_entry->relogin_timer, 0); 7672 7673 options = LOGOUT_OPTION_CLOSE_SESSION; 7674 qla4xxx_session_logout_ddb(ha, ddb_entry, options); 7675 7676 memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry)); 7677 wtime = jiffies + (HZ * LOGOUT_TOV); 7678 do { 7679 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 7680 fw_ddb_entry, fw_ddb_entry_dma, 7681 NULL, NULL, &ddb_state, NULL, 7682 NULL, NULL); 7683 if (ret == QLA_ERROR) 7684 goto ddb_logout_clr_sess; 7685 7686 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || 7687 (ddb_state == DDB_DS_SESSION_FAILED)) 7688 goto ddb_logout_clr_sess; 7689 7690 schedule_timeout_uninterruptible(HZ); 7691 } while ((time_after(wtime, jiffies))); 7692 7693 ddb_logout_clr_sess: 7694 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); 7695 /* 7696 * we have decremented the reference count of the driver 7697 * when we setup the session to have the driver unload 7698 * to be seamless without actually destroying the 7699 * session 7700 **/ 7701 try_module_get(qla4xxx_iscsi_transport.owner); 7702 iscsi_destroy_endpoint(ddb_entry->conn->ep); 7703 7704 spin_lock_irqsave(&ha->hardware_lock, flags); 7705 qla4xxx_free_ddb(ha, ddb_entry); 7706 clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map); 7707 spin_unlock_irqrestore(&ha->hardware_lock, flags); 7708 7709 iscsi_session_teardown(ddb_entry->sess); 7710 7711 clear_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags); 7712 ret = QLA_SUCCESS; 7713 7714 exit_ddb_logout: 7715 if (fw_ddb_entry) 7716 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7717 fw_ddb_entry, fw_ddb_entry_dma); 7718 return ret; 7719 } 7720 7721 /** 7722 * qla4xxx_sysfs_ddb_logout - Logout from the specified target 7723 * @fnode_sess: pointer to session attrs of flash ddb entry 7724 * @fnode_conn: pointer to connection attrs of flash ddb entry 7725 * 7726 * This performs log out from the specified target 7727 **/ 7728 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess, 7729 struct iscsi_bus_flash_conn *fnode_conn) 7730 { 7731 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7732 struct scsi_qla_host *ha = to_qla_host(shost); 7733 struct ql4_tuple_ddb *flash_tddb = NULL; 7734 struct ql4_tuple_ddb *tmp_tddb = NULL; 7735 struct dev_db_entry *fw_ddb_entry = NULL; 7736 struct ddb_entry *ddb_entry = NULL; 7737 dma_addr_t fw_ddb_dma; 7738 uint32_t next_idx = 0; 7739 uint32_t state = 0, conn_err = 0; 7740 uint16_t conn_id = 0; 7741 int idx, index; 7742 int status, ret = 0; 7743 7744 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 7745 &fw_ddb_dma); 7746 if (fw_ddb_entry == NULL) { 7747 ql4_printk(KERN_ERR, ha, "%s:Out of memory\n", __func__); 7748 ret = 
-ENOMEM; 7749 goto exit_ddb_logout; 7750 } 7751 7752 flash_tddb = vzalloc(sizeof(*flash_tddb)); 7753 if (!flash_tddb) { 7754 ql4_printk(KERN_WARNING, ha, 7755 "%s:Memory Allocation failed.\n", __func__); 7756 ret = -ENOMEM; 7757 goto exit_ddb_logout; 7758 } 7759 7760 tmp_tddb = vzalloc(sizeof(*tmp_tddb)); 7761 if (!tmp_tddb) { 7762 ql4_printk(KERN_WARNING, ha, 7763 "%s:Memory Allocation failed.\n", __func__); 7764 ret = -ENOMEM; 7765 goto exit_ddb_logout; 7766 } 7767 7768 if (!fnode_sess->targetname) { 7769 ql4_printk(KERN_ERR, ha, 7770 "%s:Cannot logout from SendTarget entry\n", 7771 __func__); 7772 ret = -EPERM; 7773 goto exit_ddb_logout; 7774 } 7775 7776 if (fnode_sess->is_boot_target) { 7777 ql4_printk(KERN_ERR, ha, 7778 "%s: Logout from boot target entry is not permitted.\n", 7779 __func__); 7780 ret = -EPERM; 7781 goto exit_ddb_logout; 7782 } 7783 7784 strlcpy(flash_tddb->iscsi_name, fnode_sess->targetname, 7785 ISCSI_NAME_SIZE); 7786 7787 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7788 sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress); 7789 else 7790 sprintf(flash_tddb->ip_addr, "%pI4", fnode_conn->ipaddress); 7791 7792 flash_tddb->tpgt = fnode_sess->tpgt; 7793 flash_tddb->port = fnode_conn->port; 7794 7795 COPY_ISID(flash_tddb->isid, fnode_sess->isid); 7796 7797 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { 7798 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 7799 if (ddb_entry == NULL) 7800 continue; 7801 7802 if (ddb_entry->ddb_type != FLASH_DDB) 7803 continue; 7804 7805 index = ddb_entry->sess->target_id; 7806 status = qla4xxx_get_fwddb_entry(ha, index, fw_ddb_entry, 7807 fw_ddb_dma, NULL, &next_idx, 7808 &state, &conn_err, NULL, 7809 &conn_id); 7810 if (status == QLA_ERROR) { 7811 ret = -ENOMEM; 7812 break; 7813 } 7814 7815 qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, NULL); 7816 7817 status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb, 7818 true); 7819 if (status == QLA_SUCCESS) { 7820 ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess); 7821 break; 7822 } 7823 } 7824 7825 if (idx == MAX_DDB_ENTRIES) 7826 ret = -ESRCH; 7827 7828 exit_ddb_logout: 7829 vfree(flash_tddb); 7830 vfree(tmp_tddb); 7831 if (fw_ddb_entry) 7832 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 7833 7834 return ret; 7835 } 7836 7837 static int 7838 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, 7839 int param, char *buf) 7840 { 7841 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7842 struct scsi_qla_host *ha = to_qla_host(shost); 7843 struct iscsi_bus_flash_conn *fnode_conn; 7844 struct ql4_chap_table chap_tbl; 7845 struct device *dev; 7846 int parent_type; 7847 int rc = 0; 7848 7849 dev = iscsi_find_flashnode_conn(fnode_sess); 7850 if (!dev) 7851 return -EIO; 7852 7853 fnode_conn = iscsi_dev_to_flash_conn(dev); 7854 7855 switch (param) { 7856 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: 7857 rc = sprintf(buf, "%u\n", fnode_conn->is_fw_assigned_ipv6); 7858 break; 7859 case ISCSI_FLASHNODE_PORTAL_TYPE: 7860 rc = sprintf(buf, "%s\n", fnode_sess->portal_type); 7861 break; 7862 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: 7863 rc = sprintf(buf, "%u\n", fnode_sess->auto_snd_tgt_disable); 7864 break; 7865 case ISCSI_FLASHNODE_DISCOVERY_SESS: 7866 rc = sprintf(buf, "%u\n", fnode_sess->discovery_sess); 7867 break; 7868 case ISCSI_FLASHNODE_ENTRY_EN: 7869 rc = sprintf(buf, "%u\n", fnode_sess->entry_state); 7870 break; 7871 case ISCSI_FLASHNODE_HDR_DGST_EN: 7872 rc = sprintf(buf, "%u\n", fnode_conn->hdrdgst_en); 7873 
break; 7874 case ISCSI_FLASHNODE_DATA_DGST_EN: 7875 rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en); 7876 break; 7877 case ISCSI_FLASHNODE_IMM_DATA_EN: 7878 rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en); 7879 break; 7880 case ISCSI_FLASHNODE_INITIAL_R2T_EN: 7881 rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en); 7882 break; 7883 case ISCSI_FLASHNODE_DATASEQ_INORDER: 7884 rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en); 7885 break; 7886 case ISCSI_FLASHNODE_PDU_INORDER: 7887 rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en); 7888 break; 7889 case ISCSI_FLASHNODE_CHAP_AUTH_EN: 7890 rc = sprintf(buf, "%u\n", fnode_sess->chap_auth_en); 7891 break; 7892 case ISCSI_FLASHNODE_SNACK_REQ_EN: 7893 rc = sprintf(buf, "%u\n", fnode_conn->snack_req_en); 7894 break; 7895 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: 7896 rc = sprintf(buf, "%u\n", fnode_sess->discovery_logout_en); 7897 break; 7898 case ISCSI_FLASHNODE_BIDI_CHAP_EN: 7899 rc = sprintf(buf, "%u\n", fnode_sess->bidi_chap_en); 7900 break; 7901 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: 7902 rc = sprintf(buf, "%u\n", fnode_sess->discovery_auth_optional); 7903 break; 7904 case ISCSI_FLASHNODE_ERL: 7905 rc = sprintf(buf, "%u\n", fnode_sess->erl); 7906 break; 7907 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: 7908 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_stat); 7909 break; 7910 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: 7911 rc = sprintf(buf, "%u\n", fnode_conn->tcp_nagle_disable); 7912 break; 7913 case ISCSI_FLASHNODE_TCP_WSF_DISABLE: 7914 rc = sprintf(buf, "%u\n", fnode_conn->tcp_wsf_disable); 7915 break; 7916 case ISCSI_FLASHNODE_TCP_TIMER_SCALE: 7917 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timer_scale); 7918 break; 7919 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: 7920 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_en); 7921 break; 7922 case ISCSI_FLASHNODE_IP_FRAG_DISABLE: 7923 rc = sprintf(buf, "%u\n", fnode_conn->fragment_disable); 7924 break; 7925 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: 7926 rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength); 7927 break; 7928 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: 7929 rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength); 7930 break; 7931 case ISCSI_FLASHNODE_FIRST_BURST: 7932 rc = sprintf(buf, "%u\n", fnode_sess->first_burst); 7933 break; 7934 case ISCSI_FLASHNODE_DEF_TIME2WAIT: 7935 rc = sprintf(buf, "%u\n", fnode_sess->time2wait); 7936 break; 7937 case ISCSI_FLASHNODE_DEF_TIME2RETAIN: 7938 rc = sprintf(buf, "%u\n", fnode_sess->time2retain); 7939 break; 7940 case ISCSI_FLASHNODE_MAX_R2T: 7941 rc = sprintf(buf, "%u\n", fnode_sess->max_r2t); 7942 break; 7943 case ISCSI_FLASHNODE_KEEPALIVE_TMO: 7944 rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout); 7945 break; 7946 case ISCSI_FLASHNODE_ISID: 7947 rc = sprintf(buf, "%pm\n", fnode_sess->isid); 7948 break; 7949 case ISCSI_FLASHNODE_TSID: 7950 rc = sprintf(buf, "%u\n", fnode_sess->tsid); 7951 break; 7952 case ISCSI_FLASHNODE_PORT: 7953 rc = sprintf(buf, "%d\n", fnode_conn->port); 7954 break; 7955 case ISCSI_FLASHNODE_MAX_BURST: 7956 rc = sprintf(buf, "%u\n", fnode_sess->max_burst); 7957 break; 7958 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: 7959 rc = sprintf(buf, "%u\n", 7960 fnode_sess->default_taskmgmt_timeout); 7961 break; 7962 case ISCSI_FLASHNODE_IPADDR: 7963 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7964 rc = sprintf(buf, "%pI6\n", fnode_conn->ipaddress); 7965 else 7966 rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress); 7967 break; 7968 case ISCSI_FLASHNODE_ALIAS: 7969 if (fnode_sess->targetalias) 
7970 rc = sprintf(buf, "%s\n", fnode_sess->targetalias); 7971 else 7972 rc = sprintf(buf, "\n"); 7973 break; 7974 case ISCSI_FLASHNODE_REDIRECT_IPADDR: 7975 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7976 rc = sprintf(buf, "%pI6\n", 7977 fnode_conn->redirect_ipaddr); 7978 else 7979 rc = sprintf(buf, "%pI4\n", 7980 fnode_conn->redirect_ipaddr); 7981 break; 7982 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: 7983 rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size); 7984 break; 7985 case ISCSI_FLASHNODE_LOCAL_PORT: 7986 rc = sprintf(buf, "%u\n", fnode_conn->local_port); 7987 break; 7988 case ISCSI_FLASHNODE_IPV4_TOS: 7989 rc = sprintf(buf, "%u\n", fnode_conn->ipv4_tos); 7990 break; 7991 case ISCSI_FLASHNODE_IPV6_TC: 7992 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7993 rc = sprintf(buf, "%u\n", 7994 fnode_conn->ipv6_traffic_class); 7995 else 7996 rc = sprintf(buf, "\n"); 7997 break; 7998 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: 7999 rc = sprintf(buf, "%u\n", fnode_conn->ipv6_flow_label); 8000 break; 8001 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: 8002 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 8003 rc = sprintf(buf, "%pI6\n", 8004 fnode_conn->link_local_ipv6_addr); 8005 else 8006 rc = sprintf(buf, "\n"); 8007 break; 8008 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: 8009 rc = sprintf(buf, "%u\n", fnode_sess->discovery_parent_idx); 8010 break; 8011 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE: 8012 if (fnode_sess->discovery_parent_type == DDB_ISNS) 8013 parent_type = ISCSI_DISC_PARENT_ISNS; 8014 else if (fnode_sess->discovery_parent_type == DDB_NO_LINK) 8015 parent_type = ISCSI_DISC_PARENT_UNKNOWN; 8016 else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES) 8017 parent_type = ISCSI_DISC_PARENT_SENDTGT; 8018 else 8019 parent_type = ISCSI_DISC_PARENT_UNKNOWN; 8020 8021 rc = sprintf(buf, "%s\n", 8022 iscsi_get_discovery_parent_name(parent_type)); 8023 break; 8024 case ISCSI_FLASHNODE_NAME: 8025 if (fnode_sess->targetname) 8026 rc = sprintf(buf, "%s\n", fnode_sess->targetname); 8027 else 8028 rc = sprintf(buf, "\n"); 8029 break; 8030 case ISCSI_FLASHNODE_TPGT: 8031 rc = sprintf(buf, "%u\n", fnode_sess->tpgt); 8032 break; 8033 case ISCSI_FLASHNODE_TCP_XMIT_WSF: 8034 rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf); 8035 break; 8036 case ISCSI_FLASHNODE_TCP_RECV_WSF: 8037 rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf); 8038 break; 8039 case ISCSI_FLASHNODE_CHAP_OUT_IDX: 8040 rc = sprintf(buf, "%u\n", fnode_sess->chap_out_idx); 8041 break; 8042 case ISCSI_FLASHNODE_USERNAME: 8043 if (fnode_sess->chap_auth_en) { 8044 qla4xxx_get_uni_chap_at_index(ha, 8045 chap_tbl.name, 8046 chap_tbl.secret, 8047 fnode_sess->chap_out_idx); 8048 rc = sprintf(buf, "%s\n", chap_tbl.name); 8049 } else { 8050 rc = sprintf(buf, "\n"); 8051 } 8052 break; 8053 case ISCSI_FLASHNODE_PASSWORD: 8054 if (fnode_sess->chap_auth_en) { 8055 qla4xxx_get_uni_chap_at_index(ha, 8056 chap_tbl.name, 8057 chap_tbl.secret, 8058 fnode_sess->chap_out_idx); 8059 rc = sprintf(buf, "%s\n", chap_tbl.secret); 8060 } else { 8061 rc = sprintf(buf, "\n"); 8062 } 8063 break; 8064 case ISCSI_FLASHNODE_STATSN: 8065 rc = sprintf(buf, "%u\n", fnode_conn->statsn); 8066 break; 8067 case ISCSI_FLASHNODE_EXP_STATSN: 8068 rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn); 8069 break; 8070 case ISCSI_FLASHNODE_IS_BOOT_TGT: 8071 rc = sprintf(buf, "%u\n", fnode_sess->is_boot_target); 8072 break; 8073 default: 8074 rc = -ENOSYS; 8075 break; 8076 } 8077 8078 put_device(dev); 8079 return rc; 8080 } 8081 8082 /** 8083 * 
qla4xxx_sysfs_ddb_set_param - Set parameter for firmware DDB entry 8084 * @fnode_sess: pointer to session attrs of flash ddb entry 8085 * @fnode_conn: pointer to connection attrs of flash ddb entry 8086 * @data: Parameters and their values to update 8087 * @len: len of data 8088 * 8089 * This sets the parameter of flash ddb entry and writes them to flash 8090 **/ 8091 static int 8092 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, 8093 struct iscsi_bus_flash_conn *fnode_conn, 8094 void *data, int len) 8095 { 8096 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 8097 struct scsi_qla_host *ha = to_qla_host(shost); 8098 struct iscsi_flashnode_param_info *fnode_param; 8099 struct ql4_chap_table chap_tbl; 8100 struct nlattr *attr; 8101 uint16_t chap_out_idx = INVALID_ENTRY; 8102 int rc = QLA_ERROR; 8103 uint32_t rem = len; 8104 8105 memset((void *)&chap_tbl, 0, sizeof(chap_tbl)); 8106 nla_for_each_attr(attr, data, len, rem) { 8107 fnode_param = nla_data(attr); 8108 8109 switch (fnode_param->param) { 8110 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: 8111 fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0]; 8112 break; 8113 case ISCSI_FLASHNODE_PORTAL_TYPE: 8114 memcpy(fnode_sess->portal_type, fnode_param->value, 8115 strlen(fnode_sess->portal_type)); 8116 break; 8117 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: 8118 fnode_sess->auto_snd_tgt_disable = 8119 fnode_param->value[0]; 8120 break; 8121 case ISCSI_FLASHNODE_DISCOVERY_SESS: 8122 fnode_sess->discovery_sess = fnode_param->value[0]; 8123 break; 8124 case ISCSI_FLASHNODE_ENTRY_EN: 8125 fnode_sess->entry_state = fnode_param->value[0]; 8126 break; 8127 case ISCSI_FLASHNODE_HDR_DGST_EN: 8128 fnode_conn->hdrdgst_en = fnode_param->value[0]; 8129 break; 8130 case ISCSI_FLASHNODE_DATA_DGST_EN: 8131 fnode_conn->datadgst_en = fnode_param->value[0]; 8132 break; 8133 case ISCSI_FLASHNODE_IMM_DATA_EN: 8134 fnode_sess->imm_data_en = fnode_param->value[0]; 8135 break; 8136 case ISCSI_FLASHNODE_INITIAL_R2T_EN: 8137 fnode_sess->initial_r2t_en = fnode_param->value[0]; 8138 break; 8139 case ISCSI_FLASHNODE_DATASEQ_INORDER: 8140 fnode_sess->dataseq_inorder_en = fnode_param->value[0]; 8141 break; 8142 case ISCSI_FLASHNODE_PDU_INORDER: 8143 fnode_sess->pdu_inorder_en = fnode_param->value[0]; 8144 break; 8145 case ISCSI_FLASHNODE_CHAP_AUTH_EN: 8146 fnode_sess->chap_auth_en = fnode_param->value[0]; 8147 /* Invalidate chap index if chap auth is disabled */ 8148 if (!fnode_sess->chap_auth_en) 8149 fnode_sess->chap_out_idx = INVALID_ENTRY; 8150 8151 break; 8152 case ISCSI_FLASHNODE_SNACK_REQ_EN: 8153 fnode_conn->snack_req_en = fnode_param->value[0]; 8154 break; 8155 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: 8156 fnode_sess->discovery_logout_en = fnode_param->value[0]; 8157 break; 8158 case ISCSI_FLASHNODE_BIDI_CHAP_EN: 8159 fnode_sess->bidi_chap_en = fnode_param->value[0]; 8160 break; 8161 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: 8162 fnode_sess->discovery_auth_optional = 8163 fnode_param->value[0]; 8164 break; 8165 case ISCSI_FLASHNODE_ERL: 8166 fnode_sess->erl = fnode_param->value[0]; 8167 break; 8168 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: 8169 fnode_conn->tcp_timestamp_stat = fnode_param->value[0]; 8170 break; 8171 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: 8172 fnode_conn->tcp_nagle_disable = fnode_param->value[0]; 8173 break; 8174 case ISCSI_FLASHNODE_TCP_WSF_DISABLE: 8175 fnode_conn->tcp_wsf_disable = fnode_param->value[0]; 8176 break; 8177 case ISCSI_FLASHNODE_TCP_TIMER_SCALE: 8178 fnode_conn->tcp_timer_scale = 
fnode_param->value[0]; 8179 break; 8180 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: 8181 fnode_conn->tcp_timestamp_en = fnode_param->value[0]; 8182 break; 8183 case ISCSI_FLASHNODE_IP_FRAG_DISABLE: 8184 fnode_conn->fragment_disable = fnode_param->value[0]; 8185 break; 8186 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: 8187 fnode_conn->max_recv_dlength = 8188 *(unsigned *)fnode_param->value; 8189 break; 8190 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: 8191 fnode_conn->max_xmit_dlength = 8192 *(unsigned *)fnode_param->value; 8193 break; 8194 case ISCSI_FLASHNODE_FIRST_BURST: 8195 fnode_sess->first_burst = 8196 *(unsigned *)fnode_param->value; 8197 break; 8198 case ISCSI_FLASHNODE_DEF_TIME2WAIT: 8199 fnode_sess->time2wait = *(uint16_t *)fnode_param->value; 8200 break; 8201 case ISCSI_FLASHNODE_DEF_TIME2RETAIN: 8202 fnode_sess->time2retain = 8203 *(uint16_t *)fnode_param->value; 8204 break; 8205 case ISCSI_FLASHNODE_MAX_R2T: 8206 fnode_sess->max_r2t = 8207 *(uint16_t *)fnode_param->value; 8208 break; 8209 case ISCSI_FLASHNODE_KEEPALIVE_TMO: 8210 fnode_conn->keepalive_timeout = 8211 *(uint16_t *)fnode_param->value; 8212 break; 8213 case ISCSI_FLASHNODE_ISID: 8214 memcpy(fnode_sess->isid, fnode_param->value, 8215 sizeof(fnode_sess->isid)); 8216 break; 8217 case ISCSI_FLASHNODE_TSID: 8218 fnode_sess->tsid = *(uint16_t *)fnode_param->value; 8219 break; 8220 case ISCSI_FLASHNODE_PORT: 8221 fnode_conn->port = *(uint16_t *)fnode_param->value; 8222 break; 8223 case ISCSI_FLASHNODE_MAX_BURST: 8224 fnode_sess->max_burst = *(unsigned *)fnode_param->value; 8225 break; 8226 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: 8227 fnode_sess->default_taskmgmt_timeout = 8228 *(uint16_t *)fnode_param->value; 8229 break; 8230 case ISCSI_FLASHNODE_IPADDR: 8231 memcpy(fnode_conn->ipaddress, fnode_param->value, 8232 IPv6_ADDR_LEN); 8233 break; 8234 case ISCSI_FLASHNODE_ALIAS: 8235 rc = iscsi_switch_str_param(&fnode_sess->targetalias, 8236 (char *)fnode_param->value); 8237 break; 8238 case ISCSI_FLASHNODE_REDIRECT_IPADDR: 8239 memcpy(fnode_conn->redirect_ipaddr, fnode_param->value, 8240 IPv6_ADDR_LEN); 8241 break; 8242 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: 8243 fnode_conn->max_segment_size = 8244 *(unsigned *)fnode_param->value; 8245 break; 8246 case ISCSI_FLASHNODE_LOCAL_PORT: 8247 fnode_conn->local_port = 8248 *(uint16_t *)fnode_param->value; 8249 break; 8250 case ISCSI_FLASHNODE_IPV4_TOS: 8251 fnode_conn->ipv4_tos = fnode_param->value[0]; 8252 break; 8253 case ISCSI_FLASHNODE_IPV6_TC: 8254 fnode_conn->ipv6_traffic_class = fnode_param->value[0]; 8255 break; 8256 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: 8257 fnode_conn->ipv6_flow_label = fnode_param->value[0]; 8258 break; 8259 case ISCSI_FLASHNODE_NAME: 8260 rc = iscsi_switch_str_param(&fnode_sess->targetname, 8261 (char *)fnode_param->value); 8262 break; 8263 case ISCSI_FLASHNODE_TPGT: 8264 fnode_sess->tpgt = *(uint16_t *)fnode_param->value; 8265 break; 8266 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: 8267 memcpy(fnode_conn->link_local_ipv6_addr, 8268 fnode_param->value, IPv6_ADDR_LEN); 8269 break; 8270 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: 8271 fnode_sess->discovery_parent_idx = 8272 *(uint16_t *)fnode_param->value; 8273 break; 8274 case ISCSI_FLASHNODE_TCP_XMIT_WSF: 8275 fnode_conn->tcp_xmit_wsf = 8276 *(uint8_t *)fnode_param->value; 8277 break; 8278 case ISCSI_FLASHNODE_TCP_RECV_WSF: 8279 fnode_conn->tcp_recv_wsf = 8280 *(uint8_t *)fnode_param->value; 8281 break; 8282 case ISCSI_FLASHNODE_STATSN: 8283 fnode_conn->statsn = *(uint32_t *)fnode_param->value; 8284 break; 8285 case 
ISCSI_FLASHNODE_EXP_STATSN: 8286 fnode_conn->exp_statsn = 8287 *(uint32_t *)fnode_param->value; 8288 break; 8289 case ISCSI_FLASHNODE_CHAP_OUT_IDX: 8290 chap_out_idx = *(uint16_t *)fnode_param->value; 8291 if (!qla4xxx_get_uni_chap_at_index(ha, 8292 chap_tbl.name, 8293 chap_tbl.secret, 8294 chap_out_idx)) { 8295 fnode_sess->chap_out_idx = chap_out_idx; 8296 /* Enable chap auth if chap index is valid */ 8297 fnode_sess->chap_auth_en = QL4_PARAM_ENABLE; 8298 } 8299 break; 8300 default: 8301 ql4_printk(KERN_ERR, ha, 8302 "%s: No such sysfs attribute\n", __func__); 8303 rc = -ENOSYS; 8304 goto exit_set_param; 8305 } 8306 } 8307 8308 rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn); 8309 8310 exit_set_param: 8311 return rc; 8312 } 8313 8314 /** 8315 * qla4xxx_sysfs_ddb_delete - Delete firmware DDB entry 8316 * @fnode_sess: pointer to session attrs of flash ddb entry 8317 * 8318 * This invalidates the flash ddb entry at the given index 8319 **/ 8320 static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess) 8321 { 8322 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 8323 struct scsi_qla_host *ha = to_qla_host(shost); 8324 uint32_t dev_db_start_offset; 8325 uint32_t dev_db_end_offset; 8326 struct dev_db_entry *fw_ddb_entry = NULL; 8327 dma_addr_t fw_ddb_entry_dma; 8328 uint16_t *ddb_cookie = NULL; 8329 size_t ddb_size = 0; 8330 void *pddb = NULL; 8331 int target_id; 8332 int rc = 0; 8333 8334 if (fnode_sess->is_boot_target) { 8335 rc = -EPERM; 8336 DEBUG2(ql4_printk(KERN_ERR, ha, 8337 "%s: Deletion of boot target entry is not permitted.\n", 8338 __func__)); 8339 goto exit_ddb_del; 8340 } 8341 8342 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) 8343 goto sysfs_ddb_del; 8344 8345 if (is_qla40XX(ha)) { 8346 dev_db_start_offset = FLASH_OFFSET_DB_INFO; 8347 dev_db_end_offset = FLASH_OFFSET_DB_END; 8348 dev_db_start_offset += (fnode_sess->target_id * 8349 sizeof(*fw_ddb_entry)); 8350 ddb_size = sizeof(*fw_ddb_entry); 8351 } else { 8352 dev_db_start_offset = FLASH_RAW_ACCESS_ADDR + 8353 (ha->hw.flt_region_ddb << 2); 8354 /* flt_ddb_size is DDB table size for both ports 8355 * so divide it by 2 to calculate the offset for second port 8356 */ 8357 if (ha->port_num == 1) 8358 dev_db_start_offset += (ha->hw.flt_ddb_size / 2); 8359 8360 dev_db_end_offset = dev_db_start_offset + 8361 (ha->hw.flt_ddb_size / 2); 8362 8363 dev_db_start_offset += (fnode_sess->target_id * 8364 sizeof(*fw_ddb_entry)); 8365 dev_db_start_offset += offsetof(struct dev_db_entry, cookie); 8366 8367 ddb_size = sizeof(*ddb_cookie); 8368 } 8369 8370 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: start offset=%u, end offset=%u\n", 8371 __func__, dev_db_start_offset, dev_db_end_offset)); 8372 8373 if (dev_db_start_offset > dev_db_end_offset) { 8374 rc = -EIO; 8375 DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %u\n", 8376 __func__, fnode_sess->target_id)); 8377 goto exit_ddb_del; 8378 } 8379 8380 pddb = dma_alloc_coherent(&ha->pdev->dev, ddb_size, 8381 &fw_ddb_entry_dma, GFP_KERNEL); 8382 if (!pddb) { 8383 rc = -ENOMEM; 8384 DEBUG2(ql4_printk(KERN_ERR, ha, 8385 "%s: Unable to allocate dma buffer\n", 8386 __func__)); 8387 goto exit_ddb_del; 8388 } 8389 8390 if (is_qla40XX(ha)) { 8391 fw_ddb_entry = pddb; 8392 memset(fw_ddb_entry, 0, ddb_size); 8393 ddb_cookie = &fw_ddb_entry->cookie; 8394 } else { 8395 ddb_cookie = pddb; 8396 } 8397 8398 /* invalidate the cookie */ 8399 *ddb_cookie = 0xFFEE; 8400 qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, 8401 ddb_size, FLASH_OPT_RMW_COMMIT); 
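
	/*
	 * Summary of the flash update just issued, derived from the offset
	 * math above: on ISP4xxx the whole zeroed dev_db_entry (cookie set
	 * to 0xFFEE) is written back at
	 *
	 *	FLASH_OFFSET_DB_INFO +
	 *		fnode_sess->target_id * sizeof(struct dev_db_entry)
	 *
	 * while on ISP8xxx only the 16-bit cookie of the entry is rewritten
	 * at
	 *
	 *	FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_ddb << 2) +
	 *		(ha->port_num == 1 ? ha->hw.flt_ddb_size / 2 : 0) +
	 *		fnode_sess->target_id * sizeof(struct dev_db_entry) +
	 *		offsetof(struct dev_db_entry, cookie)
	 *
	 * In both cases the overwritten cookie is what invalidates the
	 * persistent entry.
	 */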
8402 8403 sysfs_ddb_del: 8404 target_id = fnode_sess->target_id; 8405 iscsi_destroy_flashnode_sess(fnode_sess); 8406 ql4_printk(KERN_INFO, ha, 8407 "%s: session and conn entries for flashnode %u of host %lu deleted\n", 8408 __func__, target_id, ha->host_no); 8409 exit_ddb_del: 8410 if (pddb) 8411 dma_free_coherent(&ha->pdev->dev, ddb_size, pddb, 8412 fw_ddb_entry_dma); 8413 return rc; 8414 } 8415 8416 /** 8417 * qla4xxx_sysfs_ddb_export - Create sysfs entries for firmware DDBs 8418 * @ha: pointer to adapter structure 8419 * 8420 * Export the firmware DDB for all send targets and normal targets to sysfs. 8421 **/ 8422 int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha) 8423 { 8424 struct dev_db_entry *fw_ddb_entry = NULL; 8425 dma_addr_t fw_ddb_entry_dma; 8426 uint16_t max_ddbs; 8427 uint16_t idx = 0; 8428 int ret = QLA_SUCCESS; 8429 8430 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, 8431 sizeof(*fw_ddb_entry), 8432 &fw_ddb_entry_dma, GFP_KERNEL); 8433 if (!fw_ddb_entry) { 8434 DEBUG2(ql4_printk(KERN_ERR, ha, 8435 "%s: Unable to allocate dma buffer\n", 8436 __func__)); 8437 return -ENOMEM; 8438 } 8439 8440 max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES : 8441 MAX_DEV_DB_ENTRIES; 8442 8443 for (idx = 0; idx < max_ddbs; idx++) { 8444 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma, 8445 idx)) 8446 continue; 8447 8448 ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 0); 8449 if (ret) { 8450 ret = -EIO; 8451 break; 8452 } 8453 } 8454 8455 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, 8456 fw_ddb_entry_dma); 8457 8458 return ret; 8459 } 8460 8461 static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha) 8462 { 8463 iscsi_destroy_all_flashnode(ha->host); 8464 } 8465 8466 /** 8467 * qla4xxx_build_ddb_list - Build ddb list and setup sessions 8468 * @ha: pointer to adapter structure 8469 * @is_reset: Is this init path or reset path 8470 * 8471 * Create a list of sendtargets (st) from firmware DDBs, issue send targets 8472 * using connection open, then create the list of normal targets (nt) 8473 * from firmware DDBs. Based on the list of nt setup session and connection 8474 * objects. 8475 **/ 8476 void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset) 8477 { 8478 uint16_t tmo = 0; 8479 struct list_head list_st, list_nt; 8480 struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp; 8481 unsigned long wtime; 8482 8483 if (!test_bit(AF_LINK_UP, &ha->flags)) { 8484 set_bit(AF_BUILD_DDB_LIST, &ha->flags); 8485 ha->is_reset = is_reset; 8486 return; 8487 } 8488 8489 INIT_LIST_HEAD(&list_st); 8490 INIT_LIST_HEAD(&list_nt); 8491 8492 qla4xxx_build_st_list(ha, &list_st); 8493 8494 /* Before issuing conn open mbox, ensure all IPs states are configured 8495 * Note, conn open fails if IPs are not configured 8496 */ 8497 qla4xxx_wait_for_ip_configuration(ha); 8498 8499 /* Go thru the STs and fire the sendtargets by issuing conn open mbx */ 8500 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) { 8501 qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx); 8502 } 8503 8504 /* Wait to ensure all sendtargets are done for min 12 sec wait */ 8505 tmo = ((ha->def_timeout > LOGIN_TOV) && 8506 (ha->def_timeout < LOGIN_TOV * 10) ? 
8507 ha->def_timeout : LOGIN_TOV); 8508 8509 DEBUG2(ql4_printk(KERN_INFO, ha, 8510 "Default time to wait for build ddb %d\n", tmo)); 8511 8512 wtime = jiffies + (HZ * tmo); 8513 do { 8514 if (list_empty(&list_st)) 8515 break; 8516 8517 qla4xxx_remove_failed_ddb(ha, &list_st); 8518 schedule_timeout_uninterruptible(HZ / 10); 8519 } while (time_after(wtime, jiffies)); 8520 8521 8522 qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset); 8523 8524 qla4xxx_free_ddb_list(&list_st); 8525 qla4xxx_free_ddb_list(&list_nt); 8526 8527 qla4xxx_free_ddb_index(ha); 8528 } 8529 8530 /** 8531 * qla4xxx_wait_login_resp_boot_tgt - Wait for iSCSI boot target login 8532 * response. 8533 * @ha: pointer to adapter structure 8534 * 8535 * When the boot entry is normal iSCSI target then DF_BOOT_TGT flag will be 8536 * set in DDB and we will wait for login response of boot targets during 8537 * probe. 8538 **/ 8539 static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha) 8540 { 8541 struct ddb_entry *ddb_entry; 8542 struct dev_db_entry *fw_ddb_entry = NULL; 8543 dma_addr_t fw_ddb_entry_dma; 8544 unsigned long wtime; 8545 uint32_t ddb_state; 8546 int max_ddbs, idx, ret; 8547 8548 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : 8549 MAX_DEV_DB_ENTRIES; 8550 8551 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 8552 &fw_ddb_entry_dma, GFP_KERNEL); 8553 if (!fw_ddb_entry) { 8554 ql4_printk(KERN_ERR, ha, 8555 "%s: Unable to allocate dma buffer\n", __func__); 8556 goto exit_login_resp; 8557 } 8558 8559 wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV); 8560 8561 for (idx = 0; idx < max_ddbs; idx++) { 8562 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 8563 if (ddb_entry == NULL) 8564 continue; 8565 8566 if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) { 8567 DEBUG2(ql4_printk(KERN_INFO, ha, 8568 "%s: DDB index [%d]\n", __func__, 8569 ddb_entry->fw_ddb_index)); 8570 do { 8571 ret = qla4xxx_get_fwddb_entry(ha, 8572 ddb_entry->fw_ddb_index, 8573 fw_ddb_entry, fw_ddb_entry_dma, 8574 NULL, NULL, &ddb_state, NULL, 8575 NULL, NULL); 8576 if (ret == QLA_ERROR) 8577 goto exit_login_resp; 8578 8579 if ((ddb_state == DDB_DS_SESSION_ACTIVE) || 8580 (ddb_state == DDB_DS_SESSION_FAILED)) 8581 break; 8582 8583 schedule_timeout_uninterruptible(HZ); 8584 8585 } while ((time_after(wtime, jiffies))); 8586 8587 if (!time_after(wtime, jiffies)) { 8588 DEBUG2(ql4_printk(KERN_INFO, ha, 8589 "%s: Login response wait timer expired\n", 8590 __func__)); 8591 goto exit_login_resp; 8592 } 8593 } 8594 } 8595 8596 exit_login_resp: 8597 if (fw_ddb_entry) 8598 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 8599 fw_ddb_entry, fw_ddb_entry_dma); 8600 } 8601 8602 /** 8603 * qla4xxx_probe_adapter - callback function to probe HBA 8604 * @pdev: pointer to pci_dev structure 8605 * @ent: pointer to pci_device entry 8606 * 8607 * This routine will probe for Qlogic 4xxx iSCSI host adapters. 8608 * It returns zero if successful. It also initializes all data necessary for 8609 * the driver. 
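 *
 * Broad ordering of the probe path below (a summary of this function, not a
 * contract):
 *	1. pci_enable_device(), iscsi_host_alloc() and per-chip isp_ops setup
 *	2. iospace_config(), DMA mask configuration and qla4xxx_mem_alloc()
 *	3. scsi_add_host() followed by qla4xxx_initialize_adapter(), retried
 *	   up to MAX_INIT_RETRIES times unless the hardware reports failure
 *	4. DPC and task workqueue creation, IRQ request (ISP4xxx only) and
 *	   timer start
 *	5. boot info setup, flash DDB list build/login, CHAP list and iface
 *	   creation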
8610 **/ 8611 static int qla4xxx_probe_adapter(struct pci_dev *pdev, 8612 const struct pci_device_id *ent) 8613 { 8614 int ret = -ENODEV, status; 8615 struct Scsi_Host *host; 8616 struct scsi_qla_host *ha; 8617 uint8_t init_retry_count = 0; 8618 char buf[34]; 8619 struct qla4_8xxx_legacy_intr_set *nx_legacy_intr; 8620 uint32_t dev_state; 8621 8622 if (pci_enable_device(pdev)) 8623 return -1; 8624 8625 host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0); 8626 if (host == NULL) { 8627 printk(KERN_WARNING 8628 "qla4xxx: Couldn't allocate host from scsi layer!\n"); 8629 goto probe_disable_device; 8630 } 8631 8632 /* Clear our data area */ 8633 ha = to_qla_host(host); 8634 memset(ha, 0, sizeof(*ha)); 8635 8636 /* Save the information from PCI BIOS. */ 8637 ha->pdev = pdev; 8638 ha->host = host; 8639 ha->host_no = host->host_no; 8640 ha->func_num = PCI_FUNC(ha->pdev->devfn); 8641 8642 pci_enable_pcie_error_reporting(pdev); 8643 8644 /* Setup Runtime configurable options */ 8645 if (is_qla8022(ha)) { 8646 ha->isp_ops = &qla4_82xx_isp_ops; 8647 ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl; 8648 ha->qdr_sn_window = -1; 8649 ha->ddr_mn_window = -1; 8650 ha->curr_window = 255; 8651 nx_legacy_intr = &legacy_intr[ha->func_num]; 8652 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit; 8653 ha->nx_legacy_intr.tgt_status_reg = 8654 nx_legacy_intr->tgt_status_reg; 8655 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg; 8656 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; 8657 } else if (is_qla8032(ha) || is_qla8042(ha)) { 8658 ha->isp_ops = &qla4_83xx_isp_ops; 8659 ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl; 8660 } else { 8661 ha->isp_ops = &qla4xxx_isp_ops; 8662 } 8663 8664 if (is_qla80XX(ha)) { 8665 rwlock_init(&ha->hw_lock); 8666 ha->pf_bit = ha->func_num << 16; 8667 /* Set EEH reset type to fundamental if required by hba */ 8668 pdev->needs_freset = 1; 8669 } 8670 8671 /* Configure PCI I/O space. */ 8672 ret = ha->isp_ops->iospace_config(ha); 8673 if (ret) 8674 goto probe_failed_ioconfig; 8675 8676 ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n", 8677 pdev->device, pdev->irq, ha->reg); 8678 8679 qla4xxx_config_dma_addressing(ha); 8680 8681 /* Initialize lists and spinlocks. */ 8682 INIT_LIST_HEAD(&ha->free_srb_q); 8683 8684 mutex_init(&ha->mbox_sem); 8685 mutex_init(&ha->chap_sem); 8686 init_completion(&ha->mbx_intr_comp); 8687 init_completion(&ha->disable_acb_comp); 8688 init_completion(&ha->idc_comp); 8689 init_completion(&ha->link_up_comp); 8690 8691 spin_lock_init(&ha->hardware_lock); 8692 spin_lock_init(&ha->work_lock); 8693 8694 /* Initialize work list */ 8695 INIT_LIST_HEAD(&ha->work_list); 8696 8697 /* Allocate dma buffers */ 8698 if (qla4xxx_mem_alloc(ha)) { 8699 ql4_printk(KERN_WARNING, ha, 8700 "[ERROR] Failed to allocate memory for adapter\n"); 8701 8702 ret = -ENOMEM; 8703 goto probe_failed; 8704 } 8705 8706 host->cmd_per_lun = 3; 8707 host->max_channel = 0; 8708 host->max_lun = MAX_LUNS - 1; 8709 host->max_id = MAX_TARGETS; 8710 host->max_cmd_len = IOCB_MAX_CDB_LEN; 8711 host->can_queue = MAX_SRBS ; 8712 host->transportt = qla4xxx_scsi_transport; 8713 8714 pci_set_drvdata(pdev, ha); 8715 8716 ret = scsi_add_host(host, &pdev->dev); 8717 if (ret) 8718 goto probe_failed; 8719 8720 if (is_qla80XX(ha)) 8721 qla4_8xxx_get_flash_info(ha); 8722 8723 if (is_qla8032(ha) || is_qla8042(ha)) { 8724 qla4_83xx_read_reset_template(ha); 8725 /* 8726 * NOTE: If ql4dontresethba==1, set IDC_CTRL DONTRESET_BIT0. 
		 * If DONTRESET_BIT0 is set, drivers should not set dev_state
		 * to NEED_RESET. But if NEED_RESET is set, drivers should
		 * honor the reset.
		 */
		if (ql4xdontresethba == 1)
			qla4_83xx_set_idc_dontreset(ha);
	}

	/*
	 * Initialize the Host adapter request/response queues and
	 * firmware
	 * NOTE: interrupts enabled upon successful completion
	 */
	status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);

	/* Don't retry adapter initialization if IRQ allocation failed */
	if (is_qla80XX(ha) && (status == QLA_ERROR))
		goto skip_retry_init;

	while ((!test_bit(AF_ONLINE, &ha->flags)) &&
	    init_retry_count++ < MAX_INIT_RETRIES) {

		if (is_qla80XX(ha)) {
			ha->isp_ops->idc_lock(ha);
			dev_state = qla4_8xxx_rd_direct(ha,
							QLA8XXX_CRB_DEV_STATE);
			ha->isp_ops->idc_unlock(ha);
			if (dev_state == QLA8XXX_DEV_FAILED) {
				ql4_printk(KERN_WARNING, ha, "%s: don't retry "
					   "initialize adapter. H/W is in failed state\n",
					   __func__);
				break;
			}
		}
		DEBUG2(printk("scsi: %s: retrying adapter initialization "
			      "(%d)\n", __func__, init_retry_count));

		if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
			continue;

		status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
		if (is_qla80XX(ha) && (status == QLA_ERROR)) {
			if (qla4_8xxx_check_init_adapter_retry(ha) == QLA_ERROR)
				goto skip_retry_init;
		}
	}

skip_retry_init:
	if (!test_bit(AF_ONLINE, &ha->flags)) {
		ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");

		if ((is_qla8022(ha) && ql4xdontresethba) ||
		    ((is_qla8032(ha) || is_qla8042(ha)) &&
		     qla4_83xx_idc_dontreset(ha))) {
			/* Put the device in failed state. */
			DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
			ha->isp_ops->idc_lock(ha);
			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
					    QLA8XXX_DEV_FAILED);
			ha->isp_ops->idc_unlock(ha);
		}
		ret = -ENODEV;
		goto remove_host;
	}

	/* Startup the kernel thread for this host adapter. */
	DEBUG2(printk("scsi: %s: Starting kernel thread for "
		      "qla4xxx_dpc\n", __func__));
	sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
	ha->dpc_thread = create_singlethread_workqueue(buf);
	if (!ha->dpc_thread) {
		ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
		ret = -ENODEV;
		goto remove_host;
	}
	INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);

	ha->task_wq = alloc_workqueue("qla4xxx_%lu_task", WQ_MEM_RECLAIM, 1,
				      ha->host_no);
	if (!ha->task_wq) {
		ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
		ret = -ENODEV;
		goto remove_host;
	}

	/*
	 * For ISP-8XXX, request_irqs is called in qla4_8xxx_load_risc
	 * (which is called indirectly by qla4xxx_initialize_adapter),
	 * so that irqs will be registered after crbinit but before
	 * mbx_intr_enable.
	 */
	if (is_qla40XX(ha)) {
		ret = qla4xxx_request_irqs(ha);
		if (ret) {
			ql4_printk(KERN_WARNING, ha, "Failed to reserve "
				   "interrupt %d already in use.\n", pdev->irq);
			goto remove_host;
		}
	}

	pci_save_state(ha->pdev);
	ha->isp_ops->enable_intrs(ha);

	/* Start timer thread.
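	 * (The second argument passed to qla4xxx_start_timer() below is read
	 * here as the re-arm interval in seconds; that reading is an
	 * assumption made from this call site, as the timer helper itself is
	 * defined elsewhere in this driver.)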
*/ 8831 qla4xxx_start_timer(ha, 1); 8832 8833 set_bit(AF_INIT_DONE, &ha->flags); 8834 8835 qla4_8xxx_alloc_sysfs_attr(ha); 8836 8837 printk(KERN_INFO 8838 " QLogic iSCSI HBA Driver version: %s\n" 8839 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", 8840 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev), 8841 ha->host_no, ha->fw_info.fw_major, ha->fw_info.fw_minor, 8842 ha->fw_info.fw_patch, ha->fw_info.fw_build); 8843 8844 /* Set the driver version */ 8845 if (is_qla80XX(ha)) 8846 qla4_8xxx_set_param(ha, SET_DRVR_VERSION); 8847 8848 if (qla4xxx_setup_boot_info(ha)) 8849 ql4_printk(KERN_ERR, ha, 8850 "%s: No iSCSI boot target configured\n", __func__); 8851 8852 set_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags); 8853 /* Perform the build ddb list and login to each */ 8854 qla4xxx_build_ddb_list(ha, INIT_ADAPTER); 8855 iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb); 8856 qla4xxx_wait_login_resp_boot_tgt(ha); 8857 8858 qla4xxx_create_chap_list(ha); 8859 8860 qla4xxx_create_ifaces(ha); 8861 return 0; 8862 8863 remove_host: 8864 scsi_remove_host(ha->host); 8865 8866 probe_failed: 8867 qla4xxx_free_adapter(ha); 8868 8869 probe_failed_ioconfig: 8870 pci_disable_pcie_error_reporting(pdev); 8871 scsi_host_put(ha->host); 8872 8873 probe_disable_device: 8874 pci_disable_device(pdev); 8875 8876 return ret; 8877 } 8878 8879 /** 8880 * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize 8881 * @ha: pointer to adapter structure 8882 * 8883 * Mark the other ISP-4xxx port to indicate that the driver is being removed, 8884 * so that the other port will not re-initialize while in the process of 8885 * removing the ha due to driver unload or hba hotplug. 8886 **/ 8887 static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha) 8888 { 8889 struct scsi_qla_host *other_ha = NULL; 8890 struct pci_dev *other_pdev = NULL; 8891 int fn = ISP4XXX_PCI_FN_2; 8892 8893 /*iscsi function numbers for ISP4xxx is 1 and 3*/ 8894 if (PCI_FUNC(ha->pdev->devfn) & BIT_1) 8895 fn = ISP4XXX_PCI_FN_1; 8896 8897 other_pdev = 8898 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus), 8899 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), 8900 fn)); 8901 8902 /* Get other_ha if other_pdev is valid and state is enable*/ 8903 if (other_pdev) { 8904 if (atomic_read(&other_pdev->enable_cnt)) { 8905 other_ha = pci_get_drvdata(other_pdev); 8906 if (other_ha) { 8907 set_bit(AF_HA_REMOVAL, &other_ha->flags); 8908 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: " 8909 "Prevent %s reinit\n", __func__, 8910 dev_name(&other_ha->pdev->dev))); 8911 } 8912 } 8913 pci_dev_put(other_pdev); 8914 } 8915 } 8916 8917 static void qla4xxx_destroy_ddb(struct scsi_qla_host *ha, 8918 struct ddb_entry *ddb_entry) 8919 { 8920 struct dev_db_entry *fw_ddb_entry = NULL; 8921 dma_addr_t fw_ddb_entry_dma; 8922 unsigned long wtime; 8923 uint32_t ddb_state; 8924 int options; 8925 int status; 8926 8927 options = LOGOUT_OPTION_CLOSE_SESSION; 8928 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) { 8929 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__); 8930 goto clear_ddb; 8931 } 8932 8933 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 8934 &fw_ddb_entry_dma, GFP_KERNEL); 8935 if (!fw_ddb_entry) { 8936 ql4_printk(KERN_ERR, ha, 8937 "%s: Unable to allocate dma buffer\n", __func__); 8938 goto clear_ddb; 8939 } 8940 8941 wtime = jiffies + (HZ * LOGOUT_TOV); 8942 do { 8943 status = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 8944 fw_ddb_entry, 
fw_ddb_entry_dma, 8945 NULL, NULL, &ddb_state, NULL, 8946 NULL, NULL); 8947 if (status == QLA_ERROR) 8948 goto free_ddb; 8949 8950 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || 8951 (ddb_state == DDB_DS_SESSION_FAILED)) 8952 goto free_ddb; 8953 8954 schedule_timeout_uninterruptible(HZ); 8955 } while ((time_after(wtime, jiffies))); 8956 8957 free_ddb: 8958 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 8959 fw_ddb_entry, fw_ddb_entry_dma); 8960 clear_ddb: 8961 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); 8962 } 8963 8964 static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha) 8965 { 8966 struct ddb_entry *ddb_entry; 8967 int idx; 8968 8969 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { 8970 8971 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 8972 if ((ddb_entry != NULL) && 8973 (ddb_entry->ddb_type == FLASH_DDB)) { 8974 8975 qla4xxx_destroy_ddb(ha, ddb_entry); 8976 /* 8977 * we have decremented the reference count of the driver 8978 * when we setup the session to have the driver unload 8979 * to be seamless without actually destroying the 8980 * session 8981 **/ 8982 try_module_get(qla4xxx_iscsi_transport.owner); 8983 iscsi_destroy_endpoint(ddb_entry->conn->ep); 8984 qla4xxx_free_ddb(ha, ddb_entry); 8985 iscsi_session_teardown(ddb_entry->sess); 8986 } 8987 } 8988 } 8989 /** 8990 * qla4xxx_remove_adapter - callback function to remove adapter. 8991 * @pdev: PCI device pointer 8992 **/ 8993 static void qla4xxx_remove_adapter(struct pci_dev *pdev) 8994 { 8995 struct scsi_qla_host *ha; 8996 8997 /* 8998 * If the PCI device is disabled then it means probe_adapter had 8999 * failed and resources already cleaned up on probe_adapter exit. 9000 */ 9001 if (!pci_is_enabled(pdev)) 9002 return; 9003 9004 ha = pci_get_drvdata(pdev); 9005 9006 if (is_qla40XX(ha)) 9007 qla4xxx_prevent_other_port_reinit(ha); 9008 9009 /* destroy iface from sysfs */ 9010 qla4xxx_destroy_ifaces(ha); 9011 9012 if ((!ql4xdisablesysfsboot) && ha->boot_kset) 9013 iscsi_boot_destroy_kset(ha->boot_kset); 9014 9015 qla4xxx_destroy_fw_ddb_session(ha); 9016 qla4_8xxx_free_sysfs_attr(ha); 9017 9018 qla4xxx_sysfs_ddb_remove(ha); 9019 scsi_remove_host(ha->host); 9020 9021 qla4xxx_free_adapter(ha); 9022 9023 scsi_host_put(ha->host); 9024 9025 pci_disable_pcie_error_reporting(pdev); 9026 pci_disable_device(pdev); 9027 } 9028 9029 /** 9030 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method. 9031 * @ha: HA context 9032 */ 9033 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha) 9034 { 9035 /* Update our PCI device dma_mask for full 64 bit mask */ 9036 if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) { 9037 dev_dbg(&ha->pdev->dev, 9038 "Failed to set 64 bit PCI consistent mask; " 9039 "using 32 bit.\n"); 9040 dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(32)); 9041 } 9042 } 9043 9044 static int qla4xxx_slave_alloc(struct scsi_device *sdev) 9045 { 9046 struct iscsi_cls_session *cls_sess; 9047 struct iscsi_session *sess; 9048 struct ddb_entry *ddb; 9049 int queue_depth = QL4_DEF_QDEPTH; 9050 9051 cls_sess = starget_to_session(sdev->sdev_target); 9052 sess = cls_sess->dd_data; 9053 ddb = sess->dd_data; 9054 9055 sdev->hostdata = ddb; 9056 9057 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU) 9058 queue_depth = ql4xmaxqdepth; 9059 9060 scsi_change_queue_depth(sdev, queue_depth); 9061 return 0; 9062 } 9063 9064 /** 9065 * qla4xxx_del_from_active_array - returns an active srb 9066 * @ha: Pointer to host adapter structure. 
9067 * @index: index into the active_array 9068 * 9069 * This routine removes and returns the srb at the specified index 9070 **/ 9071 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha, 9072 uint32_t index) 9073 { 9074 struct srb *srb = NULL; 9075 struct scsi_cmnd *cmd = NULL; 9076 9077 cmd = scsi_host_find_tag(ha->host, index); 9078 if (!cmd) 9079 return srb; 9080 9081 srb = qla4xxx_cmd_priv(cmd)->srb; 9082 if (!srb) 9083 return srb; 9084 9085 /* update counters */ 9086 if (srb->flags & SRB_DMA_VALID) { 9087 ha->iocb_cnt -= srb->iocb_cnt; 9088 if (srb->cmd) 9089 srb->cmd->host_scribble = 9090 (unsigned char *)(unsigned long) MAX_SRBS; 9091 } 9092 return srb; 9093 } 9094 9095 /** 9096 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware 9097 * @ha: Pointer to host adapter structure. 9098 * @cmd: Scsi Command to wait on. 9099 * 9100 * This routine waits for the command to be returned by the Firmware 9101 * for some max time. 9102 **/ 9103 static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha, 9104 struct scsi_cmnd *cmd) 9105 { 9106 int done = 0; 9107 struct srb *rp; 9108 uint32_t max_wait_time = EH_WAIT_CMD_TOV; 9109 int ret = SUCCESS; 9110 9111 /* Dont wait on command if PCI error is being handled 9112 * by PCI AER driver 9113 */ 9114 if (unlikely(pci_channel_offline(ha->pdev)) || 9115 (test_bit(AF_EEH_BUSY, &ha->flags))) { 9116 ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n", 9117 ha->host_no, __func__); 9118 return ret; 9119 } 9120 9121 do { 9122 /* Checking to see if its returned to OS */ 9123 rp = qla4xxx_cmd_priv(cmd)->srb; 9124 if (rp == NULL) { 9125 done++; 9126 break; 9127 } 9128 9129 msleep(2000); 9130 } while (max_wait_time--); 9131 9132 return done; 9133 } 9134 9135 /** 9136 * qla4xxx_wait_for_hba_online - waits for HBA to come online 9137 * @ha: Pointer to host adapter structure 9138 **/ 9139 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha) 9140 { 9141 unsigned long wait_online; 9142 9143 wait_online = jiffies + (HBA_ONLINE_TOV * HZ); 9144 while (time_before(jiffies, wait_online)) { 9145 9146 if (adapter_up(ha)) 9147 return QLA_SUCCESS; 9148 9149 msleep(2000); 9150 } 9151 9152 return QLA_ERROR; 9153 } 9154 9155 /** 9156 * qla4xxx_eh_wait_for_commands - wait for active cmds to finish. 9157 * @ha: pointer to HBA 9158 * @stgt: pointer to SCSI target 9159 * @sdev: pointer to SCSI device 9160 * 9161 * This function waits for all outstanding commands to a lun to complete. It 9162 * returns 0 if all pending commands are returned and 1 otherwise. 9163 **/ 9164 static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha, 9165 struct scsi_target *stgt, 9166 struct scsi_device *sdev) 9167 { 9168 int cnt; 9169 int status = 0; 9170 struct scsi_cmnd *cmd; 9171 9172 /* 9173 * Waiting for all commands for the designated target or dev 9174 * in the active array 9175 */ 9176 for (cnt = 0; cnt < ha->host->can_queue; cnt++) { 9177 cmd = scsi_host_find_tag(ha->host, cnt); 9178 if (cmd && stgt == scsi_target(cmd->device) && 9179 (!sdev || sdev == cmd->device)) { 9180 if (!qla4xxx_eh_wait_on_command(ha, cmd)) { 9181 status++; 9182 break; 9183 } 9184 } 9185 } 9186 return status; 9187 } 9188 9189 /** 9190 * qla4xxx_eh_abort - callback for abort task. 9191 * @cmd: Pointer to Linux's SCSI command structure 9192 * 9193 * This routine is called by the Linux OS to abort the specified 9194 * command. 
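 *
 * Returns SUCCESS if the command was aborted or had already completed,
 * FAILED otherwise.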
9195 **/ 9196 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd) 9197 { 9198 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 9199 unsigned int id = cmd->device->id; 9200 uint64_t lun = cmd->device->lun; 9201 unsigned long flags; 9202 struct srb *srb = NULL; 9203 int ret = SUCCESS; 9204 int wait = 0; 9205 int rval; 9206 9207 ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n", 9208 ha->host_no, id, lun, cmd, cmd->cmnd[0]); 9209 9210 rval = qla4xxx_isp_check_reg(ha); 9211 if (rval != QLA_SUCCESS) { 9212 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); 9213 return FAILED; 9214 } 9215 9216 spin_lock_irqsave(&ha->hardware_lock, flags); 9217 srb = qla4xxx_cmd_priv(cmd)->srb; 9218 if (!srb) { 9219 spin_unlock_irqrestore(&ha->hardware_lock, flags); 9220 ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Specified command has already completed.\n", 9221 ha->host_no, id, lun); 9222 return SUCCESS; 9223 } 9224 kref_get(&srb->srb_ref); 9225 spin_unlock_irqrestore(&ha->hardware_lock, flags); 9226 9227 if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) { 9228 DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx failed.\n", 9229 ha->host_no, id, lun)); 9230 ret = FAILED; 9231 } else { 9232 DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx success.\n", 9233 ha->host_no, id, lun)); 9234 wait = 1; 9235 } 9236 9237 kref_put(&srb->srb_ref, qla4xxx_srb_compl); 9238 9239 /* Wait for command to complete */ 9240 if (wait) { 9241 if (!qla4xxx_eh_wait_on_command(ha, cmd)) { 9242 DEBUG2(printk("scsi%ld:%d:%llu: Abort handler timed out\n", 9243 ha->host_no, id, lun)); 9244 ret = FAILED; 9245 } 9246 } 9247 9248 ql4_printk(KERN_INFO, ha, 9249 "scsi%ld:%d:%llu: Abort command - %s\n", 9250 ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed"); 9251 9252 return ret; 9253 } 9254 9255 /** 9256 * qla4xxx_eh_device_reset - callback for target reset. 9257 * @cmd: Pointer to Linux's SCSI command structure 9258 * 9259 * This routine is called by the Linux OS to reset all luns on the 9260 * specified target. 9261 **/ 9262 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd) 9263 { 9264 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 9265 struct ddb_entry *ddb_entry = cmd->device->hostdata; 9266 int ret = FAILED, stat; 9267 int rval; 9268 9269 if (!ddb_entry) 9270 return ret; 9271 9272 ret = iscsi_block_scsi_eh(cmd); 9273 if (ret) 9274 return ret; 9275 ret = FAILED; 9276 9277 ql4_printk(KERN_INFO, ha, 9278 "scsi%ld:%d:%d:%llu: DEVICE RESET ISSUED.\n", ha->host_no, 9279 cmd->device->channel, cmd->device->id, cmd->device->lun); 9280 9281 DEBUG2(printk(KERN_INFO 9282 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x," 9283 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no, 9284 cmd, jiffies, scsi_cmd_to_rq(cmd)->timeout / HZ, 9285 ha->dpc_flags, cmd->result, cmd->allowed)); 9286 9287 rval = qla4xxx_isp_check_reg(ha); 9288 if (rval != QLA_SUCCESS) { 9289 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); 9290 return FAILED; 9291 } 9292 9293 /* FIXME: wait for hba to go online */ 9294 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun); 9295 if (stat != QLA_SUCCESS) { 9296 ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat); 9297 goto eh_dev_reset_done; 9298 } 9299 9300 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device), 9301 cmd->device)) { 9302 ql4_printk(KERN_INFO, ha, 9303 "DEVICE RESET FAILED - waiting for " 9304 "commands.\n"); 9305 goto eh_dev_reset_done; 9306 } 9307 9308 /* Send marker. 
*/ 9309 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun, 9310 MM_LUN_RESET) != QLA_SUCCESS) 9311 goto eh_dev_reset_done; 9312 9313 ql4_printk(KERN_INFO, ha, 9314 "scsi(%ld:%d:%d:%llu): DEVICE RESET SUCCEEDED.\n", 9315 ha->host_no, cmd->device->channel, cmd->device->id, 9316 cmd->device->lun); 9317 9318 ret = SUCCESS; 9319 9320 eh_dev_reset_done: 9321 9322 return ret; 9323 } 9324 9325 /** 9326 * qla4xxx_eh_target_reset - callback for target reset. 9327 * @cmd: Pointer to Linux's SCSI command structure 9328 * 9329 * This routine is called by the Linux OS to reset the target. 9330 **/ 9331 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd) 9332 { 9333 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 9334 struct ddb_entry *ddb_entry = cmd->device->hostdata; 9335 int stat, ret; 9336 int rval; 9337 9338 if (!ddb_entry) 9339 return FAILED; 9340 9341 ret = iscsi_block_scsi_eh(cmd); 9342 if (ret) 9343 return ret; 9344 9345 starget_printk(KERN_INFO, scsi_target(cmd->device), 9346 "WARM TARGET RESET ISSUED.\n"); 9347 9348 DEBUG2(printk(KERN_INFO 9349 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, " 9350 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n", 9351 ha->host_no, cmd, jiffies, scsi_cmd_to_rq(cmd)->timeout / HZ, 9352 ha->dpc_flags, cmd->result, cmd->allowed)); 9353 9354 rval = qla4xxx_isp_check_reg(ha); 9355 if (rval != QLA_SUCCESS) { 9356 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); 9357 return FAILED; 9358 } 9359 9360 stat = qla4xxx_reset_target(ha, ddb_entry); 9361 if (stat != QLA_SUCCESS) { 9362 starget_printk(KERN_INFO, scsi_target(cmd->device), 9363 "WARM TARGET RESET FAILED.\n"); 9364 return FAILED; 9365 } 9366 9367 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device), 9368 NULL)) { 9369 starget_printk(KERN_INFO, scsi_target(cmd->device), 9370 "WARM TARGET DEVICE RESET FAILED - " 9371 "waiting for commands.\n"); 9372 return FAILED; 9373 } 9374 9375 /* Send marker. */ 9376 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun, 9377 MM_TGT_WARM_RESET) != QLA_SUCCESS) { 9378 starget_printk(KERN_INFO, scsi_target(cmd->device), 9379 "WARM TARGET DEVICE RESET FAILED - " 9380 "marker iocb failed.\n"); 9381 return FAILED; 9382 } 9383 9384 starget_printk(KERN_INFO, scsi_target(cmd->device), 9385 "WARM TARGET RESET SUCCEEDED.\n"); 9386 return SUCCESS; 9387 } 9388 9389 /** 9390 * qla4xxx_is_eh_active - check if error handler is running 9391 * @shost: Pointer to SCSI Host struct 9392 * 9393 * This routine finds that if reset host is called in EH 9394 * scenario or from some application like sg_reset 9395 **/ 9396 static int qla4xxx_is_eh_active(struct Scsi_Host *shost) 9397 { 9398 if (shost->shost_state == SHOST_RECOVERY) 9399 return 1; 9400 return 0; 9401 } 9402 9403 /** 9404 * qla4xxx_eh_host_reset - kernel callback 9405 * @cmd: Pointer to Linux's SCSI command structure 9406 * 9407 * This routine is invoked by the Linux kernel to perform fatal error 9408 * recovery on the specified adapter. 
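 *
 * Returns SUCCESS if the adapter was recovered, FAILED otherwise.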
9409 **/ 9410 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd) 9411 { 9412 int return_status = FAILED; 9413 struct scsi_qla_host *ha; 9414 int rval; 9415 9416 ha = to_qla_host(cmd->device->host); 9417 9418 rval = qla4xxx_isp_check_reg(ha); 9419 if (rval != QLA_SUCCESS) { 9420 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); 9421 return FAILED; 9422 } 9423 9424 if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba) 9425 qla4_83xx_set_idc_dontreset(ha); 9426 9427 /* 9428 * For ISP8324 and ISP8042, if IDC_CTRL DONTRESET_BIT0 is set by other 9429 * protocol drivers, we should not set device_state to NEED_RESET 9430 */ 9431 if (ql4xdontresethba || 9432 ((is_qla8032(ha) || is_qla8042(ha)) && 9433 qla4_83xx_idc_dontreset(ha))) { 9434 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", 9435 ha->host_no, __func__)); 9436 9437 /* Clear outstanding srb in queues */ 9438 if (qla4xxx_is_eh_active(cmd->device->host)) 9439 qla4xxx_abort_active_cmds(ha, DID_ABORT << 16); 9440 9441 return FAILED; 9442 } 9443 9444 ql4_printk(KERN_INFO, ha, 9445 "scsi(%ld:%d:%d:%llu): HOST RESET ISSUED.\n", ha->host_no, 9446 cmd->device->channel, cmd->device->id, cmd->device->lun); 9447 9448 if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) { 9449 DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter " 9450 "DEAD.\n", ha->host_no, cmd->device->channel, 9451 __func__)); 9452 9453 return FAILED; 9454 } 9455 9456 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 9457 if (is_qla80XX(ha)) 9458 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 9459 else 9460 set_bit(DPC_RESET_HA, &ha->dpc_flags); 9461 } 9462 9463 if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS) 9464 return_status = SUCCESS; 9465 9466 ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n", 9467 return_status == FAILED ? "FAILED" : "SUCCEEDED"); 9468 9469 return return_status; 9470 } 9471 9472 static int qla4xxx_context_reset(struct scsi_qla_host *ha) 9473 { 9474 uint32_t mbox_cmd[MBOX_REG_COUNT]; 9475 uint32_t mbox_sts[MBOX_REG_COUNT]; 9476 struct addr_ctrl_blk_def *acb = NULL; 9477 uint32_t acb_len = sizeof(struct addr_ctrl_blk_def); 9478 int rval = QLA_SUCCESS; 9479 dma_addr_t acb_dma; 9480 9481 acb = dma_alloc_coherent(&ha->pdev->dev, 9482 sizeof(struct addr_ctrl_blk_def), 9483 &acb_dma, GFP_KERNEL); 9484 if (!acb) { 9485 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", 9486 __func__); 9487 rval = -ENOMEM; 9488 goto exit_port_reset; 9489 } 9490 9491 memset(acb, 0, acb_len); 9492 9493 rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len); 9494 if (rval != QLA_SUCCESS) { 9495 rval = -EIO; 9496 goto exit_free_acb; 9497 } 9498 9499 rval = qla4xxx_disable_acb(ha); 9500 if (rval != QLA_SUCCESS) { 9501 rval = -EIO; 9502 goto exit_free_acb; 9503 } 9504 9505 wait_for_completion_timeout(&ha->disable_acb_comp, 9506 DISABLE_ACB_TOV * HZ); 9507 9508 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma); 9509 if (rval != QLA_SUCCESS) { 9510 rval = -EIO; 9511 goto exit_free_acb; 9512 } 9513 9514 exit_free_acb: 9515 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def), 9516 acb, acb_dma); 9517 exit_port_reset: 9518 DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__, 9519 rval == QLA_SUCCESS ? 
"SUCCEEDED" : "FAILED")); 9520 return rval; 9521 } 9522 9523 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type) 9524 { 9525 struct scsi_qla_host *ha = to_qla_host(shost); 9526 int rval = QLA_SUCCESS; 9527 uint32_t idc_ctrl; 9528 9529 if (ql4xdontresethba) { 9530 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n", 9531 __func__)); 9532 rval = -EPERM; 9533 goto exit_host_reset; 9534 } 9535 9536 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) 9537 goto recover_adapter; 9538 9539 switch (reset_type) { 9540 case SCSI_ADAPTER_RESET: 9541 set_bit(DPC_RESET_HA, &ha->dpc_flags); 9542 break; 9543 case SCSI_FIRMWARE_RESET: 9544 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 9545 if (is_qla80XX(ha)) 9546 /* set firmware context reset */ 9547 set_bit(DPC_RESET_HA_FW_CONTEXT, 9548 &ha->dpc_flags); 9549 else { 9550 rval = qla4xxx_context_reset(ha); 9551 goto exit_host_reset; 9552 } 9553 } 9554 break; 9555 } 9556 9557 recover_adapter: 9558 /* For ISP8324 and ISP8042 set graceful reset bit in IDC_DRV_CTRL if 9559 * reset is issued by application */ 9560 if ((is_qla8032(ha) || is_qla8042(ha)) && 9561 test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 9562 idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); 9563 qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, 9564 (idc_ctrl | GRACEFUL_RESET_BIT1)); 9565 } 9566 9567 rval = qla4xxx_recover_adapter(ha); 9568 if (rval != QLA_SUCCESS) { 9569 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n", 9570 __func__)); 9571 rval = -EIO; 9572 } 9573 9574 exit_host_reset: 9575 return rval; 9576 } 9577 9578 /* PCI AER driver recovers from all correctable errors w/o 9579 * driver intervention. For uncorrectable errors PCI AER 9580 * driver calls the following device driver's callbacks 9581 * 9582 * - Fatal Errors - link_reset 9583 * - Non-Fatal Errors - driver's error_detected() which 9584 * returns CAN_RECOVER, NEED_RESET or DISCONNECT. 9585 * 9586 * PCI AER driver calls 9587 * CAN_RECOVER - driver's mmio_enabled(), mmio_enabled() 9588 * returns RECOVERED or NEED_RESET if fw_hung 9589 * NEED_RESET - driver's slot_reset() 9590 * DISCONNECT - device is dead & cannot recover 9591 * RECOVERED - driver's resume() 9592 */ 9593 static pci_ers_result_t 9594 qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 9595 { 9596 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9597 9598 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n", 9599 ha->host_no, __func__, state); 9600 9601 if (!is_aer_supported(ha)) 9602 return PCI_ERS_RESULT_NONE; 9603 9604 switch (state) { 9605 case pci_channel_io_normal: 9606 clear_bit(AF_EEH_BUSY, &ha->flags); 9607 return PCI_ERS_RESULT_CAN_RECOVER; 9608 case pci_channel_io_frozen: 9609 set_bit(AF_EEH_BUSY, &ha->flags); 9610 qla4xxx_mailbox_premature_completion(ha); 9611 qla4xxx_free_irqs(ha); 9612 pci_disable_device(pdev); 9613 /* Return back all IOs */ 9614 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 9615 return PCI_ERS_RESULT_NEED_RESET; 9616 case pci_channel_io_perm_failure: 9617 set_bit(AF_EEH_BUSY, &ha->flags); 9618 set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags); 9619 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); 9620 return PCI_ERS_RESULT_DISCONNECT; 9621 } 9622 return PCI_ERS_RESULT_NEED_RESET; 9623 } 9624 9625 /** 9626 * qla4xxx_pci_mmio_enabled() - gets called if 9627 * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER 9628 * and read/write to the device still works. 
9629 * @pdev: PCI device pointer 9630 **/ 9631 static pci_ers_result_t 9632 qla4xxx_pci_mmio_enabled(struct pci_dev *pdev) 9633 { 9634 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9635 9636 if (!is_aer_supported(ha)) 9637 return PCI_ERS_RESULT_NONE; 9638 9639 return PCI_ERS_RESULT_RECOVERED; 9640 } 9641 9642 static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha) 9643 { 9644 uint32_t rval = QLA_ERROR; 9645 int fn; 9646 struct pci_dev *other_pdev = NULL; 9647 9648 ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__); 9649 9650 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 9651 9652 if (test_bit(AF_ONLINE, &ha->flags)) { 9653 clear_bit(AF_ONLINE, &ha->flags); 9654 clear_bit(AF_LINK_UP, &ha->flags); 9655 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session); 9656 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 9657 } 9658 9659 fn = PCI_FUNC(ha->pdev->devfn); 9660 if (is_qla8022(ha)) { 9661 while (fn > 0) { 9662 fn--; 9663 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at func %x\n", 9664 ha->host_no, __func__, fn); 9665 /* Get the pci device given the domain, bus, 9666 * slot/function number */ 9667 other_pdev = pci_get_domain_bus_and_slot( 9668 pci_domain_nr(ha->pdev->bus), 9669 ha->pdev->bus->number, 9670 PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), 9671 fn)); 9672 9673 if (!other_pdev) 9674 continue; 9675 9676 if (atomic_read(&other_pdev->enable_cnt)) { 9677 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI func in enabled state%x\n", 9678 ha->host_no, __func__, fn); 9679 pci_dev_put(other_pdev); 9680 break; 9681 } 9682 pci_dev_put(other_pdev); 9683 } 9684 } else { 9685 /* this case is meant for ISP83xx/ISP84xx only */ 9686 if (qla4_83xx_can_perform_reset(ha)) { 9687 /* reset fn as iSCSI is going to perform the reset */ 9688 fn = 0; 9689 } 9690 } 9691 9692 /* The first function on the card, the reset owner will 9693 * start & initialize the firmware. 
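 * (the owner walks QLA8XXX_CRB_DEV_STATE from COLD back to READY via
 * qla4xxx_initialize_adapter() below).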
The other functions 9694 * on the card will reset the firmware context 9695 */ 9696 if (!fn) { 9697 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset " 9698 "0x%x is the owner\n", ha->host_no, __func__, 9699 ha->pdev->devfn); 9700 9701 ha->isp_ops->idc_lock(ha); 9702 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9703 QLA8XXX_DEV_COLD); 9704 ha->isp_ops->idc_unlock(ha); 9705 9706 rval = qla4_8xxx_update_idc_reg(ha); 9707 if (rval == QLA_ERROR) { 9708 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n", 9709 ha->host_no, __func__); 9710 ha->isp_ops->idc_lock(ha); 9711 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9712 QLA8XXX_DEV_FAILED); 9713 ha->isp_ops->idc_unlock(ha); 9714 goto exit_error_recovery; 9715 } 9716 9717 clear_bit(AF_FW_RECOVERY, &ha->flags); 9718 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); 9719 9720 if (rval != QLA_SUCCESS) { 9721 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: " 9722 "FAILED\n", ha->host_no, __func__); 9723 qla4xxx_free_irqs(ha); 9724 ha->isp_ops->idc_lock(ha); 9725 qla4_8xxx_clear_drv_active(ha); 9726 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9727 QLA8XXX_DEV_FAILED); 9728 ha->isp_ops->idc_unlock(ha); 9729 } else { 9730 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: " 9731 "READY\n", ha->host_no, __func__); 9732 ha->isp_ops->idc_lock(ha); 9733 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9734 QLA8XXX_DEV_READY); 9735 /* Clear driver state register */ 9736 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0); 9737 qla4_8xxx_set_drv_active(ha); 9738 ha->isp_ops->idc_unlock(ha); 9739 ha->isp_ops->enable_intrs(ha); 9740 } 9741 } else { 9742 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not " 9743 "the reset owner\n", ha->host_no, __func__, 9744 ha->pdev->devfn); 9745 if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) == 9746 QLA8XXX_DEV_READY)) { 9747 clear_bit(AF_FW_RECOVERY, &ha->flags); 9748 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); 9749 if (rval == QLA_SUCCESS) 9750 ha->isp_ops->enable_intrs(ha); 9751 else 9752 qla4xxx_free_irqs(ha); 9753 9754 ha->isp_ops->idc_lock(ha); 9755 qla4_8xxx_set_drv_active(ha); 9756 ha->isp_ops->idc_unlock(ha); 9757 } 9758 } 9759 exit_error_recovery: 9760 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 9761 return rval; 9762 } 9763 9764 static pci_ers_result_t 9765 qla4xxx_pci_slot_reset(struct pci_dev *pdev) 9766 { 9767 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; 9768 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9769 int rc; 9770 9771 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n", 9772 ha->host_no, __func__); 9773 9774 if (!is_aer_supported(ha)) 9775 return PCI_ERS_RESULT_NONE; 9776 9777 /* Restore the saved state of PCIe device - 9778 * BAR registers, PCI Config space, PCIX, MSI, 9779 * IOV states 9780 */ 9781 pci_restore_state(pdev); 9782 9783 /* pci_restore_state() clears the saved_state flag of the device 9784 * save restored state which resets saved_state flag 9785 */ 9786 pci_save_state(pdev); 9787 9788 /* Initialize device or resume if in suspended state */ 9789 rc = pci_enable_device(pdev); 9790 if (rc) { 9791 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable " 9792 "device after reset\n", ha->host_no, __func__); 9793 goto exit_slot_reset; 9794 } 9795 9796 ha->isp_ops->disable_intrs(ha); 9797 9798 if (is_qla80XX(ha)) { 9799 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) { 9800 ret = PCI_ERS_RESULT_RECOVERED; 9801 goto exit_slot_reset; 9802 } else 9803 goto exit_slot_reset; 9804 } 9805 9806 exit_slot_reset: 9807 ql4_printk(KERN_WARNING, 
ha, "scsi%ld: %s: Return=%x\n" 9808 "device after reset\n", ha->host_no, __func__, ret); 9809 return ret; 9810 } 9811 9812 static void 9813 qla4xxx_pci_resume(struct pci_dev *pdev) 9814 { 9815 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9816 int ret; 9817 9818 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n", 9819 ha->host_no, __func__); 9820 9821 ret = qla4xxx_wait_for_hba_online(ha); 9822 if (ret != QLA_SUCCESS) { 9823 ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to " 9824 "resume I/O from slot/link_reset\n", ha->host_no, 9825 __func__); 9826 } 9827 9828 clear_bit(AF_EEH_BUSY, &ha->flags); 9829 } 9830 9831 static const struct pci_error_handlers qla4xxx_err_handler = { 9832 .error_detected = qla4xxx_pci_error_detected, 9833 .mmio_enabled = qla4xxx_pci_mmio_enabled, 9834 .slot_reset = qla4xxx_pci_slot_reset, 9835 .resume = qla4xxx_pci_resume, 9836 }; 9837 9838 static struct pci_device_id qla4xxx_pci_tbl[] = { 9839 { 9840 .vendor = PCI_VENDOR_ID_QLOGIC, 9841 .device = PCI_DEVICE_ID_QLOGIC_ISP4010, 9842 .subvendor = PCI_ANY_ID, 9843 .subdevice = PCI_ANY_ID, 9844 }, 9845 { 9846 .vendor = PCI_VENDOR_ID_QLOGIC, 9847 .device = PCI_DEVICE_ID_QLOGIC_ISP4022, 9848 .subvendor = PCI_ANY_ID, 9849 .subdevice = PCI_ANY_ID, 9850 }, 9851 { 9852 .vendor = PCI_VENDOR_ID_QLOGIC, 9853 .device = PCI_DEVICE_ID_QLOGIC_ISP4032, 9854 .subvendor = PCI_ANY_ID, 9855 .subdevice = PCI_ANY_ID, 9856 }, 9857 { 9858 .vendor = PCI_VENDOR_ID_QLOGIC, 9859 .device = PCI_DEVICE_ID_QLOGIC_ISP8022, 9860 .subvendor = PCI_ANY_ID, 9861 .subdevice = PCI_ANY_ID, 9862 }, 9863 { 9864 .vendor = PCI_VENDOR_ID_QLOGIC, 9865 .device = PCI_DEVICE_ID_QLOGIC_ISP8324, 9866 .subvendor = PCI_ANY_ID, 9867 .subdevice = PCI_ANY_ID, 9868 }, 9869 { 9870 .vendor = PCI_VENDOR_ID_QLOGIC, 9871 .device = PCI_DEVICE_ID_QLOGIC_ISP8042, 9872 .subvendor = PCI_ANY_ID, 9873 .subdevice = PCI_ANY_ID, 9874 }, 9875 {0, 0}, 9876 }; 9877 MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl); 9878 9879 static struct pci_driver qla4xxx_pci_driver = { 9880 .name = DRIVER_NAME, 9881 .id_table = qla4xxx_pci_tbl, 9882 .probe = qla4xxx_probe_adapter, 9883 .remove = qla4xxx_remove_adapter, 9884 .err_handler = &qla4xxx_err_handler, 9885 }; 9886 9887 static int __init qla4xxx_module_init(void) 9888 { 9889 int ret; 9890 9891 if (ql4xqfulltracking) 9892 qla4xxx_driver_template.track_queue_depth = 1; 9893 9894 /* Allocate cache for SRBs. */ 9895 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0, 9896 SLAB_HWCACHE_ALIGN, NULL); 9897 if (srb_cachep == NULL) { 9898 printk(KERN_ERR 9899 "%s: Unable to allocate SRB cache..." 9900 "Failing load!\n", DRIVER_NAME); 9901 ret = -ENOMEM; 9902 goto no_srp_cache; 9903 } 9904 9905 /* Derive version string. 
*/ 9906 strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION); 9907 if (ql4xextended_error_logging) 9908 strcat(qla4xxx_version_str, "-debug"); 9909 9910 qla4xxx_scsi_transport = 9911 iscsi_register_transport(&qla4xxx_iscsi_transport); 9912 if (!qla4xxx_scsi_transport){ 9913 ret = -ENODEV; 9914 goto release_srb_cache; 9915 } 9916 9917 ret = pci_register_driver(&qla4xxx_pci_driver); 9918 if (ret) 9919 goto unregister_transport; 9920 9921 printk(KERN_INFO "QLogic iSCSI HBA Driver\n"); 9922 return 0; 9923 9924 unregister_transport: 9925 iscsi_unregister_transport(&qla4xxx_iscsi_transport); 9926 release_srb_cache: 9927 kmem_cache_destroy(srb_cachep); 9928 no_srp_cache: 9929 return ret; 9930 } 9931 9932 static void __exit qla4xxx_module_exit(void) 9933 { 9934 pci_unregister_driver(&qla4xxx_pci_driver); 9935 iscsi_unregister_transport(&qla4xxx_iscsi_transport); 9936 kmem_cache_destroy(srb_cachep); 9937 } 9938 9939 module_init(qla4xxx_module_init); 9940 module_exit(qla4xxx_module_exit); 9941 9942 MODULE_AUTHOR("QLogic Corporation"); 9943 MODULE_DESCRIPTION("QLogic iSCSI HBA Driver"); 9944 MODULE_LICENSE("GPL"); 9945 MODULE_VERSION(QLA4XXX_DRIVER_VERSION); 9946
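
/*
 * Illustrative sketch only, not the driver's actual template definition.
 * The eh_* callbacks above reach the SCSI midlayer through the host
 * template (qla4xxx_driver_template, defined earlier in this file); the
 * compiled-out fragment below merely shows the general shape of that
 * hookup, using a hypothetical variable name, for reference.
 */
#if 0
static struct scsi_host_template example_eh_hookup = {
	.module			 = THIS_MODULE,
	.name			 = DRIVER_NAME,
	/* Escalation order used by the SCSI error handler: */
	.eh_abort_handler	 = qla4xxx_eh_abort,
	.eh_device_reset_handler = qla4xxx_eh_device_reset,
	.eh_target_reset_handler = qla4xxx_eh_target_reset,
	.eh_host_reset_handler	 = qla4xxx_eh_host_reset,
	.eh_timed_out		 = qla4xxx_eh_cmd_timed_out,
};
#endif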