1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * QLogic iSCSI HBA Driver 4 * Copyright (c) 2003-2013 QLogic Corporation 5 */ 6 #include <linux/moduleparam.h> 7 #include <linux/slab.h> 8 #include <linux/blkdev.h> 9 #include <linux/iscsi_boot_sysfs.h> 10 #include <linux/inet.h> 11 12 #include <scsi/scsi_tcq.h> 13 #include <scsi/scsicam.h> 14 15 #include "ql4_def.h" 16 #include "ql4_version.h" 17 #include "ql4_glbl.h" 18 #include "ql4_dbg.h" 19 #include "ql4_inline.h" 20 #include "ql4_83xx.h" 21 22 /* 23 * Driver version 24 */ 25 static char qla4xxx_version_str[40]; 26 27 /* 28 * SRB allocation cache 29 */ 30 static struct kmem_cache *srb_cachep; 31 32 /* 33 * Module parameter information and variables 34 */ 35 static int ql4xdisablesysfsboot = 1; 36 module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR); 37 MODULE_PARM_DESC(ql4xdisablesysfsboot, 38 " Set to disable exporting boot targets to sysfs.\n" 39 "\t\t 0 - Export boot targets\n" 40 "\t\t 1 - Do not export boot targets (Default)"); 41 42 int ql4xdontresethba; 43 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR); 44 MODULE_PARM_DESC(ql4xdontresethba, 45 " Don't reset the HBA for driver recovery.\n" 46 "\t\t 0 - It will reset HBA (Default)\n" 47 "\t\t 1 - It will NOT reset HBA"); 48 49 int ql4xextended_error_logging; 50 module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR); 51 MODULE_PARM_DESC(ql4xextended_error_logging, 52 " Option to enable extended error logging.\n" 53 "\t\t 0 - no logging (Default)\n" 54 "\t\t 2 - debug logging"); 55 56 int ql4xenablemsix = 1; 57 module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR); 58 MODULE_PARM_DESC(ql4xenablemsix, 59 " Set to enable MSI or MSI-X interrupt mechanism.\n" 60 "\t\t 0 = enable INTx interrupt mechanism.\n" 61 "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n" 62 "\t\t 2 = enable MSI interrupt mechanism."); 63 64 #define QL4_DEF_QDEPTH 32 65 static int ql4xmaxqdepth = QL4_DEF_QDEPTH; 66 module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR); 67 MODULE_PARM_DESC(ql4xmaxqdepth, 68 " Maximum queue depth to report for target devices.\n" 69 "\t\t Default: 32."); 70 71 static int ql4xqfulltracking = 1; 72 module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR); 73 MODULE_PARM_DESC(ql4xqfulltracking, 74 " Enable or disable dynamic tracking and adjustment of\n" 75 "\t\t scsi device queue depth.\n" 76 "\t\t 0 - Disable.\n" 77 "\t\t 1 - Enable. (Default)"); 78 79 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO; 80 module_param(ql4xsess_recovery_tmo, int, S_IRUGO); 81 MODULE_PARM_DESC(ql4xsess_recovery_tmo, 82 " Target Session Recovery Timeout.\n" 83 "\t\t Default: 120 sec."); 84 85 int ql4xmdcapmask = 0; 86 module_param(ql4xmdcapmask, int, S_IRUGO); 87 MODULE_PARM_DESC(ql4xmdcapmask, 88 " Set the Minidump driver capture mask level.\n" 89 "\t\t Default is 0 (firmware default capture mask)\n" 90 "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF"); 91 92 int ql4xenablemd = 1; 93 module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR); 94 MODULE_PARM_DESC(ql4xenablemd, 95 " Set to enable minidump.\n" 96 "\t\t 0 - disable minidump\n" 97 "\t\t 1 - enable minidump (Default)"); 98 99 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha); 100 /* 101 * SCSI host template entry points 102 */ 103 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha); 104 105 /* 106 * iSCSI template entry points 107 */ 108 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess, 109 enum iscsi_param param, char *buf); 110 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn, 111 enum iscsi_param param, char *buf); 112 static int qla4xxx_host_get_param(struct Scsi_Host *shost, 113 enum iscsi_host_param param, char *buf); 114 static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, 115 uint32_t len); 116 static int qla4xxx_get_iface_param(struct iscsi_iface *iface, 117 enum iscsi_param_type param_type, 118 int param, char *buf); 119 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc); 120 static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost, 121 struct sockaddr *dst_addr, 122 int non_blocking); 123 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms); 124 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep); 125 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep, 126 enum iscsi_param param, char *buf); 127 static int qla4xxx_conn_start(struct iscsi_cls_conn *conn); 128 static struct iscsi_cls_conn * 129 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx); 130 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session, 131 struct iscsi_cls_conn *cls_conn, 132 uint64_t transport_fd, int is_leading); 133 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn); 134 static struct iscsi_cls_session * 135 qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, 136 uint16_t qdepth, uint32_t initial_cmdsn); 137 static void qla4xxx_session_destroy(struct iscsi_cls_session *sess); 138 static void qla4xxx_task_work(struct work_struct *wdata); 139 static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t); 140 static int qla4xxx_task_xmit(struct iscsi_task *); 141 static void qla4xxx_task_cleanup(struct iscsi_task *); 142 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session); 143 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn, 144 struct iscsi_stats *stats); 145 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num, 146 uint32_t iface_type, uint32_t payload_size, 147 uint32_t pid, struct sockaddr *dst_addr); 148 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, 149 uint32_t *num_entries, char *buf); 150 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx); 151 static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, 152 int len); 153 static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len); 154 155 /* 156 * SCSI host template entry points 157 */ 158 static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd); 159 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd); 160 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd); 161 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd); 162 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd); 163 static int qla4xxx_slave_alloc(struct scsi_device *device); 164 static umode_t qla4_attr_is_visible(int param_type, int param); 165 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type); 166 167 /* 168 * iSCSI Flash DDB sysfs entry points 169 */ 170 static int 171 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, 172 struct iscsi_bus_flash_conn *fnode_conn, 173 void *data, int len); 174 static int 175 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, 176 int param, char *buf); 177 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf, 178 int len); 179 static int 180 qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess); 181 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess, 182 struct iscsi_bus_flash_conn *fnode_conn); 183 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess, 184 struct iscsi_bus_flash_conn *fnode_conn); 185 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess); 186 187 static struct qla4_8xxx_legacy_intr_set legacy_intr[] = 188 QLA82XX_LEGACY_INTR_CONFIG; 189 190 static const uint32_t qla4_82xx_reg_tbl[] = { 191 QLA82XX_PEG_HALT_STATUS1, 192 QLA82XX_PEG_HALT_STATUS2, 193 QLA82XX_PEG_ALIVE_COUNTER, 194 QLA82XX_CRB_DRV_ACTIVE, 195 QLA82XX_CRB_DEV_STATE, 196 QLA82XX_CRB_DRV_STATE, 197 QLA82XX_CRB_DRV_SCRATCH, 198 QLA82XX_CRB_DEV_PART_INFO, 199 QLA82XX_CRB_DRV_IDC_VERSION, 200 QLA82XX_FW_VERSION_MAJOR, 201 QLA82XX_FW_VERSION_MINOR, 202 QLA82XX_FW_VERSION_SUB, 203 CRB_CMDPEG_STATE, 204 CRB_TEMP_STATE, 205 }; 206 207 static const uint32_t qla4_83xx_reg_tbl[] = { 208 QLA83XX_PEG_HALT_STATUS1, 209 QLA83XX_PEG_HALT_STATUS2, 210 QLA83XX_PEG_ALIVE_COUNTER, 211 QLA83XX_CRB_DRV_ACTIVE, 212 QLA83XX_CRB_DEV_STATE, 213 QLA83XX_CRB_DRV_STATE, 214 QLA83XX_CRB_DRV_SCRATCH, 215 QLA83XX_CRB_DEV_PART_INFO1, 216 QLA83XX_CRB_IDC_VER_MAJOR, 217 QLA83XX_FW_VER_MAJOR, 218 QLA83XX_FW_VER_MINOR, 219 QLA83XX_FW_VER_SUB, 220 QLA83XX_CMDPEG_STATE, 221 QLA83XX_ASIC_TEMP, 222 }; 223 224 static struct scsi_host_template qla4xxx_driver_template = { 225 .module = THIS_MODULE, 226 .name = DRIVER_NAME, 227 .proc_name = DRIVER_NAME, 228 .queuecommand = qla4xxx_queuecommand, 229 230 .eh_abort_handler = qla4xxx_eh_abort, 231 .eh_device_reset_handler = qla4xxx_eh_device_reset, 232 .eh_target_reset_handler = qla4xxx_eh_target_reset, 233 .eh_host_reset_handler = qla4xxx_eh_host_reset, 234 .eh_timed_out = qla4xxx_eh_cmd_timed_out, 235 236 .slave_alloc = qla4xxx_slave_alloc, 237 .change_queue_depth = scsi_change_queue_depth, 238 239 .this_id = -1, 240 .cmd_per_lun = 3, 241 .sg_tablesize = SG_ALL, 242 243 .max_sectors = 0xFFFF, 244 .shost_attrs = qla4xxx_host_attrs, 245 .host_reset = qla4xxx_host_reset, 246 .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC, 247 }; 248 249 static struct iscsi_transport qla4xxx_iscsi_transport = { 250 .owner = THIS_MODULE, 251 .name = DRIVER_NAME, 252 .caps = CAP_TEXT_NEGO | 253 CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST | 254 CAP_DATADGST | CAP_LOGIN_OFFLOAD | 255 CAP_MULTI_R2T, 256 .attr_is_visible = qla4_attr_is_visible, 257 .create_session = qla4xxx_session_create, 258 .destroy_session = qla4xxx_session_destroy, 259 .start_conn = qla4xxx_conn_start, 260 .create_conn = qla4xxx_conn_create, 261 .bind_conn = qla4xxx_conn_bind, 262 .stop_conn = iscsi_conn_stop, 263 .destroy_conn = qla4xxx_conn_destroy, 264 .set_param = iscsi_set_param, 265 .get_conn_param = qla4xxx_conn_get_param, 266 .get_session_param = qla4xxx_session_get_param, 267 .get_ep_param = qla4xxx_get_ep_param, 268 .ep_connect = qla4xxx_ep_connect, 269 .ep_poll = qla4xxx_ep_poll, 270 .ep_disconnect = qla4xxx_ep_disconnect, 271 .get_stats = qla4xxx_conn_get_stats, 272 .send_pdu = iscsi_conn_send_pdu, 273 .xmit_task = qla4xxx_task_xmit, 274 .cleanup_task = qla4xxx_task_cleanup, 275 .alloc_pdu = qla4xxx_alloc_pdu, 276 277 .get_host_param = qla4xxx_host_get_param, 278 .set_iface_param = qla4xxx_iface_set_param, 279 .get_iface_param = qla4xxx_get_iface_param, 280 .bsg_request = qla4xxx_bsg_request, 281 .send_ping = qla4xxx_send_ping, 282 .get_chap = qla4xxx_get_chap_list, 283 .delete_chap = qla4xxx_delete_chap, 284 .set_chap = qla4xxx_set_chap_entry, 285 .get_flashnode_param = qla4xxx_sysfs_ddb_get_param, 286 .set_flashnode_param = qla4xxx_sysfs_ddb_set_param, 287 .new_flashnode = qla4xxx_sysfs_ddb_add, 288 .del_flashnode = qla4xxx_sysfs_ddb_delete, 289 .login_flashnode = qla4xxx_sysfs_ddb_login, 290 .logout_flashnode = qla4xxx_sysfs_ddb_logout, 291 .logout_flashnode_sid = qla4xxx_sysfs_ddb_logout_sid, 292 .get_host_stats = qla4xxx_get_host_stats, 293 }; 294 295 static struct scsi_transport_template *qla4xxx_scsi_transport; 296 297 static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha) 298 { 299 u32 reg_val = 0; 300 int rval = QLA_SUCCESS; 301 302 if (is_qla8022(ha)) 303 reg_val = readl(&ha->qla4_82xx_reg->host_status); 304 else if (is_qla8032(ha) || is_qla8042(ha)) 305 reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER); 306 else 307 reg_val = readw(&ha->reg->ctrl_status); 308 309 if (reg_val == QL4_ISP_REG_DISCONNECT) 310 rval = QLA_ERROR; 311 312 return rval; 313 } 314 315 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num, 316 uint32_t iface_type, uint32_t payload_size, 317 uint32_t pid, struct sockaddr *dst_addr) 318 { 319 struct scsi_qla_host *ha = to_qla_host(shost); 320 struct sockaddr_in *addr; 321 struct sockaddr_in6 *addr6; 322 uint32_t options = 0; 323 uint8_t ipaddr[IPv6_ADDR_LEN]; 324 int rval; 325 326 memset(ipaddr, 0, IPv6_ADDR_LEN); 327 /* IPv4 to IPv4 */ 328 if ((iface_type == ISCSI_IFACE_TYPE_IPV4) && 329 (dst_addr->sa_family == AF_INET)) { 330 addr = (struct sockaddr_in *)dst_addr; 331 memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN); 332 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 " 333 "dest: %pI4\n", __func__, 334 &ha->ip_config.ip_address, ipaddr)); 335 rval = qla4xxx_ping_iocb(ha, options, payload_size, pid, 336 ipaddr); 337 if (rval) 338 rval = -EINVAL; 339 } else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) && 340 (dst_addr->sa_family == AF_INET6)) { 341 /* IPv6 to IPv6 */ 342 addr6 = (struct sockaddr_in6 *)dst_addr; 343 memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN); 344 345 options |= PING_IPV6_PROTOCOL_ENABLE; 346 347 /* Ping using LinkLocal address */ 348 if ((iface_num == 0) || (iface_num == 1)) { 349 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping " 350 "src: %pI6 dest: %pI6\n", __func__, 351 &ha->ip_config.ipv6_link_local_addr, 352 ipaddr)); 353 options |= PING_IPV6_LINKLOCAL_ADDR; 354 rval = qla4xxx_ping_iocb(ha, options, payload_size, 355 pid, ipaddr); 356 } else { 357 ql4_printk(KERN_WARNING, ha, "%s: iface num = %d " 358 "not supported\n", __func__, iface_num); 359 rval = -ENOSYS; 360 goto exit_send_ping; 361 } 362 363 /* 364 * If ping using LinkLocal address fails, try ping using 365 * IPv6 address 366 */ 367 if (rval != QLA_SUCCESS) { 368 options &= ~PING_IPV6_LINKLOCAL_ADDR; 369 if (iface_num == 0) { 370 options |= PING_IPV6_ADDR0; 371 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 " 372 "Ping src: %pI6 " 373 "dest: %pI6\n", __func__, 374 &ha->ip_config.ipv6_addr0, 375 ipaddr)); 376 } else if (iface_num == 1) { 377 options |= PING_IPV6_ADDR1; 378 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 " 379 "Ping src: %pI6 " 380 "dest: %pI6\n", __func__, 381 &ha->ip_config.ipv6_addr1, 382 ipaddr)); 383 } 384 rval = qla4xxx_ping_iocb(ha, options, payload_size, 385 pid, ipaddr); 386 if (rval) 387 rval = -EINVAL; 388 } 389 } else 390 rval = -ENOSYS; 391 exit_send_ping: 392 return rval; 393 } 394 395 static umode_t qla4_attr_is_visible(int param_type, int param) 396 { 397 switch (param_type) { 398 case ISCSI_HOST_PARAM: 399 switch (param) { 400 case ISCSI_HOST_PARAM_HWADDRESS: 401 case ISCSI_HOST_PARAM_IPADDRESS: 402 case ISCSI_HOST_PARAM_INITIATOR_NAME: 403 case ISCSI_HOST_PARAM_PORT_STATE: 404 case ISCSI_HOST_PARAM_PORT_SPEED: 405 return S_IRUGO; 406 default: 407 return 0; 408 } 409 case ISCSI_PARAM: 410 switch (param) { 411 case ISCSI_PARAM_PERSISTENT_ADDRESS: 412 case ISCSI_PARAM_PERSISTENT_PORT: 413 case ISCSI_PARAM_CONN_ADDRESS: 414 case ISCSI_PARAM_CONN_PORT: 415 case ISCSI_PARAM_TARGET_NAME: 416 case ISCSI_PARAM_TPGT: 417 case ISCSI_PARAM_TARGET_ALIAS: 418 case ISCSI_PARAM_MAX_BURST: 419 case ISCSI_PARAM_MAX_R2T: 420 case ISCSI_PARAM_FIRST_BURST: 421 case ISCSI_PARAM_MAX_RECV_DLENGTH: 422 case ISCSI_PARAM_MAX_XMIT_DLENGTH: 423 case ISCSI_PARAM_IFACE_NAME: 424 case ISCSI_PARAM_CHAP_OUT_IDX: 425 case ISCSI_PARAM_CHAP_IN_IDX: 426 case ISCSI_PARAM_USERNAME: 427 case ISCSI_PARAM_PASSWORD: 428 case ISCSI_PARAM_USERNAME_IN: 429 case ISCSI_PARAM_PASSWORD_IN: 430 case ISCSI_PARAM_AUTO_SND_TGT_DISABLE: 431 case ISCSI_PARAM_DISCOVERY_SESS: 432 case ISCSI_PARAM_PORTAL_TYPE: 433 case ISCSI_PARAM_CHAP_AUTH_EN: 434 case ISCSI_PARAM_DISCOVERY_LOGOUT_EN: 435 case ISCSI_PARAM_BIDI_CHAP_EN: 436 case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL: 437 case ISCSI_PARAM_DEF_TIME2WAIT: 438 case ISCSI_PARAM_DEF_TIME2RETAIN: 439 case ISCSI_PARAM_HDRDGST_EN: 440 case ISCSI_PARAM_DATADGST_EN: 441 case ISCSI_PARAM_INITIAL_R2T_EN: 442 case ISCSI_PARAM_IMM_DATA_EN: 443 case ISCSI_PARAM_PDU_INORDER_EN: 444 case ISCSI_PARAM_DATASEQ_INORDER_EN: 445 case ISCSI_PARAM_MAX_SEGMENT_SIZE: 446 case ISCSI_PARAM_TCP_TIMESTAMP_STAT: 447 case ISCSI_PARAM_TCP_WSF_DISABLE: 448 case ISCSI_PARAM_TCP_NAGLE_DISABLE: 449 case ISCSI_PARAM_TCP_TIMER_SCALE: 450 case ISCSI_PARAM_TCP_TIMESTAMP_EN: 451 case ISCSI_PARAM_TCP_XMIT_WSF: 452 case ISCSI_PARAM_TCP_RECV_WSF: 453 case ISCSI_PARAM_IP_FRAGMENT_DISABLE: 454 case ISCSI_PARAM_IPV4_TOS: 455 case ISCSI_PARAM_IPV6_TC: 456 case ISCSI_PARAM_IPV6_FLOW_LABEL: 457 case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6: 458 case ISCSI_PARAM_KEEPALIVE_TMO: 459 case ISCSI_PARAM_LOCAL_PORT: 460 case ISCSI_PARAM_ISID: 461 case ISCSI_PARAM_TSID: 462 case ISCSI_PARAM_DEF_TASKMGMT_TMO: 463 case ISCSI_PARAM_ERL: 464 case ISCSI_PARAM_STATSN: 465 case ISCSI_PARAM_EXP_STATSN: 466 case ISCSI_PARAM_DISCOVERY_PARENT_IDX: 467 case ISCSI_PARAM_DISCOVERY_PARENT_TYPE: 468 case ISCSI_PARAM_LOCAL_IPADDR: 469 return S_IRUGO; 470 default: 471 return 0; 472 } 473 case ISCSI_NET_PARAM: 474 switch (param) { 475 case ISCSI_NET_PARAM_IPV4_ADDR: 476 case ISCSI_NET_PARAM_IPV4_SUBNET: 477 case ISCSI_NET_PARAM_IPV4_GW: 478 case ISCSI_NET_PARAM_IPV4_BOOTPROTO: 479 case ISCSI_NET_PARAM_IFACE_ENABLE: 480 case ISCSI_NET_PARAM_IPV6_LINKLOCAL: 481 case ISCSI_NET_PARAM_IPV6_ADDR: 482 case ISCSI_NET_PARAM_IPV6_ROUTER: 483 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: 484 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: 485 case ISCSI_NET_PARAM_VLAN_ID: 486 case ISCSI_NET_PARAM_VLAN_PRIORITY: 487 case ISCSI_NET_PARAM_VLAN_ENABLED: 488 case ISCSI_NET_PARAM_MTU: 489 case ISCSI_NET_PARAM_PORT: 490 case ISCSI_NET_PARAM_IPADDR_STATE: 491 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE: 492 case ISCSI_NET_PARAM_IPV6_ROUTER_STATE: 493 case ISCSI_NET_PARAM_DELAYED_ACK_EN: 494 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: 495 case ISCSI_NET_PARAM_TCP_WSF_DISABLE: 496 case ISCSI_NET_PARAM_TCP_WSF: 497 case ISCSI_NET_PARAM_TCP_TIMER_SCALE: 498 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: 499 case ISCSI_NET_PARAM_CACHE_ID: 500 case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: 501 case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: 502 case ISCSI_NET_PARAM_IPV4_TOS_EN: 503 case ISCSI_NET_PARAM_IPV4_TOS: 504 case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: 505 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: 506 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: 507 case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: 508 case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: 509 case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: 510 case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: 511 case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: 512 case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: 513 case ISCSI_NET_PARAM_REDIRECT_EN: 514 case ISCSI_NET_PARAM_IPV4_TTL: 515 case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: 516 case ISCSI_NET_PARAM_IPV6_MLD_EN: 517 case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: 518 case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: 519 case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: 520 case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: 521 case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: 522 case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: 523 case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: 524 case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: 525 return S_IRUGO; 526 default: 527 return 0; 528 } 529 case ISCSI_IFACE_PARAM: 530 switch (param) { 531 case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: 532 case ISCSI_IFACE_PARAM_HDRDGST_EN: 533 case ISCSI_IFACE_PARAM_DATADGST_EN: 534 case ISCSI_IFACE_PARAM_IMM_DATA_EN: 535 case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: 536 case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: 537 case ISCSI_IFACE_PARAM_PDU_INORDER_EN: 538 case ISCSI_IFACE_PARAM_ERL: 539 case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: 540 case ISCSI_IFACE_PARAM_FIRST_BURST: 541 case ISCSI_IFACE_PARAM_MAX_R2T: 542 case ISCSI_IFACE_PARAM_MAX_BURST: 543 case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: 544 case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: 545 case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: 546 case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: 547 case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: 548 case ISCSI_IFACE_PARAM_INITIATOR_NAME: 549 return S_IRUGO; 550 default: 551 return 0; 552 } 553 case ISCSI_FLASHNODE_PARAM: 554 switch (param) { 555 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: 556 case ISCSI_FLASHNODE_PORTAL_TYPE: 557 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: 558 case ISCSI_FLASHNODE_DISCOVERY_SESS: 559 case ISCSI_FLASHNODE_ENTRY_EN: 560 case ISCSI_FLASHNODE_HDR_DGST_EN: 561 case ISCSI_FLASHNODE_DATA_DGST_EN: 562 case ISCSI_FLASHNODE_IMM_DATA_EN: 563 case ISCSI_FLASHNODE_INITIAL_R2T_EN: 564 case ISCSI_FLASHNODE_DATASEQ_INORDER: 565 case ISCSI_FLASHNODE_PDU_INORDER: 566 case ISCSI_FLASHNODE_CHAP_AUTH_EN: 567 case ISCSI_FLASHNODE_SNACK_REQ_EN: 568 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: 569 case ISCSI_FLASHNODE_BIDI_CHAP_EN: 570 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: 571 case ISCSI_FLASHNODE_ERL: 572 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: 573 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: 574 case ISCSI_FLASHNODE_TCP_WSF_DISABLE: 575 case ISCSI_FLASHNODE_TCP_TIMER_SCALE: 576 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: 577 case ISCSI_FLASHNODE_IP_FRAG_DISABLE: 578 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: 579 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: 580 case ISCSI_FLASHNODE_FIRST_BURST: 581 case ISCSI_FLASHNODE_DEF_TIME2WAIT: 582 case ISCSI_FLASHNODE_DEF_TIME2RETAIN: 583 case ISCSI_FLASHNODE_MAX_R2T: 584 case ISCSI_FLASHNODE_KEEPALIVE_TMO: 585 case ISCSI_FLASHNODE_ISID: 586 case ISCSI_FLASHNODE_TSID: 587 case ISCSI_FLASHNODE_PORT: 588 case ISCSI_FLASHNODE_MAX_BURST: 589 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: 590 case ISCSI_FLASHNODE_IPADDR: 591 case ISCSI_FLASHNODE_ALIAS: 592 case ISCSI_FLASHNODE_REDIRECT_IPADDR: 593 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: 594 case ISCSI_FLASHNODE_LOCAL_PORT: 595 case ISCSI_FLASHNODE_IPV4_TOS: 596 case ISCSI_FLASHNODE_IPV6_TC: 597 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: 598 case ISCSI_FLASHNODE_NAME: 599 case ISCSI_FLASHNODE_TPGT: 600 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: 601 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: 602 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE: 603 case ISCSI_FLASHNODE_TCP_XMIT_WSF: 604 case ISCSI_FLASHNODE_TCP_RECV_WSF: 605 case ISCSI_FLASHNODE_CHAP_OUT_IDX: 606 case ISCSI_FLASHNODE_USERNAME: 607 case ISCSI_FLASHNODE_PASSWORD: 608 case ISCSI_FLASHNODE_STATSN: 609 case ISCSI_FLASHNODE_EXP_STATSN: 610 case ISCSI_FLASHNODE_IS_BOOT_TGT: 611 return S_IRUGO; 612 default: 613 return 0; 614 } 615 } 616 617 return 0; 618 } 619 620 /** 621 * qla4xxx_create chap_list - Create CHAP list from FLASH 622 * @ha: pointer to adapter structure 623 * 624 * Read flash and make a list of CHAP entries, during login when a CHAP entry 625 * is received, it will be checked in this list. If entry exist then the CHAP 626 * entry index is set in the DDB. If CHAP entry does not exist in this list 627 * then a new entry is added in FLASH in CHAP table and the index obtained is 628 * used in the DDB. 629 **/ 630 static void qla4xxx_create_chap_list(struct scsi_qla_host *ha) 631 { 632 int rval = 0; 633 uint8_t *chap_flash_data = NULL; 634 uint32_t offset; 635 dma_addr_t chap_dma; 636 uint32_t chap_size = 0; 637 638 if (is_qla40XX(ha)) 639 chap_size = MAX_CHAP_ENTRIES_40XX * 640 sizeof(struct ql4_chap_table); 641 else /* Single region contains CHAP info for both 642 * ports which is divided into half for each port. 643 */ 644 chap_size = ha->hw.flt_chap_size / 2; 645 646 chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size, 647 &chap_dma, GFP_KERNEL); 648 if (!chap_flash_data) { 649 ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n"); 650 return; 651 } 652 653 if (is_qla40XX(ha)) { 654 offset = FLASH_CHAP_OFFSET; 655 } else { 656 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2); 657 if (ha->port_num == 1) 658 offset += chap_size; 659 } 660 661 rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size); 662 if (rval != QLA_SUCCESS) 663 goto exit_chap_list; 664 665 if (ha->chap_list == NULL) 666 ha->chap_list = vmalloc(chap_size); 667 if (ha->chap_list == NULL) { 668 ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n"); 669 goto exit_chap_list; 670 } 671 672 memset(ha->chap_list, 0, chap_size); 673 memcpy(ha->chap_list, chap_flash_data, chap_size); 674 675 exit_chap_list: 676 dma_free_coherent(&ha->pdev->dev, chap_size, chap_flash_data, chap_dma); 677 } 678 679 static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha, 680 int16_t chap_index, 681 struct ql4_chap_table **chap_entry) 682 { 683 int rval = QLA_ERROR; 684 int max_chap_entries; 685 686 if (!ha->chap_list) { 687 ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n"); 688 rval = QLA_ERROR; 689 goto exit_get_chap; 690 } 691 692 if (is_qla80XX(ha)) 693 max_chap_entries = (ha->hw.flt_chap_size / 2) / 694 sizeof(struct ql4_chap_table); 695 else 696 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 697 698 if (chap_index > max_chap_entries) { 699 ql4_printk(KERN_ERR, ha, "Invalid Chap index\n"); 700 rval = QLA_ERROR; 701 goto exit_get_chap; 702 } 703 704 *chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index; 705 if ((*chap_entry)->cookie != 706 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) { 707 rval = QLA_ERROR; 708 *chap_entry = NULL; 709 } else { 710 rval = QLA_SUCCESS; 711 } 712 713 exit_get_chap: 714 return rval; 715 } 716 717 /** 718 * qla4xxx_find_free_chap_index - Find the first free chap index 719 * @ha: pointer to adapter structure 720 * @chap_index: CHAP index to be returned 721 * 722 * Find the first free chap index available in the chap table 723 * 724 * Note: Caller should acquire the chap lock before getting here. 725 **/ 726 static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha, 727 uint16_t *chap_index) 728 { 729 int i, rval; 730 int free_index = -1; 731 int max_chap_entries = 0; 732 struct ql4_chap_table *chap_table; 733 734 if (is_qla80XX(ha)) 735 max_chap_entries = (ha->hw.flt_chap_size / 2) / 736 sizeof(struct ql4_chap_table); 737 else 738 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 739 740 if (!ha->chap_list) { 741 ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n"); 742 rval = QLA_ERROR; 743 goto exit_find_chap; 744 } 745 746 for (i = 0; i < max_chap_entries; i++) { 747 chap_table = (struct ql4_chap_table *)ha->chap_list + i; 748 749 if ((chap_table->cookie != 750 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) && 751 (i > MAX_RESRV_CHAP_IDX)) { 752 free_index = i; 753 break; 754 } 755 } 756 757 if (free_index != -1) { 758 *chap_index = free_index; 759 rval = QLA_SUCCESS; 760 } else { 761 rval = QLA_ERROR; 762 } 763 764 exit_find_chap: 765 return rval; 766 } 767 768 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, 769 uint32_t *num_entries, char *buf) 770 { 771 struct scsi_qla_host *ha = to_qla_host(shost); 772 struct ql4_chap_table *chap_table; 773 struct iscsi_chap_rec *chap_rec; 774 int max_chap_entries = 0; 775 int valid_chap_entries = 0; 776 int ret = 0, i; 777 778 if (is_qla80XX(ha)) 779 max_chap_entries = (ha->hw.flt_chap_size / 2) / 780 sizeof(struct ql4_chap_table); 781 else 782 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 783 784 ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n", 785 __func__, *num_entries, chap_tbl_idx); 786 787 if (!buf) { 788 ret = -ENOMEM; 789 goto exit_get_chap_list; 790 } 791 792 qla4xxx_create_chap_list(ha); 793 794 chap_rec = (struct iscsi_chap_rec *) buf; 795 mutex_lock(&ha->chap_sem); 796 for (i = chap_tbl_idx; i < max_chap_entries; i++) { 797 chap_table = (struct ql4_chap_table *)ha->chap_list + i; 798 if (chap_table->cookie != 799 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) 800 continue; 801 802 chap_rec->chap_tbl_idx = i; 803 strlcpy(chap_rec->username, chap_table->name, 804 ISCSI_CHAP_AUTH_NAME_MAX_LEN); 805 strlcpy(chap_rec->password, chap_table->secret, 806 QL4_CHAP_MAX_SECRET_LEN); 807 chap_rec->password_length = chap_table->secret_len; 808 809 if (chap_table->flags & BIT_7) /* local */ 810 chap_rec->chap_type = CHAP_TYPE_OUT; 811 812 if (chap_table->flags & BIT_6) /* peer */ 813 chap_rec->chap_type = CHAP_TYPE_IN; 814 815 chap_rec++; 816 817 valid_chap_entries++; 818 if (valid_chap_entries == *num_entries) 819 break; 820 else 821 continue; 822 } 823 mutex_unlock(&ha->chap_sem); 824 825 exit_get_chap_list: 826 ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n", 827 __func__, valid_chap_entries); 828 *num_entries = valid_chap_entries; 829 return ret; 830 } 831 832 static int __qla4xxx_is_chap_active(struct device *dev, void *data) 833 { 834 int ret = 0; 835 uint16_t *chap_tbl_idx = (uint16_t *) data; 836 struct iscsi_cls_session *cls_session; 837 struct iscsi_session *sess; 838 struct ddb_entry *ddb_entry; 839 840 if (!iscsi_is_session_dev(dev)) 841 goto exit_is_chap_active; 842 843 cls_session = iscsi_dev_to_session(dev); 844 sess = cls_session->dd_data; 845 ddb_entry = sess->dd_data; 846 847 if (iscsi_session_chkready(cls_session)) 848 goto exit_is_chap_active; 849 850 if (ddb_entry->chap_tbl_idx == *chap_tbl_idx) 851 ret = 1; 852 853 exit_is_chap_active: 854 return ret; 855 } 856 857 static int qla4xxx_is_chap_active(struct Scsi_Host *shost, 858 uint16_t chap_tbl_idx) 859 { 860 int ret = 0; 861 862 ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx, 863 __qla4xxx_is_chap_active); 864 865 return ret; 866 } 867 868 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx) 869 { 870 struct scsi_qla_host *ha = to_qla_host(shost); 871 struct ql4_chap_table *chap_table; 872 dma_addr_t chap_dma; 873 int max_chap_entries = 0; 874 uint32_t offset = 0; 875 uint32_t chap_size; 876 int ret = 0; 877 878 chap_table = dma_pool_zalloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma); 879 if (chap_table == NULL) 880 return -ENOMEM; 881 882 if (is_qla80XX(ha)) 883 max_chap_entries = (ha->hw.flt_chap_size / 2) / 884 sizeof(struct ql4_chap_table); 885 else 886 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 887 888 if (chap_tbl_idx > max_chap_entries) { 889 ret = -EINVAL; 890 goto exit_delete_chap; 891 } 892 893 /* Check if chap index is in use. 894 * If chap is in use don't delet chap entry */ 895 ret = qla4xxx_is_chap_active(shost, chap_tbl_idx); 896 if (ret) { 897 ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot " 898 "delete from flash\n", chap_tbl_idx); 899 ret = -EBUSY; 900 goto exit_delete_chap; 901 } 902 903 chap_size = sizeof(struct ql4_chap_table); 904 if (is_qla40XX(ha)) 905 offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size); 906 else { 907 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2); 908 /* flt_chap_size is CHAP table size for both ports 909 * so divide it by 2 to calculate the offset for second port 910 */ 911 if (ha->port_num == 1) 912 offset += (ha->hw.flt_chap_size / 2); 913 offset += (chap_tbl_idx * chap_size); 914 } 915 916 ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size); 917 if (ret != QLA_SUCCESS) { 918 ret = -EINVAL; 919 goto exit_delete_chap; 920 } 921 922 DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n", 923 __le16_to_cpu(chap_table->cookie))); 924 925 if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) { 926 ql4_printk(KERN_ERR, ha, "No valid chap entry found\n"); 927 goto exit_delete_chap; 928 } 929 930 chap_table->cookie = __constant_cpu_to_le16(0xFFFF); 931 932 offset = FLASH_CHAP_OFFSET | 933 (chap_tbl_idx * sizeof(struct ql4_chap_table)); 934 ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size, 935 FLASH_OPT_RMW_COMMIT); 936 if (ret == QLA_SUCCESS && ha->chap_list) { 937 mutex_lock(&ha->chap_sem); 938 /* Update ha chap_list cache */ 939 memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx, 940 chap_table, sizeof(struct ql4_chap_table)); 941 mutex_unlock(&ha->chap_sem); 942 } 943 if (ret != QLA_SUCCESS) 944 ret = -EINVAL; 945 946 exit_delete_chap: 947 dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma); 948 return ret; 949 } 950 951 /** 952 * qla4xxx_set_chap_entry - Make chap entry with given information 953 * @shost: pointer to host 954 * @data: chap info - credentials, index and type to make chap entry 955 * @len: length of data 956 * 957 * Add or update chap entry with the given information 958 **/ 959 static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len) 960 { 961 struct scsi_qla_host *ha = to_qla_host(shost); 962 struct iscsi_chap_rec chap_rec; 963 struct ql4_chap_table *chap_entry = NULL; 964 struct iscsi_param_info *param_info; 965 struct nlattr *attr; 966 int max_chap_entries = 0; 967 int type; 968 int rem = len; 969 int rc = 0; 970 int size; 971 972 memset(&chap_rec, 0, sizeof(chap_rec)); 973 974 nla_for_each_attr(attr, data, len, rem) { 975 param_info = nla_data(attr); 976 977 switch (param_info->param) { 978 case ISCSI_CHAP_PARAM_INDEX: 979 chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value; 980 break; 981 case ISCSI_CHAP_PARAM_CHAP_TYPE: 982 chap_rec.chap_type = param_info->value[0]; 983 break; 984 case ISCSI_CHAP_PARAM_USERNAME: 985 size = min_t(size_t, sizeof(chap_rec.username), 986 param_info->len); 987 memcpy(chap_rec.username, param_info->value, size); 988 break; 989 case ISCSI_CHAP_PARAM_PASSWORD: 990 size = min_t(size_t, sizeof(chap_rec.password), 991 param_info->len); 992 memcpy(chap_rec.password, param_info->value, size); 993 break; 994 case ISCSI_CHAP_PARAM_PASSWORD_LEN: 995 chap_rec.password_length = param_info->value[0]; 996 break; 997 default: 998 ql4_printk(KERN_ERR, ha, 999 "%s: No such sysfs attribute\n", __func__); 1000 rc = -ENOSYS; 1001 goto exit_set_chap; 1002 } 1003 } 1004 1005 if (chap_rec.chap_type == CHAP_TYPE_IN) 1006 type = BIDI_CHAP; 1007 else 1008 type = LOCAL_CHAP; 1009 1010 if (is_qla80XX(ha)) 1011 max_chap_entries = (ha->hw.flt_chap_size / 2) / 1012 sizeof(struct ql4_chap_table); 1013 else 1014 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 1015 1016 mutex_lock(&ha->chap_sem); 1017 if (chap_rec.chap_tbl_idx < max_chap_entries) { 1018 rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx, 1019 &chap_entry); 1020 if (!rc) { 1021 if (!(type == qla4xxx_get_chap_type(chap_entry))) { 1022 ql4_printk(KERN_INFO, ha, 1023 "Type mismatch for CHAP entry %d\n", 1024 chap_rec.chap_tbl_idx); 1025 rc = -EINVAL; 1026 goto exit_unlock_chap; 1027 } 1028 1029 /* If chap index is in use then don't modify it */ 1030 rc = qla4xxx_is_chap_active(shost, 1031 chap_rec.chap_tbl_idx); 1032 if (rc) { 1033 ql4_printk(KERN_INFO, ha, 1034 "CHAP entry %d is in use\n", 1035 chap_rec.chap_tbl_idx); 1036 rc = -EBUSY; 1037 goto exit_unlock_chap; 1038 } 1039 } 1040 } else { 1041 rc = qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx); 1042 if (rc) { 1043 ql4_printk(KERN_INFO, ha, "CHAP entry not available\n"); 1044 rc = -EBUSY; 1045 goto exit_unlock_chap; 1046 } 1047 } 1048 1049 rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password, 1050 chap_rec.chap_tbl_idx, type); 1051 1052 exit_unlock_chap: 1053 mutex_unlock(&ha->chap_sem); 1054 1055 exit_set_chap: 1056 return rc; 1057 } 1058 1059 1060 static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len) 1061 { 1062 struct scsi_qla_host *ha = to_qla_host(shost); 1063 struct iscsi_offload_host_stats *host_stats = NULL; 1064 int host_stats_size; 1065 int ret = 0; 1066 int ddb_idx = 0; 1067 struct ql_iscsi_stats *ql_iscsi_stats = NULL; 1068 int stats_size; 1069 dma_addr_t iscsi_stats_dma; 1070 1071 DEBUG2(ql4_printk(KERN_INFO, ha, "Func: %s\n", __func__)); 1072 1073 host_stats_size = sizeof(struct iscsi_offload_host_stats); 1074 1075 if (host_stats_size != len) { 1076 ql4_printk(KERN_INFO, ha, "%s: host_stats size mismatch expected = %d, is = %d\n", 1077 __func__, len, host_stats_size); 1078 ret = -EINVAL; 1079 goto exit_host_stats; 1080 } 1081 host_stats = (struct iscsi_offload_host_stats *)buf; 1082 1083 if (!buf) { 1084 ret = -ENOMEM; 1085 goto exit_host_stats; 1086 } 1087 1088 stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats)); 1089 1090 ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size, 1091 &iscsi_stats_dma, GFP_KERNEL); 1092 if (!ql_iscsi_stats) { 1093 ql4_printk(KERN_ERR, ha, 1094 "Unable to allocate memory for iscsi stats\n"); 1095 ret = -ENOMEM; 1096 goto exit_host_stats; 1097 } 1098 1099 ret = qla4xxx_get_mgmt_data(ha, ddb_idx, stats_size, 1100 iscsi_stats_dma); 1101 if (ret != QLA_SUCCESS) { 1102 ql4_printk(KERN_ERR, ha, 1103 "Unable to retrieve iscsi stats\n"); 1104 ret = -EIO; 1105 goto exit_host_stats; 1106 } 1107 host_stats->mactx_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_frames); 1108 host_stats->mactx_bytes = le64_to_cpu(ql_iscsi_stats->mac_tx_bytes); 1109 host_stats->mactx_multicast_frames = 1110 le64_to_cpu(ql_iscsi_stats->mac_tx_multicast_frames); 1111 host_stats->mactx_broadcast_frames = 1112 le64_to_cpu(ql_iscsi_stats->mac_tx_broadcast_frames); 1113 host_stats->mactx_pause_frames = 1114 le64_to_cpu(ql_iscsi_stats->mac_tx_pause_frames); 1115 host_stats->mactx_control_frames = 1116 le64_to_cpu(ql_iscsi_stats->mac_tx_control_frames); 1117 host_stats->mactx_deferral = 1118 le64_to_cpu(ql_iscsi_stats->mac_tx_deferral); 1119 host_stats->mactx_excess_deferral = 1120 le64_to_cpu(ql_iscsi_stats->mac_tx_excess_deferral); 1121 host_stats->mactx_late_collision = 1122 le64_to_cpu(ql_iscsi_stats->mac_tx_late_collision); 1123 host_stats->mactx_abort = le64_to_cpu(ql_iscsi_stats->mac_tx_abort); 1124 host_stats->mactx_single_collision = 1125 le64_to_cpu(ql_iscsi_stats->mac_tx_single_collision); 1126 host_stats->mactx_multiple_collision = 1127 le64_to_cpu(ql_iscsi_stats->mac_tx_multiple_collision); 1128 host_stats->mactx_collision = 1129 le64_to_cpu(ql_iscsi_stats->mac_tx_collision); 1130 host_stats->mactx_frames_dropped = 1131 le64_to_cpu(ql_iscsi_stats->mac_tx_frames_dropped); 1132 host_stats->mactx_jumbo_frames = 1133 le64_to_cpu(ql_iscsi_stats->mac_tx_jumbo_frames); 1134 host_stats->macrx_frames = le64_to_cpu(ql_iscsi_stats->mac_rx_frames); 1135 host_stats->macrx_bytes = le64_to_cpu(ql_iscsi_stats->mac_rx_bytes); 1136 host_stats->macrx_unknown_control_frames = 1137 le64_to_cpu(ql_iscsi_stats->mac_rx_unknown_control_frames); 1138 host_stats->macrx_pause_frames = 1139 le64_to_cpu(ql_iscsi_stats->mac_rx_pause_frames); 1140 host_stats->macrx_control_frames = 1141 le64_to_cpu(ql_iscsi_stats->mac_rx_control_frames); 1142 host_stats->macrx_dribble = 1143 le64_to_cpu(ql_iscsi_stats->mac_rx_dribble); 1144 host_stats->macrx_frame_length_error = 1145 le64_to_cpu(ql_iscsi_stats->mac_rx_frame_length_error); 1146 host_stats->macrx_jabber = le64_to_cpu(ql_iscsi_stats->mac_rx_jabber); 1147 host_stats->macrx_carrier_sense_error = 1148 le64_to_cpu(ql_iscsi_stats->mac_rx_carrier_sense_error); 1149 host_stats->macrx_frame_discarded = 1150 le64_to_cpu(ql_iscsi_stats->mac_rx_frame_discarded); 1151 host_stats->macrx_frames_dropped = 1152 le64_to_cpu(ql_iscsi_stats->mac_rx_frames_dropped); 1153 host_stats->mac_crc_error = le64_to_cpu(ql_iscsi_stats->mac_crc_error); 1154 host_stats->mac_encoding_error = 1155 le64_to_cpu(ql_iscsi_stats->mac_encoding_error); 1156 host_stats->macrx_length_error_large = 1157 le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_large); 1158 host_stats->macrx_length_error_small = 1159 le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_small); 1160 host_stats->macrx_multicast_frames = 1161 le64_to_cpu(ql_iscsi_stats->mac_rx_multicast_frames); 1162 host_stats->macrx_broadcast_frames = 1163 le64_to_cpu(ql_iscsi_stats->mac_rx_broadcast_frames); 1164 host_stats->iptx_packets = le64_to_cpu(ql_iscsi_stats->ip_tx_packets); 1165 host_stats->iptx_bytes = le64_to_cpu(ql_iscsi_stats->ip_tx_bytes); 1166 host_stats->iptx_fragments = 1167 le64_to_cpu(ql_iscsi_stats->ip_tx_fragments); 1168 host_stats->iprx_packets = le64_to_cpu(ql_iscsi_stats->ip_rx_packets); 1169 host_stats->iprx_bytes = le64_to_cpu(ql_iscsi_stats->ip_rx_bytes); 1170 host_stats->iprx_fragments = 1171 le64_to_cpu(ql_iscsi_stats->ip_rx_fragments); 1172 host_stats->ip_datagram_reassembly = 1173 le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly); 1174 host_stats->ip_invalid_address_error = 1175 le64_to_cpu(ql_iscsi_stats->ip_invalid_address_error); 1176 host_stats->ip_error_packets = 1177 le64_to_cpu(ql_iscsi_stats->ip_error_packets); 1178 host_stats->ip_fragrx_overlap = 1179 le64_to_cpu(ql_iscsi_stats->ip_fragrx_overlap); 1180 host_stats->ip_fragrx_outoforder = 1181 le64_to_cpu(ql_iscsi_stats->ip_fragrx_outoforder); 1182 host_stats->ip_datagram_reassembly_timeout = 1183 le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly_timeout); 1184 host_stats->ipv6tx_packets = 1185 le64_to_cpu(ql_iscsi_stats->ipv6_tx_packets); 1186 host_stats->ipv6tx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_tx_bytes); 1187 host_stats->ipv6tx_fragments = 1188 le64_to_cpu(ql_iscsi_stats->ipv6_tx_fragments); 1189 host_stats->ipv6rx_packets = 1190 le64_to_cpu(ql_iscsi_stats->ipv6_rx_packets); 1191 host_stats->ipv6rx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_rx_bytes); 1192 host_stats->ipv6rx_fragments = 1193 le64_to_cpu(ql_iscsi_stats->ipv6_rx_fragments); 1194 host_stats->ipv6_datagram_reassembly = 1195 le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly); 1196 host_stats->ipv6_invalid_address_error = 1197 le64_to_cpu(ql_iscsi_stats->ipv6_invalid_address_error); 1198 host_stats->ipv6_error_packets = 1199 le64_to_cpu(ql_iscsi_stats->ipv6_error_packets); 1200 host_stats->ipv6_fragrx_overlap = 1201 le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_overlap); 1202 host_stats->ipv6_fragrx_outoforder = 1203 le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_outoforder); 1204 host_stats->ipv6_datagram_reassembly_timeout = 1205 le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly_timeout); 1206 host_stats->tcptx_segments = 1207 le64_to_cpu(ql_iscsi_stats->tcp_tx_segments); 1208 host_stats->tcptx_bytes = le64_to_cpu(ql_iscsi_stats->tcp_tx_bytes); 1209 host_stats->tcprx_segments = 1210 le64_to_cpu(ql_iscsi_stats->tcp_rx_segments); 1211 host_stats->tcprx_byte = le64_to_cpu(ql_iscsi_stats->tcp_rx_byte); 1212 host_stats->tcp_duplicate_ack_retx = 1213 le64_to_cpu(ql_iscsi_stats->tcp_duplicate_ack_retx); 1214 host_stats->tcp_retx_timer_expired = 1215 le64_to_cpu(ql_iscsi_stats->tcp_retx_timer_expired); 1216 host_stats->tcprx_duplicate_ack = 1217 le64_to_cpu(ql_iscsi_stats->tcp_rx_duplicate_ack); 1218 host_stats->tcprx_pure_ackr = 1219 le64_to_cpu(ql_iscsi_stats->tcp_rx_pure_ackr); 1220 host_stats->tcptx_delayed_ack = 1221 le64_to_cpu(ql_iscsi_stats->tcp_tx_delayed_ack); 1222 host_stats->tcptx_pure_ack = 1223 le64_to_cpu(ql_iscsi_stats->tcp_tx_pure_ack); 1224 host_stats->tcprx_segment_error = 1225 le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_error); 1226 host_stats->tcprx_segment_outoforder = 1227 le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_outoforder); 1228 host_stats->tcprx_window_probe = 1229 le64_to_cpu(ql_iscsi_stats->tcp_rx_window_probe); 1230 host_stats->tcprx_window_update = 1231 le64_to_cpu(ql_iscsi_stats->tcp_rx_window_update); 1232 host_stats->tcptx_window_probe_persist = 1233 le64_to_cpu(ql_iscsi_stats->tcp_tx_window_probe_persist); 1234 host_stats->ecc_error_correction = 1235 le64_to_cpu(ql_iscsi_stats->ecc_error_correction); 1236 host_stats->iscsi_pdu_tx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_tx); 1237 host_stats->iscsi_data_bytes_tx = 1238 le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_tx); 1239 host_stats->iscsi_pdu_rx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_rx); 1240 host_stats->iscsi_data_bytes_rx = 1241 le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_rx); 1242 host_stats->iscsi_io_completed = 1243 le64_to_cpu(ql_iscsi_stats->iscsi_io_completed); 1244 host_stats->iscsi_unexpected_io_rx = 1245 le64_to_cpu(ql_iscsi_stats->iscsi_unexpected_io_rx); 1246 host_stats->iscsi_format_error = 1247 le64_to_cpu(ql_iscsi_stats->iscsi_format_error); 1248 host_stats->iscsi_hdr_digest_error = 1249 le64_to_cpu(ql_iscsi_stats->iscsi_hdr_digest_error); 1250 host_stats->iscsi_data_digest_error = 1251 le64_to_cpu(ql_iscsi_stats->iscsi_data_digest_error); 1252 host_stats->iscsi_sequence_error = 1253 le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error); 1254 exit_host_stats: 1255 if (ql_iscsi_stats) 1256 dma_free_coherent(&ha->pdev->dev, stats_size, 1257 ql_iscsi_stats, iscsi_stats_dma); 1258 1259 ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n", 1260 __func__); 1261 return ret; 1262 } 1263 1264 static int qla4xxx_get_iface_param(struct iscsi_iface *iface, 1265 enum iscsi_param_type param_type, 1266 int param, char *buf) 1267 { 1268 struct Scsi_Host *shost = iscsi_iface_to_shost(iface); 1269 struct scsi_qla_host *ha = to_qla_host(shost); 1270 int ival; 1271 char *pval = NULL; 1272 int len = -ENOSYS; 1273 1274 if (param_type == ISCSI_NET_PARAM) { 1275 switch (param) { 1276 case ISCSI_NET_PARAM_IPV4_ADDR: 1277 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address); 1278 break; 1279 case ISCSI_NET_PARAM_IPV4_SUBNET: 1280 len = sprintf(buf, "%pI4\n", 1281 &ha->ip_config.subnet_mask); 1282 break; 1283 case ISCSI_NET_PARAM_IPV4_GW: 1284 len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway); 1285 break; 1286 case ISCSI_NET_PARAM_IFACE_ENABLE: 1287 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1288 OP_STATE(ha->ip_config.ipv4_options, 1289 IPOPT_IPV4_PROTOCOL_ENABLE, pval); 1290 } else { 1291 OP_STATE(ha->ip_config.ipv6_options, 1292 IPV6_OPT_IPV6_PROTOCOL_ENABLE, pval); 1293 } 1294 1295 len = sprintf(buf, "%s\n", pval); 1296 break; 1297 case ISCSI_NET_PARAM_IPV4_BOOTPROTO: 1298 len = sprintf(buf, "%s\n", 1299 (ha->ip_config.tcp_options & 1300 TCPOPT_DHCP_ENABLE) ? 1301 "dhcp" : "static"); 1302 break; 1303 case ISCSI_NET_PARAM_IPV6_ADDR: 1304 if (iface->iface_num == 0) 1305 len = sprintf(buf, "%pI6\n", 1306 &ha->ip_config.ipv6_addr0); 1307 if (iface->iface_num == 1) 1308 len = sprintf(buf, "%pI6\n", 1309 &ha->ip_config.ipv6_addr1); 1310 break; 1311 case ISCSI_NET_PARAM_IPV6_LINKLOCAL: 1312 len = sprintf(buf, "%pI6\n", 1313 &ha->ip_config.ipv6_link_local_addr); 1314 break; 1315 case ISCSI_NET_PARAM_IPV6_ROUTER: 1316 len = sprintf(buf, "%pI6\n", 1317 &ha->ip_config.ipv6_default_router_addr); 1318 break; 1319 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: 1320 pval = (ha->ip_config.ipv6_addl_options & 1321 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ? 1322 "nd" : "static"; 1323 1324 len = sprintf(buf, "%s\n", pval); 1325 break; 1326 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: 1327 pval = (ha->ip_config.ipv6_addl_options & 1328 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ? 1329 "auto" : "static"; 1330 1331 len = sprintf(buf, "%s\n", pval); 1332 break; 1333 case ISCSI_NET_PARAM_VLAN_ID: 1334 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1335 ival = ha->ip_config.ipv4_vlan_tag & 1336 ISCSI_MAX_VLAN_ID; 1337 else 1338 ival = ha->ip_config.ipv6_vlan_tag & 1339 ISCSI_MAX_VLAN_ID; 1340 1341 len = sprintf(buf, "%d\n", ival); 1342 break; 1343 case ISCSI_NET_PARAM_VLAN_PRIORITY: 1344 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1345 ival = (ha->ip_config.ipv4_vlan_tag >> 13) & 1346 ISCSI_MAX_VLAN_PRIORITY; 1347 else 1348 ival = (ha->ip_config.ipv6_vlan_tag >> 13) & 1349 ISCSI_MAX_VLAN_PRIORITY; 1350 1351 len = sprintf(buf, "%d\n", ival); 1352 break; 1353 case ISCSI_NET_PARAM_VLAN_ENABLED: 1354 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1355 OP_STATE(ha->ip_config.ipv4_options, 1356 IPOPT_VLAN_TAGGING_ENABLE, pval); 1357 } else { 1358 OP_STATE(ha->ip_config.ipv6_options, 1359 IPV6_OPT_VLAN_TAGGING_ENABLE, pval); 1360 } 1361 len = sprintf(buf, "%s\n", pval); 1362 break; 1363 case ISCSI_NET_PARAM_MTU: 1364 len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size); 1365 break; 1366 case ISCSI_NET_PARAM_PORT: 1367 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1368 len = sprintf(buf, "%d\n", 1369 ha->ip_config.ipv4_port); 1370 else 1371 len = sprintf(buf, "%d\n", 1372 ha->ip_config.ipv6_port); 1373 break; 1374 case ISCSI_NET_PARAM_IPADDR_STATE: 1375 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1376 pval = iscsi_get_ipaddress_state_name( 1377 ha->ip_config.ipv4_addr_state); 1378 } else { 1379 if (iface->iface_num == 0) 1380 pval = iscsi_get_ipaddress_state_name( 1381 ha->ip_config.ipv6_addr0_state); 1382 else if (iface->iface_num == 1) 1383 pval = iscsi_get_ipaddress_state_name( 1384 ha->ip_config.ipv6_addr1_state); 1385 } 1386 1387 len = sprintf(buf, "%s\n", pval); 1388 break; 1389 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE: 1390 pval = iscsi_get_ipaddress_state_name( 1391 ha->ip_config.ipv6_link_local_state); 1392 len = sprintf(buf, "%s\n", pval); 1393 break; 1394 case ISCSI_NET_PARAM_IPV6_ROUTER_STATE: 1395 pval = iscsi_get_router_state_name( 1396 ha->ip_config.ipv6_default_router_state); 1397 len = sprintf(buf, "%s\n", pval); 1398 break; 1399 case ISCSI_NET_PARAM_DELAYED_ACK_EN: 1400 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1401 OP_STATE(~ha->ip_config.tcp_options, 1402 TCPOPT_DELAYED_ACK_DISABLE, pval); 1403 } else { 1404 OP_STATE(~ha->ip_config.ipv6_tcp_options, 1405 IPV6_TCPOPT_DELAYED_ACK_DISABLE, pval); 1406 } 1407 len = sprintf(buf, "%s\n", pval); 1408 break; 1409 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: 1410 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1411 OP_STATE(~ha->ip_config.tcp_options, 1412 TCPOPT_NAGLE_ALGO_DISABLE, pval); 1413 } else { 1414 OP_STATE(~ha->ip_config.ipv6_tcp_options, 1415 IPV6_TCPOPT_NAGLE_ALGO_DISABLE, pval); 1416 } 1417 len = sprintf(buf, "%s\n", pval); 1418 break; 1419 case ISCSI_NET_PARAM_TCP_WSF_DISABLE: 1420 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1421 OP_STATE(~ha->ip_config.tcp_options, 1422 TCPOPT_WINDOW_SCALE_DISABLE, pval); 1423 } else { 1424 OP_STATE(~ha->ip_config.ipv6_tcp_options, 1425 IPV6_TCPOPT_WINDOW_SCALE_DISABLE, 1426 pval); 1427 } 1428 len = sprintf(buf, "%s\n", pval); 1429 break; 1430 case ISCSI_NET_PARAM_TCP_WSF: 1431 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1432 len = sprintf(buf, "%d\n", 1433 ha->ip_config.tcp_wsf); 1434 else 1435 len = sprintf(buf, "%d\n", 1436 ha->ip_config.ipv6_tcp_wsf); 1437 break; 1438 case ISCSI_NET_PARAM_TCP_TIMER_SCALE: 1439 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1440 ival = (ha->ip_config.tcp_options & 1441 TCPOPT_TIMER_SCALE) >> 1; 1442 else 1443 ival = (ha->ip_config.ipv6_tcp_options & 1444 IPV6_TCPOPT_TIMER_SCALE) >> 1; 1445 1446 len = sprintf(buf, "%d\n", ival); 1447 break; 1448 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: 1449 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1450 OP_STATE(ha->ip_config.tcp_options, 1451 TCPOPT_TIMESTAMP_ENABLE, pval); 1452 } else { 1453 OP_STATE(ha->ip_config.ipv6_tcp_options, 1454 IPV6_TCPOPT_TIMESTAMP_EN, pval); 1455 } 1456 len = sprintf(buf, "%s\n", pval); 1457 break; 1458 case ISCSI_NET_PARAM_CACHE_ID: 1459 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1460 len = sprintf(buf, "%d\n", 1461 ha->ip_config.ipv4_cache_id); 1462 else 1463 len = sprintf(buf, "%d\n", 1464 ha->ip_config.ipv6_cache_id); 1465 break; 1466 case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: 1467 OP_STATE(ha->ip_config.tcp_options, 1468 TCPOPT_DNS_SERVER_IP_EN, pval); 1469 1470 len = sprintf(buf, "%s\n", pval); 1471 break; 1472 case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: 1473 OP_STATE(ha->ip_config.tcp_options, 1474 TCPOPT_SLP_DA_INFO_EN, pval); 1475 1476 len = sprintf(buf, "%s\n", pval); 1477 break; 1478 case ISCSI_NET_PARAM_IPV4_TOS_EN: 1479 OP_STATE(ha->ip_config.ipv4_options, 1480 IPOPT_IPV4_TOS_EN, pval); 1481 1482 len = sprintf(buf, "%s\n", pval); 1483 break; 1484 case ISCSI_NET_PARAM_IPV4_TOS: 1485 len = sprintf(buf, "%d\n", ha->ip_config.ipv4_tos); 1486 break; 1487 case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: 1488 OP_STATE(ha->ip_config.ipv4_options, 1489 IPOPT_GRAT_ARP_EN, pval); 1490 1491 len = sprintf(buf, "%s\n", pval); 1492 break; 1493 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: 1494 OP_STATE(ha->ip_config.ipv4_options, IPOPT_ALT_CID_EN, 1495 pval); 1496 1497 len = sprintf(buf, "%s\n", pval); 1498 break; 1499 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: 1500 pval = (ha->ip_config.ipv4_alt_cid_len) ? 1501 (char *)ha->ip_config.ipv4_alt_cid : ""; 1502 1503 len = sprintf(buf, "%s\n", pval); 1504 break; 1505 case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: 1506 OP_STATE(ha->ip_config.ipv4_options, 1507 IPOPT_REQ_VID_EN, pval); 1508 1509 len = sprintf(buf, "%s\n", pval); 1510 break; 1511 case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: 1512 OP_STATE(ha->ip_config.ipv4_options, 1513 IPOPT_USE_VID_EN, pval); 1514 1515 len = sprintf(buf, "%s\n", pval); 1516 break; 1517 case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: 1518 pval = (ha->ip_config.ipv4_vid_len) ? 1519 (char *)ha->ip_config.ipv4_vid : ""; 1520 1521 len = sprintf(buf, "%s\n", pval); 1522 break; 1523 case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: 1524 OP_STATE(ha->ip_config.ipv4_options, 1525 IPOPT_LEARN_IQN_EN, pval); 1526 1527 len = sprintf(buf, "%s\n", pval); 1528 break; 1529 case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: 1530 OP_STATE(~ha->ip_config.ipv4_options, 1531 IPOPT_FRAGMENTATION_DISABLE, pval); 1532 1533 len = sprintf(buf, "%s\n", pval); 1534 break; 1535 case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: 1536 OP_STATE(ha->ip_config.ipv4_options, 1537 IPOPT_IN_FORWARD_EN, pval); 1538 1539 len = sprintf(buf, "%s\n", pval); 1540 break; 1541 case ISCSI_NET_PARAM_REDIRECT_EN: 1542 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1543 OP_STATE(ha->ip_config.ipv4_options, 1544 IPOPT_ARP_REDIRECT_EN, pval); 1545 } else { 1546 OP_STATE(ha->ip_config.ipv6_options, 1547 IPV6_OPT_REDIRECT_EN, pval); 1548 } 1549 len = sprintf(buf, "%s\n", pval); 1550 break; 1551 case ISCSI_NET_PARAM_IPV4_TTL: 1552 len = sprintf(buf, "%d\n", ha->ip_config.ipv4_ttl); 1553 break; 1554 case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: 1555 OP_STATE(ha->ip_config.ipv6_options, 1556 IPV6_OPT_GRAT_NEIGHBOR_ADV_EN, pval); 1557 1558 len = sprintf(buf, "%s\n", pval); 1559 break; 1560 case ISCSI_NET_PARAM_IPV6_MLD_EN: 1561 OP_STATE(ha->ip_config.ipv6_addl_options, 1562 IPV6_ADDOPT_MLD_EN, pval); 1563 1564 len = sprintf(buf, "%s\n", pval); 1565 break; 1566 case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: 1567 len = sprintf(buf, "%u\n", ha->ip_config.ipv6_flow_lbl); 1568 break; 1569 case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: 1570 len = sprintf(buf, "%d\n", 1571 ha->ip_config.ipv6_traffic_class); 1572 break; 1573 case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: 1574 len = sprintf(buf, "%d\n", 1575 ha->ip_config.ipv6_hop_limit); 1576 break; 1577 case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: 1578 len = sprintf(buf, "%d\n", 1579 ha->ip_config.ipv6_nd_reach_time); 1580 break; 1581 case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: 1582 len = sprintf(buf, "%d\n", 1583 ha->ip_config.ipv6_nd_rexmit_timer); 1584 break; 1585 case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: 1586 len = sprintf(buf, "%d\n", 1587 ha->ip_config.ipv6_nd_stale_timeout); 1588 break; 1589 case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: 1590 len = sprintf(buf, "%d\n", 1591 ha->ip_config.ipv6_dup_addr_detect_count); 1592 break; 1593 case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: 1594 len = sprintf(buf, "%d\n", 1595 ha->ip_config.ipv6_gw_advrt_mtu); 1596 break; 1597 default: 1598 len = -ENOSYS; 1599 } 1600 } else if (param_type == ISCSI_IFACE_PARAM) { 1601 switch (param) { 1602 case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: 1603 len = sprintf(buf, "%d\n", ha->ip_config.def_timeout); 1604 break; 1605 case ISCSI_IFACE_PARAM_HDRDGST_EN: 1606 OP_STATE(ha->ip_config.iscsi_options, 1607 ISCSIOPTS_HEADER_DIGEST_EN, pval); 1608 1609 len = sprintf(buf, "%s\n", pval); 1610 break; 1611 case ISCSI_IFACE_PARAM_DATADGST_EN: 1612 OP_STATE(ha->ip_config.iscsi_options, 1613 ISCSIOPTS_DATA_DIGEST_EN, pval); 1614 1615 len = sprintf(buf, "%s\n", pval); 1616 break; 1617 case ISCSI_IFACE_PARAM_IMM_DATA_EN: 1618 OP_STATE(ha->ip_config.iscsi_options, 1619 ISCSIOPTS_IMMEDIATE_DATA_EN, pval); 1620 1621 len = sprintf(buf, "%s\n", pval); 1622 break; 1623 case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: 1624 OP_STATE(ha->ip_config.iscsi_options, 1625 ISCSIOPTS_INITIAL_R2T_EN, pval); 1626 1627 len = sprintf(buf, "%s\n", pval); 1628 break; 1629 case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: 1630 OP_STATE(ha->ip_config.iscsi_options, 1631 ISCSIOPTS_DATA_SEQ_INORDER_EN, pval); 1632 1633 len = sprintf(buf, "%s\n", pval); 1634 break; 1635 case ISCSI_IFACE_PARAM_PDU_INORDER_EN: 1636 OP_STATE(ha->ip_config.iscsi_options, 1637 ISCSIOPTS_DATA_PDU_INORDER_EN, pval); 1638 1639 len = sprintf(buf, "%s\n", pval); 1640 break; 1641 case ISCSI_IFACE_PARAM_ERL: 1642 len = sprintf(buf, "%d\n", 1643 (ha->ip_config.iscsi_options & 1644 ISCSIOPTS_ERL)); 1645 break; 1646 case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: 1647 len = sprintf(buf, "%u\n", 1648 ha->ip_config.iscsi_max_pdu_size * 1649 BYTE_UNITS); 1650 break; 1651 case ISCSI_IFACE_PARAM_FIRST_BURST: 1652 len = sprintf(buf, "%u\n", 1653 ha->ip_config.iscsi_first_burst_len * 1654 BYTE_UNITS); 1655 break; 1656 case ISCSI_IFACE_PARAM_MAX_R2T: 1657 len = sprintf(buf, "%d\n", 1658 ha->ip_config.iscsi_max_outstnd_r2t); 1659 break; 1660 case ISCSI_IFACE_PARAM_MAX_BURST: 1661 len = sprintf(buf, "%u\n", 1662 ha->ip_config.iscsi_max_burst_len * 1663 BYTE_UNITS); 1664 break; 1665 case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: 1666 OP_STATE(ha->ip_config.iscsi_options, 1667 ISCSIOPTS_CHAP_AUTH_EN, pval); 1668 1669 len = sprintf(buf, "%s\n", pval); 1670 break; 1671 case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: 1672 OP_STATE(ha->ip_config.iscsi_options, 1673 ISCSIOPTS_BIDI_CHAP_EN, pval); 1674 1675 len = sprintf(buf, "%s\n", pval); 1676 break; 1677 case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: 1678 OP_STATE(ha->ip_config.iscsi_options, 1679 ISCSIOPTS_DISCOVERY_AUTH_EN, pval); 1680 1681 len = sprintf(buf, "%s\n", pval); 1682 break; 1683 case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: 1684 OP_STATE(ha->ip_config.iscsi_options, 1685 ISCSIOPTS_DISCOVERY_LOGOUT_EN, pval); 1686 1687 len = sprintf(buf, "%s\n", pval); 1688 break; 1689 case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: 1690 OP_STATE(ha->ip_config.iscsi_options, 1691 ISCSIOPTS_STRICT_LOGIN_COMP_EN, pval); 1692 1693 len = sprintf(buf, "%s\n", pval); 1694 break; 1695 case ISCSI_IFACE_PARAM_INITIATOR_NAME: 1696 len = sprintf(buf, "%s\n", ha->ip_config.iscsi_name); 1697 break; 1698 default: 1699 len = -ENOSYS; 1700 } 1701 } 1702 1703 return len; 1704 } 1705 1706 static struct iscsi_endpoint * 1707 qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, 1708 int non_blocking) 1709 { 1710 int ret; 1711 struct iscsi_endpoint *ep; 1712 struct qla_endpoint *qla_ep; 1713 struct scsi_qla_host *ha; 1714 struct sockaddr_in *addr; 1715 struct sockaddr_in6 *addr6; 1716 1717 if (!shost) { 1718 ret = -ENXIO; 1719 pr_err("%s: shost is NULL\n", __func__); 1720 return ERR_PTR(ret); 1721 } 1722 1723 ha = iscsi_host_priv(shost); 1724 ep = iscsi_create_endpoint(sizeof(struct qla_endpoint)); 1725 if (!ep) { 1726 ret = -ENOMEM; 1727 return ERR_PTR(ret); 1728 } 1729 1730 qla_ep = ep->dd_data; 1731 memset(qla_ep, 0, sizeof(struct qla_endpoint)); 1732 if (dst_addr->sa_family == AF_INET) { 1733 memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in)); 1734 addr = (struct sockaddr_in *)&qla_ep->dst_addr; 1735 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__, 1736 (char *)&addr->sin_addr)); 1737 } else if (dst_addr->sa_family == AF_INET6) { 1738 memcpy(&qla_ep->dst_addr, dst_addr, 1739 sizeof(struct sockaddr_in6)); 1740 addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr; 1741 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__, 1742 (char *)&addr6->sin6_addr)); 1743 } else { 1744 ql4_printk(KERN_WARNING, ha, "%s: Invalid endpoint\n", 1745 __func__); 1746 } 1747 1748 qla_ep->host = shost; 1749 1750 return ep; 1751 } 1752 1753 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) 1754 { 1755 struct qla_endpoint *qla_ep; 1756 struct scsi_qla_host *ha; 1757 int ret = 0; 1758 1759 qla_ep = ep->dd_data; 1760 ha = to_qla_host(qla_ep->host); 1761 DEBUG2(pr_info_ratelimited("%s: host: %ld\n", __func__, ha->host_no)); 1762 1763 if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags)) 1764 ret = 1; 1765 1766 return ret; 1767 } 1768 1769 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep) 1770 { 1771 struct qla_endpoint *qla_ep; 1772 struct scsi_qla_host *ha; 1773 1774 qla_ep = ep->dd_data; 1775 ha = to_qla_host(qla_ep->host); 1776 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, 1777 ha->host_no)); 1778 iscsi_destroy_endpoint(ep); 1779 } 1780 1781 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep, 1782 enum iscsi_param param, 1783 char *buf) 1784 { 1785 struct qla_endpoint *qla_ep = ep->dd_data; 1786 struct sockaddr *dst_addr; 1787 struct scsi_qla_host *ha; 1788 1789 if (!qla_ep) 1790 return -ENOTCONN; 1791 1792 ha = to_qla_host(qla_ep->host); 1793 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, 1794 ha->host_no)); 1795 1796 switch (param) { 1797 case ISCSI_PARAM_CONN_PORT: 1798 case ISCSI_PARAM_CONN_ADDRESS: 1799 dst_addr = (struct sockaddr *)&qla_ep->dst_addr; 1800 if (!dst_addr) 1801 return -ENOTCONN; 1802 1803 return iscsi_conn_get_addr_param((struct sockaddr_storage *) 1804 &qla_ep->dst_addr, param, buf); 1805 default: 1806 return -ENOSYS; 1807 } 1808 } 1809 1810 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn, 1811 struct iscsi_stats *stats) 1812 { 1813 struct iscsi_session *sess; 1814 struct iscsi_cls_session *cls_sess; 1815 struct ddb_entry *ddb_entry; 1816 struct scsi_qla_host *ha; 1817 struct ql_iscsi_stats *ql_iscsi_stats; 1818 int stats_size; 1819 int ret; 1820 dma_addr_t iscsi_stats_dma; 1821 1822 cls_sess = iscsi_conn_to_session(cls_conn); 1823 sess = cls_sess->dd_data; 1824 ddb_entry = sess->dd_data; 1825 ha = ddb_entry->ha; 1826 1827 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, 1828 ha->host_no)); 1829 stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats)); 1830 /* Allocate memory */ 1831 ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size, 1832 &iscsi_stats_dma, GFP_KERNEL); 1833 if (!ql_iscsi_stats) { 1834 ql4_printk(KERN_ERR, ha, 1835 "Unable to allocate memory for iscsi stats\n"); 1836 goto exit_get_stats; 1837 } 1838 1839 ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size, 1840 iscsi_stats_dma); 1841 if (ret != QLA_SUCCESS) { 1842 ql4_printk(KERN_ERR, ha, 1843 "Unable to retrieve iscsi stats\n"); 1844 goto free_stats; 1845 } 1846 1847 /* octets */ 1848 stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets); 1849 stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets); 1850 /* xmit pdus */ 1851 stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus); 1852 stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus); 1853 stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus); 1854 stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus); 1855 stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus); 1856 stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus); 1857 stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus); 1858 stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus); 1859 /* recv pdus */ 1860 stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus); 1861 stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus); 1862 stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus); 1863 stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus); 1864 stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus); 1865 stats->logoutrsp_pdus = 1866 le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus); 1867 stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus); 1868 stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus); 1869 stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus); 1870 1871 free_stats: 1872 dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats, 1873 iscsi_stats_dma); 1874 exit_get_stats: 1875 return; 1876 } 1877 1878 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc) 1879 { 1880 struct iscsi_cls_session *session; 1881 unsigned long flags; 1882 enum blk_eh_timer_return ret = BLK_EH_DONE; 1883 1884 session = starget_to_session(scsi_target(sc->device)); 1885 1886 spin_lock_irqsave(&session->lock, flags); 1887 if (session->state == ISCSI_SESSION_FAILED) 1888 ret = BLK_EH_RESET_TIMER; 1889 spin_unlock_irqrestore(&session->lock, flags); 1890 1891 return ret; 1892 } 1893 1894 static void qla4xxx_set_port_speed(struct Scsi_Host *shost) 1895 { 1896 struct scsi_qla_host *ha = to_qla_host(shost); 1897 struct iscsi_cls_host *ihost = shost->shost_data; 1898 uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN; 1899 1900 qla4xxx_get_firmware_state(ha); 1901 1902 switch (ha->addl_fw_state & 0x0F00) { 1903 case FW_ADDSTATE_LINK_SPEED_10MBPS: 1904 speed = ISCSI_PORT_SPEED_10MBPS; 1905 break; 1906 case FW_ADDSTATE_LINK_SPEED_100MBPS: 1907 speed = ISCSI_PORT_SPEED_100MBPS; 1908 break; 1909 case FW_ADDSTATE_LINK_SPEED_1GBPS: 1910 speed = ISCSI_PORT_SPEED_1GBPS; 1911 break; 1912 case FW_ADDSTATE_LINK_SPEED_10GBPS: 1913 speed = ISCSI_PORT_SPEED_10GBPS; 1914 break; 1915 } 1916 ihost->port_speed = speed; 1917 } 1918 1919 static void qla4xxx_set_port_state(struct Scsi_Host *shost) 1920 { 1921 struct scsi_qla_host *ha = to_qla_host(shost); 1922 struct iscsi_cls_host *ihost = shost->shost_data; 1923 uint32_t state = ISCSI_PORT_STATE_DOWN; 1924 1925 if (test_bit(AF_LINK_UP, &ha->flags)) 1926 state = ISCSI_PORT_STATE_UP; 1927 1928 ihost->port_state = state; 1929 } 1930 1931 static int qla4xxx_host_get_param(struct Scsi_Host *shost, 1932 enum iscsi_host_param param, char *buf) 1933 { 1934 struct scsi_qla_host *ha = to_qla_host(shost); 1935 int len; 1936 1937 switch (param) { 1938 case ISCSI_HOST_PARAM_HWADDRESS: 1939 len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN); 1940 break; 1941 case ISCSI_HOST_PARAM_IPADDRESS: 1942 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address); 1943 break; 1944 case ISCSI_HOST_PARAM_INITIATOR_NAME: 1945 len = sprintf(buf, "%s\n", ha->name_string); 1946 break; 1947 case ISCSI_HOST_PARAM_PORT_STATE: 1948 qla4xxx_set_port_state(shost); 1949 len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost)); 1950 break; 1951 case ISCSI_HOST_PARAM_PORT_SPEED: 1952 qla4xxx_set_port_speed(shost); 1953 len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost)); 1954 break; 1955 default: 1956 return -ENOSYS; 1957 } 1958 1959 return len; 1960 } 1961 1962 static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha) 1963 { 1964 if (ha->iface_ipv4) 1965 return; 1966 1967 /* IPv4 */ 1968 ha->iface_ipv4 = iscsi_create_iface(ha->host, 1969 &qla4xxx_iscsi_transport, 1970 ISCSI_IFACE_TYPE_IPV4, 0, 0); 1971 if (!ha->iface_ipv4) 1972 ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI " 1973 "iface0.\n"); 1974 } 1975 1976 static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha) 1977 { 1978 if (!ha->iface_ipv6_0) 1979 /* IPv6 iface-0 */ 1980 ha->iface_ipv6_0 = iscsi_create_iface(ha->host, 1981 &qla4xxx_iscsi_transport, 1982 ISCSI_IFACE_TYPE_IPV6, 0, 1983 0); 1984 if (!ha->iface_ipv6_0) 1985 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI " 1986 "iface0.\n"); 1987 1988 if (!ha->iface_ipv6_1) 1989 /* IPv6 iface-1 */ 1990 ha->iface_ipv6_1 = iscsi_create_iface(ha->host, 1991 &qla4xxx_iscsi_transport, 1992 ISCSI_IFACE_TYPE_IPV6, 1, 1993 0); 1994 if (!ha->iface_ipv6_1) 1995 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI " 1996 "iface1.\n"); 1997 } 1998 1999 static void qla4xxx_create_ifaces(struct scsi_qla_host *ha) 2000 { 2001 if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE) 2002 qla4xxx_create_ipv4_iface(ha); 2003 2004 if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE) 2005 qla4xxx_create_ipv6_iface(ha); 2006 } 2007 2008 static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha) 2009 { 2010 if (ha->iface_ipv4) { 2011 iscsi_destroy_iface(ha->iface_ipv4); 2012 ha->iface_ipv4 = NULL; 2013 } 2014 } 2015 2016 static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha) 2017 { 2018 if (ha->iface_ipv6_0) { 2019 iscsi_destroy_iface(ha->iface_ipv6_0); 2020 ha->iface_ipv6_0 = NULL; 2021 } 2022 if (ha->iface_ipv6_1) { 2023 iscsi_destroy_iface(ha->iface_ipv6_1); 2024 ha->iface_ipv6_1 = NULL; 2025 } 2026 } 2027 2028 static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha) 2029 { 2030 qla4xxx_destroy_ipv4_iface(ha); 2031 qla4xxx_destroy_ipv6_iface(ha); 2032 } 2033 2034 static void qla4xxx_set_ipv6(struct scsi_qla_host *ha, 2035 struct iscsi_iface_param_info *iface_param, 2036 struct addr_ctrl_blk *init_fw_cb) 2037 { 2038 /* 2039 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg. 2040 * iface_num 1 is valid only for IPv6 Addr. 2041 */ 2042 switch (iface_param->param) { 2043 case ISCSI_NET_PARAM_IPV6_ADDR: 2044 if (iface_param->iface_num & 0x1) 2045 /* IPv6 Addr 1 */ 2046 memcpy(init_fw_cb->ipv6_addr1, iface_param->value, 2047 sizeof(init_fw_cb->ipv6_addr1)); 2048 else 2049 /* IPv6 Addr 0 */ 2050 memcpy(init_fw_cb->ipv6_addr0, iface_param->value, 2051 sizeof(init_fw_cb->ipv6_addr0)); 2052 break; 2053 case ISCSI_NET_PARAM_IPV6_LINKLOCAL: 2054 if (iface_param->iface_num & 0x1) 2055 break; 2056 memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8], 2057 sizeof(init_fw_cb->ipv6_if_id)); 2058 break; 2059 case ISCSI_NET_PARAM_IPV6_ROUTER: 2060 if (iface_param->iface_num & 0x1) 2061 break; 2062 memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value, 2063 sizeof(init_fw_cb->ipv6_dflt_rtr_addr)); 2064 break; 2065 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: 2066 /* Autocfg applies to even interface */ 2067 if (iface_param->iface_num & 0x1) 2068 break; 2069 2070 if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE) 2071 init_fw_cb->ipv6_addtl_opts &= 2072 cpu_to_le16( 2073 ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE); 2074 else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE) 2075 init_fw_cb->ipv6_addtl_opts |= 2076 cpu_to_le16( 2077 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE); 2078 else 2079 ql4_printk(KERN_ERR, ha, 2080 "Invalid autocfg setting for IPv6 addr\n"); 2081 break; 2082 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: 2083 /* Autocfg applies to even interface */ 2084 if (iface_param->iface_num & 0x1) 2085 break; 2086 2087 if (iface_param->value[0] == 2088 ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE) 2089 init_fw_cb->ipv6_addtl_opts |= cpu_to_le16( 2090 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR); 2091 else if (iface_param->value[0] == 2092 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE) 2093 init_fw_cb->ipv6_addtl_opts &= cpu_to_le16( 2094 ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR); 2095 else 2096 ql4_printk(KERN_ERR, ha, 2097 "Invalid autocfg setting for IPv6 linklocal addr\n"); 2098 break; 2099 case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG: 2100 /* Autocfg applies to even interface */ 2101 if (iface_param->iface_num & 0x1) 2102 break; 2103 2104 if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE) 2105 memset(init_fw_cb->ipv6_dflt_rtr_addr, 0, 2106 sizeof(init_fw_cb->ipv6_dflt_rtr_addr)); 2107 break; 2108 case ISCSI_NET_PARAM_IFACE_ENABLE: 2109 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) { 2110 init_fw_cb->ipv6_opts |= 2111 cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE); 2112 qla4xxx_create_ipv6_iface(ha); 2113 } else { 2114 init_fw_cb->ipv6_opts &= 2115 cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE & 2116 0xFFFF); 2117 qla4xxx_destroy_ipv6_iface(ha); 2118 } 2119 break; 2120 case ISCSI_NET_PARAM_VLAN_TAG: 2121 if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag)) 2122 break; 2123 init_fw_cb->ipv6_vlan_tag = 2124 cpu_to_be16(*(uint16_t *)iface_param->value); 2125 break; 2126 case ISCSI_NET_PARAM_VLAN_ENABLED: 2127 if (iface_param->value[0] == ISCSI_VLAN_ENABLE) 2128 init_fw_cb->ipv6_opts |= 2129 cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE); 2130 else 2131 init_fw_cb->ipv6_opts &= 2132 cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE); 2133 break; 2134 case ISCSI_NET_PARAM_MTU: 2135 init_fw_cb->eth_mtu_size = 2136 cpu_to_le16(*(uint16_t *)iface_param->value); 2137 break; 2138 case ISCSI_NET_PARAM_PORT: 2139 /* Autocfg applies to even interface */ 2140 if (iface_param->iface_num & 0x1) 2141 break; 2142 2143 init_fw_cb->ipv6_port = 2144 cpu_to_le16(*(uint16_t *)iface_param->value); 2145 break; 2146 case ISCSI_NET_PARAM_DELAYED_ACK_EN: 2147 if (iface_param->iface_num & 0x1) 2148 break; 2149 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2150 init_fw_cb->ipv6_tcp_opts |= 2151 cpu_to_le16(IPV6_TCPOPT_DELAYED_ACK_DISABLE); 2152 else 2153 init_fw_cb->ipv6_tcp_opts &= 2154 cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE & 2155 0xFFFF); 2156 break; 2157 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: 2158 if (iface_param->iface_num & 0x1) 2159 break; 2160 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2161 init_fw_cb->ipv6_tcp_opts |= 2162 cpu_to_le16(IPV6_TCPOPT_NAGLE_ALGO_DISABLE); 2163 else 2164 init_fw_cb->ipv6_tcp_opts &= 2165 cpu_to_le16(~IPV6_TCPOPT_NAGLE_ALGO_DISABLE); 2166 break; 2167 case ISCSI_NET_PARAM_TCP_WSF_DISABLE: 2168 if (iface_param->iface_num & 0x1) 2169 break; 2170 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2171 init_fw_cb->ipv6_tcp_opts |= 2172 cpu_to_le16(IPV6_TCPOPT_WINDOW_SCALE_DISABLE); 2173 else 2174 init_fw_cb->ipv6_tcp_opts &= 2175 cpu_to_le16(~IPV6_TCPOPT_WINDOW_SCALE_DISABLE); 2176 break; 2177 case ISCSI_NET_PARAM_TCP_WSF: 2178 if (iface_param->iface_num & 0x1) 2179 break; 2180 init_fw_cb->ipv6_tcp_wsf = iface_param->value[0]; 2181 break; 2182 case ISCSI_NET_PARAM_TCP_TIMER_SCALE: 2183 if (iface_param->iface_num & 0x1) 2184 break; 2185 init_fw_cb->ipv6_tcp_opts &= 2186 cpu_to_le16(~IPV6_TCPOPT_TIMER_SCALE); 2187 init_fw_cb->ipv6_tcp_opts |= 2188 cpu_to_le16((iface_param->value[0] << 1) & 2189 IPV6_TCPOPT_TIMER_SCALE); 2190 break; 2191 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: 2192 if (iface_param->iface_num & 0x1) 2193 break; 2194 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2195 init_fw_cb->ipv6_tcp_opts |= 2196 cpu_to_le16(IPV6_TCPOPT_TIMESTAMP_EN); 2197 else 2198 init_fw_cb->ipv6_tcp_opts &= 2199 cpu_to_le16(~IPV6_TCPOPT_TIMESTAMP_EN); 2200 break; 2201 case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: 2202 if (iface_param->iface_num & 0x1) 2203 break; 2204 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2205 init_fw_cb->ipv6_opts |= 2206 cpu_to_le16(IPV6_OPT_GRAT_NEIGHBOR_ADV_EN); 2207 else 2208 init_fw_cb->ipv6_opts &= 2209 cpu_to_le16(~IPV6_OPT_GRAT_NEIGHBOR_ADV_EN); 2210 break; 2211 case ISCSI_NET_PARAM_REDIRECT_EN: 2212 if (iface_param->iface_num & 0x1) 2213 break; 2214 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2215 init_fw_cb->ipv6_opts |= 2216 cpu_to_le16(IPV6_OPT_REDIRECT_EN); 2217 else 2218 init_fw_cb->ipv6_opts &= 2219 cpu_to_le16(~IPV6_OPT_REDIRECT_EN); 2220 break; 2221 case ISCSI_NET_PARAM_IPV6_MLD_EN: 2222 if (iface_param->iface_num & 0x1) 2223 break; 2224 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2225 init_fw_cb->ipv6_addtl_opts |= 2226 cpu_to_le16(IPV6_ADDOPT_MLD_EN); 2227 else 2228 init_fw_cb->ipv6_addtl_opts &= 2229 cpu_to_le16(~IPV6_ADDOPT_MLD_EN); 2230 break; 2231 case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: 2232 if (iface_param->iface_num & 0x1) 2233 break; 2234 init_fw_cb->ipv6_flow_lbl = 2235 cpu_to_le16(*(uint16_t *)iface_param->value); 2236 break; 2237 case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: 2238 if (iface_param->iface_num & 0x1) 2239 break; 2240 init_fw_cb->ipv6_traffic_class = iface_param->value[0]; 2241 break; 2242 case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: 2243 if (iface_param->iface_num & 0x1) 2244 break; 2245 init_fw_cb->ipv6_hop_limit = iface_param->value[0]; 2246 break; 2247 case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: 2248 if (iface_param->iface_num & 0x1) 2249 break; 2250 init_fw_cb->ipv6_nd_reach_time = 2251 cpu_to_le32(*(uint32_t *)iface_param->value); 2252 break; 2253 case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: 2254 if (iface_param->iface_num & 0x1) 2255 break; 2256 init_fw_cb->ipv6_nd_rexmit_timer = 2257 cpu_to_le32(*(uint32_t *)iface_param->value); 2258 break; 2259 case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: 2260 if (iface_param->iface_num & 0x1) 2261 break; 2262 init_fw_cb->ipv6_nd_stale_timeout = 2263 cpu_to_le32(*(uint32_t *)iface_param->value); 2264 break; 2265 case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: 2266 if (iface_param->iface_num & 0x1) 2267 break; 2268 init_fw_cb->ipv6_dup_addr_detect_count = iface_param->value[0]; 2269 break; 2270 case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: 2271 if (iface_param->iface_num & 0x1) 2272 break; 2273 init_fw_cb->ipv6_gw_advrt_mtu = 2274 cpu_to_le32(*(uint32_t *)iface_param->value); 2275 break; 2276 default: 2277 ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n", 2278 iface_param->param); 2279 break; 2280 } 2281 } 2282 2283 static void qla4xxx_set_ipv4(struct scsi_qla_host *ha, 2284 struct iscsi_iface_param_info *iface_param, 2285 struct addr_ctrl_blk *init_fw_cb) 2286 { 2287 switch (iface_param->param) { 2288 case ISCSI_NET_PARAM_IPV4_ADDR: 2289 memcpy(init_fw_cb->ipv4_addr, iface_param->value, 2290 sizeof(init_fw_cb->ipv4_addr)); 2291 break; 2292 case ISCSI_NET_PARAM_IPV4_SUBNET: 2293 memcpy(init_fw_cb->ipv4_subnet, iface_param->value, 2294 sizeof(init_fw_cb->ipv4_subnet)); 2295 break; 2296 case ISCSI_NET_PARAM_IPV4_GW: 2297 memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value, 2298 sizeof(init_fw_cb->ipv4_gw_addr)); 2299 break; 2300 case ISCSI_NET_PARAM_IPV4_BOOTPROTO: 2301 if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP) 2302 init_fw_cb->ipv4_tcp_opts |= 2303 cpu_to_le16(TCPOPT_DHCP_ENABLE); 2304 else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC) 2305 init_fw_cb->ipv4_tcp_opts &= 2306 cpu_to_le16(~TCPOPT_DHCP_ENABLE); 2307 else 2308 ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n"); 2309 break; 2310 case ISCSI_NET_PARAM_IFACE_ENABLE: 2311 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) { 2312 init_fw_cb->ipv4_ip_opts |= 2313 cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE); 2314 qla4xxx_create_ipv4_iface(ha); 2315 } else { 2316 init_fw_cb->ipv4_ip_opts &= 2317 cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE & 2318 0xFFFF); 2319 qla4xxx_destroy_ipv4_iface(ha); 2320 } 2321 break; 2322 case ISCSI_NET_PARAM_VLAN_TAG: 2323 if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag)) 2324 break; 2325 init_fw_cb->ipv4_vlan_tag = 2326 cpu_to_be16(*(uint16_t *)iface_param->value); 2327 break; 2328 case ISCSI_NET_PARAM_VLAN_ENABLED: 2329 if (iface_param->value[0] == ISCSI_VLAN_ENABLE) 2330 init_fw_cb->ipv4_ip_opts |= 2331 cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE); 2332 else 2333 init_fw_cb->ipv4_ip_opts &= 2334 cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE); 2335 break; 2336 case ISCSI_NET_PARAM_MTU: 2337 init_fw_cb->eth_mtu_size = 2338 cpu_to_le16(*(uint16_t *)iface_param->value); 2339 break; 2340 case ISCSI_NET_PARAM_PORT: 2341 init_fw_cb->ipv4_port = 2342 cpu_to_le16(*(uint16_t *)iface_param->value); 2343 break; 2344 case ISCSI_NET_PARAM_DELAYED_ACK_EN: 2345 if (iface_param->iface_num & 0x1) 2346 break; 2347 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2348 init_fw_cb->ipv4_tcp_opts |= 2349 cpu_to_le16(TCPOPT_DELAYED_ACK_DISABLE); 2350 else 2351 init_fw_cb->ipv4_tcp_opts &= 2352 cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE & 2353 0xFFFF); 2354 break; 2355 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: 2356 if (iface_param->iface_num & 0x1) 2357 break; 2358 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2359 init_fw_cb->ipv4_tcp_opts |= 2360 cpu_to_le16(TCPOPT_NAGLE_ALGO_DISABLE); 2361 else 2362 init_fw_cb->ipv4_tcp_opts &= 2363 cpu_to_le16(~TCPOPT_NAGLE_ALGO_DISABLE); 2364 break; 2365 case ISCSI_NET_PARAM_TCP_WSF_DISABLE: 2366 if (iface_param->iface_num & 0x1) 2367 break; 2368 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2369 init_fw_cb->ipv4_tcp_opts |= 2370 cpu_to_le16(TCPOPT_WINDOW_SCALE_DISABLE); 2371 else 2372 init_fw_cb->ipv4_tcp_opts &= 2373 cpu_to_le16(~TCPOPT_WINDOW_SCALE_DISABLE); 2374 break; 2375 case ISCSI_NET_PARAM_TCP_WSF: 2376 if (iface_param->iface_num & 0x1) 2377 break; 2378 init_fw_cb->ipv4_tcp_wsf = iface_param->value[0]; 2379 break; 2380 case ISCSI_NET_PARAM_TCP_TIMER_SCALE: 2381 if (iface_param->iface_num & 0x1) 2382 break; 2383 init_fw_cb->ipv4_tcp_opts &= cpu_to_le16(~TCPOPT_TIMER_SCALE); 2384 init_fw_cb->ipv4_tcp_opts |= 2385 cpu_to_le16((iface_param->value[0] << 1) & 2386 TCPOPT_TIMER_SCALE); 2387 break; 2388 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: 2389 if (iface_param->iface_num & 0x1) 2390 break; 2391 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2392 init_fw_cb->ipv4_tcp_opts |= 2393 cpu_to_le16(TCPOPT_TIMESTAMP_ENABLE); 2394 else 2395 init_fw_cb->ipv4_tcp_opts &= 2396 cpu_to_le16(~TCPOPT_TIMESTAMP_ENABLE); 2397 break; 2398 case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: 2399 if (iface_param->iface_num & 0x1) 2400 break; 2401 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2402 init_fw_cb->ipv4_tcp_opts |= 2403 cpu_to_le16(TCPOPT_DNS_SERVER_IP_EN); 2404 else 2405 init_fw_cb->ipv4_tcp_opts &= 2406 cpu_to_le16(~TCPOPT_DNS_SERVER_IP_EN); 2407 break; 2408 case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: 2409 if (iface_param->iface_num & 0x1) 2410 break; 2411 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2412 init_fw_cb->ipv4_tcp_opts |= 2413 cpu_to_le16(TCPOPT_SLP_DA_INFO_EN); 2414 else 2415 init_fw_cb->ipv4_tcp_opts &= 2416 cpu_to_le16(~TCPOPT_SLP_DA_INFO_EN); 2417 break; 2418 case ISCSI_NET_PARAM_IPV4_TOS_EN: 2419 if (iface_param->iface_num & 0x1) 2420 break; 2421 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2422 init_fw_cb->ipv4_ip_opts |= 2423 cpu_to_le16(IPOPT_IPV4_TOS_EN); 2424 else 2425 init_fw_cb->ipv4_ip_opts &= 2426 cpu_to_le16(~IPOPT_IPV4_TOS_EN); 2427 break; 2428 case ISCSI_NET_PARAM_IPV4_TOS: 2429 if (iface_param->iface_num & 0x1) 2430 break; 2431 init_fw_cb->ipv4_tos = iface_param->value[0]; 2432 break; 2433 case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: 2434 if (iface_param->iface_num & 0x1) 2435 break; 2436 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2437 init_fw_cb->ipv4_ip_opts |= 2438 cpu_to_le16(IPOPT_GRAT_ARP_EN); 2439 else 2440 init_fw_cb->ipv4_ip_opts &= 2441 cpu_to_le16(~IPOPT_GRAT_ARP_EN); 2442 break; 2443 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: 2444 if (iface_param->iface_num & 0x1) 2445 break; 2446 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2447 init_fw_cb->ipv4_ip_opts |= 2448 cpu_to_le16(IPOPT_ALT_CID_EN); 2449 else 2450 init_fw_cb->ipv4_ip_opts &= 2451 cpu_to_le16(~IPOPT_ALT_CID_EN); 2452 break; 2453 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: 2454 if (iface_param->iface_num & 0x1) 2455 break; 2456 memcpy(init_fw_cb->ipv4_dhcp_alt_cid, iface_param->value, 2457 (sizeof(init_fw_cb->ipv4_dhcp_alt_cid) - 1)); 2458 init_fw_cb->ipv4_dhcp_alt_cid_len = 2459 strlen(init_fw_cb->ipv4_dhcp_alt_cid); 2460 break; 2461 case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: 2462 if (iface_param->iface_num & 0x1) 2463 break; 2464 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2465 init_fw_cb->ipv4_ip_opts |= 2466 cpu_to_le16(IPOPT_REQ_VID_EN); 2467 else 2468 init_fw_cb->ipv4_ip_opts &= 2469 cpu_to_le16(~IPOPT_REQ_VID_EN); 2470 break; 2471 case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: 2472 if (iface_param->iface_num & 0x1) 2473 break; 2474 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2475 init_fw_cb->ipv4_ip_opts |= 2476 cpu_to_le16(IPOPT_USE_VID_EN); 2477 else 2478 init_fw_cb->ipv4_ip_opts &= 2479 cpu_to_le16(~IPOPT_USE_VID_EN); 2480 break; 2481 case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: 2482 if (iface_param->iface_num & 0x1) 2483 break; 2484 memcpy(init_fw_cb->ipv4_dhcp_vid, iface_param->value, 2485 (sizeof(init_fw_cb->ipv4_dhcp_vid) - 1)); 2486 init_fw_cb->ipv4_dhcp_vid_len = 2487 strlen(init_fw_cb->ipv4_dhcp_vid); 2488 break; 2489 case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: 2490 if (iface_param->iface_num & 0x1) 2491 break; 2492 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2493 init_fw_cb->ipv4_ip_opts |= 2494 cpu_to_le16(IPOPT_LEARN_IQN_EN); 2495 else 2496 init_fw_cb->ipv4_ip_opts &= 2497 cpu_to_le16(~IPOPT_LEARN_IQN_EN); 2498 break; 2499 case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: 2500 if (iface_param->iface_num & 0x1) 2501 break; 2502 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2503 init_fw_cb->ipv4_ip_opts |= 2504 cpu_to_le16(IPOPT_FRAGMENTATION_DISABLE); 2505 else 2506 init_fw_cb->ipv4_ip_opts &= 2507 cpu_to_le16(~IPOPT_FRAGMENTATION_DISABLE); 2508 break; 2509 case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: 2510 if (iface_param->iface_num & 0x1) 2511 break; 2512 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2513 init_fw_cb->ipv4_ip_opts |= 2514 cpu_to_le16(IPOPT_IN_FORWARD_EN); 2515 else 2516 init_fw_cb->ipv4_ip_opts &= 2517 cpu_to_le16(~IPOPT_IN_FORWARD_EN); 2518 break; 2519 case ISCSI_NET_PARAM_REDIRECT_EN: 2520 if (iface_param->iface_num & 0x1) 2521 break; 2522 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2523 init_fw_cb->ipv4_ip_opts |= 2524 cpu_to_le16(IPOPT_ARP_REDIRECT_EN); 2525 else 2526 init_fw_cb->ipv4_ip_opts &= 2527 cpu_to_le16(~IPOPT_ARP_REDIRECT_EN); 2528 break; 2529 case ISCSI_NET_PARAM_IPV4_TTL: 2530 if (iface_param->iface_num & 0x1) 2531 break; 2532 init_fw_cb->ipv4_ttl = iface_param->value[0]; 2533 break; 2534 default: 2535 ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n", 2536 iface_param->param); 2537 break; 2538 } 2539 } 2540 2541 static void qla4xxx_set_iscsi_param(struct scsi_qla_host *ha, 2542 struct iscsi_iface_param_info *iface_param, 2543 struct addr_ctrl_blk *init_fw_cb) 2544 { 2545 switch (iface_param->param) { 2546 case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: 2547 if (iface_param->iface_num & 0x1) 2548 break; 2549 init_fw_cb->def_timeout = 2550 cpu_to_le16(*(uint16_t *)iface_param->value); 2551 break; 2552 case ISCSI_IFACE_PARAM_HDRDGST_EN: 2553 if (iface_param->iface_num & 0x1) 2554 break; 2555 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2556 init_fw_cb->iscsi_opts |= 2557 cpu_to_le16(ISCSIOPTS_HEADER_DIGEST_EN); 2558 else 2559 init_fw_cb->iscsi_opts &= 2560 cpu_to_le16(~ISCSIOPTS_HEADER_DIGEST_EN); 2561 break; 2562 case ISCSI_IFACE_PARAM_DATADGST_EN: 2563 if (iface_param->iface_num & 0x1) 2564 break; 2565 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2566 init_fw_cb->iscsi_opts |= 2567 cpu_to_le16(ISCSIOPTS_DATA_DIGEST_EN); 2568 else 2569 init_fw_cb->iscsi_opts &= 2570 cpu_to_le16(~ISCSIOPTS_DATA_DIGEST_EN); 2571 break; 2572 case ISCSI_IFACE_PARAM_IMM_DATA_EN: 2573 if (iface_param->iface_num & 0x1) 2574 break; 2575 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2576 init_fw_cb->iscsi_opts |= 2577 cpu_to_le16(ISCSIOPTS_IMMEDIATE_DATA_EN); 2578 else 2579 init_fw_cb->iscsi_opts &= 2580 cpu_to_le16(~ISCSIOPTS_IMMEDIATE_DATA_EN); 2581 break; 2582 case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: 2583 if (iface_param->iface_num & 0x1) 2584 break; 2585 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2586 init_fw_cb->iscsi_opts |= 2587 cpu_to_le16(ISCSIOPTS_INITIAL_R2T_EN); 2588 else 2589 init_fw_cb->iscsi_opts &= 2590 cpu_to_le16(~ISCSIOPTS_INITIAL_R2T_EN); 2591 break; 2592 case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: 2593 if (iface_param->iface_num & 0x1) 2594 break; 2595 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2596 init_fw_cb->iscsi_opts |= 2597 cpu_to_le16(ISCSIOPTS_DATA_SEQ_INORDER_EN); 2598 else 2599 init_fw_cb->iscsi_opts &= 2600 cpu_to_le16(~ISCSIOPTS_DATA_SEQ_INORDER_EN); 2601 break; 2602 case ISCSI_IFACE_PARAM_PDU_INORDER_EN: 2603 if (iface_param->iface_num & 0x1) 2604 break; 2605 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2606 init_fw_cb->iscsi_opts |= 2607 cpu_to_le16(ISCSIOPTS_DATA_PDU_INORDER_EN); 2608 else 2609 init_fw_cb->iscsi_opts &= 2610 cpu_to_le16(~ISCSIOPTS_DATA_PDU_INORDER_EN); 2611 break; 2612 case ISCSI_IFACE_PARAM_ERL: 2613 if (iface_param->iface_num & 0x1) 2614 break; 2615 init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_ERL); 2616 init_fw_cb->iscsi_opts |= cpu_to_le16(iface_param->value[0] & 2617 ISCSIOPTS_ERL); 2618 break; 2619 case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: 2620 if (iface_param->iface_num & 0x1) 2621 break; 2622 init_fw_cb->iscsi_max_pdu_size = 2623 cpu_to_le32(*(uint32_t *)iface_param->value) / 2624 BYTE_UNITS; 2625 break; 2626 case ISCSI_IFACE_PARAM_FIRST_BURST: 2627 if (iface_param->iface_num & 0x1) 2628 break; 2629 init_fw_cb->iscsi_fburst_len = 2630 cpu_to_le32(*(uint32_t *)iface_param->value) / 2631 BYTE_UNITS; 2632 break; 2633 case ISCSI_IFACE_PARAM_MAX_R2T: 2634 if (iface_param->iface_num & 0x1) 2635 break; 2636 init_fw_cb->iscsi_max_outstnd_r2t = 2637 cpu_to_le16(*(uint16_t *)iface_param->value); 2638 break; 2639 case ISCSI_IFACE_PARAM_MAX_BURST: 2640 if (iface_param->iface_num & 0x1) 2641 break; 2642 init_fw_cb->iscsi_max_burst_len = 2643 cpu_to_le32(*(uint32_t *)iface_param->value) / 2644 BYTE_UNITS; 2645 break; 2646 case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: 2647 if (iface_param->iface_num & 0x1) 2648 break; 2649 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2650 init_fw_cb->iscsi_opts |= 2651 cpu_to_le16(ISCSIOPTS_CHAP_AUTH_EN); 2652 else 2653 init_fw_cb->iscsi_opts &= 2654 cpu_to_le16(~ISCSIOPTS_CHAP_AUTH_EN); 2655 break; 2656 case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: 2657 if (iface_param->iface_num & 0x1) 2658 break; 2659 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2660 init_fw_cb->iscsi_opts |= 2661 cpu_to_le16(ISCSIOPTS_BIDI_CHAP_EN); 2662 else 2663 init_fw_cb->iscsi_opts &= 2664 cpu_to_le16(~ISCSIOPTS_BIDI_CHAP_EN); 2665 break; 2666 case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: 2667 if (iface_param->iface_num & 0x1) 2668 break; 2669 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2670 init_fw_cb->iscsi_opts |= 2671 cpu_to_le16(ISCSIOPTS_DISCOVERY_AUTH_EN); 2672 else 2673 init_fw_cb->iscsi_opts &= 2674 cpu_to_le16(~ISCSIOPTS_DISCOVERY_AUTH_EN); 2675 break; 2676 case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: 2677 if (iface_param->iface_num & 0x1) 2678 break; 2679 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2680 init_fw_cb->iscsi_opts |= 2681 cpu_to_le16(ISCSIOPTS_DISCOVERY_LOGOUT_EN); 2682 else 2683 init_fw_cb->iscsi_opts &= 2684 cpu_to_le16(~ISCSIOPTS_DISCOVERY_LOGOUT_EN); 2685 break; 2686 case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: 2687 if (iface_param->iface_num & 0x1) 2688 break; 2689 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2690 init_fw_cb->iscsi_opts |= 2691 cpu_to_le16(ISCSIOPTS_STRICT_LOGIN_COMP_EN); 2692 else 2693 init_fw_cb->iscsi_opts &= 2694 cpu_to_le16(~ISCSIOPTS_STRICT_LOGIN_COMP_EN); 2695 break; 2696 default: 2697 ql4_printk(KERN_ERR, ha, "Unknown iscsi param = %d\n", 2698 iface_param->param); 2699 break; 2700 } 2701 } 2702 2703 static void 2704 qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb) 2705 { 2706 struct addr_ctrl_blk_def *acb; 2707 acb = (struct addr_ctrl_blk_def *)init_fw_cb; 2708 memset(acb->reserved1, 0, sizeof(acb->reserved1)); 2709 memset(acb->reserved2, 0, sizeof(acb->reserved2)); 2710 memset(acb->reserved3, 0, sizeof(acb->reserved3)); 2711 memset(acb->reserved4, 0, sizeof(acb->reserved4)); 2712 memset(acb->reserved5, 0, sizeof(acb->reserved5)); 2713 memset(acb->reserved6, 0, sizeof(acb->reserved6)); 2714 memset(acb->reserved7, 0, sizeof(acb->reserved7)); 2715 memset(acb->reserved8, 0, sizeof(acb->reserved8)); 2716 memset(acb->reserved9, 0, sizeof(acb->reserved9)); 2717 memset(acb->reserved10, 0, sizeof(acb->reserved10)); 2718 memset(acb->reserved11, 0, sizeof(acb->reserved11)); 2719 memset(acb->reserved12, 0, sizeof(acb->reserved12)); 2720 memset(acb->reserved13, 0, sizeof(acb->reserved13)); 2721 memset(acb->reserved14, 0, sizeof(acb->reserved14)); 2722 memset(acb->reserved15, 0, sizeof(acb->reserved15)); 2723 } 2724 2725 static int 2726 qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len) 2727 { 2728 struct scsi_qla_host *ha = to_qla_host(shost); 2729 int rval = 0; 2730 struct iscsi_iface_param_info *iface_param = NULL; 2731 struct addr_ctrl_blk *init_fw_cb = NULL; 2732 dma_addr_t init_fw_cb_dma; 2733 uint32_t mbox_cmd[MBOX_REG_COUNT]; 2734 uint32_t mbox_sts[MBOX_REG_COUNT]; 2735 uint32_t rem = len; 2736 struct nlattr *attr; 2737 2738 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, 2739 sizeof(struct addr_ctrl_blk), 2740 &init_fw_cb_dma, GFP_KERNEL); 2741 if (!init_fw_cb) { 2742 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n", 2743 __func__); 2744 return -ENOMEM; 2745 } 2746 2747 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 2748 memset(&mbox_sts, 0, sizeof(mbox_sts)); 2749 2750 if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) { 2751 ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__); 2752 rval = -EIO; 2753 goto exit_init_fw_cb; 2754 } 2755 2756 nla_for_each_attr(attr, data, len, rem) { 2757 iface_param = nla_data(attr); 2758 2759 if (iface_param->param_type == ISCSI_NET_PARAM) { 2760 switch (iface_param->iface_type) { 2761 case ISCSI_IFACE_TYPE_IPV4: 2762 switch (iface_param->iface_num) { 2763 case 0: 2764 qla4xxx_set_ipv4(ha, iface_param, 2765 init_fw_cb); 2766 break; 2767 default: 2768 /* Cannot have more than one IPv4 interface */ 2769 ql4_printk(KERN_ERR, ha, 2770 "Invalid IPv4 iface number = %d\n", 2771 iface_param->iface_num); 2772 break; 2773 } 2774 break; 2775 case ISCSI_IFACE_TYPE_IPV6: 2776 switch (iface_param->iface_num) { 2777 case 0: 2778 case 1: 2779 qla4xxx_set_ipv6(ha, iface_param, 2780 init_fw_cb); 2781 break; 2782 default: 2783 /* Cannot have more than two IPv6 interface */ 2784 ql4_printk(KERN_ERR, ha, 2785 "Invalid IPv6 iface number = %d\n", 2786 iface_param->iface_num); 2787 break; 2788 } 2789 break; 2790 default: 2791 ql4_printk(KERN_ERR, ha, 2792 "Invalid iface type\n"); 2793 break; 2794 } 2795 } else if (iface_param->param_type == ISCSI_IFACE_PARAM) { 2796 qla4xxx_set_iscsi_param(ha, iface_param, 2797 init_fw_cb); 2798 } else { 2799 continue; 2800 } 2801 } 2802 2803 init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A); 2804 2805 rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB, 2806 sizeof(struct addr_ctrl_blk), 2807 FLASH_OPT_RMW_COMMIT); 2808 if (rval != QLA_SUCCESS) { 2809 ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n", 2810 __func__); 2811 rval = -EIO; 2812 goto exit_init_fw_cb; 2813 } 2814 2815 rval = qla4xxx_disable_acb(ha); 2816 if (rval != QLA_SUCCESS) { 2817 ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n", 2818 __func__); 2819 rval = -EIO; 2820 goto exit_init_fw_cb; 2821 } 2822 2823 wait_for_completion_timeout(&ha->disable_acb_comp, 2824 DISABLE_ACB_TOV * HZ); 2825 2826 qla4xxx_initcb_to_acb(init_fw_cb); 2827 2828 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma); 2829 if (rval != QLA_SUCCESS) { 2830 ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n", 2831 __func__); 2832 rval = -EIO; 2833 goto exit_init_fw_cb; 2834 } 2835 2836 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk)); 2837 qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb, 2838 init_fw_cb_dma); 2839 2840 exit_init_fw_cb: 2841 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), 2842 init_fw_cb, init_fw_cb_dma); 2843 2844 return rval; 2845 } 2846 2847 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess, 2848 enum iscsi_param param, char *buf) 2849 { 2850 struct iscsi_session *sess = cls_sess->dd_data; 2851 struct ddb_entry *ddb_entry = sess->dd_data; 2852 struct scsi_qla_host *ha = ddb_entry->ha; 2853 struct iscsi_cls_conn *cls_conn = ddb_entry->conn; 2854 struct ql4_chap_table chap_tbl; 2855 int rval, len; 2856 uint16_t idx; 2857 2858 memset(&chap_tbl, 0, sizeof(chap_tbl)); 2859 switch (param) { 2860 case ISCSI_PARAM_CHAP_IN_IDX: 2861 rval = qla4xxx_get_chap_index(ha, sess->username_in, 2862 sess->password_in, BIDI_CHAP, 2863 &idx); 2864 if (rval) 2865 len = sprintf(buf, "\n"); 2866 else 2867 len = sprintf(buf, "%hu\n", idx); 2868 break; 2869 case ISCSI_PARAM_CHAP_OUT_IDX: 2870 if (ddb_entry->ddb_type == FLASH_DDB) { 2871 if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) { 2872 idx = ddb_entry->chap_tbl_idx; 2873 rval = QLA_SUCCESS; 2874 } else { 2875 rval = QLA_ERROR; 2876 } 2877 } else { 2878 rval = qla4xxx_get_chap_index(ha, sess->username, 2879 sess->password, 2880 LOCAL_CHAP, &idx); 2881 } 2882 if (rval) 2883 len = sprintf(buf, "\n"); 2884 else 2885 len = sprintf(buf, "%hu\n", idx); 2886 break; 2887 case ISCSI_PARAM_USERNAME: 2888 case ISCSI_PARAM_PASSWORD: 2889 /* First, populate session username and password for FLASH DDB, 2890 * if not already done. This happens when session login fails 2891 * for a FLASH DDB. 2892 */ 2893 if (ddb_entry->ddb_type == FLASH_DDB && 2894 ddb_entry->chap_tbl_idx != INVALID_ENTRY && 2895 !sess->username && !sess->password) { 2896 idx = ddb_entry->chap_tbl_idx; 2897 rval = qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, 2898 chap_tbl.secret, 2899 idx); 2900 if (!rval) { 2901 iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, 2902 (char *)chap_tbl.name, 2903 strlen((char *)chap_tbl.name)); 2904 iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, 2905 (char *)chap_tbl.secret, 2906 chap_tbl.secret_len); 2907 } 2908 } 2909 fallthrough; 2910 default: 2911 return iscsi_session_get_param(cls_sess, param, buf); 2912 } 2913 2914 return len; 2915 } 2916 2917 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn, 2918 enum iscsi_param param, char *buf) 2919 { 2920 struct iscsi_conn *conn; 2921 struct qla_conn *qla_conn; 2922 struct sockaddr *dst_addr; 2923 2924 conn = cls_conn->dd_data; 2925 qla_conn = conn->dd_data; 2926 dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr; 2927 2928 switch (param) { 2929 case ISCSI_PARAM_CONN_PORT: 2930 case ISCSI_PARAM_CONN_ADDRESS: 2931 return iscsi_conn_get_addr_param((struct sockaddr_storage *) 2932 dst_addr, param, buf); 2933 default: 2934 return iscsi_conn_get_param(cls_conn, param, buf); 2935 } 2936 } 2937 2938 int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index) 2939 { 2940 uint32_t mbx_sts = 0; 2941 uint16_t tmp_ddb_index; 2942 int ret; 2943 2944 get_ddb_index: 2945 tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES); 2946 2947 if (tmp_ddb_index >= MAX_DDB_ENTRIES) { 2948 DEBUG2(ql4_printk(KERN_INFO, ha, 2949 "Free DDB index not available\n")); 2950 ret = QLA_ERROR; 2951 goto exit_get_ddb_index; 2952 } 2953 2954 if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map)) 2955 goto get_ddb_index; 2956 2957 DEBUG2(ql4_printk(KERN_INFO, ha, 2958 "Found a free DDB index at %d\n", tmp_ddb_index)); 2959 ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts); 2960 if (ret == QLA_ERROR) { 2961 if (mbx_sts == MBOX_STS_COMMAND_ERROR) { 2962 ql4_printk(KERN_INFO, ha, 2963 "DDB index = %d not available trying next\n", 2964 tmp_ddb_index); 2965 goto get_ddb_index; 2966 } 2967 DEBUG2(ql4_printk(KERN_INFO, ha, 2968 "Free FW DDB not available\n")); 2969 } 2970 2971 *ddb_index = tmp_ddb_index; 2972 2973 exit_get_ddb_index: 2974 return ret; 2975 } 2976 2977 static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha, 2978 struct ddb_entry *ddb_entry, 2979 char *existing_ipaddr, 2980 char *user_ipaddr) 2981 { 2982 uint8_t dst_ipaddr[IPv6_ADDR_LEN]; 2983 char formatted_ipaddr[DDB_IPADDR_LEN]; 2984 int status = QLA_SUCCESS, ret = 0; 2985 2986 if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) { 2987 ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr, 2988 '\0', NULL); 2989 if (ret == 0) { 2990 status = QLA_ERROR; 2991 goto out_match; 2992 } 2993 ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr); 2994 } else { 2995 ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr, 2996 '\0', NULL); 2997 if (ret == 0) { 2998 status = QLA_ERROR; 2999 goto out_match; 3000 } 3001 ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr); 3002 } 3003 3004 if (strcmp(existing_ipaddr, formatted_ipaddr)) 3005 status = QLA_ERROR; 3006 3007 out_match: 3008 return status; 3009 } 3010 3011 static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha, 3012 struct iscsi_cls_conn *cls_conn) 3013 { 3014 int idx = 0, max_ddbs, rval; 3015 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn); 3016 struct iscsi_session *sess, *existing_sess; 3017 struct iscsi_conn *conn, *existing_conn; 3018 struct ddb_entry *ddb_entry; 3019 3020 sess = cls_sess->dd_data; 3021 conn = cls_conn->dd_data; 3022 3023 if (sess->targetname == NULL || 3024 conn->persistent_address == NULL || 3025 conn->persistent_port == 0) 3026 return QLA_ERROR; 3027 3028 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : 3029 MAX_DEV_DB_ENTRIES; 3030 3031 for (idx = 0; idx < max_ddbs; idx++) { 3032 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 3033 if (ddb_entry == NULL) 3034 continue; 3035 3036 if (ddb_entry->ddb_type != FLASH_DDB) 3037 continue; 3038 3039 existing_sess = ddb_entry->sess->dd_data; 3040 existing_conn = ddb_entry->conn->dd_data; 3041 3042 if (existing_sess->targetname == NULL || 3043 existing_conn->persistent_address == NULL || 3044 existing_conn->persistent_port == 0) 3045 continue; 3046 3047 DEBUG2(ql4_printk(KERN_INFO, ha, 3048 "IQN = %s User IQN = %s\n", 3049 existing_sess->targetname, 3050 sess->targetname)); 3051 3052 DEBUG2(ql4_printk(KERN_INFO, ha, 3053 "IP = %s User IP = %s\n", 3054 existing_conn->persistent_address, 3055 conn->persistent_address)); 3056 3057 DEBUG2(ql4_printk(KERN_INFO, ha, 3058 "Port = %d User Port = %d\n", 3059 existing_conn->persistent_port, 3060 conn->persistent_port)); 3061 3062 if (strcmp(existing_sess->targetname, sess->targetname)) 3063 continue; 3064 rval = qla4xxx_match_ipaddress(ha, ddb_entry, 3065 existing_conn->persistent_address, 3066 conn->persistent_address); 3067 if (rval == QLA_ERROR) 3068 continue; 3069 if (existing_conn->persistent_port != conn->persistent_port) 3070 continue; 3071 break; 3072 } 3073 3074 if (idx == max_ddbs) 3075 return QLA_ERROR; 3076 3077 DEBUG2(ql4_printk(KERN_INFO, ha, 3078 "Match found in fwdb sessions\n")); 3079 return QLA_SUCCESS; 3080 } 3081 3082 static struct iscsi_cls_session * 3083 qla4xxx_session_create(struct iscsi_endpoint *ep, 3084 uint16_t cmds_max, uint16_t qdepth, 3085 uint32_t initial_cmdsn) 3086 { 3087 struct iscsi_cls_session *cls_sess; 3088 struct scsi_qla_host *ha; 3089 struct qla_endpoint *qla_ep; 3090 struct ddb_entry *ddb_entry; 3091 uint16_t ddb_index; 3092 struct iscsi_session *sess; 3093 int ret; 3094 3095 if (!ep) { 3096 printk(KERN_ERR "qla4xxx: missing ep.\n"); 3097 return NULL; 3098 } 3099 3100 qla_ep = ep->dd_data; 3101 ha = to_qla_host(qla_ep->host); 3102 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, 3103 ha->host_no)); 3104 3105 ret = qla4xxx_get_ddb_index(ha, &ddb_index); 3106 if (ret == QLA_ERROR) 3107 return NULL; 3108 3109 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host, 3110 cmds_max, sizeof(struct ddb_entry), 3111 sizeof(struct ql4_task_data), 3112 initial_cmdsn, ddb_index); 3113 if (!cls_sess) 3114 return NULL; 3115 3116 sess = cls_sess->dd_data; 3117 ddb_entry = sess->dd_data; 3118 ddb_entry->fw_ddb_index = ddb_index; 3119 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE; 3120 ddb_entry->ha = ha; 3121 ddb_entry->sess = cls_sess; 3122 ddb_entry->unblock_sess = qla4xxx_unblock_ddb; 3123 ddb_entry->ddb_change = qla4xxx_ddb_change; 3124 clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags); 3125 cls_sess->recovery_tmo = ql4xsess_recovery_tmo; 3126 ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry; 3127 ha->tot_ddbs++; 3128 3129 return cls_sess; 3130 } 3131 3132 static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess) 3133 { 3134 struct iscsi_session *sess; 3135 struct ddb_entry *ddb_entry; 3136 struct scsi_qla_host *ha; 3137 unsigned long flags, wtime; 3138 struct dev_db_entry *fw_ddb_entry = NULL; 3139 dma_addr_t fw_ddb_entry_dma; 3140 uint32_t ddb_state; 3141 int ret; 3142 3143 sess = cls_sess->dd_data; 3144 ddb_entry = sess->dd_data; 3145 ha = ddb_entry->ha; 3146 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, 3147 ha->host_no)); 3148 3149 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3150 &fw_ddb_entry_dma, GFP_KERNEL); 3151 if (!fw_ddb_entry) { 3152 ql4_printk(KERN_ERR, ha, 3153 "%s: Unable to allocate dma buffer\n", __func__); 3154 goto destroy_session; 3155 } 3156 3157 wtime = jiffies + (HZ * LOGOUT_TOV); 3158 do { 3159 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 3160 fw_ddb_entry, fw_ddb_entry_dma, 3161 NULL, NULL, &ddb_state, NULL, 3162 NULL, NULL); 3163 if (ret == QLA_ERROR) 3164 goto destroy_session; 3165 3166 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || 3167 (ddb_state == DDB_DS_SESSION_FAILED)) 3168 goto destroy_session; 3169 3170 schedule_timeout_uninterruptible(HZ); 3171 } while ((time_after(wtime, jiffies))); 3172 3173 destroy_session: 3174 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); 3175 if (test_and_clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags)) 3176 clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map); 3177 spin_lock_irqsave(&ha->hardware_lock, flags); 3178 qla4xxx_free_ddb(ha, ddb_entry); 3179 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3180 3181 iscsi_session_teardown(cls_sess); 3182 3183 if (fw_ddb_entry) 3184 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3185 fw_ddb_entry, fw_ddb_entry_dma); 3186 } 3187 3188 static struct iscsi_cls_conn * 3189 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx) 3190 { 3191 struct iscsi_cls_conn *cls_conn; 3192 struct iscsi_session *sess; 3193 struct ddb_entry *ddb_entry; 3194 struct scsi_qla_host *ha; 3195 3196 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), 3197 conn_idx); 3198 if (!cls_conn) { 3199 pr_info("%s: Can not create connection for conn_idx = %u\n", 3200 __func__, conn_idx); 3201 return NULL; 3202 } 3203 3204 sess = cls_sess->dd_data; 3205 ddb_entry = sess->dd_data; 3206 ddb_entry->conn = cls_conn; 3207 3208 ha = ddb_entry->ha; 3209 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: conn_idx = %u\n", __func__, 3210 conn_idx)); 3211 return cls_conn; 3212 } 3213 3214 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session, 3215 struct iscsi_cls_conn *cls_conn, 3216 uint64_t transport_fd, int is_leading) 3217 { 3218 struct iscsi_conn *conn; 3219 struct qla_conn *qla_conn; 3220 struct iscsi_endpoint *ep; 3221 struct ddb_entry *ddb_entry; 3222 struct scsi_qla_host *ha; 3223 struct iscsi_session *sess; 3224 3225 sess = cls_session->dd_data; 3226 ddb_entry = sess->dd_data; 3227 ha = ddb_entry->ha; 3228 3229 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__, 3230 cls_session->sid, cls_conn->cid)); 3231 3232 if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) 3233 return -EINVAL; 3234 ep = iscsi_lookup_endpoint(transport_fd); 3235 if (!ep) 3236 return -EINVAL; 3237 conn = cls_conn->dd_data; 3238 qla_conn = conn->dd_data; 3239 qla_conn->qla_ep = ep->dd_data; 3240 return 0; 3241 } 3242 3243 static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn) 3244 { 3245 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn); 3246 struct iscsi_session *sess; 3247 struct ddb_entry *ddb_entry; 3248 struct scsi_qla_host *ha; 3249 struct dev_db_entry *fw_ddb_entry = NULL; 3250 dma_addr_t fw_ddb_entry_dma; 3251 uint32_t mbx_sts = 0; 3252 int ret = 0; 3253 int status = QLA_SUCCESS; 3254 3255 sess = cls_sess->dd_data; 3256 ddb_entry = sess->dd_data; 3257 ha = ddb_entry->ha; 3258 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__, 3259 cls_sess->sid, cls_conn->cid)); 3260 3261 /* Check if we have matching FW DDB, if yes then do not 3262 * login to this target. This could cause target to logout previous 3263 * connection 3264 */ 3265 ret = qla4xxx_match_fwdb_session(ha, cls_conn); 3266 if (ret == QLA_SUCCESS) { 3267 ql4_printk(KERN_INFO, ha, 3268 "Session already exist in FW.\n"); 3269 ret = -EEXIST; 3270 goto exit_conn_start; 3271 } 3272 3273 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3274 &fw_ddb_entry_dma, GFP_KERNEL); 3275 if (!fw_ddb_entry) { 3276 ql4_printk(KERN_ERR, ha, 3277 "%s: Unable to allocate dma buffer\n", __func__); 3278 ret = -ENOMEM; 3279 goto exit_conn_start; 3280 } 3281 3282 ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts); 3283 if (ret) { 3284 /* If iscsid is stopped and started then no need to do 3285 * set param again since ddb state will be already 3286 * active and FW does not allow set ddb to an 3287 * active session. 3288 */ 3289 if (mbx_sts) 3290 if (ddb_entry->fw_ddb_device_state == 3291 DDB_DS_SESSION_ACTIVE) { 3292 ddb_entry->unblock_sess(ddb_entry->sess); 3293 goto exit_set_param; 3294 } 3295 3296 ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n", 3297 __func__, ddb_entry->fw_ddb_index); 3298 goto exit_conn_start; 3299 } 3300 3301 status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index); 3302 if (status == QLA_ERROR) { 3303 ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__, 3304 sess->targetname); 3305 ret = -EINVAL; 3306 goto exit_conn_start; 3307 } 3308 3309 if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE) 3310 ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS; 3311 3312 DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__, 3313 ddb_entry->fw_ddb_device_state)); 3314 3315 exit_set_param: 3316 ret = 0; 3317 3318 exit_conn_start: 3319 if (fw_ddb_entry) 3320 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3321 fw_ddb_entry, fw_ddb_entry_dma); 3322 return ret; 3323 } 3324 3325 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn) 3326 { 3327 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn); 3328 struct iscsi_session *sess; 3329 struct scsi_qla_host *ha; 3330 struct ddb_entry *ddb_entry; 3331 int options; 3332 3333 sess = cls_sess->dd_data; 3334 ddb_entry = sess->dd_data; 3335 ha = ddb_entry->ha; 3336 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: cid = %d\n", __func__, 3337 cls_conn->cid)); 3338 3339 options = LOGOUT_OPTION_CLOSE_SESSION; 3340 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) 3341 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__); 3342 } 3343 3344 static void qla4xxx_task_work(struct work_struct *wdata) 3345 { 3346 struct ql4_task_data *task_data; 3347 struct scsi_qla_host *ha; 3348 struct passthru_status *sts; 3349 struct iscsi_task *task; 3350 struct iscsi_hdr *hdr; 3351 uint8_t *data; 3352 uint32_t data_len; 3353 struct iscsi_conn *conn; 3354 int hdr_len; 3355 itt_t itt; 3356 3357 task_data = container_of(wdata, struct ql4_task_data, task_work); 3358 ha = task_data->ha; 3359 task = task_data->task; 3360 sts = &task_data->sts; 3361 hdr_len = sizeof(struct iscsi_hdr); 3362 3363 DEBUG3(printk(KERN_INFO "Status returned\n")); 3364 DEBUG3(qla4xxx_dump_buffer(sts, 64)); 3365 DEBUG3(printk(KERN_INFO "Response buffer")); 3366 DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64)); 3367 3368 conn = task->conn; 3369 3370 switch (sts->completionStatus) { 3371 case PASSTHRU_STATUS_COMPLETE: 3372 hdr = (struct iscsi_hdr *)task_data->resp_buffer; 3373 /* Assign back the itt in hdr, until we use the PREASSIGN_TAG */ 3374 itt = sts->handle; 3375 hdr->itt = itt; 3376 data = task_data->resp_buffer + hdr_len; 3377 data_len = task_data->resp_len - hdr_len; 3378 iscsi_complete_pdu(conn, hdr, data, data_len); 3379 break; 3380 default: 3381 ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n", 3382 sts->completionStatus); 3383 break; 3384 } 3385 return; 3386 } 3387 3388 static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode) 3389 { 3390 struct ql4_task_data *task_data; 3391 struct iscsi_session *sess; 3392 struct ddb_entry *ddb_entry; 3393 struct scsi_qla_host *ha; 3394 int hdr_len; 3395 3396 sess = task->conn->session; 3397 ddb_entry = sess->dd_data; 3398 ha = ddb_entry->ha; 3399 task_data = task->dd_data; 3400 memset(task_data, 0, sizeof(struct ql4_task_data)); 3401 3402 if (task->sc) { 3403 ql4_printk(KERN_INFO, ha, 3404 "%s: SCSI Commands not implemented\n", __func__); 3405 return -EINVAL; 3406 } 3407 3408 hdr_len = sizeof(struct iscsi_hdr); 3409 task_data->ha = ha; 3410 task_data->task = task; 3411 3412 if (task->data_count) { 3413 task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data, 3414 task->data_count, 3415 DMA_TO_DEVICE); 3416 } 3417 3418 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n", 3419 __func__, task->conn->max_recv_dlength, hdr_len)); 3420 3421 task_data->resp_len = task->conn->max_recv_dlength + hdr_len; 3422 task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev, 3423 task_data->resp_len, 3424 &task_data->resp_dma, 3425 GFP_ATOMIC); 3426 if (!task_data->resp_buffer) 3427 goto exit_alloc_pdu; 3428 3429 task_data->req_len = task->data_count + hdr_len; 3430 task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev, 3431 task_data->req_len, 3432 &task_data->req_dma, 3433 GFP_ATOMIC); 3434 if (!task_data->req_buffer) 3435 goto exit_alloc_pdu; 3436 3437 task->hdr = task_data->req_buffer; 3438 3439 INIT_WORK(&task_data->task_work, qla4xxx_task_work); 3440 3441 return 0; 3442 3443 exit_alloc_pdu: 3444 if (task_data->resp_buffer) 3445 dma_free_coherent(&ha->pdev->dev, task_data->resp_len, 3446 task_data->resp_buffer, task_data->resp_dma); 3447 3448 if (task_data->req_buffer) 3449 dma_free_coherent(&ha->pdev->dev, task_data->req_len, 3450 task_data->req_buffer, task_data->req_dma); 3451 return -ENOMEM; 3452 } 3453 3454 static void qla4xxx_task_cleanup(struct iscsi_task *task) 3455 { 3456 struct ql4_task_data *task_data; 3457 struct iscsi_session *sess; 3458 struct ddb_entry *ddb_entry; 3459 struct scsi_qla_host *ha; 3460 int hdr_len; 3461 3462 hdr_len = sizeof(struct iscsi_hdr); 3463 sess = task->conn->session; 3464 ddb_entry = sess->dd_data; 3465 ha = ddb_entry->ha; 3466 task_data = task->dd_data; 3467 3468 if (task->data_count) { 3469 dma_unmap_single(&ha->pdev->dev, task_data->data_dma, 3470 task->data_count, DMA_TO_DEVICE); 3471 } 3472 3473 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n", 3474 __func__, task->conn->max_recv_dlength, hdr_len)); 3475 3476 dma_free_coherent(&ha->pdev->dev, task_data->resp_len, 3477 task_data->resp_buffer, task_data->resp_dma); 3478 dma_free_coherent(&ha->pdev->dev, task_data->req_len, 3479 task_data->req_buffer, task_data->req_dma); 3480 return; 3481 } 3482 3483 static int qla4xxx_task_xmit(struct iscsi_task *task) 3484 { 3485 struct scsi_cmnd *sc = task->sc; 3486 struct iscsi_session *sess = task->conn->session; 3487 struct ddb_entry *ddb_entry = sess->dd_data; 3488 struct scsi_qla_host *ha = ddb_entry->ha; 3489 3490 if (!sc) 3491 return qla4xxx_send_passthru0(task); 3492 3493 ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n", 3494 __func__); 3495 return -ENOSYS; 3496 } 3497 3498 static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess, 3499 struct iscsi_bus_flash_conn *conn, 3500 struct dev_db_entry *fw_ddb_entry) 3501 { 3502 unsigned long options = 0; 3503 int rc = 0; 3504 3505 options = le16_to_cpu(fw_ddb_entry->options); 3506 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options); 3507 if (test_bit(OPT_IPV6_DEVICE, &options)) { 3508 rc = iscsi_switch_str_param(&sess->portal_type, 3509 PORTAL_TYPE_IPV6); 3510 if (rc) 3511 goto exit_copy; 3512 } else { 3513 rc = iscsi_switch_str_param(&sess->portal_type, 3514 PORTAL_TYPE_IPV4); 3515 if (rc) 3516 goto exit_copy; 3517 } 3518 3519 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE, 3520 &options); 3521 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options); 3522 sess->entry_state = test_bit(OPT_ENTRY_STATE, &options); 3523 3524 options = le16_to_cpu(fw_ddb_entry->iscsi_options); 3525 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options); 3526 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options); 3527 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options); 3528 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options); 3529 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER, 3530 &options); 3531 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options); 3532 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options); 3533 conn->snack_req_en = test_bit(ISCSIOPT_SNACK_REQ_EN, &options); 3534 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN, 3535 &options); 3536 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options); 3537 sess->discovery_auth_optional = 3538 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options); 3539 if (test_bit(ISCSIOPT_ERL1, &options)) 3540 sess->erl |= BIT_1; 3541 if (test_bit(ISCSIOPT_ERL0, &options)) 3542 sess->erl |= BIT_0; 3543 3544 options = le16_to_cpu(fw_ddb_entry->tcp_options); 3545 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options); 3546 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options); 3547 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options); 3548 if (test_bit(TCPOPT_TIMER_SCALE3, &options)) 3549 conn->tcp_timer_scale |= BIT_3; 3550 if (test_bit(TCPOPT_TIMER_SCALE2, &options)) 3551 conn->tcp_timer_scale |= BIT_2; 3552 if (test_bit(TCPOPT_TIMER_SCALE1, &options)) 3553 conn->tcp_timer_scale |= BIT_1; 3554 3555 conn->tcp_timer_scale >>= 1; 3556 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options); 3557 3558 options = le16_to_cpu(fw_ddb_entry->ip_options); 3559 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options); 3560 3561 conn->max_recv_dlength = BYTE_UNITS * 3562 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); 3563 conn->max_xmit_dlength = BYTE_UNITS * 3564 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); 3565 sess->first_burst = BYTE_UNITS * 3566 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); 3567 sess->max_burst = BYTE_UNITS * 3568 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); 3569 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); 3570 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); 3571 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); 3572 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); 3573 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss); 3574 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf; 3575 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf; 3576 conn->ipv6_flow_label = le16_to_cpu(fw_ddb_entry->ipv6_flow_lbl); 3577 conn->keepalive_timeout = le16_to_cpu(fw_ddb_entry->ka_timeout); 3578 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port); 3579 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn); 3580 conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn); 3581 sess->discovery_parent_idx = le16_to_cpu(fw_ddb_entry->ddb_link); 3582 sess->discovery_parent_type = le16_to_cpu(fw_ddb_entry->ddb_link); 3583 sess->chap_out_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 3584 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid); 3585 3586 sess->default_taskmgmt_timeout = 3587 le16_to_cpu(fw_ddb_entry->def_timeout); 3588 conn->port = le16_to_cpu(fw_ddb_entry->port); 3589 3590 options = le16_to_cpu(fw_ddb_entry->options); 3591 conn->ipaddress = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL); 3592 if (!conn->ipaddress) { 3593 rc = -ENOMEM; 3594 goto exit_copy; 3595 } 3596 3597 conn->redirect_ipaddr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL); 3598 if (!conn->redirect_ipaddr) { 3599 rc = -ENOMEM; 3600 goto exit_copy; 3601 } 3602 3603 memcpy(conn->ipaddress, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN); 3604 memcpy(conn->redirect_ipaddr, fw_ddb_entry->tgt_addr, IPv6_ADDR_LEN); 3605 3606 if (test_bit(OPT_IPV6_DEVICE, &options)) { 3607 conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos; 3608 3609 conn->link_local_ipv6_addr = kmemdup( 3610 fw_ddb_entry->link_local_ipv6_addr, 3611 IPv6_ADDR_LEN, GFP_KERNEL); 3612 if (!conn->link_local_ipv6_addr) { 3613 rc = -ENOMEM; 3614 goto exit_copy; 3615 } 3616 } else { 3617 conn->ipv4_tos = fw_ddb_entry->ipv4_tos; 3618 } 3619 3620 if (fw_ddb_entry->iscsi_name[0]) { 3621 rc = iscsi_switch_str_param(&sess->targetname, 3622 (char *)fw_ddb_entry->iscsi_name); 3623 if (rc) 3624 goto exit_copy; 3625 } 3626 3627 if (fw_ddb_entry->iscsi_alias[0]) { 3628 rc = iscsi_switch_str_param(&sess->targetalias, 3629 (char *)fw_ddb_entry->iscsi_alias); 3630 if (rc) 3631 goto exit_copy; 3632 } 3633 3634 COPY_ISID(sess->isid, fw_ddb_entry->isid); 3635 3636 exit_copy: 3637 return rc; 3638 } 3639 3640 static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess, 3641 struct iscsi_bus_flash_conn *conn, 3642 struct dev_db_entry *fw_ddb_entry) 3643 { 3644 uint16_t options; 3645 int rc = 0; 3646 3647 options = le16_to_cpu(fw_ddb_entry->options); 3648 SET_BITVAL(conn->is_fw_assigned_ipv6, options, BIT_11); 3649 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4)) 3650 options |= BIT_8; 3651 else 3652 options &= ~BIT_8; 3653 3654 SET_BITVAL(sess->auto_snd_tgt_disable, options, BIT_6); 3655 SET_BITVAL(sess->discovery_sess, options, BIT_4); 3656 SET_BITVAL(sess->entry_state, options, BIT_3); 3657 fw_ddb_entry->options = cpu_to_le16(options); 3658 3659 options = le16_to_cpu(fw_ddb_entry->iscsi_options); 3660 SET_BITVAL(conn->hdrdgst_en, options, BIT_13); 3661 SET_BITVAL(conn->datadgst_en, options, BIT_12); 3662 SET_BITVAL(sess->imm_data_en, options, BIT_11); 3663 SET_BITVAL(sess->initial_r2t_en, options, BIT_10); 3664 SET_BITVAL(sess->dataseq_inorder_en, options, BIT_9); 3665 SET_BITVAL(sess->pdu_inorder_en, options, BIT_8); 3666 SET_BITVAL(sess->chap_auth_en, options, BIT_7); 3667 SET_BITVAL(conn->snack_req_en, options, BIT_6); 3668 SET_BITVAL(sess->discovery_logout_en, options, BIT_5); 3669 SET_BITVAL(sess->bidi_chap_en, options, BIT_4); 3670 SET_BITVAL(sess->discovery_auth_optional, options, BIT_3); 3671 SET_BITVAL(sess->erl & BIT_1, options, BIT_1); 3672 SET_BITVAL(sess->erl & BIT_0, options, BIT_0); 3673 fw_ddb_entry->iscsi_options = cpu_to_le16(options); 3674 3675 options = le16_to_cpu(fw_ddb_entry->tcp_options); 3676 SET_BITVAL(conn->tcp_timestamp_stat, options, BIT_6); 3677 SET_BITVAL(conn->tcp_nagle_disable, options, BIT_5); 3678 SET_BITVAL(conn->tcp_wsf_disable, options, BIT_4); 3679 SET_BITVAL(conn->tcp_timer_scale & BIT_2, options, BIT_3); 3680 SET_BITVAL(conn->tcp_timer_scale & BIT_1, options, BIT_2); 3681 SET_BITVAL(conn->tcp_timer_scale & BIT_0, options, BIT_1); 3682 SET_BITVAL(conn->tcp_timestamp_en, options, BIT_0); 3683 fw_ddb_entry->tcp_options = cpu_to_le16(options); 3684 3685 options = le16_to_cpu(fw_ddb_entry->ip_options); 3686 SET_BITVAL(conn->fragment_disable, options, BIT_4); 3687 fw_ddb_entry->ip_options = cpu_to_le16(options); 3688 3689 fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t); 3690 fw_ddb_entry->iscsi_max_rcv_data_seg_len = 3691 cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS); 3692 fw_ddb_entry->iscsi_max_snd_data_seg_len = 3693 cpu_to_le16(conn->max_xmit_dlength / BYTE_UNITS); 3694 fw_ddb_entry->iscsi_first_burst_len = 3695 cpu_to_le16(sess->first_burst / BYTE_UNITS); 3696 fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16(sess->max_burst / 3697 BYTE_UNITS); 3698 fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait); 3699 fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain); 3700 fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt); 3701 fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size); 3702 fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf); 3703 fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf); 3704 fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label); 3705 fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout); 3706 fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port); 3707 fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn); 3708 fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn); 3709 fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_idx); 3710 fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx); 3711 fw_ddb_entry->tsid = cpu_to_le16(sess->tsid); 3712 fw_ddb_entry->port = cpu_to_le16(conn->port); 3713 fw_ddb_entry->def_timeout = 3714 cpu_to_le16(sess->default_taskmgmt_timeout); 3715 3716 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4)) 3717 fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class; 3718 else 3719 fw_ddb_entry->ipv4_tos = conn->ipv4_tos; 3720 3721 if (conn->ipaddress) 3722 memcpy(fw_ddb_entry->ip_addr, conn->ipaddress, 3723 sizeof(fw_ddb_entry->ip_addr)); 3724 3725 if (conn->redirect_ipaddr) 3726 memcpy(fw_ddb_entry->tgt_addr, conn->redirect_ipaddr, 3727 sizeof(fw_ddb_entry->tgt_addr)); 3728 3729 if (conn->link_local_ipv6_addr) 3730 memcpy(fw_ddb_entry->link_local_ipv6_addr, 3731 conn->link_local_ipv6_addr, 3732 sizeof(fw_ddb_entry->link_local_ipv6_addr)); 3733 3734 if (sess->targetname) 3735 memcpy(fw_ddb_entry->iscsi_name, sess->targetname, 3736 sizeof(fw_ddb_entry->iscsi_name)); 3737 3738 if (sess->targetalias) 3739 memcpy(fw_ddb_entry->iscsi_alias, sess->targetalias, 3740 sizeof(fw_ddb_entry->iscsi_alias)); 3741 3742 COPY_ISID(fw_ddb_entry->isid, sess->isid); 3743 3744 return rc; 3745 } 3746 3747 static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn, 3748 struct iscsi_session *sess, 3749 struct dev_db_entry *fw_ddb_entry) 3750 { 3751 unsigned long options = 0; 3752 uint16_t ddb_link; 3753 uint16_t disc_parent; 3754 char ip_addr[DDB_IPADDR_LEN]; 3755 3756 options = le16_to_cpu(fw_ddb_entry->options); 3757 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options); 3758 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE, 3759 &options); 3760 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options); 3761 3762 options = le16_to_cpu(fw_ddb_entry->iscsi_options); 3763 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options); 3764 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options); 3765 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options); 3766 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options); 3767 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER, 3768 &options); 3769 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options); 3770 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options); 3771 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN, 3772 &options); 3773 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options); 3774 sess->discovery_auth_optional = 3775 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options); 3776 if (test_bit(ISCSIOPT_ERL1, &options)) 3777 sess->erl |= BIT_1; 3778 if (test_bit(ISCSIOPT_ERL0, &options)) 3779 sess->erl |= BIT_0; 3780 3781 options = le16_to_cpu(fw_ddb_entry->tcp_options); 3782 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options); 3783 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options); 3784 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options); 3785 if (test_bit(TCPOPT_TIMER_SCALE3, &options)) 3786 conn->tcp_timer_scale |= BIT_3; 3787 if (test_bit(TCPOPT_TIMER_SCALE2, &options)) 3788 conn->tcp_timer_scale |= BIT_2; 3789 if (test_bit(TCPOPT_TIMER_SCALE1, &options)) 3790 conn->tcp_timer_scale |= BIT_1; 3791 3792 conn->tcp_timer_scale >>= 1; 3793 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options); 3794 3795 options = le16_to_cpu(fw_ddb_entry->ip_options); 3796 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options); 3797 3798 conn->max_recv_dlength = BYTE_UNITS * 3799 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); 3800 conn->max_xmit_dlength = BYTE_UNITS * 3801 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); 3802 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); 3803 sess->first_burst = BYTE_UNITS * 3804 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); 3805 sess->max_burst = BYTE_UNITS * 3806 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); 3807 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); 3808 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); 3809 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); 3810 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss); 3811 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf; 3812 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf; 3813 conn->ipv4_tos = fw_ddb_entry->ipv4_tos; 3814 conn->keepalive_tmo = le16_to_cpu(fw_ddb_entry->ka_timeout); 3815 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port); 3816 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn); 3817 conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn); 3818 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid); 3819 COPY_ISID(sess->isid, fw_ddb_entry->isid); 3820 3821 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 3822 if (ddb_link == DDB_ISNS) 3823 disc_parent = ISCSI_DISC_PARENT_ISNS; 3824 else if (ddb_link == DDB_NO_LINK) 3825 disc_parent = ISCSI_DISC_PARENT_UNKNOWN; 3826 else if (ddb_link < MAX_DDB_ENTRIES) 3827 disc_parent = ISCSI_DISC_PARENT_SENDTGT; 3828 else 3829 disc_parent = ISCSI_DISC_PARENT_UNKNOWN; 3830 3831 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 3832 iscsi_get_discovery_parent_name(disc_parent), 0); 3833 3834 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS, 3835 (char *)fw_ddb_entry->iscsi_alias, 0); 3836 3837 options = le16_to_cpu(fw_ddb_entry->options); 3838 if (options & DDB_OPT_IPV6_DEVICE) { 3839 memset(ip_addr, 0, sizeof(ip_addr)); 3840 sprintf(ip_addr, "%pI6", fw_ddb_entry->link_local_ipv6_addr); 3841 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_LOCAL_IPADDR, 3842 (char *)ip_addr, 0); 3843 } 3844 } 3845 3846 static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha, 3847 struct dev_db_entry *fw_ddb_entry, 3848 struct iscsi_cls_session *cls_sess, 3849 struct iscsi_cls_conn *cls_conn) 3850 { 3851 int buflen = 0; 3852 struct iscsi_session *sess; 3853 struct ddb_entry *ddb_entry; 3854 struct ql4_chap_table chap_tbl; 3855 struct iscsi_conn *conn; 3856 char ip_addr[DDB_IPADDR_LEN]; 3857 uint16_t options = 0; 3858 3859 sess = cls_sess->dd_data; 3860 ddb_entry = sess->dd_data; 3861 conn = cls_conn->dd_data; 3862 memset(&chap_tbl, 0, sizeof(chap_tbl)); 3863 3864 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 3865 3866 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry); 3867 3868 sess->def_taskmgmt_tmo = le16_to_cpu(fw_ddb_entry->def_timeout); 3869 conn->persistent_port = le16_to_cpu(fw_ddb_entry->port); 3870 3871 memset(ip_addr, 0, sizeof(ip_addr)); 3872 options = le16_to_cpu(fw_ddb_entry->options); 3873 if (options & DDB_OPT_IPV6_DEVICE) { 3874 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv6", 4); 3875 3876 memset(ip_addr, 0, sizeof(ip_addr)); 3877 sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr); 3878 } else { 3879 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv4", 4); 3880 sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr); 3881 } 3882 3883 iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS, 3884 (char *)ip_addr, buflen); 3885 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME, 3886 (char *)fw_ddb_entry->iscsi_name, buflen); 3887 iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME, 3888 (char *)ha->name_string, buflen); 3889 3890 if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) { 3891 if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, 3892 chap_tbl.secret, 3893 ddb_entry->chap_tbl_idx)) { 3894 iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, 3895 (char *)chap_tbl.name, 3896 strlen((char *)chap_tbl.name)); 3897 iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, 3898 (char *)chap_tbl.secret, 3899 chap_tbl.secret_len); 3900 } 3901 } 3902 } 3903 3904 void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha, 3905 struct ddb_entry *ddb_entry) 3906 { 3907 struct iscsi_cls_session *cls_sess; 3908 struct iscsi_cls_conn *cls_conn; 3909 uint32_t ddb_state; 3910 dma_addr_t fw_ddb_entry_dma; 3911 struct dev_db_entry *fw_ddb_entry; 3912 3913 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3914 &fw_ddb_entry_dma, GFP_KERNEL); 3915 if (!fw_ddb_entry) { 3916 ql4_printk(KERN_ERR, ha, 3917 "%s: Unable to allocate dma buffer\n", __func__); 3918 goto exit_session_conn_fwddb_param; 3919 } 3920 3921 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, 3922 fw_ddb_entry_dma, NULL, NULL, &ddb_state, 3923 NULL, NULL, NULL) == QLA_ERROR) { 3924 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " 3925 "get_ddb_entry for fw_ddb_index %d\n", 3926 ha->host_no, __func__, 3927 ddb_entry->fw_ddb_index)); 3928 goto exit_session_conn_fwddb_param; 3929 } 3930 3931 cls_sess = ddb_entry->sess; 3932 3933 cls_conn = ddb_entry->conn; 3934 3935 /* Update params */ 3936 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); 3937 3938 exit_session_conn_fwddb_param: 3939 if (fw_ddb_entry) 3940 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3941 fw_ddb_entry, fw_ddb_entry_dma); 3942 } 3943 3944 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha, 3945 struct ddb_entry *ddb_entry) 3946 { 3947 struct iscsi_cls_session *cls_sess; 3948 struct iscsi_cls_conn *cls_conn; 3949 struct iscsi_session *sess; 3950 struct iscsi_conn *conn; 3951 uint32_t ddb_state; 3952 dma_addr_t fw_ddb_entry_dma; 3953 struct dev_db_entry *fw_ddb_entry; 3954 3955 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3956 &fw_ddb_entry_dma, GFP_KERNEL); 3957 if (!fw_ddb_entry) { 3958 ql4_printk(KERN_ERR, ha, 3959 "%s: Unable to allocate dma buffer\n", __func__); 3960 goto exit_session_conn_param; 3961 } 3962 3963 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, 3964 fw_ddb_entry_dma, NULL, NULL, &ddb_state, 3965 NULL, NULL, NULL) == QLA_ERROR) { 3966 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " 3967 "get_ddb_entry for fw_ddb_index %d\n", 3968 ha->host_no, __func__, 3969 ddb_entry->fw_ddb_index)); 3970 goto exit_session_conn_param; 3971 } 3972 3973 cls_sess = ddb_entry->sess; 3974 sess = cls_sess->dd_data; 3975 3976 cls_conn = ddb_entry->conn; 3977 conn = cls_conn->dd_data; 3978 3979 /* Update timers after login */ 3980 ddb_entry->default_relogin_timeout = 3981 (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) && 3982 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ? 3983 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV; 3984 ddb_entry->default_time2wait = 3985 le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); 3986 3987 /* Update params */ 3988 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 3989 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry); 3990 3991 memcpy(sess->initiatorname, ha->name_string, 3992 min(sizeof(ha->name_string), sizeof(sess->initiatorname))); 3993 3994 exit_session_conn_param: 3995 if (fw_ddb_entry) 3996 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3997 fw_ddb_entry, fw_ddb_entry_dma); 3998 } 3999 4000 /* 4001 * Timer routines 4002 */ 4003 static void qla4xxx_timer(struct timer_list *t); 4004 4005 static void qla4xxx_start_timer(struct scsi_qla_host *ha, 4006 unsigned long interval) 4007 { 4008 DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n", 4009 __func__, ha->host->host_no)); 4010 timer_setup(&ha->timer, qla4xxx_timer, 0); 4011 ha->timer.expires = jiffies + interval * HZ; 4012 add_timer(&ha->timer); 4013 ha->timer_active = 1; 4014 } 4015 4016 static void qla4xxx_stop_timer(struct scsi_qla_host *ha) 4017 { 4018 del_timer_sync(&ha->timer); 4019 ha->timer_active = 0; 4020 } 4021 4022 /*** 4023 * qla4xxx_mark_device_missing - blocks the session 4024 * @cls_session: Pointer to the session to be blocked 4025 * @ddb_entry: Pointer to device database entry 4026 * 4027 * This routine marks a device missing and close connection. 4028 **/ 4029 void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session) 4030 { 4031 iscsi_block_session(cls_session); 4032 } 4033 4034 /** 4035 * qla4xxx_mark_all_devices_missing - mark all devices as missing. 4036 * @ha: Pointer to host adapter structure. 4037 * 4038 * This routine marks a device missing and resets the relogin retry count. 4039 **/ 4040 void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha) 4041 { 4042 iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing); 4043 } 4044 4045 static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha, 4046 struct ddb_entry *ddb_entry, 4047 struct scsi_cmnd *cmd) 4048 { 4049 struct srb *srb; 4050 4051 srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC); 4052 if (!srb) 4053 return srb; 4054 4055 kref_init(&srb->srb_ref); 4056 srb->ha = ha; 4057 srb->ddb = ddb_entry; 4058 srb->cmd = cmd; 4059 srb->flags = 0; 4060 CMD_SP(cmd) = (void *)srb; 4061 4062 return srb; 4063 } 4064 4065 static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb) 4066 { 4067 struct scsi_cmnd *cmd = srb->cmd; 4068 4069 if (srb->flags & SRB_DMA_VALID) { 4070 scsi_dma_unmap(cmd); 4071 srb->flags &= ~SRB_DMA_VALID; 4072 } 4073 CMD_SP(cmd) = NULL; 4074 } 4075 4076 void qla4xxx_srb_compl(struct kref *ref) 4077 { 4078 struct srb *srb = container_of(ref, struct srb, srb_ref); 4079 struct scsi_cmnd *cmd = srb->cmd; 4080 struct scsi_qla_host *ha = srb->ha; 4081 4082 qla4xxx_srb_free_dma(ha, srb); 4083 4084 mempool_free(srb, ha->srb_mempool); 4085 4086 cmd->scsi_done(cmd); 4087 } 4088 4089 /** 4090 * qla4xxx_queuecommand - scsi layer issues scsi command to driver. 4091 * @host: scsi host 4092 * @cmd: Pointer to Linux's SCSI command structure 4093 * 4094 * Remarks: 4095 * This routine is invoked by Linux to send a SCSI command to the driver. 4096 * The mid-level driver tries to ensure that queuecommand never gets 4097 * invoked concurrently with itself or the interrupt handler (although 4098 * the interrupt handler may call this routine as part of request- 4099 * completion handling). Unfortunely, it sometimes calls the scheduler 4100 * in interrupt context which is a big NO! NO!. 4101 **/ 4102 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) 4103 { 4104 struct scsi_qla_host *ha = to_qla_host(host); 4105 struct ddb_entry *ddb_entry = cmd->device->hostdata; 4106 struct iscsi_cls_session *sess = ddb_entry->sess; 4107 struct srb *srb; 4108 int rval; 4109 4110 if (test_bit(AF_EEH_BUSY, &ha->flags)) { 4111 if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags)) 4112 cmd->result = DID_NO_CONNECT << 16; 4113 else 4114 cmd->result = DID_REQUEUE << 16; 4115 goto qc_fail_command; 4116 } 4117 4118 if (!sess) { 4119 cmd->result = DID_IMM_RETRY << 16; 4120 goto qc_fail_command; 4121 } 4122 4123 rval = iscsi_session_chkready(sess); 4124 if (rval) { 4125 cmd->result = rval; 4126 goto qc_fail_command; 4127 } 4128 4129 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 4130 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || 4131 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 4132 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) || 4133 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || 4134 !test_bit(AF_ONLINE, &ha->flags) || 4135 !test_bit(AF_LINK_UP, &ha->flags) || 4136 test_bit(AF_LOOPBACK, &ha->flags) || 4137 test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) || 4138 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags) || 4139 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) 4140 goto qc_host_busy; 4141 4142 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd); 4143 if (!srb) 4144 goto qc_host_busy; 4145 4146 rval = qla4xxx_send_command_to_isp(ha, srb); 4147 if (rval != QLA_SUCCESS) 4148 goto qc_host_busy_free_sp; 4149 4150 return 0; 4151 4152 qc_host_busy_free_sp: 4153 qla4xxx_srb_free_dma(ha, srb); 4154 mempool_free(srb, ha->srb_mempool); 4155 4156 qc_host_busy: 4157 return SCSI_MLQUEUE_HOST_BUSY; 4158 4159 qc_fail_command: 4160 cmd->scsi_done(cmd); 4161 4162 return 0; 4163 } 4164 4165 /** 4166 * qla4xxx_mem_free - frees memory allocated to adapter 4167 * @ha: Pointer to host adapter structure. 4168 * 4169 * Frees memory previously allocated by qla4xxx_mem_alloc 4170 **/ 4171 static void qla4xxx_mem_free(struct scsi_qla_host *ha) 4172 { 4173 if (ha->queues) 4174 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues, 4175 ha->queues_dma); 4176 4177 if (ha->fw_dump) 4178 vfree(ha->fw_dump); 4179 4180 ha->queues_len = 0; 4181 ha->queues = NULL; 4182 ha->queues_dma = 0; 4183 ha->request_ring = NULL; 4184 ha->request_dma = 0; 4185 ha->response_ring = NULL; 4186 ha->response_dma = 0; 4187 ha->shadow_regs = NULL; 4188 ha->shadow_regs_dma = 0; 4189 ha->fw_dump = NULL; 4190 ha->fw_dump_size = 0; 4191 4192 /* Free srb pool. */ 4193 mempool_destroy(ha->srb_mempool); 4194 ha->srb_mempool = NULL; 4195 4196 dma_pool_destroy(ha->chap_dma_pool); 4197 4198 if (ha->chap_list) 4199 vfree(ha->chap_list); 4200 ha->chap_list = NULL; 4201 4202 dma_pool_destroy(ha->fw_ddb_dma_pool); 4203 4204 /* release io space registers */ 4205 if (is_qla8022(ha)) { 4206 if (ha->nx_pcibase) 4207 iounmap( 4208 (struct device_reg_82xx __iomem *)ha->nx_pcibase); 4209 } else if (is_qla8032(ha) || is_qla8042(ha)) { 4210 if (ha->nx_pcibase) 4211 iounmap( 4212 (struct device_reg_83xx __iomem *)ha->nx_pcibase); 4213 } else if (ha->reg) { 4214 iounmap(ha->reg); 4215 } 4216 4217 if (ha->reset_tmplt.buff) 4218 vfree(ha->reset_tmplt.buff); 4219 4220 pci_release_regions(ha->pdev); 4221 } 4222 4223 /** 4224 * qla4xxx_mem_alloc - allocates memory for use by adapter. 4225 * @ha: Pointer to host adapter structure 4226 * 4227 * Allocates DMA memory for request and response queues. Also allocates memory 4228 * for srbs. 4229 **/ 4230 static int qla4xxx_mem_alloc(struct scsi_qla_host *ha) 4231 { 4232 unsigned long align; 4233 4234 /* Allocate contiguous block of DMA memory for queues. */ 4235 ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) + 4236 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) + 4237 sizeof(struct shadow_regs) + 4238 MEM_ALIGN_VALUE + 4239 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 4240 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len, 4241 &ha->queues_dma, GFP_KERNEL); 4242 if (ha->queues == NULL) { 4243 ql4_printk(KERN_WARNING, ha, 4244 "Memory Allocation failed - queues.\n"); 4245 4246 goto mem_alloc_error_exit; 4247 } 4248 4249 /* 4250 * As per RISC alignment requirements -- the bus-address must be a 4251 * multiple of the request-ring size (in bytes). 4252 */ 4253 align = 0; 4254 if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1)) 4255 align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma & 4256 (MEM_ALIGN_VALUE - 1)); 4257 4258 /* Update request and response queue pointers. */ 4259 ha->request_dma = ha->queues_dma + align; 4260 ha->request_ring = (struct queue_entry *) (ha->queues + align); 4261 ha->response_dma = ha->queues_dma + align + 4262 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE); 4263 ha->response_ring = (struct queue_entry *) (ha->queues + align + 4264 (REQUEST_QUEUE_DEPTH * 4265 QUEUE_SIZE)); 4266 ha->shadow_regs_dma = ha->queues_dma + align + 4267 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) + 4268 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE); 4269 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align + 4270 (REQUEST_QUEUE_DEPTH * 4271 QUEUE_SIZE) + 4272 (RESPONSE_QUEUE_DEPTH * 4273 QUEUE_SIZE)); 4274 4275 /* Allocate memory for srb pool. */ 4276 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab, 4277 mempool_free_slab, srb_cachep); 4278 if (ha->srb_mempool == NULL) { 4279 ql4_printk(KERN_WARNING, ha, 4280 "Memory Allocation failed - SRB Pool.\n"); 4281 4282 goto mem_alloc_error_exit; 4283 } 4284 4285 ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev, 4286 CHAP_DMA_BLOCK_SIZE, 8, 0); 4287 4288 if (ha->chap_dma_pool == NULL) { 4289 ql4_printk(KERN_WARNING, ha, 4290 "%s: chap_dma_pool allocation failed..\n", __func__); 4291 goto mem_alloc_error_exit; 4292 } 4293 4294 ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev, 4295 DDB_DMA_BLOCK_SIZE, 8, 0); 4296 4297 if (ha->fw_ddb_dma_pool == NULL) { 4298 ql4_printk(KERN_WARNING, ha, 4299 "%s: fw_ddb_dma_pool allocation failed..\n", 4300 __func__); 4301 goto mem_alloc_error_exit; 4302 } 4303 4304 return QLA_SUCCESS; 4305 4306 mem_alloc_error_exit: 4307 return QLA_ERROR; 4308 } 4309 4310 /** 4311 * qla4_8xxx_check_temp - Check the ISP82XX temperature. 4312 * @ha: adapter block pointer. 4313 * 4314 * Note: The caller should not hold the idc lock. 4315 **/ 4316 static int qla4_8xxx_check_temp(struct scsi_qla_host *ha) 4317 { 4318 uint32_t temp, temp_state, temp_val; 4319 int status = QLA_SUCCESS; 4320 4321 temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE); 4322 4323 temp_state = qla82xx_get_temp_state(temp); 4324 temp_val = qla82xx_get_temp_val(temp); 4325 4326 if (temp_state == QLA82XX_TEMP_PANIC) { 4327 ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C" 4328 " exceeds maximum allowed. Hardware has been shut" 4329 " down.\n", temp_val); 4330 status = QLA_ERROR; 4331 } else if (temp_state == QLA82XX_TEMP_WARN) { 4332 if (ha->temperature == QLA82XX_TEMP_NORMAL) 4333 ql4_printk(KERN_WARNING, ha, "Device temperature %d" 4334 " degrees C exceeds operating range." 4335 " Immediate action needed.\n", temp_val); 4336 } else { 4337 if (ha->temperature == QLA82XX_TEMP_WARN) 4338 ql4_printk(KERN_INFO, ha, "Device temperature is" 4339 " now %d degrees C in normal range.\n", 4340 temp_val); 4341 } 4342 ha->temperature = temp_state; 4343 return status; 4344 } 4345 4346 /** 4347 * qla4_8xxx_check_fw_alive - Check firmware health 4348 * @ha: Pointer to host adapter structure. 4349 * 4350 * Context: Interrupt 4351 **/ 4352 static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha) 4353 { 4354 uint32_t fw_heartbeat_counter; 4355 int status = QLA_SUCCESS; 4356 4357 fw_heartbeat_counter = qla4_8xxx_rd_direct(ha, 4358 QLA8XXX_PEG_ALIVE_COUNTER); 4359 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */ 4360 if (fw_heartbeat_counter == 0xffffffff) { 4361 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen " 4362 "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n", 4363 ha->host_no, __func__)); 4364 return status; 4365 } 4366 4367 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) { 4368 ha->seconds_since_last_heartbeat++; 4369 /* FW not alive after 2 seconds */ 4370 if (ha->seconds_since_last_heartbeat == 2) { 4371 ha->seconds_since_last_heartbeat = 0; 4372 qla4_8xxx_dump_peg_reg(ha); 4373 status = QLA_ERROR; 4374 } 4375 } else 4376 ha->seconds_since_last_heartbeat = 0; 4377 4378 ha->fw_heartbeat_counter = fw_heartbeat_counter; 4379 return status; 4380 } 4381 4382 static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha) 4383 { 4384 uint32_t halt_status; 4385 int halt_status_unrecoverable = 0; 4386 4387 halt_status = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1); 4388 4389 if (is_qla8022(ha)) { 4390 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", 4391 __func__); 4392 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, 4393 CRB_NIU_XG_PAUSE_CTL_P0 | 4394 CRB_NIU_XG_PAUSE_CTL_P1); 4395 4396 if (QLA82XX_FWERROR_CODE(halt_status) == 0x67) 4397 ql4_printk(KERN_ERR, ha, "%s: Firmware aborted with error code 0x00006700. Device is being reset\n", 4398 __func__); 4399 if (halt_status & HALT_STATUS_UNRECOVERABLE) 4400 halt_status_unrecoverable = 1; 4401 } else if (is_qla8032(ha) || is_qla8042(ha)) { 4402 if (halt_status & QLA83XX_HALT_STATUS_FW_RESET) 4403 ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n", 4404 __func__); 4405 else if (halt_status & QLA83XX_HALT_STATUS_UNRECOVERABLE) 4406 halt_status_unrecoverable = 1; 4407 } 4408 4409 /* 4410 * Since we cannot change dev_state in interrupt context, 4411 * set appropriate DPC flag then wakeup DPC 4412 */ 4413 if (halt_status_unrecoverable) { 4414 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags); 4415 } else { 4416 ql4_printk(KERN_INFO, ha, "%s: detect abort needed!\n", 4417 __func__); 4418 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4419 } 4420 qla4xxx_mailbox_premature_completion(ha); 4421 qla4xxx_wake_dpc(ha); 4422 } 4423 4424 /** 4425 * qla4_8xxx_watchdog - Poll dev state 4426 * @ha: Pointer to host adapter structure. 4427 * 4428 * Context: Interrupt 4429 **/ 4430 void qla4_8xxx_watchdog(struct scsi_qla_host *ha) 4431 { 4432 uint32_t dev_state; 4433 uint32_t idc_ctrl; 4434 4435 if (is_qla8032(ha) && 4436 (qla4_83xx_is_detached(ha) == QLA_SUCCESS)) 4437 WARN_ONCE(1, "%s: iSCSI function %d marked invisible\n", 4438 __func__, ha->func_num); 4439 4440 /* don't poll if reset is going on */ 4441 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || 4442 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 4443 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) { 4444 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE); 4445 4446 if (qla4_8xxx_check_temp(ha)) { 4447 if (is_qla8022(ha)) { 4448 ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n"); 4449 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, 4450 CRB_NIU_XG_PAUSE_CTL_P0 | 4451 CRB_NIU_XG_PAUSE_CTL_P1); 4452 } 4453 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags); 4454 qla4xxx_wake_dpc(ha); 4455 } else if (dev_state == QLA8XXX_DEV_NEED_RESET && 4456 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 4457 4458 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n", 4459 __func__); 4460 4461 if (is_qla8032(ha) || is_qla8042(ha)) { 4462 idc_ctrl = qla4_83xx_rd_reg(ha, 4463 QLA83XX_IDC_DRV_CTRL); 4464 if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) { 4465 ql4_printk(KERN_INFO, ha, "%s: Graceful reset bit is not set\n", 4466 __func__); 4467 qla4xxx_mailbox_premature_completion( 4468 ha); 4469 } 4470 } 4471 4472 if ((is_qla8032(ha) || is_qla8042(ha)) || 4473 (is_qla8022(ha) && !ql4xdontresethba)) { 4474 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4475 qla4xxx_wake_dpc(ha); 4476 } 4477 } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT && 4478 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { 4479 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n", 4480 __func__); 4481 set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags); 4482 qla4xxx_wake_dpc(ha); 4483 } else { 4484 /* Check firmware health */ 4485 if (qla4_8xxx_check_fw_alive(ha)) 4486 qla4_8xxx_process_fw_error(ha); 4487 } 4488 } 4489 } 4490 4491 static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) 4492 { 4493 struct iscsi_session *sess; 4494 struct ddb_entry *ddb_entry; 4495 struct scsi_qla_host *ha; 4496 4497 sess = cls_sess->dd_data; 4498 ddb_entry = sess->dd_data; 4499 ha = ddb_entry->ha; 4500 4501 if (!(ddb_entry->ddb_type == FLASH_DDB)) 4502 return; 4503 4504 if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) && 4505 !iscsi_is_session_online(cls_sess)) { 4506 if (atomic_read(&ddb_entry->retry_relogin_timer) != 4507 INVALID_ENTRY) { 4508 if (atomic_read(&ddb_entry->retry_relogin_timer) == 4509 0) { 4510 atomic_set(&ddb_entry->retry_relogin_timer, 4511 INVALID_ENTRY); 4512 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); 4513 set_bit(DF_RELOGIN, &ddb_entry->flags); 4514 DEBUG2(ql4_printk(KERN_INFO, ha, 4515 "%s: index [%d] login device\n", 4516 __func__, ddb_entry->fw_ddb_index)); 4517 } else 4518 atomic_dec(&ddb_entry->retry_relogin_timer); 4519 } 4520 } 4521 4522 /* Wait for relogin to timeout */ 4523 if (atomic_read(&ddb_entry->relogin_timer) && 4524 (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) { 4525 /* 4526 * If the relogin times out and the device is 4527 * still NOT ONLINE then try and relogin again. 4528 */ 4529 if (!iscsi_is_session_online(cls_sess)) { 4530 /* Reset retry relogin timer */ 4531 atomic_inc(&ddb_entry->relogin_retry_count); 4532 DEBUG2(ql4_printk(KERN_INFO, ha, 4533 "%s: index[%d] relogin timed out-retrying" 4534 " relogin (%d), retry (%d)\n", __func__, 4535 ddb_entry->fw_ddb_index, 4536 atomic_read(&ddb_entry->relogin_retry_count), 4537 ddb_entry->default_time2wait + 4)); 4538 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); 4539 atomic_set(&ddb_entry->retry_relogin_timer, 4540 ddb_entry->default_time2wait + 4); 4541 } 4542 } 4543 } 4544 4545 /** 4546 * qla4xxx_timer - checks every second for work to do. 4547 * @t: Context to obtain pointer to host adapter structure. 4548 **/ 4549 static void qla4xxx_timer(struct timer_list *t) 4550 { 4551 struct scsi_qla_host *ha = from_timer(ha, t, timer); 4552 int start_dpc = 0; 4553 uint16_t w; 4554 4555 iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb); 4556 4557 /* If we are in the middle of AER/EEH processing 4558 * skip any processing and reschedule the timer 4559 */ 4560 if (test_bit(AF_EEH_BUSY, &ha->flags)) { 4561 mod_timer(&ha->timer, jiffies + HZ); 4562 return; 4563 } 4564 4565 /* Hardware read to trigger an EEH error during mailbox waits. */ 4566 if (!pci_channel_offline(ha->pdev)) 4567 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 4568 4569 if (is_qla80XX(ha)) 4570 qla4_8xxx_watchdog(ha); 4571 4572 if (is_qla40XX(ha)) { 4573 /* Check for heartbeat interval. */ 4574 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE && 4575 ha->heartbeat_interval != 0) { 4576 ha->seconds_since_last_heartbeat++; 4577 if (ha->seconds_since_last_heartbeat > 4578 ha->heartbeat_interval + 2) 4579 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4580 } 4581 } 4582 4583 /* Process any deferred work. */ 4584 if (!list_empty(&ha->work_list)) 4585 start_dpc++; 4586 4587 /* Wakeup the dpc routine for this adapter, if needed. */ 4588 if (start_dpc || 4589 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 4590 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) || 4591 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) || 4592 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) || 4593 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 4594 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) || 4595 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) || 4596 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) || 4597 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || 4598 test_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags) || 4599 test_bit(DPC_AEN, &ha->dpc_flags)) { 4600 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine" 4601 " - dpc flags = 0x%lx\n", 4602 ha->host_no, __func__, ha->dpc_flags)); 4603 qla4xxx_wake_dpc(ha); 4604 } 4605 4606 /* Reschedule timer thread to call us back in one second */ 4607 mod_timer(&ha->timer, jiffies + HZ); 4608 4609 DEBUG2(ha->seconds_since_last_intr++); 4610 } 4611 4612 /** 4613 * qla4xxx_cmd_wait - waits for all outstanding commands to complete 4614 * @ha: Pointer to host adapter structure. 4615 * 4616 * This routine stalls the driver until all outstanding commands are returned. 4617 * Caller must release the Hardware Lock prior to calling this routine. 4618 **/ 4619 static int qla4xxx_cmd_wait(struct scsi_qla_host *ha) 4620 { 4621 uint32_t index = 0; 4622 unsigned long flags; 4623 struct scsi_cmnd *cmd; 4624 unsigned long wtime; 4625 uint32_t wtmo; 4626 4627 if (is_qla40XX(ha)) 4628 wtmo = WAIT_CMD_TOV; 4629 else 4630 wtmo = ha->nx_reset_timeout / 2; 4631 4632 wtime = jiffies + (wtmo * HZ); 4633 4634 DEBUG2(ql4_printk(KERN_INFO, ha, 4635 "Wait up to %u seconds for cmds to complete\n", 4636 wtmo)); 4637 4638 while (!time_after_eq(jiffies, wtime)) { 4639 spin_lock_irqsave(&ha->hardware_lock, flags); 4640 /* Find a command that hasn't completed. */ 4641 for (index = 0; index < ha->host->can_queue; index++) { 4642 cmd = scsi_host_find_tag(ha->host, index); 4643 /* 4644 * We cannot just check if the index is valid, 4645 * becase if we are run from the scsi eh, then 4646 * the scsi/block layer is going to prevent 4647 * the tag from being released. 4648 */ 4649 if (cmd != NULL && CMD_SP(cmd)) 4650 break; 4651 } 4652 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4653 4654 /* If No Commands are pending, wait is complete */ 4655 if (index == ha->host->can_queue) 4656 return QLA_SUCCESS; 4657 4658 msleep(1000); 4659 } 4660 /* If we timed out on waiting for commands to come back 4661 * return ERROR. */ 4662 return QLA_ERROR; 4663 } 4664 4665 int qla4xxx_hw_reset(struct scsi_qla_host *ha) 4666 { 4667 uint32_t ctrl_status; 4668 unsigned long flags = 0; 4669 4670 DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__)); 4671 4672 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS) 4673 return QLA_ERROR; 4674 4675 spin_lock_irqsave(&ha->hardware_lock, flags); 4676 4677 /* 4678 * If the SCSI Reset Interrupt bit is set, clear it. 4679 * Otherwise, the Soft Reset won't work. 4680 */ 4681 ctrl_status = readw(&ha->reg->ctrl_status); 4682 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) 4683 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status); 4684 4685 /* Issue Soft Reset */ 4686 writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status); 4687 readl(&ha->reg->ctrl_status); 4688 4689 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4690 return QLA_SUCCESS; 4691 } 4692 4693 /** 4694 * qla4xxx_soft_reset - performs soft reset. 4695 * @ha: Pointer to host adapter structure. 4696 **/ 4697 int qla4xxx_soft_reset(struct scsi_qla_host *ha) 4698 { 4699 uint32_t max_wait_time; 4700 unsigned long flags = 0; 4701 int status; 4702 uint32_t ctrl_status; 4703 4704 status = qla4xxx_hw_reset(ha); 4705 if (status != QLA_SUCCESS) 4706 return status; 4707 4708 status = QLA_ERROR; 4709 /* Wait until the Network Reset Intr bit is cleared */ 4710 max_wait_time = RESET_INTR_TOV; 4711 do { 4712 spin_lock_irqsave(&ha->hardware_lock, flags); 4713 ctrl_status = readw(&ha->reg->ctrl_status); 4714 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4715 4716 if ((ctrl_status & CSR_NET_RESET_INTR) == 0) 4717 break; 4718 4719 msleep(1000); 4720 } while ((--max_wait_time)); 4721 4722 if ((ctrl_status & CSR_NET_RESET_INTR) != 0) { 4723 DEBUG2(printk(KERN_WARNING 4724 "scsi%ld: Network Reset Intr not cleared by " 4725 "Network function, clearing it now!\n", 4726 ha->host_no)); 4727 spin_lock_irqsave(&ha->hardware_lock, flags); 4728 writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status); 4729 readl(&ha->reg->ctrl_status); 4730 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4731 } 4732 4733 /* Wait until the firmware tells us the Soft Reset is done */ 4734 max_wait_time = SOFT_RESET_TOV; 4735 do { 4736 spin_lock_irqsave(&ha->hardware_lock, flags); 4737 ctrl_status = readw(&ha->reg->ctrl_status); 4738 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4739 4740 if ((ctrl_status & CSR_SOFT_RESET) == 0) { 4741 status = QLA_SUCCESS; 4742 break; 4743 } 4744 4745 msleep(1000); 4746 } while ((--max_wait_time)); 4747 4748 /* 4749 * Also, make sure that the SCSI Reset Interrupt bit has been cleared 4750 * after the soft reset has taken place. 4751 */ 4752 spin_lock_irqsave(&ha->hardware_lock, flags); 4753 ctrl_status = readw(&ha->reg->ctrl_status); 4754 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) { 4755 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status); 4756 readl(&ha->reg->ctrl_status); 4757 } 4758 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4759 4760 /* If soft reset fails then most probably the bios on other 4761 * function is also enabled. 4762 * Since the initialization is sequential the other fn 4763 * wont be able to acknowledge the soft reset. 4764 * Issue a force soft reset to workaround this scenario. 4765 */ 4766 if (max_wait_time == 0) { 4767 /* Issue Force Soft Reset */ 4768 spin_lock_irqsave(&ha->hardware_lock, flags); 4769 writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status); 4770 readl(&ha->reg->ctrl_status); 4771 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4772 /* Wait until the firmware tells us the Soft Reset is done */ 4773 max_wait_time = SOFT_RESET_TOV; 4774 do { 4775 spin_lock_irqsave(&ha->hardware_lock, flags); 4776 ctrl_status = readw(&ha->reg->ctrl_status); 4777 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4778 4779 if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) { 4780 status = QLA_SUCCESS; 4781 break; 4782 } 4783 4784 msleep(1000); 4785 } while ((--max_wait_time)); 4786 } 4787 4788 return status; 4789 } 4790 4791 /** 4792 * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S. 4793 * @ha: Pointer to host adapter structure. 4794 * @res: returned scsi status 4795 * 4796 * This routine is called just prior to a HARD RESET to return all 4797 * outstanding commands back to the Operating System. 4798 * Caller should make sure that the following locks are released 4799 * before this calling routine: Hardware lock, and io_request_lock. 4800 **/ 4801 static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res) 4802 { 4803 struct srb *srb; 4804 int i; 4805 unsigned long flags; 4806 4807 spin_lock_irqsave(&ha->hardware_lock, flags); 4808 for (i = 0; i < ha->host->can_queue; i++) { 4809 srb = qla4xxx_del_from_active_array(ha, i); 4810 if (srb != NULL) { 4811 srb->cmd->result = res; 4812 kref_put(&srb->srb_ref, qla4xxx_srb_compl); 4813 } 4814 } 4815 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4816 } 4817 4818 void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha) 4819 { 4820 clear_bit(AF_ONLINE, &ha->flags); 4821 4822 /* Disable the board */ 4823 ql4_printk(KERN_INFO, ha, "Disabling the board\n"); 4824 4825 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); 4826 qla4xxx_mark_all_devices_missing(ha); 4827 clear_bit(AF_INIT_DONE, &ha->flags); 4828 } 4829 4830 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session) 4831 { 4832 struct iscsi_session *sess; 4833 struct ddb_entry *ddb_entry; 4834 4835 sess = cls_session->dd_data; 4836 ddb_entry = sess->dd_data; 4837 ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED; 4838 4839 if (ddb_entry->ddb_type == FLASH_DDB) 4840 iscsi_block_session(ddb_entry->sess); 4841 else 4842 iscsi_session_failure(cls_session->dd_data, 4843 ISCSI_ERR_CONN_FAILED); 4844 } 4845 4846 /** 4847 * qla4xxx_recover_adapter - recovers adapter after a fatal error 4848 * @ha: Pointer to host adapter structure. 4849 **/ 4850 static int qla4xxx_recover_adapter(struct scsi_qla_host *ha) 4851 { 4852 int status = QLA_ERROR; 4853 uint8_t reset_chip = 0; 4854 uint32_t dev_state; 4855 unsigned long wait; 4856 4857 /* Stall incoming I/O until we are done */ 4858 scsi_block_requests(ha->host); 4859 clear_bit(AF_ONLINE, &ha->flags); 4860 clear_bit(AF_LINK_UP, &ha->flags); 4861 4862 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__)); 4863 4864 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 4865 4866 if ((is_qla8032(ha) || is_qla8042(ha)) && 4867 !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) { 4868 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", 4869 __func__); 4870 /* disable pause frame for ISP83xx */ 4871 qla4_83xx_disable_pause(ha); 4872 } 4873 4874 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session); 4875 4876 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) 4877 reset_chip = 1; 4878 4879 /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific) 4880 * do not reset adapter, jump to initialize_adapter */ 4881 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 4882 status = QLA_SUCCESS; 4883 goto recover_ha_init_adapter; 4884 } 4885 4886 /* For the ISP-8xxx adapter, issue a stop_firmware if invoked 4887 * from eh_host_reset or ioctl module */ 4888 if (is_qla80XX(ha) && !reset_chip && 4889 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) { 4890 4891 DEBUG2(ql4_printk(KERN_INFO, ha, 4892 "scsi%ld: %s - Performing stop_firmware...\n", 4893 ha->host_no, __func__)); 4894 status = ha->isp_ops->reset_firmware(ha); 4895 if (status == QLA_SUCCESS) { 4896 ha->isp_ops->disable_intrs(ha); 4897 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 4898 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 4899 } else { 4900 /* If the stop_firmware fails then 4901 * reset the entire chip */ 4902 reset_chip = 1; 4903 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 4904 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4905 } 4906 } 4907 4908 /* Issue full chip reset if recovering from a catastrophic error, 4909 * or if stop_firmware fails for ISP-8xxx. 4910 * This is the default case for ISP-4xxx */ 4911 if (is_qla40XX(ha) || reset_chip) { 4912 if (is_qla40XX(ha)) 4913 goto chip_reset; 4914 4915 /* Check if 8XXX firmware is alive or not 4916 * We may have arrived here from NEED_RESET 4917 * detection only */ 4918 if (test_bit(AF_FW_RECOVERY, &ha->flags)) 4919 goto chip_reset; 4920 4921 wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ); 4922 while (time_before(jiffies, wait)) { 4923 if (qla4_8xxx_check_fw_alive(ha)) { 4924 qla4xxx_mailbox_premature_completion(ha); 4925 break; 4926 } 4927 4928 set_current_state(TASK_UNINTERRUPTIBLE); 4929 schedule_timeout(HZ); 4930 } 4931 chip_reset: 4932 if (!test_bit(AF_FW_RECOVERY, &ha->flags)) 4933 qla4xxx_cmd_wait(ha); 4934 4935 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 4936 DEBUG2(ql4_printk(KERN_INFO, ha, 4937 "scsi%ld: %s - Performing chip reset..\n", 4938 ha->host_no, __func__)); 4939 status = ha->isp_ops->reset_chip(ha); 4940 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 4941 } 4942 4943 /* Flush any pending ddb changed AENs */ 4944 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 4945 4946 recover_ha_init_adapter: 4947 /* Upon successful firmware/chip reset, re-initialize the adapter */ 4948 if (status == QLA_SUCCESS) { 4949 /* For ISP-4xxx, force function 1 to always initialize 4950 * before function 3 to prevent both funcions from 4951 * stepping on top of the other */ 4952 if (is_qla40XX(ha) && (ha->mac_index == 3)) 4953 ssleep(6); 4954 4955 /* NOTE: AF_ONLINE flag set upon successful completion of 4956 * qla4xxx_initialize_adapter */ 4957 status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); 4958 if (is_qla80XX(ha) && (status == QLA_ERROR)) { 4959 status = qla4_8xxx_check_init_adapter_retry(ha); 4960 if (status == QLA_ERROR) { 4961 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Don't retry recover adapter\n", 4962 ha->host_no, __func__); 4963 qla4xxx_dead_adapter_cleanup(ha); 4964 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 4965 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 4966 clear_bit(DPC_RESET_HA_FW_CONTEXT, 4967 &ha->dpc_flags); 4968 goto exit_recover; 4969 } 4970 } 4971 } 4972 4973 /* Retry failed adapter initialization, if necessary 4974 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific) 4975 * case to prevent ping-pong resets between functions */ 4976 if (!test_bit(AF_ONLINE, &ha->flags) && 4977 !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 4978 /* Adapter initialization failed, see if we can retry 4979 * resetting the ha. 4980 * Since we don't want to block the DPC for too long 4981 * with multiple resets in the same thread, 4982 * utilize DPC to retry */ 4983 if (is_qla80XX(ha)) { 4984 ha->isp_ops->idc_lock(ha); 4985 dev_state = qla4_8xxx_rd_direct(ha, 4986 QLA8XXX_CRB_DEV_STATE); 4987 ha->isp_ops->idc_unlock(ha); 4988 if (dev_state == QLA8XXX_DEV_FAILED) { 4989 ql4_printk(KERN_INFO, ha, "%s: don't retry " 4990 "recover adapter. H/W is in Failed " 4991 "state\n", __func__); 4992 qla4xxx_dead_adapter_cleanup(ha); 4993 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 4994 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 4995 clear_bit(DPC_RESET_HA_FW_CONTEXT, 4996 &ha->dpc_flags); 4997 status = QLA_ERROR; 4998 4999 goto exit_recover; 5000 } 5001 } 5002 5003 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) { 5004 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES; 5005 DEBUG2(printk("scsi%ld: recover adapter - retrying " 5006 "(%d) more times\n", ha->host_no, 5007 ha->retry_reset_ha_cnt)); 5008 set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 5009 status = QLA_ERROR; 5010 } else { 5011 if (ha->retry_reset_ha_cnt > 0) { 5012 /* Schedule another Reset HA--DPC will retry */ 5013 ha->retry_reset_ha_cnt--; 5014 DEBUG2(printk("scsi%ld: recover adapter - " 5015 "retry remaining %d\n", 5016 ha->host_no, 5017 ha->retry_reset_ha_cnt)); 5018 status = QLA_ERROR; 5019 } 5020 5021 if (ha->retry_reset_ha_cnt == 0) { 5022 /* Recover adapter retries have been exhausted. 5023 * Adapter DEAD */ 5024 DEBUG2(printk("scsi%ld: recover adapter " 5025 "failed - board disabled\n", 5026 ha->host_no)); 5027 qla4xxx_dead_adapter_cleanup(ha); 5028 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 5029 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 5030 clear_bit(DPC_RESET_HA_FW_CONTEXT, 5031 &ha->dpc_flags); 5032 status = QLA_ERROR; 5033 } 5034 } 5035 } else { 5036 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 5037 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 5038 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 5039 } 5040 5041 exit_recover: 5042 ha->adapter_error_count++; 5043 5044 if (test_bit(AF_ONLINE, &ha->flags)) 5045 ha->isp_ops->enable_intrs(ha); 5046 5047 scsi_unblock_requests(ha->host); 5048 5049 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 5050 DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no, 5051 status == QLA_ERROR ? "FAILED" : "SUCCEEDED")); 5052 5053 return status; 5054 } 5055 5056 static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session) 5057 { 5058 struct iscsi_session *sess; 5059 struct ddb_entry *ddb_entry; 5060 struct scsi_qla_host *ha; 5061 5062 sess = cls_session->dd_data; 5063 ddb_entry = sess->dd_data; 5064 ha = ddb_entry->ha; 5065 if (!iscsi_is_session_online(cls_session)) { 5066 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { 5067 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5068 " unblock session\n", ha->host_no, __func__, 5069 ddb_entry->fw_ddb_index); 5070 iscsi_unblock_session(ddb_entry->sess); 5071 } else { 5072 /* Trigger relogin */ 5073 if (ddb_entry->ddb_type == FLASH_DDB) { 5074 if (!(test_bit(DF_RELOGIN, &ddb_entry->flags) || 5075 test_bit(DF_DISABLE_RELOGIN, 5076 &ddb_entry->flags))) 5077 qla4xxx_arm_relogin_timer(ddb_entry); 5078 } else 5079 iscsi_session_failure(cls_session->dd_data, 5080 ISCSI_ERR_CONN_FAILED); 5081 } 5082 } 5083 } 5084 5085 int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session) 5086 { 5087 struct iscsi_session *sess; 5088 struct ddb_entry *ddb_entry; 5089 struct scsi_qla_host *ha; 5090 5091 sess = cls_session->dd_data; 5092 ddb_entry = sess->dd_data; 5093 ha = ddb_entry->ha; 5094 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5095 " unblock session\n", ha->host_no, __func__, 5096 ddb_entry->fw_ddb_index); 5097 5098 iscsi_unblock_session(ddb_entry->sess); 5099 5100 /* Start scan target */ 5101 if (test_bit(AF_ONLINE, &ha->flags)) { 5102 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5103 " start scan\n", ha->host_no, __func__, 5104 ddb_entry->fw_ddb_index); 5105 scsi_queue_work(ha->host, &ddb_entry->sess->scan_work); 5106 } 5107 return QLA_SUCCESS; 5108 } 5109 5110 int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session) 5111 { 5112 struct iscsi_session *sess; 5113 struct ddb_entry *ddb_entry; 5114 struct scsi_qla_host *ha; 5115 int status = QLA_SUCCESS; 5116 5117 sess = cls_session->dd_data; 5118 ddb_entry = sess->dd_data; 5119 ha = ddb_entry->ha; 5120 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5121 " unblock user space session\n", ha->host_no, __func__, 5122 ddb_entry->fw_ddb_index); 5123 5124 if (!iscsi_is_session_online(cls_session)) { 5125 iscsi_conn_start(ddb_entry->conn); 5126 iscsi_conn_login_event(ddb_entry->conn, 5127 ISCSI_CONN_STATE_LOGGED_IN); 5128 } else { 5129 ql4_printk(KERN_INFO, ha, 5130 "scsi%ld: %s: ddb[%d] session [%d] already logged in\n", 5131 ha->host_no, __func__, ddb_entry->fw_ddb_index, 5132 cls_session->sid); 5133 status = QLA_ERROR; 5134 } 5135 5136 return status; 5137 } 5138 5139 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha) 5140 { 5141 iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices); 5142 } 5143 5144 static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) 5145 { 5146 uint16_t relogin_timer; 5147 struct iscsi_session *sess; 5148 struct ddb_entry *ddb_entry; 5149 struct scsi_qla_host *ha; 5150 5151 sess = cls_sess->dd_data; 5152 ddb_entry = sess->dd_data; 5153 ha = ddb_entry->ha; 5154 5155 relogin_timer = max(ddb_entry->default_relogin_timeout, 5156 (uint16_t)RELOGIN_TOV); 5157 atomic_set(&ddb_entry->relogin_timer, relogin_timer); 5158 5159 DEBUG2(ql4_printk(KERN_INFO, ha, 5160 "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no, 5161 ddb_entry->fw_ddb_index, relogin_timer)); 5162 5163 qla4xxx_login_flash_ddb(cls_sess); 5164 } 5165 5166 static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess) 5167 { 5168 struct iscsi_session *sess; 5169 struct ddb_entry *ddb_entry; 5170 struct scsi_qla_host *ha; 5171 5172 sess = cls_sess->dd_data; 5173 ddb_entry = sess->dd_data; 5174 ha = ddb_entry->ha; 5175 5176 if (!(ddb_entry->ddb_type == FLASH_DDB)) 5177 return; 5178 5179 if (test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags)) 5180 return; 5181 5182 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) && 5183 !iscsi_is_session_online(cls_sess)) { 5184 DEBUG2(ql4_printk(KERN_INFO, ha, 5185 "relogin issued\n")); 5186 qla4xxx_relogin_flash_ddb(cls_sess); 5187 } 5188 } 5189 5190 void qla4xxx_wake_dpc(struct scsi_qla_host *ha) 5191 { 5192 if (ha->dpc_thread) 5193 queue_work(ha->dpc_thread, &ha->dpc_work); 5194 } 5195 5196 static struct qla4_work_evt * 5197 qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size, 5198 enum qla4_work_type type) 5199 { 5200 struct qla4_work_evt *e; 5201 uint32_t size = sizeof(struct qla4_work_evt) + data_size; 5202 5203 e = kzalloc(size, GFP_ATOMIC); 5204 if (!e) 5205 return NULL; 5206 5207 INIT_LIST_HEAD(&e->list); 5208 e->type = type; 5209 return e; 5210 } 5211 5212 static void qla4xxx_post_work(struct scsi_qla_host *ha, 5213 struct qla4_work_evt *e) 5214 { 5215 unsigned long flags; 5216 5217 spin_lock_irqsave(&ha->work_lock, flags); 5218 list_add_tail(&e->list, &ha->work_list); 5219 spin_unlock_irqrestore(&ha->work_lock, flags); 5220 qla4xxx_wake_dpc(ha); 5221 } 5222 5223 int qla4xxx_post_aen_work(struct scsi_qla_host *ha, 5224 enum iscsi_host_event_code aen_code, 5225 uint32_t data_size, uint8_t *data) 5226 { 5227 struct qla4_work_evt *e; 5228 5229 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN); 5230 if (!e) 5231 return QLA_ERROR; 5232 5233 e->u.aen.code = aen_code; 5234 e->u.aen.data_size = data_size; 5235 memcpy(e->u.aen.data, data, data_size); 5236 5237 qla4xxx_post_work(ha, e); 5238 5239 return QLA_SUCCESS; 5240 } 5241 5242 int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha, 5243 uint32_t status, uint32_t pid, 5244 uint32_t data_size, uint8_t *data) 5245 { 5246 struct qla4_work_evt *e; 5247 5248 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS); 5249 if (!e) 5250 return QLA_ERROR; 5251 5252 e->u.ping.status = status; 5253 e->u.ping.pid = pid; 5254 e->u.ping.data_size = data_size; 5255 memcpy(e->u.ping.data, data, data_size); 5256 5257 qla4xxx_post_work(ha, e); 5258 5259 return QLA_SUCCESS; 5260 } 5261 5262 static void qla4xxx_do_work(struct scsi_qla_host *ha) 5263 { 5264 struct qla4_work_evt *e, *tmp; 5265 unsigned long flags; 5266 LIST_HEAD(work); 5267 5268 spin_lock_irqsave(&ha->work_lock, flags); 5269 list_splice_init(&ha->work_list, &work); 5270 spin_unlock_irqrestore(&ha->work_lock, flags); 5271 5272 list_for_each_entry_safe(e, tmp, &work, list) { 5273 list_del_init(&e->list); 5274 5275 switch (e->type) { 5276 case QLA4_EVENT_AEN: 5277 iscsi_post_host_event(ha->host_no, 5278 &qla4xxx_iscsi_transport, 5279 e->u.aen.code, 5280 e->u.aen.data_size, 5281 e->u.aen.data); 5282 break; 5283 case QLA4_EVENT_PING_STATUS: 5284 iscsi_ping_comp_event(ha->host_no, 5285 &qla4xxx_iscsi_transport, 5286 e->u.ping.status, 5287 e->u.ping.pid, 5288 e->u.ping.data_size, 5289 e->u.ping.data); 5290 break; 5291 default: 5292 ql4_printk(KERN_WARNING, ha, "event type: 0x%x not " 5293 "supported", e->type); 5294 } 5295 kfree(e); 5296 } 5297 } 5298 5299 /** 5300 * qla4xxx_do_dpc - dpc routine 5301 * @work: Context to obtain pointer to host adapter structure. 5302 * 5303 * This routine is a task that is schedule by the interrupt handler 5304 * to perform the background processing for interrupts. We put it 5305 * on a task queue that is consumed whenever the scheduler runs; that's 5306 * so you can do anything (i.e. put the process to sleep etc). In fact, 5307 * the mid-level tries to sleep when it reaches the driver threshold 5308 * "host->can_queue". This can cause a panic if we were in our interrupt code. 5309 **/ 5310 static void qla4xxx_do_dpc(struct work_struct *work) 5311 { 5312 struct scsi_qla_host *ha = 5313 container_of(work, struct scsi_qla_host, dpc_work); 5314 int status = QLA_ERROR; 5315 5316 DEBUG2(ql4_printk(KERN_INFO, ha, 5317 "scsi%ld: %s: DPC handler waking up. flags = 0x%08lx, dpc_flags = 0x%08lx\n", 5318 ha->host_no, __func__, ha->flags, ha->dpc_flags)); 5319 5320 /* Initialization not yet finished. Don't do anything yet. */ 5321 if (!test_bit(AF_INIT_DONE, &ha->flags)) 5322 return; 5323 5324 if (test_bit(AF_EEH_BUSY, &ha->flags)) { 5325 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n", 5326 ha->host_no, __func__, ha->flags)); 5327 return; 5328 } 5329 5330 /* post events to application */ 5331 qla4xxx_do_work(ha); 5332 5333 if (is_qla80XX(ha)) { 5334 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) { 5335 if (is_qla8032(ha) || is_qla8042(ha)) { 5336 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", 5337 __func__); 5338 /* disable pause frame for ISP83xx */ 5339 qla4_83xx_disable_pause(ha); 5340 } 5341 5342 ha->isp_ops->idc_lock(ha); 5343 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 5344 QLA8XXX_DEV_FAILED); 5345 ha->isp_ops->idc_unlock(ha); 5346 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n"); 5347 qla4_8xxx_device_state_handler(ha); 5348 } 5349 5350 if (test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) { 5351 if (is_qla8042(ha)) { 5352 if (ha->idc_info.info2 & 5353 ENABLE_INTERNAL_LOOPBACK) { 5354 ql4_printk(KERN_INFO, ha, "%s: Disabling ACB\n", 5355 __func__); 5356 status = qla4_84xx_config_acb(ha, 5357 ACB_CONFIG_DISABLE); 5358 if (status != QLA_SUCCESS) { 5359 ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n", 5360 __func__); 5361 } 5362 } 5363 } 5364 qla4_83xx_post_idc_ack(ha); 5365 clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags); 5366 } 5367 5368 if (is_qla8042(ha) && 5369 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) { 5370 ql4_printk(KERN_INFO, ha, "%s: Restoring ACB\n", 5371 __func__); 5372 if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) != 5373 QLA_SUCCESS) { 5374 ql4_printk(KERN_INFO, ha, "%s: ACB config failed ", 5375 __func__); 5376 } 5377 clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags); 5378 } 5379 5380 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { 5381 qla4_8xxx_need_qsnt_handler(ha); 5382 } 5383 } 5384 5385 if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) && 5386 (test_bit(DPC_RESET_HA, &ha->dpc_flags) || 5387 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 5388 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) { 5389 if ((is_qla8022(ha) && ql4xdontresethba) || 5390 ((is_qla8032(ha) || is_qla8042(ha)) && 5391 qla4_83xx_idc_dontreset(ha))) { 5392 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", 5393 ha->host_no, __func__)); 5394 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 5395 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); 5396 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 5397 goto dpc_post_reset_ha; 5398 } 5399 if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) || 5400 test_bit(DPC_RESET_HA, &ha->dpc_flags)) 5401 qla4xxx_recover_adapter(ha); 5402 5403 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 5404 uint8_t wait_time = RESET_INTR_TOV; 5405 5406 while ((readw(&ha->reg->ctrl_status) & 5407 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) { 5408 if (--wait_time == 0) 5409 break; 5410 msleep(1000); 5411 } 5412 if (wait_time == 0) 5413 DEBUG2(printk("scsi%ld: %s: SR|FSR " 5414 "bit not cleared-- resetting\n", 5415 ha->host_no, __func__)); 5416 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 5417 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) { 5418 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 5419 status = qla4xxx_recover_adapter(ha); 5420 } 5421 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); 5422 if (status == QLA_SUCCESS) 5423 ha->isp_ops->enable_intrs(ha); 5424 } 5425 } 5426 5427 dpc_post_reset_ha: 5428 /* ---- process AEN? --- */ 5429 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) 5430 qla4xxx_process_aen(ha, PROCESS_ALL_AENS); 5431 5432 /* ---- Get DHCP IP Address? --- */ 5433 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags)) 5434 qla4xxx_get_dhcp_ip_address(ha); 5435 5436 /* ---- relogin device? --- */ 5437 if (adapter_up(ha) && 5438 test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) { 5439 iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin); 5440 } 5441 5442 /* ---- link change? --- */ 5443 if (!test_bit(AF_LOOPBACK, &ha->flags) && 5444 test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) { 5445 if (!test_bit(AF_LINK_UP, &ha->flags)) { 5446 /* ---- link down? --- */ 5447 qla4xxx_mark_all_devices_missing(ha); 5448 } else { 5449 /* ---- link up? --- * 5450 * F/W will auto login to all devices ONLY ONCE after 5451 * link up during driver initialization and runtime 5452 * fatal error recovery. Therefore, the driver must 5453 * manually relogin to devices when recovering from 5454 * connection failures, logouts, expired KATO, etc. */ 5455 if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) { 5456 qla4xxx_build_ddb_list(ha, ha->is_reset); 5457 iscsi_host_for_each_session(ha->host, 5458 qla4xxx_login_flash_ddb); 5459 } else 5460 qla4xxx_relogin_all_devices(ha); 5461 } 5462 } 5463 if (test_and_clear_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags)) { 5464 if (qla4xxx_sysfs_ddb_export(ha)) 5465 ql4_printk(KERN_ERR, ha, "%s: Error exporting ddb to sysfs\n", 5466 __func__); 5467 } 5468 } 5469 5470 /** 5471 * qla4xxx_free_adapter - release the adapter 5472 * @ha: pointer to adapter structure 5473 **/ 5474 static void qla4xxx_free_adapter(struct scsi_qla_host *ha) 5475 { 5476 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); 5477 5478 /* Turn-off interrupts on the card. */ 5479 ha->isp_ops->disable_intrs(ha); 5480 5481 if (is_qla40XX(ha)) { 5482 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR), 5483 &ha->reg->ctrl_status); 5484 readl(&ha->reg->ctrl_status); 5485 } else if (is_qla8022(ha)) { 5486 writel(0, &ha->qla4_82xx_reg->host_int); 5487 readl(&ha->qla4_82xx_reg->host_int); 5488 } else if (is_qla8032(ha) || is_qla8042(ha)) { 5489 writel(0, &ha->qla4_83xx_reg->risc_intr); 5490 readl(&ha->qla4_83xx_reg->risc_intr); 5491 } 5492 5493 /* Remove timer thread, if present */ 5494 if (ha->timer_active) 5495 qla4xxx_stop_timer(ha); 5496 5497 /* Kill the kernel thread for this host */ 5498 if (ha->dpc_thread) 5499 destroy_workqueue(ha->dpc_thread); 5500 5501 /* Kill the kernel thread for this host */ 5502 if (ha->task_wq) 5503 destroy_workqueue(ha->task_wq); 5504 5505 /* Put firmware in known state */ 5506 ha->isp_ops->reset_firmware(ha); 5507 5508 if (is_qla80XX(ha)) { 5509 ha->isp_ops->idc_lock(ha); 5510 qla4_8xxx_clear_drv_active(ha); 5511 ha->isp_ops->idc_unlock(ha); 5512 } 5513 5514 /* Detach interrupts */ 5515 qla4xxx_free_irqs(ha); 5516 5517 /* free extra memory */ 5518 qla4xxx_mem_free(ha); 5519 } 5520 5521 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha) 5522 { 5523 int status = 0; 5524 unsigned long mem_base, mem_len; 5525 struct pci_dev *pdev = ha->pdev; 5526 5527 status = pci_request_regions(pdev, DRIVER_NAME); 5528 if (status) { 5529 printk(KERN_WARNING 5530 "scsi(%ld) Failed to reserve PIO regions (%s) " 5531 "status=%d\n", ha->host_no, pci_name(pdev), status); 5532 goto iospace_error_exit; 5533 } 5534 5535 DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n", 5536 __func__, pdev->revision)); 5537 ha->revision_id = pdev->revision; 5538 5539 /* remap phys address */ 5540 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ 5541 mem_len = pci_resource_len(pdev, 0); 5542 DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n", 5543 __func__, mem_base, mem_len)); 5544 5545 /* mapping of pcibase pointer */ 5546 ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len); 5547 if (!ha->nx_pcibase) { 5548 printk(KERN_ERR 5549 "cannot remap MMIO (%s), aborting\n", pci_name(pdev)); 5550 pci_release_regions(ha->pdev); 5551 goto iospace_error_exit; 5552 } 5553 5554 /* Mapping of IO base pointer, door bell read and write pointer */ 5555 5556 /* mapping of IO base pointer */ 5557 if (is_qla8022(ha)) { 5558 ha->qla4_82xx_reg = (struct device_reg_82xx __iomem *) 5559 ((uint8_t *)ha->nx_pcibase + 0xbc000 + 5560 (ha->pdev->devfn << 11)); 5561 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 : 5562 QLA82XX_CAM_RAM_DB2); 5563 } else if (is_qla8032(ha) || is_qla8042(ha)) { 5564 ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *) 5565 ((uint8_t *)ha->nx_pcibase); 5566 } 5567 5568 return 0; 5569 iospace_error_exit: 5570 return -ENOMEM; 5571 } 5572 5573 /*** 5574 * qla4xxx_iospace_config - maps registers 5575 * @ha: pointer to adapter structure 5576 * 5577 * This routines maps HBA's registers from the pci address space 5578 * into the kernel virtual address space for memory mapped i/o. 5579 **/ 5580 int qla4xxx_iospace_config(struct scsi_qla_host *ha) 5581 { 5582 unsigned long pio, pio_len, pio_flags; 5583 unsigned long mmio, mmio_len, mmio_flags; 5584 5585 pio = pci_resource_start(ha->pdev, 0); 5586 pio_len = pci_resource_len(ha->pdev, 0); 5587 pio_flags = pci_resource_flags(ha->pdev, 0); 5588 if (pio_flags & IORESOURCE_IO) { 5589 if (pio_len < MIN_IOBASE_LEN) { 5590 ql4_printk(KERN_WARNING, ha, 5591 "Invalid PCI I/O region size\n"); 5592 pio = 0; 5593 } 5594 } else { 5595 ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n"); 5596 pio = 0; 5597 } 5598 5599 /* Use MMIO operations for all accesses. */ 5600 mmio = pci_resource_start(ha->pdev, 1); 5601 mmio_len = pci_resource_len(ha->pdev, 1); 5602 mmio_flags = pci_resource_flags(ha->pdev, 1); 5603 5604 if (!(mmio_flags & IORESOURCE_MEM)) { 5605 ql4_printk(KERN_ERR, ha, 5606 "region #0 not an MMIO resource, aborting\n"); 5607 5608 goto iospace_error_exit; 5609 } 5610 5611 if (mmio_len < MIN_IOBASE_LEN) { 5612 ql4_printk(KERN_ERR, ha, 5613 "Invalid PCI mem region size, aborting\n"); 5614 goto iospace_error_exit; 5615 } 5616 5617 if (pci_request_regions(ha->pdev, DRIVER_NAME)) { 5618 ql4_printk(KERN_WARNING, ha, 5619 "Failed to reserve PIO/MMIO regions\n"); 5620 5621 goto iospace_error_exit; 5622 } 5623 5624 ha->pio_address = pio; 5625 ha->pio_length = pio_len; 5626 ha->reg = ioremap(mmio, MIN_IOBASE_LEN); 5627 if (!ha->reg) { 5628 ql4_printk(KERN_ERR, ha, 5629 "cannot remap MMIO, aborting\n"); 5630 5631 goto iospace_error_exit; 5632 } 5633 5634 return 0; 5635 5636 iospace_error_exit: 5637 return -ENOMEM; 5638 } 5639 5640 static struct isp_operations qla4xxx_isp_ops = { 5641 .iospace_config = qla4xxx_iospace_config, 5642 .pci_config = qla4xxx_pci_config, 5643 .disable_intrs = qla4xxx_disable_intrs, 5644 .enable_intrs = qla4xxx_enable_intrs, 5645 .start_firmware = qla4xxx_start_firmware, 5646 .intr_handler = qla4xxx_intr_handler, 5647 .interrupt_service_routine = qla4xxx_interrupt_service_routine, 5648 .reset_chip = qla4xxx_soft_reset, 5649 .reset_firmware = qla4xxx_hw_reset, 5650 .queue_iocb = qla4xxx_queue_iocb, 5651 .complete_iocb = qla4xxx_complete_iocb, 5652 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out, 5653 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in, 5654 .get_sys_info = qla4xxx_get_sys_info, 5655 .queue_mailbox_command = qla4xxx_queue_mbox_cmd, 5656 .process_mailbox_interrupt = qla4xxx_process_mbox_intr, 5657 }; 5658 5659 static struct isp_operations qla4_82xx_isp_ops = { 5660 .iospace_config = qla4_8xxx_iospace_config, 5661 .pci_config = qla4_8xxx_pci_config, 5662 .disable_intrs = qla4_82xx_disable_intrs, 5663 .enable_intrs = qla4_82xx_enable_intrs, 5664 .start_firmware = qla4_8xxx_load_risc, 5665 .restart_firmware = qla4_82xx_try_start_fw, 5666 .intr_handler = qla4_82xx_intr_handler, 5667 .interrupt_service_routine = qla4_82xx_interrupt_service_routine, 5668 .need_reset = qla4_8xxx_need_reset, 5669 .reset_chip = qla4_82xx_isp_reset, 5670 .reset_firmware = qla4_8xxx_stop_firmware, 5671 .queue_iocb = qla4_82xx_queue_iocb, 5672 .complete_iocb = qla4_82xx_complete_iocb, 5673 .rd_shdw_req_q_out = qla4_82xx_rd_shdw_req_q_out, 5674 .rd_shdw_rsp_q_in = qla4_82xx_rd_shdw_rsp_q_in, 5675 .get_sys_info = qla4_8xxx_get_sys_info, 5676 .rd_reg_direct = qla4_82xx_rd_32, 5677 .wr_reg_direct = qla4_82xx_wr_32, 5678 .rd_reg_indirect = qla4_82xx_md_rd_32, 5679 .wr_reg_indirect = qla4_82xx_md_wr_32, 5680 .idc_lock = qla4_82xx_idc_lock, 5681 .idc_unlock = qla4_82xx_idc_unlock, 5682 .rom_lock_recovery = qla4_82xx_rom_lock_recovery, 5683 .queue_mailbox_command = qla4_82xx_queue_mbox_cmd, 5684 .process_mailbox_interrupt = qla4_82xx_process_mbox_intr, 5685 }; 5686 5687 static struct isp_operations qla4_83xx_isp_ops = { 5688 .iospace_config = qla4_8xxx_iospace_config, 5689 .pci_config = qla4_8xxx_pci_config, 5690 .disable_intrs = qla4_83xx_disable_intrs, 5691 .enable_intrs = qla4_83xx_enable_intrs, 5692 .start_firmware = qla4_8xxx_load_risc, 5693 .restart_firmware = qla4_83xx_start_firmware, 5694 .intr_handler = qla4_83xx_intr_handler, 5695 .interrupt_service_routine = qla4_83xx_interrupt_service_routine, 5696 .need_reset = qla4_8xxx_need_reset, 5697 .reset_chip = qla4_83xx_isp_reset, 5698 .reset_firmware = qla4_8xxx_stop_firmware, 5699 .queue_iocb = qla4_83xx_queue_iocb, 5700 .complete_iocb = qla4_83xx_complete_iocb, 5701 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out, 5702 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in, 5703 .get_sys_info = qla4_8xxx_get_sys_info, 5704 .rd_reg_direct = qla4_83xx_rd_reg, 5705 .wr_reg_direct = qla4_83xx_wr_reg, 5706 .rd_reg_indirect = qla4_83xx_rd_reg_indirect, 5707 .wr_reg_indirect = qla4_83xx_wr_reg_indirect, 5708 .idc_lock = qla4_83xx_drv_lock, 5709 .idc_unlock = qla4_83xx_drv_unlock, 5710 .rom_lock_recovery = qla4_83xx_rom_lock_recovery, 5711 .queue_mailbox_command = qla4_83xx_queue_mbox_cmd, 5712 .process_mailbox_interrupt = qla4_83xx_process_mbox_intr, 5713 }; 5714 5715 uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha) 5716 { 5717 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out); 5718 } 5719 5720 uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha) 5721 { 5722 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out)); 5723 } 5724 5725 uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) 5726 { 5727 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in); 5728 } 5729 5730 uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) 5731 { 5732 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in)); 5733 } 5734 5735 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf) 5736 { 5737 struct scsi_qla_host *ha = data; 5738 char *str = buf; 5739 int rc; 5740 5741 switch (type) { 5742 case ISCSI_BOOT_ETH_FLAGS: 5743 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT); 5744 break; 5745 case ISCSI_BOOT_ETH_INDEX: 5746 rc = sprintf(str, "0\n"); 5747 break; 5748 case ISCSI_BOOT_ETH_MAC: 5749 rc = sysfs_format_mac(str, ha->my_mac, 5750 MAC_ADDR_LEN); 5751 break; 5752 default: 5753 rc = -ENOSYS; 5754 break; 5755 } 5756 return rc; 5757 } 5758 5759 static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type) 5760 { 5761 int rc; 5762 5763 switch (type) { 5764 case ISCSI_BOOT_ETH_FLAGS: 5765 case ISCSI_BOOT_ETH_MAC: 5766 case ISCSI_BOOT_ETH_INDEX: 5767 rc = S_IRUGO; 5768 break; 5769 default: 5770 rc = 0; 5771 break; 5772 } 5773 return rc; 5774 } 5775 5776 static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf) 5777 { 5778 struct scsi_qla_host *ha = data; 5779 char *str = buf; 5780 int rc; 5781 5782 switch (type) { 5783 case ISCSI_BOOT_INI_INITIATOR_NAME: 5784 rc = sprintf(str, "%s\n", ha->name_string); 5785 break; 5786 default: 5787 rc = -ENOSYS; 5788 break; 5789 } 5790 return rc; 5791 } 5792 5793 static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type) 5794 { 5795 int rc; 5796 5797 switch (type) { 5798 case ISCSI_BOOT_INI_INITIATOR_NAME: 5799 rc = S_IRUGO; 5800 break; 5801 default: 5802 rc = 0; 5803 break; 5804 } 5805 return rc; 5806 } 5807 5808 static ssize_t 5809 qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type, 5810 char *buf) 5811 { 5812 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0]; 5813 char *str = buf; 5814 int rc; 5815 5816 switch (type) { 5817 case ISCSI_BOOT_TGT_NAME: 5818 rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name); 5819 break; 5820 case ISCSI_BOOT_TGT_IP_ADDR: 5821 if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1) 5822 rc = sprintf(buf, "%pI4\n", 5823 &boot_conn->dest_ipaddr.ip_address); 5824 else 5825 rc = sprintf(str, "%pI6\n", 5826 &boot_conn->dest_ipaddr.ip_address); 5827 break; 5828 case ISCSI_BOOT_TGT_PORT: 5829 rc = sprintf(str, "%d\n", boot_conn->dest_port); 5830 break; 5831 case ISCSI_BOOT_TGT_CHAP_NAME: 5832 rc = sprintf(str, "%.*s\n", 5833 boot_conn->chap.target_chap_name_length, 5834 (char *)&boot_conn->chap.target_chap_name); 5835 break; 5836 case ISCSI_BOOT_TGT_CHAP_SECRET: 5837 rc = sprintf(str, "%.*s\n", 5838 boot_conn->chap.target_secret_length, 5839 (char *)&boot_conn->chap.target_secret); 5840 break; 5841 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 5842 rc = sprintf(str, "%.*s\n", 5843 boot_conn->chap.intr_chap_name_length, 5844 (char *)&boot_conn->chap.intr_chap_name); 5845 break; 5846 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 5847 rc = sprintf(str, "%.*s\n", 5848 boot_conn->chap.intr_secret_length, 5849 (char *)&boot_conn->chap.intr_secret); 5850 break; 5851 case ISCSI_BOOT_TGT_FLAGS: 5852 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT); 5853 break; 5854 case ISCSI_BOOT_TGT_NIC_ASSOC: 5855 rc = sprintf(str, "0\n"); 5856 break; 5857 default: 5858 rc = -ENOSYS; 5859 break; 5860 } 5861 return rc; 5862 } 5863 5864 static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf) 5865 { 5866 struct scsi_qla_host *ha = data; 5867 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess); 5868 5869 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf); 5870 } 5871 5872 static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf) 5873 { 5874 struct scsi_qla_host *ha = data; 5875 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess); 5876 5877 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf); 5878 } 5879 5880 static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type) 5881 { 5882 int rc; 5883 5884 switch (type) { 5885 case ISCSI_BOOT_TGT_NAME: 5886 case ISCSI_BOOT_TGT_IP_ADDR: 5887 case ISCSI_BOOT_TGT_PORT: 5888 case ISCSI_BOOT_TGT_CHAP_NAME: 5889 case ISCSI_BOOT_TGT_CHAP_SECRET: 5890 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 5891 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 5892 case ISCSI_BOOT_TGT_NIC_ASSOC: 5893 case ISCSI_BOOT_TGT_FLAGS: 5894 rc = S_IRUGO; 5895 break; 5896 default: 5897 rc = 0; 5898 break; 5899 } 5900 return rc; 5901 } 5902 5903 static void qla4xxx_boot_release(void *data) 5904 { 5905 struct scsi_qla_host *ha = data; 5906 5907 scsi_host_put(ha->host); 5908 } 5909 5910 static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[]) 5911 { 5912 dma_addr_t buf_dma; 5913 uint32_t addr, pri_addr, sec_addr; 5914 uint32_t offset; 5915 uint16_t func_num; 5916 uint8_t val; 5917 uint8_t *buf = NULL; 5918 size_t size = 13 * sizeof(uint8_t); 5919 int ret = QLA_SUCCESS; 5920 5921 func_num = PCI_FUNC(ha->pdev->devfn); 5922 5923 ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n", 5924 __func__, ha->pdev->device, func_num); 5925 5926 if (is_qla40XX(ha)) { 5927 if (func_num == 1) { 5928 addr = NVRAM_PORT0_BOOT_MODE; 5929 pri_addr = NVRAM_PORT0_BOOT_PRI_TGT; 5930 sec_addr = NVRAM_PORT0_BOOT_SEC_TGT; 5931 } else if (func_num == 3) { 5932 addr = NVRAM_PORT1_BOOT_MODE; 5933 pri_addr = NVRAM_PORT1_BOOT_PRI_TGT; 5934 sec_addr = NVRAM_PORT1_BOOT_SEC_TGT; 5935 } else { 5936 ret = QLA_ERROR; 5937 goto exit_boot_info; 5938 } 5939 5940 /* Check Boot Mode */ 5941 val = rd_nvram_byte(ha, addr); 5942 if (!(val & 0x07)) { 5943 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot " 5944 "options : 0x%x\n", __func__, val)); 5945 ret = QLA_ERROR; 5946 goto exit_boot_info; 5947 } 5948 5949 /* get primary valid target index */ 5950 val = rd_nvram_byte(ha, pri_addr); 5951 if (val & BIT_7) 5952 ddb_index[0] = (val & 0x7f); 5953 5954 /* get secondary valid target index */ 5955 val = rd_nvram_byte(ha, sec_addr); 5956 if (val & BIT_7) 5957 ddb_index[1] = (val & 0x7f); 5958 goto exit_boot_info; 5959 } else if (is_qla80XX(ha)) { 5960 buf = dma_alloc_coherent(&ha->pdev->dev, size, 5961 &buf_dma, GFP_KERNEL); 5962 if (!buf) { 5963 DEBUG2(ql4_printk(KERN_ERR, ha, 5964 "%s: Unable to allocate dma buffer\n", 5965 __func__)); 5966 ret = QLA_ERROR; 5967 goto exit_boot_info; 5968 } 5969 5970 if (ha->port_num == 0) 5971 offset = BOOT_PARAM_OFFSET_PORT0; 5972 else if (ha->port_num == 1) 5973 offset = BOOT_PARAM_OFFSET_PORT1; 5974 else { 5975 ret = QLA_ERROR; 5976 goto exit_boot_info_free; 5977 } 5978 addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) + 5979 offset; 5980 if (qla4xxx_get_flash(ha, buf_dma, addr, 5981 13 * sizeof(uint8_t)) != QLA_SUCCESS) { 5982 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash" 5983 " failed\n", ha->host_no, __func__)); 5984 ret = QLA_ERROR; 5985 goto exit_boot_info_free; 5986 } 5987 /* Check Boot Mode */ 5988 if (!(buf[1] & 0x07)) { 5989 DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options" 5990 " : 0x%x\n", buf[1])); 5991 ret = QLA_ERROR; 5992 goto exit_boot_info_free; 5993 } 5994 5995 /* get primary valid target index */ 5996 if (buf[2] & BIT_7) 5997 ddb_index[0] = buf[2] & 0x7f; 5998 5999 /* get secondary valid target index */ 6000 if (buf[11] & BIT_7) 6001 ddb_index[1] = buf[11] & 0x7f; 6002 } else { 6003 ret = QLA_ERROR; 6004 goto exit_boot_info; 6005 } 6006 6007 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary" 6008 " target ID %d\n", __func__, ddb_index[0], 6009 ddb_index[1])); 6010 6011 exit_boot_info_free: 6012 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma); 6013 exit_boot_info: 6014 ha->pri_ddb_idx = ddb_index[0]; 6015 ha->sec_ddb_idx = ddb_index[1]; 6016 return ret; 6017 } 6018 6019 /** 6020 * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password 6021 * @ha: pointer to adapter structure 6022 * @username: CHAP username to be returned 6023 * @password: CHAP password to be returned 6024 * 6025 * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP 6026 * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/. 6027 * So from the CHAP cache find the first BIDI CHAP entry and set it 6028 * to the boot record in sysfs. 6029 **/ 6030 static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username, 6031 char *password) 6032 { 6033 int i, ret = -EINVAL; 6034 int max_chap_entries = 0; 6035 struct ql4_chap_table *chap_table; 6036 6037 if (is_qla80XX(ha)) 6038 max_chap_entries = (ha->hw.flt_chap_size / 2) / 6039 sizeof(struct ql4_chap_table); 6040 else 6041 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 6042 6043 if (!ha->chap_list) { 6044 ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n"); 6045 return ret; 6046 } 6047 6048 mutex_lock(&ha->chap_sem); 6049 for (i = 0; i < max_chap_entries; i++) { 6050 chap_table = (struct ql4_chap_table *)ha->chap_list + i; 6051 if (chap_table->cookie != 6052 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) { 6053 continue; 6054 } 6055 6056 if (chap_table->flags & BIT_7) /* local */ 6057 continue; 6058 6059 if (!(chap_table->flags & BIT_6)) /* Not BIDI */ 6060 continue; 6061 6062 strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); 6063 strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); 6064 ret = 0; 6065 break; 6066 } 6067 mutex_unlock(&ha->chap_sem); 6068 6069 return ret; 6070 } 6071 6072 6073 static int qla4xxx_get_boot_target(struct scsi_qla_host *ha, 6074 struct ql4_boot_session_info *boot_sess, 6075 uint16_t ddb_index) 6076 { 6077 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0]; 6078 struct dev_db_entry *fw_ddb_entry; 6079 dma_addr_t fw_ddb_entry_dma; 6080 uint16_t idx; 6081 uint16_t options; 6082 int ret = QLA_SUCCESS; 6083 6084 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 6085 &fw_ddb_entry_dma, GFP_KERNEL); 6086 if (!fw_ddb_entry) { 6087 DEBUG2(ql4_printk(KERN_ERR, ha, 6088 "%s: Unable to allocate dma buffer.\n", 6089 __func__)); 6090 ret = QLA_ERROR; 6091 return ret; 6092 } 6093 6094 if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry, 6095 fw_ddb_entry_dma, ddb_index)) { 6096 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at " 6097 "index [%d]\n", __func__, ddb_index)); 6098 ret = QLA_ERROR; 6099 goto exit_boot_target; 6100 } 6101 6102 /* Update target name and IP from DDB */ 6103 memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name, 6104 min(sizeof(boot_sess->target_name), 6105 sizeof(fw_ddb_entry->iscsi_name))); 6106 6107 options = le16_to_cpu(fw_ddb_entry->options); 6108 if (options & DDB_OPT_IPV6_DEVICE) { 6109 memcpy(&boot_conn->dest_ipaddr.ip_address, 6110 &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN); 6111 } else { 6112 boot_conn->dest_ipaddr.ip_type = 0x1; 6113 memcpy(&boot_conn->dest_ipaddr.ip_address, 6114 &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN); 6115 } 6116 6117 boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port); 6118 6119 /* update chap information */ 6120 idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 6121 6122 if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) { 6123 6124 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n")); 6125 6126 ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap. 6127 target_chap_name, 6128 (char *)&boot_conn->chap.target_secret, 6129 idx); 6130 if (ret) { 6131 ql4_printk(KERN_ERR, ha, "Failed to set chap\n"); 6132 ret = QLA_ERROR; 6133 goto exit_boot_target; 6134 } 6135 6136 boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN; 6137 boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN; 6138 } 6139 6140 if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) { 6141 6142 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n")); 6143 6144 ret = qla4xxx_get_bidi_chap(ha, 6145 (char *)&boot_conn->chap.intr_chap_name, 6146 (char *)&boot_conn->chap.intr_secret); 6147 6148 if (ret) { 6149 ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n"); 6150 ret = QLA_ERROR; 6151 goto exit_boot_target; 6152 } 6153 6154 boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN; 6155 boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN; 6156 } 6157 6158 exit_boot_target: 6159 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 6160 fw_ddb_entry, fw_ddb_entry_dma); 6161 return ret; 6162 } 6163 6164 static int qla4xxx_get_boot_info(struct scsi_qla_host *ha) 6165 { 6166 uint16_t ddb_index[2]; 6167 int ret = QLA_ERROR; 6168 int rval; 6169 6170 memset(ddb_index, 0, sizeof(ddb_index)); 6171 ddb_index[0] = 0xffff; 6172 ddb_index[1] = 0xffff; 6173 ret = get_fw_boot_info(ha, ddb_index); 6174 if (ret != QLA_SUCCESS) { 6175 DEBUG2(ql4_printk(KERN_INFO, ha, 6176 "%s: No boot target configured.\n", __func__)); 6177 return ret; 6178 } 6179 6180 if (ql4xdisablesysfsboot) 6181 return QLA_SUCCESS; 6182 6183 if (ddb_index[0] == 0xffff) 6184 goto sec_target; 6185 6186 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess), 6187 ddb_index[0]); 6188 if (rval != QLA_SUCCESS) { 6189 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not " 6190 "configured\n", __func__)); 6191 } else 6192 ret = QLA_SUCCESS; 6193 6194 sec_target: 6195 if (ddb_index[1] == 0xffff) 6196 goto exit_get_boot_info; 6197 6198 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess), 6199 ddb_index[1]); 6200 if (rval != QLA_SUCCESS) { 6201 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not" 6202 " configured\n", __func__)); 6203 } else 6204 ret = QLA_SUCCESS; 6205 6206 exit_get_boot_info: 6207 return ret; 6208 } 6209 6210 static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha) 6211 { 6212 struct iscsi_boot_kobj *boot_kobj; 6213 6214 if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS) 6215 return QLA_ERROR; 6216 6217 if (ql4xdisablesysfsboot) { 6218 ql4_printk(KERN_INFO, ha, 6219 "%s: syfsboot disabled - driver will trigger login " 6220 "and publish session for discovery .\n", __func__); 6221 return QLA_SUCCESS; 6222 } 6223 6224 6225 ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no); 6226 if (!ha->boot_kset) 6227 goto kset_free; 6228 6229 if (!scsi_host_get(ha->host)) 6230 goto kset_free; 6231 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha, 6232 qla4xxx_show_boot_tgt_pri_info, 6233 qla4xxx_tgt_get_attr_visibility, 6234 qla4xxx_boot_release); 6235 if (!boot_kobj) 6236 goto put_host; 6237 6238 if (!scsi_host_get(ha->host)) 6239 goto kset_free; 6240 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha, 6241 qla4xxx_show_boot_tgt_sec_info, 6242 qla4xxx_tgt_get_attr_visibility, 6243 qla4xxx_boot_release); 6244 if (!boot_kobj) 6245 goto put_host; 6246 6247 if (!scsi_host_get(ha->host)) 6248 goto kset_free; 6249 boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha, 6250 qla4xxx_show_boot_ini_info, 6251 qla4xxx_ini_get_attr_visibility, 6252 qla4xxx_boot_release); 6253 if (!boot_kobj) 6254 goto put_host; 6255 6256 if (!scsi_host_get(ha->host)) 6257 goto kset_free; 6258 boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha, 6259 qla4xxx_show_boot_eth_info, 6260 qla4xxx_eth_get_attr_visibility, 6261 qla4xxx_boot_release); 6262 if (!boot_kobj) 6263 goto put_host; 6264 6265 return QLA_SUCCESS; 6266 6267 put_host: 6268 scsi_host_put(ha->host); 6269 kset_free: 6270 iscsi_boot_destroy_kset(ha->boot_kset); 6271 return -ENOMEM; 6272 } 6273 6274 6275 static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry, 6276 struct ql4_tuple_ddb *tddb) 6277 { 6278 struct iscsi_cls_session *cls_sess; 6279 struct iscsi_cls_conn *cls_conn; 6280 struct iscsi_session *sess; 6281 struct iscsi_conn *conn; 6282 6283 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); 6284 cls_sess = ddb_entry->sess; 6285 sess = cls_sess->dd_data; 6286 cls_conn = ddb_entry->conn; 6287 conn = cls_conn->dd_data; 6288 6289 tddb->tpgt = sess->tpgt; 6290 tddb->port = conn->persistent_port; 6291 strlcpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE); 6292 strlcpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN); 6293 } 6294 6295 static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry, 6296 struct ql4_tuple_ddb *tddb, 6297 uint8_t *flash_isid) 6298 { 6299 uint16_t options = 0; 6300 6301 tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); 6302 memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0], 6303 min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name))); 6304 6305 options = le16_to_cpu(fw_ddb_entry->options); 6306 if (options & DDB_OPT_IPV6_DEVICE) 6307 sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr); 6308 else 6309 sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr); 6310 6311 tddb->port = le16_to_cpu(fw_ddb_entry->port); 6312 6313 if (flash_isid == NULL) 6314 memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0], 6315 sizeof(tddb->isid)); 6316 else 6317 memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid)); 6318 } 6319 6320 static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha, 6321 struct ql4_tuple_ddb *old_tddb, 6322 struct ql4_tuple_ddb *new_tddb, 6323 uint8_t is_isid_compare) 6324 { 6325 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name)) 6326 return QLA_ERROR; 6327 6328 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr)) 6329 return QLA_ERROR; 6330 6331 if (old_tddb->port != new_tddb->port) 6332 return QLA_ERROR; 6333 6334 /* For multi sessions, driver generates the ISID, so do not compare 6335 * ISID in reset path since it would be a comparison between the 6336 * driver generated ISID and firmware generated ISID. This could 6337 * lead to adding duplicated DDBs in the list as driver generated 6338 * ISID would not match firmware generated ISID. 6339 */ 6340 if (is_isid_compare) { 6341 DEBUG2(ql4_printk(KERN_INFO, ha, 6342 "%s: old ISID [%pmR] New ISID [%pmR]\n", 6343 __func__, old_tddb->isid, new_tddb->isid)); 6344 6345 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0], 6346 sizeof(old_tddb->isid))) 6347 return QLA_ERROR; 6348 } 6349 6350 DEBUG2(ql4_printk(KERN_INFO, ha, 6351 "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]", 6352 old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr, 6353 old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt, 6354 new_tddb->ip_addr, new_tddb->iscsi_name)); 6355 6356 return QLA_SUCCESS; 6357 } 6358 6359 static int qla4xxx_is_session_exists(struct scsi_qla_host *ha, 6360 struct dev_db_entry *fw_ddb_entry, 6361 uint32_t *index) 6362 { 6363 struct ddb_entry *ddb_entry; 6364 struct ql4_tuple_ddb *fw_tddb = NULL; 6365 struct ql4_tuple_ddb *tmp_tddb = NULL; 6366 int idx; 6367 int ret = QLA_ERROR; 6368 6369 fw_tddb = vzalloc(sizeof(*fw_tddb)); 6370 if (!fw_tddb) { 6371 DEBUG2(ql4_printk(KERN_WARNING, ha, 6372 "Memory Allocation failed.\n")); 6373 ret = QLA_SUCCESS; 6374 goto exit_check; 6375 } 6376 6377 tmp_tddb = vzalloc(sizeof(*tmp_tddb)); 6378 if (!tmp_tddb) { 6379 DEBUG2(ql4_printk(KERN_WARNING, ha, 6380 "Memory Allocation failed.\n")); 6381 ret = QLA_SUCCESS; 6382 goto exit_check; 6383 } 6384 6385 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL); 6386 6387 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { 6388 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 6389 if (ddb_entry == NULL) 6390 continue; 6391 6392 qla4xxx_get_param_ddb(ddb_entry, tmp_tddb); 6393 if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) { 6394 ret = QLA_SUCCESS; /* found */ 6395 if (index != NULL) 6396 *index = idx; 6397 goto exit_check; 6398 } 6399 } 6400 6401 exit_check: 6402 if (fw_tddb) 6403 vfree(fw_tddb); 6404 if (tmp_tddb) 6405 vfree(tmp_tddb); 6406 return ret; 6407 } 6408 6409 /** 6410 * qla4xxx_check_existing_isid - check if target with same isid exist 6411 * in target list 6412 * @list_nt: list of target 6413 * @isid: isid to check 6414 * 6415 * This routine return QLA_SUCCESS if target with same isid exist 6416 **/ 6417 static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid) 6418 { 6419 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; 6420 struct dev_db_entry *fw_ddb_entry; 6421 6422 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 6423 fw_ddb_entry = &nt_ddb_idx->fw_ddb; 6424 6425 if (memcmp(&fw_ddb_entry->isid[0], &isid[0], 6426 sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) { 6427 return QLA_SUCCESS; 6428 } 6429 } 6430 return QLA_ERROR; 6431 } 6432 6433 /** 6434 * qla4xxx_update_isid - compare ddbs and updated isid 6435 * @ha: Pointer to host adapter structure. 6436 * @list_nt: list of nt target 6437 * @fw_ddb_entry: firmware ddb entry 6438 * 6439 * This routine update isid if ddbs have same iqn, same isid and 6440 * different IP addr. 6441 * Return QLA_SUCCESS if isid is updated. 6442 **/ 6443 static int qla4xxx_update_isid(struct scsi_qla_host *ha, 6444 struct list_head *list_nt, 6445 struct dev_db_entry *fw_ddb_entry) 6446 { 6447 uint8_t base_value, i; 6448 6449 base_value = fw_ddb_entry->isid[1] & 0x1f; 6450 for (i = 0; i < 8; i++) { 6451 fw_ddb_entry->isid[1] = (base_value | (i << 5)); 6452 if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid)) 6453 break; 6454 } 6455 6456 if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid)) 6457 return QLA_ERROR; 6458 6459 return QLA_SUCCESS; 6460 } 6461 6462 /** 6463 * qla4xxx_should_update_isid - check if isid need to update 6464 * @ha: Pointer to host adapter structure. 6465 * @old_tddb: ddb tuple 6466 * @new_tddb: ddb tuple 6467 * 6468 * Return QLA_SUCCESS if different IP, different PORT, same iqn, 6469 * same isid 6470 **/ 6471 static int qla4xxx_should_update_isid(struct scsi_qla_host *ha, 6472 struct ql4_tuple_ddb *old_tddb, 6473 struct ql4_tuple_ddb *new_tddb) 6474 { 6475 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) { 6476 /* Same ip */ 6477 if (old_tddb->port == new_tddb->port) 6478 return QLA_ERROR; 6479 } 6480 6481 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name)) 6482 /* different iqn */ 6483 return QLA_ERROR; 6484 6485 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0], 6486 sizeof(old_tddb->isid))) 6487 /* different isid */ 6488 return QLA_ERROR; 6489 6490 return QLA_SUCCESS; 6491 } 6492 6493 /** 6494 * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt 6495 * @ha: Pointer to host adapter structure. 6496 * @list_nt: list of nt target. 6497 * @fw_ddb_entry: firmware ddb entry. 6498 * 6499 * This routine check if fw_ddb_entry already exists in list_nt to avoid 6500 * duplicate ddb in list_nt. 6501 * Return QLA_SUCCESS if duplicate ddb exit in list_nl. 6502 * Note: This function also update isid of DDB if required. 6503 **/ 6504 6505 static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha, 6506 struct list_head *list_nt, 6507 struct dev_db_entry *fw_ddb_entry) 6508 { 6509 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; 6510 struct ql4_tuple_ddb *fw_tddb = NULL; 6511 struct ql4_tuple_ddb *tmp_tddb = NULL; 6512 int rval, ret = QLA_ERROR; 6513 6514 fw_tddb = vzalloc(sizeof(*fw_tddb)); 6515 if (!fw_tddb) { 6516 DEBUG2(ql4_printk(KERN_WARNING, ha, 6517 "Memory Allocation failed.\n")); 6518 ret = QLA_SUCCESS; 6519 goto exit_check; 6520 } 6521 6522 tmp_tddb = vzalloc(sizeof(*tmp_tddb)); 6523 if (!tmp_tddb) { 6524 DEBUG2(ql4_printk(KERN_WARNING, ha, 6525 "Memory Allocation failed.\n")); 6526 ret = QLA_SUCCESS; 6527 goto exit_check; 6528 } 6529 6530 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL); 6531 6532 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 6533 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, 6534 nt_ddb_idx->flash_isid); 6535 ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true); 6536 /* found duplicate ddb */ 6537 if (ret == QLA_SUCCESS) 6538 goto exit_check; 6539 } 6540 6541 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 6542 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL); 6543 6544 ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb); 6545 if (ret == QLA_SUCCESS) { 6546 rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry); 6547 if (rval == QLA_SUCCESS) 6548 ret = QLA_ERROR; 6549 else 6550 ret = QLA_SUCCESS; 6551 6552 goto exit_check; 6553 } 6554 } 6555 6556 exit_check: 6557 if (fw_tddb) 6558 vfree(fw_tddb); 6559 if (tmp_tddb) 6560 vfree(tmp_tddb); 6561 return ret; 6562 } 6563 6564 static void qla4xxx_free_ddb_list(struct list_head *list_ddb) 6565 { 6566 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 6567 6568 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { 6569 list_del_init(&ddb_idx->list); 6570 vfree(ddb_idx); 6571 } 6572 } 6573 6574 static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha, 6575 struct dev_db_entry *fw_ddb_entry) 6576 { 6577 struct iscsi_endpoint *ep; 6578 struct sockaddr_in *addr; 6579 struct sockaddr_in6 *addr6; 6580 struct sockaddr *t_addr; 6581 struct sockaddr_storage *dst_addr; 6582 char *ip; 6583 6584 /* TODO: need to destroy on unload iscsi_endpoint*/ 6585 dst_addr = vmalloc(sizeof(*dst_addr)); 6586 if (!dst_addr) 6587 return NULL; 6588 6589 if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) { 6590 t_addr = (struct sockaddr *)dst_addr; 6591 t_addr->sa_family = AF_INET6; 6592 addr6 = (struct sockaddr_in6 *)dst_addr; 6593 ip = (char *)&addr6->sin6_addr; 6594 memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN); 6595 addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port)); 6596 6597 } else { 6598 t_addr = (struct sockaddr *)dst_addr; 6599 t_addr->sa_family = AF_INET; 6600 addr = (struct sockaddr_in *)dst_addr; 6601 ip = (char *)&addr->sin_addr; 6602 memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN); 6603 addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port)); 6604 } 6605 6606 ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0); 6607 vfree(dst_addr); 6608 return ep; 6609 } 6610 6611 static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx) 6612 { 6613 if (ql4xdisablesysfsboot) 6614 return QLA_SUCCESS; 6615 if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx) 6616 return QLA_ERROR; 6617 return QLA_SUCCESS; 6618 } 6619 6620 static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha, 6621 struct ddb_entry *ddb_entry, 6622 uint16_t idx) 6623 { 6624 uint16_t def_timeout; 6625 6626 ddb_entry->ddb_type = FLASH_DDB; 6627 ddb_entry->fw_ddb_index = INVALID_ENTRY; 6628 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE; 6629 ddb_entry->ha = ha; 6630 ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb; 6631 ddb_entry->ddb_change = qla4xxx_flash_ddb_change; 6632 ddb_entry->chap_tbl_idx = INVALID_ENTRY; 6633 6634 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); 6635 atomic_set(&ddb_entry->relogin_timer, 0); 6636 atomic_set(&ddb_entry->relogin_retry_count, 0); 6637 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout); 6638 ddb_entry->default_relogin_timeout = 6639 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ? 6640 def_timeout : LOGIN_TOV; 6641 ddb_entry->default_time2wait = 6642 le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait); 6643 6644 if (ql4xdisablesysfsboot && 6645 (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)) 6646 set_bit(DF_BOOT_TGT, &ddb_entry->flags); 6647 } 6648 6649 static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha) 6650 { 6651 uint32_t idx = 0; 6652 uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */ 6653 uint32_t sts[MBOX_REG_COUNT]; 6654 uint32_t ip_state; 6655 unsigned long wtime; 6656 int ret; 6657 6658 wtime = jiffies + (HZ * IP_CONFIG_TOV); 6659 do { 6660 for (idx = 0; idx < IP_ADDR_COUNT; idx++) { 6661 if (ip_idx[idx] == -1) 6662 continue; 6663 6664 ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts); 6665 6666 if (ret == QLA_ERROR) { 6667 ip_idx[idx] = -1; 6668 continue; 6669 } 6670 6671 ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT; 6672 6673 DEBUG2(ql4_printk(KERN_INFO, ha, 6674 "Waiting for IP state for idx = %d, state = 0x%x\n", 6675 ip_idx[idx], ip_state)); 6676 if (ip_state == IP_ADDRSTATE_UNCONFIGURED || 6677 ip_state == IP_ADDRSTATE_INVALID || 6678 ip_state == IP_ADDRSTATE_PREFERRED || 6679 ip_state == IP_ADDRSTATE_DEPRICATED || 6680 ip_state == IP_ADDRSTATE_DISABLING) 6681 ip_idx[idx] = -1; 6682 } 6683 6684 /* Break if all IP states checked */ 6685 if ((ip_idx[0] == -1) && 6686 (ip_idx[1] == -1) && 6687 (ip_idx[2] == -1) && 6688 (ip_idx[3] == -1)) 6689 break; 6690 schedule_timeout_uninterruptible(HZ); 6691 } while (time_after(wtime, jiffies)); 6692 } 6693 6694 static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry, 6695 struct dev_db_entry *flash_ddb_entry) 6696 { 6697 uint16_t options = 0; 6698 size_t ip_len = IP_ADDR_LEN; 6699 6700 options = le16_to_cpu(fw_ddb_entry->options); 6701 if (options & DDB_OPT_IPV6_DEVICE) 6702 ip_len = IPv6_ADDR_LEN; 6703 6704 if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len)) 6705 return QLA_ERROR; 6706 6707 if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0], 6708 sizeof(fw_ddb_entry->isid))) 6709 return QLA_ERROR; 6710 6711 if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port, 6712 sizeof(fw_ddb_entry->port))) 6713 return QLA_ERROR; 6714 6715 return QLA_SUCCESS; 6716 } 6717 6718 static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha, 6719 struct dev_db_entry *fw_ddb_entry, 6720 uint32_t fw_idx, uint32_t *flash_index) 6721 { 6722 struct dev_db_entry *flash_ddb_entry; 6723 dma_addr_t flash_ddb_entry_dma; 6724 uint32_t idx = 0; 6725 int max_ddbs; 6726 int ret = QLA_ERROR, status; 6727 6728 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : 6729 MAX_DEV_DB_ENTRIES; 6730 6731 flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 6732 &flash_ddb_entry_dma); 6733 if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) { 6734 ql4_printk(KERN_ERR, ha, "Out of memory\n"); 6735 goto exit_find_st_idx; 6736 } 6737 6738 status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, 6739 flash_ddb_entry_dma, fw_idx); 6740 if (status == QLA_SUCCESS) { 6741 status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); 6742 if (status == QLA_SUCCESS) { 6743 *flash_index = fw_idx; 6744 ret = QLA_SUCCESS; 6745 goto exit_find_st_idx; 6746 } 6747 } 6748 6749 for (idx = 0; idx < max_ddbs; idx++) { 6750 status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, 6751 flash_ddb_entry_dma, idx); 6752 if (status == QLA_ERROR) 6753 continue; 6754 6755 status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); 6756 if (status == QLA_SUCCESS) { 6757 *flash_index = idx; 6758 ret = QLA_SUCCESS; 6759 goto exit_find_st_idx; 6760 } 6761 } 6762 6763 if (idx == max_ddbs) 6764 ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n", 6765 fw_idx); 6766 6767 exit_find_st_idx: 6768 if (flash_ddb_entry) 6769 dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry, 6770 flash_ddb_entry_dma); 6771 6772 return ret; 6773 } 6774 6775 static void qla4xxx_build_st_list(struct scsi_qla_host *ha, 6776 struct list_head *list_st) 6777 { 6778 struct qla_ddb_index *st_ddb_idx; 6779 int max_ddbs; 6780 int fw_idx_size; 6781 struct dev_db_entry *fw_ddb_entry; 6782 dma_addr_t fw_ddb_dma; 6783 int ret; 6784 uint32_t idx = 0, next_idx = 0; 6785 uint32_t state = 0, conn_err = 0; 6786 uint32_t flash_index = -1; 6787 uint16_t conn_id = 0; 6788 6789 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 6790 &fw_ddb_dma); 6791 if (fw_ddb_entry == NULL) { 6792 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 6793 goto exit_st_list; 6794 } 6795 6796 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : 6797 MAX_DEV_DB_ENTRIES; 6798 fw_idx_size = sizeof(struct qla_ddb_index); 6799 6800 for (idx = 0; idx < max_ddbs; idx = next_idx) { 6801 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, 6802 NULL, &next_idx, &state, 6803 &conn_err, NULL, &conn_id); 6804 if (ret == QLA_ERROR) 6805 break; 6806 6807 /* Ignore DDB if invalid state (unassigned) */ 6808 if (state == DDB_DS_UNASSIGNED) 6809 goto continue_next_st; 6810 6811 /* Check if ST, add to the list_st */ 6812 if (strlen((char *) fw_ddb_entry->iscsi_name) != 0) 6813 goto continue_next_st; 6814 6815 st_ddb_idx = vzalloc(fw_idx_size); 6816 if (!st_ddb_idx) 6817 break; 6818 6819 ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx, 6820 &flash_index); 6821 if (ret == QLA_ERROR) { 6822 ql4_printk(KERN_ERR, ha, 6823 "No flash entry for ST at idx [%d]\n", idx); 6824 st_ddb_idx->flash_ddb_idx = idx; 6825 } else { 6826 ql4_printk(KERN_INFO, ha, 6827 "ST at idx [%d] is stored at flash [%d]\n", 6828 idx, flash_index); 6829 st_ddb_idx->flash_ddb_idx = flash_index; 6830 } 6831 6832 st_ddb_idx->fw_ddb_idx = idx; 6833 6834 list_add_tail(&st_ddb_idx->list, list_st); 6835 continue_next_st: 6836 if (next_idx == 0) 6837 break; 6838 } 6839 6840 exit_st_list: 6841 if (fw_ddb_entry) 6842 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 6843 } 6844 6845 /** 6846 * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list 6847 * @ha: pointer to adapter structure 6848 * @list_ddb: List from which failed ddb to be removed 6849 * 6850 * Iterate over the list of DDBs and find and remove DDBs that are either in 6851 * no connection active state or failed state 6852 **/ 6853 static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha, 6854 struct list_head *list_ddb) 6855 { 6856 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 6857 uint32_t next_idx = 0; 6858 uint32_t state = 0, conn_err = 0; 6859 int ret; 6860 6861 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { 6862 ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx, 6863 NULL, 0, NULL, &next_idx, &state, 6864 &conn_err, NULL, NULL); 6865 if (ret == QLA_ERROR) 6866 continue; 6867 6868 if (state == DDB_DS_NO_CONNECTION_ACTIVE || 6869 state == DDB_DS_SESSION_FAILED) { 6870 list_del_init(&ddb_idx->list); 6871 vfree(ddb_idx); 6872 } 6873 } 6874 } 6875 6876 static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha, 6877 struct ddb_entry *ddb_entry, 6878 struct dev_db_entry *fw_ddb_entry) 6879 { 6880 struct iscsi_cls_session *cls_sess; 6881 struct iscsi_session *sess; 6882 uint32_t max_ddbs = 0; 6883 uint16_t ddb_link = -1; 6884 6885 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : 6886 MAX_DEV_DB_ENTRIES; 6887 6888 cls_sess = ddb_entry->sess; 6889 sess = cls_sess->dd_data; 6890 6891 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 6892 if (ddb_link < max_ddbs) 6893 sess->discovery_parent_idx = ddb_link; 6894 else 6895 sess->discovery_parent_idx = DDB_NO_LINK; 6896 } 6897 6898 static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha, 6899 struct dev_db_entry *fw_ddb_entry, 6900 int is_reset, uint16_t idx) 6901 { 6902 struct iscsi_cls_session *cls_sess; 6903 struct iscsi_session *sess; 6904 struct iscsi_cls_conn *cls_conn; 6905 struct iscsi_endpoint *ep; 6906 uint16_t cmds_max = 32; 6907 uint16_t conn_id = 0; 6908 uint32_t initial_cmdsn = 0; 6909 int ret = QLA_SUCCESS; 6910 6911 struct ddb_entry *ddb_entry = NULL; 6912 6913 /* Create session object, with INVALID_ENTRY, 6914 * the targer_id would get set when we issue the login 6915 */ 6916 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host, 6917 cmds_max, sizeof(struct ddb_entry), 6918 sizeof(struct ql4_task_data), 6919 initial_cmdsn, INVALID_ENTRY); 6920 if (!cls_sess) { 6921 ret = QLA_ERROR; 6922 goto exit_setup; 6923 } 6924 6925 /* 6926 * so calling module_put function to decrement the 6927 * reference count. 6928 **/ 6929 module_put(qla4xxx_iscsi_transport.owner); 6930 sess = cls_sess->dd_data; 6931 ddb_entry = sess->dd_data; 6932 ddb_entry->sess = cls_sess; 6933 6934 cls_sess->recovery_tmo = ql4xsess_recovery_tmo; 6935 memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry, 6936 sizeof(struct dev_db_entry)); 6937 6938 qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx); 6939 6940 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id); 6941 6942 if (!cls_conn) { 6943 ret = QLA_ERROR; 6944 goto exit_setup; 6945 } 6946 6947 ddb_entry->conn = cls_conn; 6948 6949 /* Setup ep, for displaying attributes in sysfs */ 6950 ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry); 6951 if (ep) { 6952 ep->conn = cls_conn; 6953 cls_conn->ep = ep; 6954 } else { 6955 DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n")); 6956 ret = QLA_ERROR; 6957 goto exit_setup; 6958 } 6959 6960 /* Update sess/conn params */ 6961 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); 6962 qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry); 6963 6964 if (is_reset == RESET_ADAPTER) { 6965 iscsi_block_session(cls_sess); 6966 /* Use the relogin path to discover new devices 6967 * by short-circuting the logic of setting 6968 * timer to relogin - instead set the flags 6969 * to initiate login right away. 6970 */ 6971 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); 6972 set_bit(DF_RELOGIN, &ddb_entry->flags); 6973 } 6974 6975 exit_setup: 6976 return ret; 6977 } 6978 6979 static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha, 6980 struct list_head *list_ddb, 6981 struct dev_db_entry *fw_ddb_entry) 6982 { 6983 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 6984 uint16_t ddb_link; 6985 6986 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 6987 6988 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { 6989 if (ddb_idx->fw_ddb_idx == ddb_link) { 6990 DEBUG2(ql4_printk(KERN_INFO, ha, 6991 "Updating NT parent idx from [%d] to [%d]\n", 6992 ddb_link, ddb_idx->flash_ddb_idx)); 6993 fw_ddb_entry->ddb_link = 6994 cpu_to_le16(ddb_idx->flash_ddb_idx); 6995 return; 6996 } 6997 } 6998 } 6999 7000 static void qla4xxx_build_nt_list(struct scsi_qla_host *ha, 7001 struct list_head *list_nt, 7002 struct list_head *list_st, 7003 int is_reset) 7004 { 7005 struct dev_db_entry *fw_ddb_entry; 7006 struct ddb_entry *ddb_entry = NULL; 7007 dma_addr_t fw_ddb_dma; 7008 int max_ddbs; 7009 int fw_idx_size; 7010 int ret; 7011 uint32_t idx = 0, next_idx = 0; 7012 uint32_t state = 0, conn_err = 0; 7013 uint32_t ddb_idx = -1; 7014 uint16_t conn_id = 0; 7015 uint16_t ddb_link = -1; 7016 struct qla_ddb_index *nt_ddb_idx; 7017 7018 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 7019 &fw_ddb_dma); 7020 if (fw_ddb_entry == NULL) { 7021 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 7022 goto exit_nt_list; 7023 } 7024 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : 7025 MAX_DEV_DB_ENTRIES; 7026 fw_idx_size = sizeof(struct qla_ddb_index); 7027 7028 for (idx = 0; idx < max_ddbs; idx = next_idx) { 7029 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, 7030 NULL, &next_idx, &state, 7031 &conn_err, NULL, &conn_id); 7032 if (ret == QLA_ERROR) 7033 break; 7034 7035 if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS) 7036 goto continue_next_nt; 7037 7038 /* Check if NT, then add to list it */ 7039 if (strlen((char *) fw_ddb_entry->iscsi_name) == 0) 7040 goto continue_next_nt; 7041 7042 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 7043 if (ddb_link < max_ddbs) 7044 qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry); 7045 7046 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE || 7047 state == DDB_DS_SESSION_FAILED) && 7048 (is_reset == INIT_ADAPTER)) 7049 goto continue_next_nt; 7050 7051 DEBUG2(ql4_printk(KERN_INFO, ha, 7052 "Adding DDB to session = 0x%x\n", idx)); 7053 7054 if (is_reset == INIT_ADAPTER) { 7055 nt_ddb_idx = vmalloc(fw_idx_size); 7056 if (!nt_ddb_idx) 7057 break; 7058 7059 nt_ddb_idx->fw_ddb_idx = idx; 7060 7061 /* Copy original isid as it may get updated in function 7062 * qla4xxx_update_isid(). We need original isid in 7063 * function qla4xxx_compare_tuple_ddb to find duplicate 7064 * target */ 7065 memcpy(&nt_ddb_idx->flash_isid[0], 7066 &fw_ddb_entry->isid[0], 7067 sizeof(nt_ddb_idx->flash_isid)); 7068 7069 ret = qla4xxx_is_flash_ddb_exists(ha, list_nt, 7070 fw_ddb_entry); 7071 if (ret == QLA_SUCCESS) { 7072 /* free nt_ddb_idx and do not add to list_nt */ 7073 vfree(nt_ddb_idx); 7074 goto continue_next_nt; 7075 } 7076 7077 /* Copy updated isid */ 7078 memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry, 7079 sizeof(struct dev_db_entry)); 7080 7081 list_add_tail(&nt_ddb_idx->list, list_nt); 7082 } else if (is_reset == RESET_ADAPTER) { 7083 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, 7084 &ddb_idx); 7085 if (ret == QLA_SUCCESS) { 7086 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, 7087 ddb_idx); 7088 if (ddb_entry != NULL) 7089 qla4xxx_update_sess_disc_idx(ha, 7090 ddb_entry, 7091 fw_ddb_entry); 7092 goto continue_next_nt; 7093 } 7094 } 7095 7096 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx); 7097 if (ret == QLA_ERROR) 7098 goto exit_nt_list; 7099 7100 continue_next_nt: 7101 if (next_idx == 0) 7102 break; 7103 } 7104 7105 exit_nt_list: 7106 if (fw_ddb_entry) 7107 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 7108 } 7109 7110 static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha, 7111 struct list_head *list_nt, 7112 uint16_t target_id) 7113 { 7114 struct dev_db_entry *fw_ddb_entry; 7115 dma_addr_t fw_ddb_dma; 7116 int max_ddbs; 7117 int fw_idx_size; 7118 int ret; 7119 uint32_t idx = 0, next_idx = 0; 7120 uint32_t state = 0, conn_err = 0; 7121 uint16_t conn_id = 0; 7122 struct qla_ddb_index *nt_ddb_idx; 7123 7124 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 7125 &fw_ddb_dma); 7126 if (fw_ddb_entry == NULL) { 7127 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 7128 goto exit_new_nt_list; 7129 } 7130 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : 7131 MAX_DEV_DB_ENTRIES; 7132 fw_idx_size = sizeof(struct qla_ddb_index); 7133 7134 for (idx = 0; idx < max_ddbs; idx = next_idx) { 7135 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, 7136 NULL, &next_idx, &state, 7137 &conn_err, NULL, &conn_id); 7138 if (ret == QLA_ERROR) 7139 break; 7140 7141 /* Check if NT, then add it to list */ 7142 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0) 7143 goto continue_next_new_nt; 7144 7145 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE)) 7146 goto continue_next_new_nt; 7147 7148 DEBUG2(ql4_printk(KERN_INFO, ha, 7149 "Adding DDB to session = 0x%x\n", idx)); 7150 7151 nt_ddb_idx = vmalloc(fw_idx_size); 7152 if (!nt_ddb_idx) 7153 break; 7154 7155 nt_ddb_idx->fw_ddb_idx = idx; 7156 7157 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL); 7158 if (ret == QLA_SUCCESS) { 7159 /* free nt_ddb_idx and do not add to list_nt */ 7160 vfree(nt_ddb_idx); 7161 goto continue_next_new_nt; 7162 } 7163 7164 if (target_id < max_ddbs) 7165 fw_ddb_entry->ddb_link = cpu_to_le16(target_id); 7166 7167 list_add_tail(&nt_ddb_idx->list, list_nt); 7168 7169 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, 7170 idx); 7171 if (ret == QLA_ERROR) 7172 goto exit_new_nt_list; 7173 7174 continue_next_new_nt: 7175 if (next_idx == 0) 7176 break; 7177 } 7178 7179 exit_new_nt_list: 7180 if (fw_ddb_entry) 7181 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 7182 } 7183 7184 /** 7185 * qla4xxx_sysfs_ddb_is_non_persistent - check for non-persistence of ddb entry 7186 * @dev: dev associated with the sysfs entry 7187 * @data: pointer to flashnode session object 7188 * 7189 * Returns: 7190 * 1: if flashnode entry is non-persistent 7191 * 0: if flashnode entry is persistent 7192 **/ 7193 static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data) 7194 { 7195 struct iscsi_bus_flash_session *fnode_sess; 7196 7197 if (!iscsi_flashnode_bus_match(dev, NULL)) 7198 return 0; 7199 7200 fnode_sess = iscsi_dev_to_flash_session(dev); 7201 7202 return (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT); 7203 } 7204 7205 /** 7206 * qla4xxx_sysfs_ddb_tgt_create - Create sysfs entry for target 7207 * @ha: pointer to host 7208 * @fw_ddb_entry: flash ddb data 7209 * @idx: target index 7210 * @user: if set then this call is made from userland else from kernel 7211 * 7212 * Returns: 7213 * On sucess: QLA_SUCCESS 7214 * On failure: QLA_ERROR 7215 * 7216 * This create separate sysfs entries for session and connection attributes of 7217 * the given fw ddb entry. 7218 * If this is invoked as a result of a userspace call then the entry is marked 7219 * as nonpersistent using flash_state field. 7220 **/ 7221 static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, 7222 struct dev_db_entry *fw_ddb_entry, 7223 uint16_t *idx, int user) 7224 { 7225 struct iscsi_bus_flash_session *fnode_sess = NULL; 7226 struct iscsi_bus_flash_conn *fnode_conn = NULL; 7227 int rc = QLA_ERROR; 7228 7229 fnode_sess = iscsi_create_flashnode_sess(ha->host, *idx, 7230 &qla4xxx_iscsi_transport, 0); 7231 if (!fnode_sess) { 7232 ql4_printk(KERN_ERR, ha, 7233 "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n", 7234 __func__, *idx, ha->host_no); 7235 goto exit_tgt_create; 7236 } 7237 7238 fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess, 7239 &qla4xxx_iscsi_transport, 0); 7240 if (!fnode_conn) { 7241 ql4_printk(KERN_ERR, ha, 7242 "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n", 7243 __func__, *idx, ha->host_no); 7244 goto free_sess; 7245 } 7246 7247 if (user) { 7248 fnode_sess->flash_state = DEV_DB_NON_PERSISTENT; 7249 } else { 7250 fnode_sess->flash_state = DEV_DB_PERSISTENT; 7251 7252 if (*idx == ha->pri_ddb_idx || *idx == ha->sec_ddb_idx) 7253 fnode_sess->is_boot_target = 1; 7254 else 7255 fnode_sess->is_boot_target = 0; 7256 } 7257 7258 rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn, 7259 fw_ddb_entry); 7260 if (rc) 7261 goto free_sess; 7262 7263 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", 7264 __func__, fnode_sess->dev.kobj.name); 7265 7266 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", 7267 __func__, fnode_conn->dev.kobj.name); 7268 7269 return QLA_SUCCESS; 7270 7271 free_sess: 7272 iscsi_destroy_flashnode_sess(fnode_sess); 7273 7274 exit_tgt_create: 7275 return QLA_ERROR; 7276 } 7277 7278 /** 7279 * qla4xxx_sysfs_ddb_add - Add new ddb entry in flash 7280 * @shost: pointer to host 7281 * @buf: type of ddb entry (ipv4/ipv6) 7282 * @len: length of buf 7283 * 7284 * This creates new ddb entry in the flash by finding first free index and 7285 * storing default ddb there. And then create sysfs entry for the new ddb entry. 7286 **/ 7287 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf, 7288 int len) 7289 { 7290 struct scsi_qla_host *ha = to_qla_host(shost); 7291 struct dev_db_entry *fw_ddb_entry = NULL; 7292 dma_addr_t fw_ddb_entry_dma; 7293 struct device *dev; 7294 uint16_t idx = 0; 7295 uint16_t max_ddbs = 0; 7296 uint32_t options = 0; 7297 uint32_t rval = QLA_ERROR; 7298 7299 if (strncasecmp(PORTAL_TYPE_IPV4, buf, 4) && 7300 strncasecmp(PORTAL_TYPE_IPV6, buf, 4)) { 7301 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid portal type\n", 7302 __func__)); 7303 goto exit_ddb_add; 7304 } 7305 7306 max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES : 7307 MAX_DEV_DB_ENTRIES; 7308 7309 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7310 &fw_ddb_entry_dma, GFP_KERNEL); 7311 if (!fw_ddb_entry) { 7312 DEBUG2(ql4_printk(KERN_ERR, ha, 7313 "%s: Unable to allocate dma buffer\n", 7314 __func__)); 7315 goto exit_ddb_add; 7316 } 7317 7318 dev = iscsi_find_flashnode_sess(ha->host, NULL, 7319 qla4xxx_sysfs_ddb_is_non_persistent); 7320 if (dev) { 7321 ql4_printk(KERN_ERR, ha, 7322 "%s: A non-persistent entry %s found\n", 7323 __func__, dev->kobj.name); 7324 put_device(dev); 7325 goto exit_ddb_add; 7326 } 7327 7328 /* Index 0 and 1 are reserved for boot target entries */ 7329 for (idx = 2; idx < max_ddbs; idx++) { 7330 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, 7331 fw_ddb_entry_dma, idx)) 7332 break; 7333 } 7334 7335 if (idx == max_ddbs) 7336 goto exit_ddb_add; 7337 7338 if (!strncasecmp("ipv6", buf, 4)) 7339 options |= IPV6_DEFAULT_DDB_ENTRY; 7340 7341 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); 7342 if (rval == QLA_ERROR) 7343 goto exit_ddb_add; 7344 7345 rval = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 1); 7346 7347 exit_ddb_add: 7348 if (fw_ddb_entry) 7349 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7350 fw_ddb_entry, fw_ddb_entry_dma); 7351 if (rval == QLA_SUCCESS) 7352 return idx; 7353 else 7354 return -EIO; 7355 } 7356 7357 /** 7358 * qla4xxx_sysfs_ddb_apply - write the target ddb contents to Flash 7359 * @fnode_sess: pointer to session attrs of flash ddb entry 7360 * @fnode_conn: pointer to connection attrs of flash ddb entry 7361 * 7362 * This writes the contents of target ddb buffer to Flash with a valid cookie 7363 * value in order to make the ddb entry persistent. 7364 **/ 7365 static int qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess, 7366 struct iscsi_bus_flash_conn *fnode_conn) 7367 { 7368 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7369 struct scsi_qla_host *ha = to_qla_host(shost); 7370 uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO; 7371 struct dev_db_entry *fw_ddb_entry = NULL; 7372 dma_addr_t fw_ddb_entry_dma; 7373 uint32_t options = 0; 7374 int rval = 0; 7375 7376 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7377 &fw_ddb_entry_dma, GFP_KERNEL); 7378 if (!fw_ddb_entry) { 7379 DEBUG2(ql4_printk(KERN_ERR, ha, 7380 "%s: Unable to allocate dma buffer\n", 7381 __func__)); 7382 rval = -ENOMEM; 7383 goto exit_ddb_apply; 7384 } 7385 7386 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7387 options |= IPV6_DEFAULT_DDB_ENTRY; 7388 7389 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); 7390 if (rval == QLA_ERROR) 7391 goto exit_ddb_apply; 7392 7393 dev_db_start_offset += (fnode_sess->target_id * 7394 sizeof(*fw_ddb_entry)); 7395 7396 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); 7397 fw_ddb_entry->cookie = DDB_VALID_COOKIE; 7398 7399 rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, 7400 sizeof(*fw_ddb_entry), FLASH_OPT_RMW_COMMIT); 7401 7402 if (rval == QLA_SUCCESS) { 7403 fnode_sess->flash_state = DEV_DB_PERSISTENT; 7404 ql4_printk(KERN_INFO, ha, 7405 "%s: flash node %u of host %lu written to flash\n", 7406 __func__, fnode_sess->target_id, ha->host_no); 7407 } else { 7408 rval = -EIO; 7409 ql4_printk(KERN_ERR, ha, 7410 "%s: Error while writing flash node %u of host %lu to flash\n", 7411 __func__, fnode_sess->target_id, ha->host_no); 7412 } 7413 7414 exit_ddb_apply: 7415 if (fw_ddb_entry) 7416 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7417 fw_ddb_entry, fw_ddb_entry_dma); 7418 return rval; 7419 } 7420 7421 static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha, 7422 struct dev_db_entry *fw_ddb_entry, 7423 uint16_t idx) 7424 { 7425 struct dev_db_entry *ddb_entry = NULL; 7426 dma_addr_t ddb_entry_dma; 7427 unsigned long wtime; 7428 uint32_t mbx_sts = 0; 7429 uint32_t state = 0, conn_err = 0; 7430 uint16_t tmo = 0; 7431 int ret = 0; 7432 7433 ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ddb_entry), 7434 &ddb_entry_dma, GFP_KERNEL); 7435 if (!ddb_entry) { 7436 DEBUG2(ql4_printk(KERN_ERR, ha, 7437 "%s: Unable to allocate dma buffer\n", 7438 __func__)); 7439 return QLA_ERROR; 7440 } 7441 7442 memcpy(ddb_entry, fw_ddb_entry, sizeof(*ddb_entry)); 7443 7444 ret = qla4xxx_set_ddb_entry(ha, idx, ddb_entry_dma, &mbx_sts); 7445 if (ret != QLA_SUCCESS) { 7446 DEBUG2(ql4_printk(KERN_ERR, ha, 7447 "%s: Unable to set ddb entry for index %d\n", 7448 __func__, idx)); 7449 goto exit_ddb_conn_open; 7450 } 7451 7452 qla4xxx_conn_open(ha, idx); 7453 7454 /* To ensure that sendtargets is done, wait for at least 12 secs */ 7455 tmo = ((ha->def_timeout > LOGIN_TOV) && 7456 (ha->def_timeout < LOGIN_TOV * 10) ? 7457 ha->def_timeout : LOGIN_TOV); 7458 7459 DEBUG2(ql4_printk(KERN_INFO, ha, 7460 "Default time to wait for login to ddb %d\n", tmo)); 7461 7462 wtime = jiffies + (HZ * tmo); 7463 do { 7464 ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL, 7465 NULL, &state, &conn_err, NULL, 7466 NULL); 7467 if (ret == QLA_ERROR) 7468 continue; 7469 7470 if (state == DDB_DS_NO_CONNECTION_ACTIVE || 7471 state == DDB_DS_SESSION_FAILED) 7472 break; 7473 7474 schedule_timeout_uninterruptible(HZ / 10); 7475 } while (time_after(wtime, jiffies)); 7476 7477 exit_ddb_conn_open: 7478 if (ddb_entry) 7479 dma_free_coherent(&ha->pdev->dev, sizeof(*ddb_entry), 7480 ddb_entry, ddb_entry_dma); 7481 return ret; 7482 } 7483 7484 static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha, 7485 struct dev_db_entry *fw_ddb_entry, 7486 uint16_t target_id) 7487 { 7488 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 7489 struct list_head list_nt; 7490 uint16_t ddb_index; 7491 int ret = 0; 7492 7493 if (test_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags)) { 7494 ql4_printk(KERN_WARNING, ha, 7495 "%s: A discovery already in progress!\n", __func__); 7496 return QLA_ERROR; 7497 } 7498 7499 INIT_LIST_HEAD(&list_nt); 7500 7501 set_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags); 7502 7503 ret = qla4xxx_get_ddb_index(ha, &ddb_index); 7504 if (ret == QLA_ERROR) 7505 goto exit_login_st_clr_bit; 7506 7507 ret = qla4xxx_sysfs_ddb_conn_open(ha, fw_ddb_entry, ddb_index); 7508 if (ret == QLA_ERROR) 7509 goto exit_login_st; 7510 7511 qla4xxx_build_new_nt_list(ha, &list_nt, target_id); 7512 7513 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) { 7514 list_del_init(&ddb_idx->list); 7515 qla4xxx_clear_ddb_entry(ha, ddb_idx->fw_ddb_idx); 7516 vfree(ddb_idx); 7517 } 7518 7519 exit_login_st: 7520 if (qla4xxx_clear_ddb_entry(ha, ddb_index) == QLA_ERROR) { 7521 ql4_printk(KERN_ERR, ha, 7522 "Unable to clear DDB index = 0x%x\n", ddb_index); 7523 } 7524 7525 clear_bit(ddb_index, ha->ddb_idx_map); 7526 7527 exit_login_st_clr_bit: 7528 clear_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags); 7529 return ret; 7530 } 7531 7532 static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha, 7533 struct dev_db_entry *fw_ddb_entry, 7534 uint16_t idx) 7535 { 7536 int ret = QLA_ERROR; 7537 7538 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL); 7539 if (ret != QLA_SUCCESS) 7540 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, 7541 idx); 7542 else 7543 ret = -EPERM; 7544 7545 return ret; 7546 } 7547 7548 /** 7549 * qla4xxx_sysfs_ddb_login - Login to the specified target 7550 * @fnode_sess: pointer to session attrs of flash ddb entry 7551 * @fnode_conn: pointer to connection attrs of flash ddb entry 7552 * 7553 * This logs in to the specified target 7554 **/ 7555 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess, 7556 struct iscsi_bus_flash_conn *fnode_conn) 7557 { 7558 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7559 struct scsi_qla_host *ha = to_qla_host(shost); 7560 struct dev_db_entry *fw_ddb_entry = NULL; 7561 dma_addr_t fw_ddb_entry_dma; 7562 uint32_t options = 0; 7563 int ret = 0; 7564 7565 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) { 7566 ql4_printk(KERN_ERR, ha, 7567 "%s: Target info is not persistent\n", __func__); 7568 ret = -EIO; 7569 goto exit_ddb_login; 7570 } 7571 7572 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7573 &fw_ddb_entry_dma, GFP_KERNEL); 7574 if (!fw_ddb_entry) { 7575 DEBUG2(ql4_printk(KERN_ERR, ha, 7576 "%s: Unable to allocate dma buffer\n", 7577 __func__)); 7578 ret = -ENOMEM; 7579 goto exit_ddb_login; 7580 } 7581 7582 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7583 options |= IPV6_DEFAULT_DDB_ENTRY; 7584 7585 ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); 7586 if (ret == QLA_ERROR) 7587 goto exit_ddb_login; 7588 7589 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); 7590 fw_ddb_entry->cookie = DDB_VALID_COOKIE; 7591 7592 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0) 7593 ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry, 7594 fnode_sess->target_id); 7595 else 7596 ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry, 7597 fnode_sess->target_id); 7598 7599 if (ret > 0) 7600 ret = -EIO; 7601 7602 exit_ddb_login: 7603 if (fw_ddb_entry) 7604 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7605 fw_ddb_entry, fw_ddb_entry_dma); 7606 return ret; 7607 } 7608 7609 /** 7610 * qla4xxx_sysfs_ddb_logout_sid - Logout session for the specified target 7611 * @cls_sess: pointer to session to be logged out 7612 * 7613 * This performs session log out from the specified target 7614 **/ 7615 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess) 7616 { 7617 struct iscsi_session *sess; 7618 struct ddb_entry *ddb_entry = NULL; 7619 struct scsi_qla_host *ha; 7620 struct dev_db_entry *fw_ddb_entry = NULL; 7621 dma_addr_t fw_ddb_entry_dma; 7622 unsigned long flags; 7623 unsigned long wtime; 7624 uint32_t ddb_state; 7625 int options; 7626 int ret = 0; 7627 7628 sess = cls_sess->dd_data; 7629 ddb_entry = sess->dd_data; 7630 ha = ddb_entry->ha; 7631 7632 if (ddb_entry->ddb_type != FLASH_DDB) { 7633 ql4_printk(KERN_ERR, ha, "%s: Not a flash node session\n", 7634 __func__); 7635 ret = -ENXIO; 7636 goto exit_ddb_logout; 7637 } 7638 7639 if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) { 7640 ql4_printk(KERN_ERR, ha, 7641 "%s: Logout from boot target entry is not permitted.\n", 7642 __func__); 7643 ret = -EPERM; 7644 goto exit_ddb_logout; 7645 } 7646 7647 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7648 &fw_ddb_entry_dma, GFP_KERNEL); 7649 if (!fw_ddb_entry) { 7650 ql4_printk(KERN_ERR, ha, 7651 "%s: Unable to allocate dma buffer\n", __func__); 7652 ret = -ENOMEM; 7653 goto exit_ddb_logout; 7654 } 7655 7656 if (test_and_set_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags)) 7657 goto ddb_logout_init; 7658 7659 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 7660 fw_ddb_entry, fw_ddb_entry_dma, 7661 NULL, NULL, &ddb_state, NULL, 7662 NULL, NULL); 7663 if (ret == QLA_ERROR) 7664 goto ddb_logout_init; 7665 7666 if (ddb_state == DDB_DS_SESSION_ACTIVE) 7667 goto ddb_logout_init; 7668 7669 /* wait until next relogin is triggered using DF_RELOGIN and 7670 * clear DF_RELOGIN to avoid invocation of further relogin 7671 */ 7672 wtime = jiffies + (HZ * RELOGIN_TOV); 7673 do { 7674 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags)) 7675 goto ddb_logout_init; 7676 7677 schedule_timeout_uninterruptible(HZ); 7678 } while ((time_after(wtime, jiffies))); 7679 7680 ddb_logout_init: 7681 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); 7682 atomic_set(&ddb_entry->relogin_timer, 0); 7683 7684 options = LOGOUT_OPTION_CLOSE_SESSION; 7685 qla4xxx_session_logout_ddb(ha, ddb_entry, options); 7686 7687 memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry)); 7688 wtime = jiffies + (HZ * LOGOUT_TOV); 7689 do { 7690 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 7691 fw_ddb_entry, fw_ddb_entry_dma, 7692 NULL, NULL, &ddb_state, NULL, 7693 NULL, NULL); 7694 if (ret == QLA_ERROR) 7695 goto ddb_logout_clr_sess; 7696 7697 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || 7698 (ddb_state == DDB_DS_SESSION_FAILED)) 7699 goto ddb_logout_clr_sess; 7700 7701 schedule_timeout_uninterruptible(HZ); 7702 } while ((time_after(wtime, jiffies))); 7703 7704 ddb_logout_clr_sess: 7705 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); 7706 /* 7707 * we have decremented the reference count of the driver 7708 * when we setup the session to have the driver unload 7709 * to be seamless without actually destroying the 7710 * session 7711 **/ 7712 try_module_get(qla4xxx_iscsi_transport.owner); 7713 iscsi_destroy_endpoint(ddb_entry->conn->ep); 7714 7715 spin_lock_irqsave(&ha->hardware_lock, flags); 7716 qla4xxx_free_ddb(ha, ddb_entry); 7717 clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map); 7718 spin_unlock_irqrestore(&ha->hardware_lock, flags); 7719 7720 iscsi_session_teardown(ddb_entry->sess); 7721 7722 clear_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags); 7723 ret = QLA_SUCCESS; 7724 7725 exit_ddb_logout: 7726 if (fw_ddb_entry) 7727 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7728 fw_ddb_entry, fw_ddb_entry_dma); 7729 return ret; 7730 } 7731 7732 /** 7733 * qla4xxx_sysfs_ddb_logout - Logout from the specified target 7734 * @fnode_sess: pointer to session attrs of flash ddb entry 7735 * @fnode_conn: pointer to connection attrs of flash ddb entry 7736 * 7737 * This performs log out from the specified target 7738 **/ 7739 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess, 7740 struct iscsi_bus_flash_conn *fnode_conn) 7741 { 7742 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7743 struct scsi_qla_host *ha = to_qla_host(shost); 7744 struct ql4_tuple_ddb *flash_tddb = NULL; 7745 struct ql4_tuple_ddb *tmp_tddb = NULL; 7746 struct dev_db_entry *fw_ddb_entry = NULL; 7747 struct ddb_entry *ddb_entry = NULL; 7748 dma_addr_t fw_ddb_dma; 7749 uint32_t next_idx = 0; 7750 uint32_t state = 0, conn_err = 0; 7751 uint16_t conn_id = 0; 7752 int idx, index; 7753 int status, ret = 0; 7754 7755 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 7756 &fw_ddb_dma); 7757 if (fw_ddb_entry == NULL) { 7758 ql4_printk(KERN_ERR, ha, "%s:Out of memory\n", __func__); 7759 ret = -ENOMEM; 7760 goto exit_ddb_logout; 7761 } 7762 7763 flash_tddb = vzalloc(sizeof(*flash_tddb)); 7764 if (!flash_tddb) { 7765 ql4_printk(KERN_WARNING, ha, 7766 "%s:Memory Allocation failed.\n", __func__); 7767 ret = -ENOMEM; 7768 goto exit_ddb_logout; 7769 } 7770 7771 tmp_tddb = vzalloc(sizeof(*tmp_tddb)); 7772 if (!tmp_tddb) { 7773 ql4_printk(KERN_WARNING, ha, 7774 "%s:Memory Allocation failed.\n", __func__); 7775 ret = -ENOMEM; 7776 goto exit_ddb_logout; 7777 } 7778 7779 if (!fnode_sess->targetname) { 7780 ql4_printk(KERN_ERR, ha, 7781 "%s:Cannot logout from SendTarget entry\n", 7782 __func__); 7783 ret = -EPERM; 7784 goto exit_ddb_logout; 7785 } 7786 7787 if (fnode_sess->is_boot_target) { 7788 ql4_printk(KERN_ERR, ha, 7789 "%s: Logout from boot target entry is not permitted.\n", 7790 __func__); 7791 ret = -EPERM; 7792 goto exit_ddb_logout; 7793 } 7794 7795 strlcpy(flash_tddb->iscsi_name, fnode_sess->targetname, 7796 ISCSI_NAME_SIZE); 7797 7798 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7799 sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress); 7800 else 7801 sprintf(flash_tddb->ip_addr, "%pI4", fnode_conn->ipaddress); 7802 7803 flash_tddb->tpgt = fnode_sess->tpgt; 7804 flash_tddb->port = fnode_conn->port; 7805 7806 COPY_ISID(flash_tddb->isid, fnode_sess->isid); 7807 7808 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { 7809 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 7810 if (ddb_entry == NULL) 7811 continue; 7812 7813 if (ddb_entry->ddb_type != FLASH_DDB) 7814 continue; 7815 7816 index = ddb_entry->sess->target_id; 7817 status = qla4xxx_get_fwddb_entry(ha, index, fw_ddb_entry, 7818 fw_ddb_dma, NULL, &next_idx, 7819 &state, &conn_err, NULL, 7820 &conn_id); 7821 if (status == QLA_ERROR) { 7822 ret = -ENOMEM; 7823 break; 7824 } 7825 7826 qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, NULL); 7827 7828 status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb, 7829 true); 7830 if (status == QLA_SUCCESS) { 7831 ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess); 7832 break; 7833 } 7834 } 7835 7836 if (idx == MAX_DDB_ENTRIES) 7837 ret = -ESRCH; 7838 7839 exit_ddb_logout: 7840 if (flash_tddb) 7841 vfree(flash_tddb); 7842 if (tmp_tddb) 7843 vfree(tmp_tddb); 7844 if (fw_ddb_entry) 7845 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 7846 7847 return ret; 7848 } 7849 7850 static int 7851 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, 7852 int param, char *buf) 7853 { 7854 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7855 struct scsi_qla_host *ha = to_qla_host(shost); 7856 struct iscsi_bus_flash_conn *fnode_conn; 7857 struct ql4_chap_table chap_tbl; 7858 struct device *dev; 7859 int parent_type; 7860 int rc = 0; 7861 7862 dev = iscsi_find_flashnode_conn(fnode_sess); 7863 if (!dev) 7864 return -EIO; 7865 7866 fnode_conn = iscsi_dev_to_flash_conn(dev); 7867 7868 switch (param) { 7869 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: 7870 rc = sprintf(buf, "%u\n", fnode_conn->is_fw_assigned_ipv6); 7871 break; 7872 case ISCSI_FLASHNODE_PORTAL_TYPE: 7873 rc = sprintf(buf, "%s\n", fnode_sess->portal_type); 7874 break; 7875 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: 7876 rc = sprintf(buf, "%u\n", fnode_sess->auto_snd_tgt_disable); 7877 break; 7878 case ISCSI_FLASHNODE_DISCOVERY_SESS: 7879 rc = sprintf(buf, "%u\n", fnode_sess->discovery_sess); 7880 break; 7881 case ISCSI_FLASHNODE_ENTRY_EN: 7882 rc = sprintf(buf, "%u\n", fnode_sess->entry_state); 7883 break; 7884 case ISCSI_FLASHNODE_HDR_DGST_EN: 7885 rc = sprintf(buf, "%u\n", fnode_conn->hdrdgst_en); 7886 break; 7887 case ISCSI_FLASHNODE_DATA_DGST_EN: 7888 rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en); 7889 break; 7890 case ISCSI_FLASHNODE_IMM_DATA_EN: 7891 rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en); 7892 break; 7893 case ISCSI_FLASHNODE_INITIAL_R2T_EN: 7894 rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en); 7895 break; 7896 case ISCSI_FLASHNODE_DATASEQ_INORDER: 7897 rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en); 7898 break; 7899 case ISCSI_FLASHNODE_PDU_INORDER: 7900 rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en); 7901 break; 7902 case ISCSI_FLASHNODE_CHAP_AUTH_EN: 7903 rc = sprintf(buf, "%u\n", fnode_sess->chap_auth_en); 7904 break; 7905 case ISCSI_FLASHNODE_SNACK_REQ_EN: 7906 rc = sprintf(buf, "%u\n", fnode_conn->snack_req_en); 7907 break; 7908 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: 7909 rc = sprintf(buf, "%u\n", fnode_sess->discovery_logout_en); 7910 break; 7911 case ISCSI_FLASHNODE_BIDI_CHAP_EN: 7912 rc = sprintf(buf, "%u\n", fnode_sess->bidi_chap_en); 7913 break; 7914 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: 7915 rc = sprintf(buf, "%u\n", fnode_sess->discovery_auth_optional); 7916 break; 7917 case ISCSI_FLASHNODE_ERL: 7918 rc = sprintf(buf, "%u\n", fnode_sess->erl); 7919 break; 7920 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: 7921 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_stat); 7922 break; 7923 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: 7924 rc = sprintf(buf, "%u\n", fnode_conn->tcp_nagle_disable); 7925 break; 7926 case ISCSI_FLASHNODE_TCP_WSF_DISABLE: 7927 rc = sprintf(buf, "%u\n", fnode_conn->tcp_wsf_disable); 7928 break; 7929 case ISCSI_FLASHNODE_TCP_TIMER_SCALE: 7930 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timer_scale); 7931 break; 7932 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: 7933 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_en); 7934 break; 7935 case ISCSI_FLASHNODE_IP_FRAG_DISABLE: 7936 rc = sprintf(buf, "%u\n", fnode_conn->fragment_disable); 7937 break; 7938 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: 7939 rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength); 7940 break; 7941 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: 7942 rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength); 7943 break; 7944 case ISCSI_FLASHNODE_FIRST_BURST: 7945 rc = sprintf(buf, "%u\n", fnode_sess->first_burst); 7946 break; 7947 case ISCSI_FLASHNODE_DEF_TIME2WAIT: 7948 rc = sprintf(buf, "%u\n", fnode_sess->time2wait); 7949 break; 7950 case ISCSI_FLASHNODE_DEF_TIME2RETAIN: 7951 rc = sprintf(buf, "%u\n", fnode_sess->time2retain); 7952 break; 7953 case ISCSI_FLASHNODE_MAX_R2T: 7954 rc = sprintf(buf, "%u\n", fnode_sess->max_r2t); 7955 break; 7956 case ISCSI_FLASHNODE_KEEPALIVE_TMO: 7957 rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout); 7958 break; 7959 case ISCSI_FLASHNODE_ISID: 7960 rc = sprintf(buf, "%pm\n", fnode_sess->isid); 7961 break; 7962 case ISCSI_FLASHNODE_TSID: 7963 rc = sprintf(buf, "%u\n", fnode_sess->tsid); 7964 break; 7965 case ISCSI_FLASHNODE_PORT: 7966 rc = sprintf(buf, "%d\n", fnode_conn->port); 7967 break; 7968 case ISCSI_FLASHNODE_MAX_BURST: 7969 rc = sprintf(buf, "%u\n", fnode_sess->max_burst); 7970 break; 7971 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: 7972 rc = sprintf(buf, "%u\n", 7973 fnode_sess->default_taskmgmt_timeout); 7974 break; 7975 case ISCSI_FLASHNODE_IPADDR: 7976 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7977 rc = sprintf(buf, "%pI6\n", fnode_conn->ipaddress); 7978 else 7979 rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress); 7980 break; 7981 case ISCSI_FLASHNODE_ALIAS: 7982 if (fnode_sess->targetalias) 7983 rc = sprintf(buf, "%s\n", fnode_sess->targetalias); 7984 else 7985 rc = sprintf(buf, "\n"); 7986 break; 7987 case ISCSI_FLASHNODE_REDIRECT_IPADDR: 7988 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7989 rc = sprintf(buf, "%pI6\n", 7990 fnode_conn->redirect_ipaddr); 7991 else 7992 rc = sprintf(buf, "%pI4\n", 7993 fnode_conn->redirect_ipaddr); 7994 break; 7995 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: 7996 rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size); 7997 break; 7998 case ISCSI_FLASHNODE_LOCAL_PORT: 7999 rc = sprintf(buf, "%u\n", fnode_conn->local_port); 8000 break; 8001 case ISCSI_FLASHNODE_IPV4_TOS: 8002 rc = sprintf(buf, "%u\n", fnode_conn->ipv4_tos); 8003 break; 8004 case ISCSI_FLASHNODE_IPV6_TC: 8005 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 8006 rc = sprintf(buf, "%u\n", 8007 fnode_conn->ipv6_traffic_class); 8008 else 8009 rc = sprintf(buf, "\n"); 8010 break; 8011 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: 8012 rc = sprintf(buf, "%u\n", fnode_conn->ipv6_flow_label); 8013 break; 8014 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: 8015 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 8016 rc = sprintf(buf, "%pI6\n", 8017 fnode_conn->link_local_ipv6_addr); 8018 else 8019 rc = sprintf(buf, "\n"); 8020 break; 8021 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: 8022 rc = sprintf(buf, "%u\n", fnode_sess->discovery_parent_idx); 8023 break; 8024 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE: 8025 if (fnode_sess->discovery_parent_type == DDB_ISNS) 8026 parent_type = ISCSI_DISC_PARENT_ISNS; 8027 else if (fnode_sess->discovery_parent_type == DDB_NO_LINK) 8028 parent_type = ISCSI_DISC_PARENT_UNKNOWN; 8029 else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES) 8030 parent_type = ISCSI_DISC_PARENT_SENDTGT; 8031 else 8032 parent_type = ISCSI_DISC_PARENT_UNKNOWN; 8033 8034 rc = sprintf(buf, "%s\n", 8035 iscsi_get_discovery_parent_name(parent_type)); 8036 break; 8037 case ISCSI_FLASHNODE_NAME: 8038 if (fnode_sess->targetname) 8039 rc = sprintf(buf, "%s\n", fnode_sess->targetname); 8040 else 8041 rc = sprintf(buf, "\n"); 8042 break; 8043 case ISCSI_FLASHNODE_TPGT: 8044 rc = sprintf(buf, "%u\n", fnode_sess->tpgt); 8045 break; 8046 case ISCSI_FLASHNODE_TCP_XMIT_WSF: 8047 rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf); 8048 break; 8049 case ISCSI_FLASHNODE_TCP_RECV_WSF: 8050 rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf); 8051 break; 8052 case ISCSI_FLASHNODE_CHAP_OUT_IDX: 8053 rc = sprintf(buf, "%u\n", fnode_sess->chap_out_idx); 8054 break; 8055 case ISCSI_FLASHNODE_USERNAME: 8056 if (fnode_sess->chap_auth_en) { 8057 qla4xxx_get_uni_chap_at_index(ha, 8058 chap_tbl.name, 8059 chap_tbl.secret, 8060 fnode_sess->chap_out_idx); 8061 rc = sprintf(buf, "%s\n", chap_tbl.name); 8062 } else { 8063 rc = sprintf(buf, "\n"); 8064 } 8065 break; 8066 case ISCSI_FLASHNODE_PASSWORD: 8067 if (fnode_sess->chap_auth_en) { 8068 qla4xxx_get_uni_chap_at_index(ha, 8069 chap_tbl.name, 8070 chap_tbl.secret, 8071 fnode_sess->chap_out_idx); 8072 rc = sprintf(buf, "%s\n", chap_tbl.secret); 8073 } else { 8074 rc = sprintf(buf, "\n"); 8075 } 8076 break; 8077 case ISCSI_FLASHNODE_STATSN: 8078 rc = sprintf(buf, "%u\n", fnode_conn->statsn); 8079 break; 8080 case ISCSI_FLASHNODE_EXP_STATSN: 8081 rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn); 8082 break; 8083 case ISCSI_FLASHNODE_IS_BOOT_TGT: 8084 rc = sprintf(buf, "%u\n", fnode_sess->is_boot_target); 8085 break; 8086 default: 8087 rc = -ENOSYS; 8088 break; 8089 } 8090 8091 put_device(dev); 8092 return rc; 8093 } 8094 8095 /** 8096 * qla4xxx_sysfs_ddb_set_param - Set parameter for firmware DDB entry 8097 * @fnode_sess: pointer to session attrs of flash ddb entry 8098 * @fnode_conn: pointer to connection attrs of flash ddb entry 8099 * @data: Parameters and their values to update 8100 * @len: len of data 8101 * 8102 * This sets the parameter of flash ddb entry and writes them to flash 8103 **/ 8104 static int 8105 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, 8106 struct iscsi_bus_flash_conn *fnode_conn, 8107 void *data, int len) 8108 { 8109 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 8110 struct scsi_qla_host *ha = to_qla_host(shost); 8111 struct iscsi_flashnode_param_info *fnode_param; 8112 struct ql4_chap_table chap_tbl; 8113 struct nlattr *attr; 8114 uint16_t chap_out_idx = INVALID_ENTRY; 8115 int rc = QLA_ERROR; 8116 uint32_t rem = len; 8117 8118 memset((void *)&chap_tbl, 0, sizeof(chap_tbl)); 8119 nla_for_each_attr(attr, data, len, rem) { 8120 fnode_param = nla_data(attr); 8121 8122 switch (fnode_param->param) { 8123 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: 8124 fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0]; 8125 break; 8126 case ISCSI_FLASHNODE_PORTAL_TYPE: 8127 memcpy(fnode_sess->portal_type, fnode_param->value, 8128 strlen(fnode_sess->portal_type)); 8129 break; 8130 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: 8131 fnode_sess->auto_snd_tgt_disable = 8132 fnode_param->value[0]; 8133 break; 8134 case ISCSI_FLASHNODE_DISCOVERY_SESS: 8135 fnode_sess->discovery_sess = fnode_param->value[0]; 8136 break; 8137 case ISCSI_FLASHNODE_ENTRY_EN: 8138 fnode_sess->entry_state = fnode_param->value[0]; 8139 break; 8140 case ISCSI_FLASHNODE_HDR_DGST_EN: 8141 fnode_conn->hdrdgst_en = fnode_param->value[0]; 8142 break; 8143 case ISCSI_FLASHNODE_DATA_DGST_EN: 8144 fnode_conn->datadgst_en = fnode_param->value[0]; 8145 break; 8146 case ISCSI_FLASHNODE_IMM_DATA_EN: 8147 fnode_sess->imm_data_en = fnode_param->value[0]; 8148 break; 8149 case ISCSI_FLASHNODE_INITIAL_R2T_EN: 8150 fnode_sess->initial_r2t_en = fnode_param->value[0]; 8151 break; 8152 case ISCSI_FLASHNODE_DATASEQ_INORDER: 8153 fnode_sess->dataseq_inorder_en = fnode_param->value[0]; 8154 break; 8155 case ISCSI_FLASHNODE_PDU_INORDER: 8156 fnode_sess->pdu_inorder_en = fnode_param->value[0]; 8157 break; 8158 case ISCSI_FLASHNODE_CHAP_AUTH_EN: 8159 fnode_sess->chap_auth_en = fnode_param->value[0]; 8160 /* Invalidate chap index if chap auth is disabled */ 8161 if (!fnode_sess->chap_auth_en) 8162 fnode_sess->chap_out_idx = INVALID_ENTRY; 8163 8164 break; 8165 case ISCSI_FLASHNODE_SNACK_REQ_EN: 8166 fnode_conn->snack_req_en = fnode_param->value[0]; 8167 break; 8168 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: 8169 fnode_sess->discovery_logout_en = fnode_param->value[0]; 8170 break; 8171 case ISCSI_FLASHNODE_BIDI_CHAP_EN: 8172 fnode_sess->bidi_chap_en = fnode_param->value[0]; 8173 break; 8174 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: 8175 fnode_sess->discovery_auth_optional = 8176 fnode_param->value[0]; 8177 break; 8178 case ISCSI_FLASHNODE_ERL: 8179 fnode_sess->erl = fnode_param->value[0]; 8180 break; 8181 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: 8182 fnode_conn->tcp_timestamp_stat = fnode_param->value[0]; 8183 break; 8184 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: 8185 fnode_conn->tcp_nagle_disable = fnode_param->value[0]; 8186 break; 8187 case ISCSI_FLASHNODE_TCP_WSF_DISABLE: 8188 fnode_conn->tcp_wsf_disable = fnode_param->value[0]; 8189 break; 8190 case ISCSI_FLASHNODE_TCP_TIMER_SCALE: 8191 fnode_conn->tcp_timer_scale = fnode_param->value[0]; 8192 break; 8193 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: 8194 fnode_conn->tcp_timestamp_en = fnode_param->value[0]; 8195 break; 8196 case ISCSI_FLASHNODE_IP_FRAG_DISABLE: 8197 fnode_conn->fragment_disable = fnode_param->value[0]; 8198 break; 8199 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: 8200 fnode_conn->max_recv_dlength = 8201 *(unsigned *)fnode_param->value; 8202 break; 8203 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: 8204 fnode_conn->max_xmit_dlength = 8205 *(unsigned *)fnode_param->value; 8206 break; 8207 case ISCSI_FLASHNODE_FIRST_BURST: 8208 fnode_sess->first_burst = 8209 *(unsigned *)fnode_param->value; 8210 break; 8211 case ISCSI_FLASHNODE_DEF_TIME2WAIT: 8212 fnode_sess->time2wait = *(uint16_t *)fnode_param->value; 8213 break; 8214 case ISCSI_FLASHNODE_DEF_TIME2RETAIN: 8215 fnode_sess->time2retain = 8216 *(uint16_t *)fnode_param->value; 8217 break; 8218 case ISCSI_FLASHNODE_MAX_R2T: 8219 fnode_sess->max_r2t = 8220 *(uint16_t *)fnode_param->value; 8221 break; 8222 case ISCSI_FLASHNODE_KEEPALIVE_TMO: 8223 fnode_conn->keepalive_timeout = 8224 *(uint16_t *)fnode_param->value; 8225 break; 8226 case ISCSI_FLASHNODE_ISID: 8227 memcpy(fnode_sess->isid, fnode_param->value, 8228 sizeof(fnode_sess->isid)); 8229 break; 8230 case ISCSI_FLASHNODE_TSID: 8231 fnode_sess->tsid = *(uint16_t *)fnode_param->value; 8232 break; 8233 case ISCSI_FLASHNODE_PORT: 8234 fnode_conn->port = *(uint16_t *)fnode_param->value; 8235 break; 8236 case ISCSI_FLASHNODE_MAX_BURST: 8237 fnode_sess->max_burst = *(unsigned *)fnode_param->value; 8238 break; 8239 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: 8240 fnode_sess->default_taskmgmt_timeout = 8241 *(uint16_t *)fnode_param->value; 8242 break; 8243 case ISCSI_FLASHNODE_IPADDR: 8244 memcpy(fnode_conn->ipaddress, fnode_param->value, 8245 IPv6_ADDR_LEN); 8246 break; 8247 case ISCSI_FLASHNODE_ALIAS: 8248 rc = iscsi_switch_str_param(&fnode_sess->targetalias, 8249 (char *)fnode_param->value); 8250 break; 8251 case ISCSI_FLASHNODE_REDIRECT_IPADDR: 8252 memcpy(fnode_conn->redirect_ipaddr, fnode_param->value, 8253 IPv6_ADDR_LEN); 8254 break; 8255 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: 8256 fnode_conn->max_segment_size = 8257 *(unsigned *)fnode_param->value; 8258 break; 8259 case ISCSI_FLASHNODE_LOCAL_PORT: 8260 fnode_conn->local_port = 8261 *(uint16_t *)fnode_param->value; 8262 break; 8263 case ISCSI_FLASHNODE_IPV4_TOS: 8264 fnode_conn->ipv4_tos = fnode_param->value[0]; 8265 break; 8266 case ISCSI_FLASHNODE_IPV6_TC: 8267 fnode_conn->ipv6_traffic_class = fnode_param->value[0]; 8268 break; 8269 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: 8270 fnode_conn->ipv6_flow_label = fnode_param->value[0]; 8271 break; 8272 case ISCSI_FLASHNODE_NAME: 8273 rc = iscsi_switch_str_param(&fnode_sess->targetname, 8274 (char *)fnode_param->value); 8275 break; 8276 case ISCSI_FLASHNODE_TPGT: 8277 fnode_sess->tpgt = *(uint16_t *)fnode_param->value; 8278 break; 8279 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: 8280 memcpy(fnode_conn->link_local_ipv6_addr, 8281 fnode_param->value, IPv6_ADDR_LEN); 8282 break; 8283 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: 8284 fnode_sess->discovery_parent_idx = 8285 *(uint16_t *)fnode_param->value; 8286 break; 8287 case ISCSI_FLASHNODE_TCP_XMIT_WSF: 8288 fnode_conn->tcp_xmit_wsf = 8289 *(uint8_t *)fnode_param->value; 8290 break; 8291 case ISCSI_FLASHNODE_TCP_RECV_WSF: 8292 fnode_conn->tcp_recv_wsf = 8293 *(uint8_t *)fnode_param->value; 8294 break; 8295 case ISCSI_FLASHNODE_STATSN: 8296 fnode_conn->statsn = *(uint32_t *)fnode_param->value; 8297 break; 8298 case ISCSI_FLASHNODE_EXP_STATSN: 8299 fnode_conn->exp_statsn = 8300 *(uint32_t *)fnode_param->value; 8301 break; 8302 case ISCSI_FLASHNODE_CHAP_OUT_IDX: 8303 chap_out_idx = *(uint16_t *)fnode_param->value; 8304 if (!qla4xxx_get_uni_chap_at_index(ha, 8305 chap_tbl.name, 8306 chap_tbl.secret, 8307 chap_out_idx)) { 8308 fnode_sess->chap_out_idx = chap_out_idx; 8309 /* Enable chap auth if chap index is valid */ 8310 fnode_sess->chap_auth_en = QL4_PARAM_ENABLE; 8311 } 8312 break; 8313 default: 8314 ql4_printk(KERN_ERR, ha, 8315 "%s: No such sysfs attribute\n", __func__); 8316 rc = -ENOSYS; 8317 goto exit_set_param; 8318 } 8319 } 8320 8321 rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn); 8322 8323 exit_set_param: 8324 return rc; 8325 } 8326 8327 /** 8328 * qla4xxx_sysfs_ddb_delete - Delete firmware DDB entry 8329 * @fnode_sess: pointer to session attrs of flash ddb entry 8330 * 8331 * This invalidates the flash ddb entry at the given index 8332 **/ 8333 static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess) 8334 { 8335 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 8336 struct scsi_qla_host *ha = to_qla_host(shost); 8337 uint32_t dev_db_start_offset; 8338 uint32_t dev_db_end_offset; 8339 struct dev_db_entry *fw_ddb_entry = NULL; 8340 dma_addr_t fw_ddb_entry_dma; 8341 uint16_t *ddb_cookie = NULL; 8342 size_t ddb_size = 0; 8343 void *pddb = NULL; 8344 int target_id; 8345 int rc = 0; 8346 8347 if (fnode_sess->is_boot_target) { 8348 rc = -EPERM; 8349 DEBUG2(ql4_printk(KERN_ERR, ha, 8350 "%s: Deletion of boot target entry is not permitted.\n", 8351 __func__)); 8352 goto exit_ddb_del; 8353 } 8354 8355 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) 8356 goto sysfs_ddb_del; 8357 8358 if (is_qla40XX(ha)) { 8359 dev_db_start_offset = FLASH_OFFSET_DB_INFO; 8360 dev_db_end_offset = FLASH_OFFSET_DB_END; 8361 dev_db_start_offset += (fnode_sess->target_id * 8362 sizeof(*fw_ddb_entry)); 8363 ddb_size = sizeof(*fw_ddb_entry); 8364 } else { 8365 dev_db_start_offset = FLASH_RAW_ACCESS_ADDR + 8366 (ha->hw.flt_region_ddb << 2); 8367 /* flt_ddb_size is DDB table size for both ports 8368 * so divide it by 2 to calculate the offset for second port 8369 */ 8370 if (ha->port_num == 1) 8371 dev_db_start_offset += (ha->hw.flt_ddb_size / 2); 8372 8373 dev_db_end_offset = dev_db_start_offset + 8374 (ha->hw.flt_ddb_size / 2); 8375 8376 dev_db_start_offset += (fnode_sess->target_id * 8377 sizeof(*fw_ddb_entry)); 8378 dev_db_start_offset += offsetof(struct dev_db_entry, cookie); 8379 8380 ddb_size = sizeof(*ddb_cookie); 8381 } 8382 8383 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: start offset=%u, end offset=%u\n", 8384 __func__, dev_db_start_offset, dev_db_end_offset)); 8385 8386 if (dev_db_start_offset > dev_db_end_offset) { 8387 rc = -EIO; 8388 DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %u\n", 8389 __func__, fnode_sess->target_id)); 8390 goto exit_ddb_del; 8391 } 8392 8393 pddb = dma_alloc_coherent(&ha->pdev->dev, ddb_size, 8394 &fw_ddb_entry_dma, GFP_KERNEL); 8395 if (!pddb) { 8396 rc = -ENOMEM; 8397 DEBUG2(ql4_printk(KERN_ERR, ha, 8398 "%s: Unable to allocate dma buffer\n", 8399 __func__)); 8400 goto exit_ddb_del; 8401 } 8402 8403 if (is_qla40XX(ha)) { 8404 fw_ddb_entry = pddb; 8405 memset(fw_ddb_entry, 0, ddb_size); 8406 ddb_cookie = &fw_ddb_entry->cookie; 8407 } else { 8408 ddb_cookie = pddb; 8409 } 8410 8411 /* invalidate the cookie */ 8412 *ddb_cookie = 0xFFEE; 8413 qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, 8414 ddb_size, FLASH_OPT_RMW_COMMIT); 8415 8416 sysfs_ddb_del: 8417 target_id = fnode_sess->target_id; 8418 iscsi_destroy_flashnode_sess(fnode_sess); 8419 ql4_printk(KERN_INFO, ha, 8420 "%s: session and conn entries for flashnode %u of host %lu deleted\n", 8421 __func__, target_id, ha->host_no); 8422 exit_ddb_del: 8423 if (pddb) 8424 dma_free_coherent(&ha->pdev->dev, ddb_size, pddb, 8425 fw_ddb_entry_dma); 8426 return rc; 8427 } 8428 8429 /** 8430 * qla4xxx_sysfs_ddb_export - Create sysfs entries for firmware DDBs 8431 * @ha: pointer to adapter structure 8432 * 8433 * Export the firmware DDB for all send targets and normal targets to sysfs. 8434 **/ 8435 int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha) 8436 { 8437 struct dev_db_entry *fw_ddb_entry = NULL; 8438 dma_addr_t fw_ddb_entry_dma; 8439 uint16_t max_ddbs; 8440 uint16_t idx = 0; 8441 int ret = QLA_SUCCESS; 8442 8443 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, 8444 sizeof(*fw_ddb_entry), 8445 &fw_ddb_entry_dma, GFP_KERNEL); 8446 if (!fw_ddb_entry) { 8447 DEBUG2(ql4_printk(KERN_ERR, ha, 8448 "%s: Unable to allocate dma buffer\n", 8449 __func__)); 8450 return -ENOMEM; 8451 } 8452 8453 max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES : 8454 MAX_DEV_DB_ENTRIES; 8455 8456 for (idx = 0; idx < max_ddbs; idx++) { 8457 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma, 8458 idx)) 8459 continue; 8460 8461 ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 0); 8462 if (ret) { 8463 ret = -EIO; 8464 break; 8465 } 8466 } 8467 8468 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, 8469 fw_ddb_entry_dma); 8470 8471 return ret; 8472 } 8473 8474 static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha) 8475 { 8476 iscsi_destroy_all_flashnode(ha->host); 8477 } 8478 8479 /** 8480 * qla4xxx_build_ddb_list - Build ddb list and setup sessions 8481 * @ha: pointer to adapter structure 8482 * @is_reset: Is this init path or reset path 8483 * 8484 * Create a list of sendtargets (st) from firmware DDBs, issue send targets 8485 * using connection open, then create the list of normal targets (nt) 8486 * from firmware DDBs. Based on the list of nt setup session and connection 8487 * objects. 8488 **/ 8489 void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset) 8490 { 8491 uint16_t tmo = 0; 8492 struct list_head list_st, list_nt; 8493 struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp; 8494 unsigned long wtime; 8495 8496 if (!test_bit(AF_LINK_UP, &ha->flags)) { 8497 set_bit(AF_BUILD_DDB_LIST, &ha->flags); 8498 ha->is_reset = is_reset; 8499 return; 8500 } 8501 8502 INIT_LIST_HEAD(&list_st); 8503 INIT_LIST_HEAD(&list_nt); 8504 8505 qla4xxx_build_st_list(ha, &list_st); 8506 8507 /* Before issuing conn open mbox, ensure all IPs states are configured 8508 * Note, conn open fails if IPs are not configured 8509 */ 8510 qla4xxx_wait_for_ip_configuration(ha); 8511 8512 /* Go thru the STs and fire the sendtargets by issuing conn open mbx */ 8513 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) { 8514 qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx); 8515 } 8516 8517 /* Wait to ensure all sendtargets are done for min 12 sec wait */ 8518 tmo = ((ha->def_timeout > LOGIN_TOV) && 8519 (ha->def_timeout < LOGIN_TOV * 10) ? 8520 ha->def_timeout : LOGIN_TOV); 8521 8522 DEBUG2(ql4_printk(KERN_INFO, ha, 8523 "Default time to wait for build ddb %d\n", tmo)); 8524 8525 wtime = jiffies + (HZ * tmo); 8526 do { 8527 if (list_empty(&list_st)) 8528 break; 8529 8530 qla4xxx_remove_failed_ddb(ha, &list_st); 8531 schedule_timeout_uninterruptible(HZ / 10); 8532 } while (time_after(wtime, jiffies)); 8533 8534 8535 qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset); 8536 8537 qla4xxx_free_ddb_list(&list_st); 8538 qla4xxx_free_ddb_list(&list_nt); 8539 8540 qla4xxx_free_ddb_index(ha); 8541 } 8542 8543 /** 8544 * qla4xxx_wait_login_resp_boot_tgt - Wait for iSCSI boot target login 8545 * response. 8546 * @ha: pointer to adapter structure 8547 * 8548 * When the boot entry is normal iSCSI target then DF_BOOT_TGT flag will be 8549 * set in DDB and we will wait for login response of boot targets during 8550 * probe. 8551 **/ 8552 static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha) 8553 { 8554 struct ddb_entry *ddb_entry; 8555 struct dev_db_entry *fw_ddb_entry = NULL; 8556 dma_addr_t fw_ddb_entry_dma; 8557 unsigned long wtime; 8558 uint32_t ddb_state; 8559 int max_ddbs, idx, ret; 8560 8561 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : 8562 MAX_DEV_DB_ENTRIES; 8563 8564 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 8565 &fw_ddb_entry_dma, GFP_KERNEL); 8566 if (!fw_ddb_entry) { 8567 ql4_printk(KERN_ERR, ha, 8568 "%s: Unable to allocate dma buffer\n", __func__); 8569 goto exit_login_resp; 8570 } 8571 8572 wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV); 8573 8574 for (idx = 0; idx < max_ddbs; idx++) { 8575 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 8576 if (ddb_entry == NULL) 8577 continue; 8578 8579 if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) { 8580 DEBUG2(ql4_printk(KERN_INFO, ha, 8581 "%s: DDB index [%d]\n", __func__, 8582 ddb_entry->fw_ddb_index)); 8583 do { 8584 ret = qla4xxx_get_fwddb_entry(ha, 8585 ddb_entry->fw_ddb_index, 8586 fw_ddb_entry, fw_ddb_entry_dma, 8587 NULL, NULL, &ddb_state, NULL, 8588 NULL, NULL); 8589 if (ret == QLA_ERROR) 8590 goto exit_login_resp; 8591 8592 if ((ddb_state == DDB_DS_SESSION_ACTIVE) || 8593 (ddb_state == DDB_DS_SESSION_FAILED)) 8594 break; 8595 8596 schedule_timeout_uninterruptible(HZ); 8597 8598 } while ((time_after(wtime, jiffies))); 8599 8600 if (!time_after(wtime, jiffies)) { 8601 DEBUG2(ql4_printk(KERN_INFO, ha, 8602 "%s: Login response wait timer expired\n", 8603 __func__)); 8604 goto exit_login_resp; 8605 } 8606 } 8607 } 8608 8609 exit_login_resp: 8610 if (fw_ddb_entry) 8611 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 8612 fw_ddb_entry, fw_ddb_entry_dma); 8613 } 8614 8615 /** 8616 * qla4xxx_probe_adapter - callback function to probe HBA 8617 * @pdev: pointer to pci_dev structure 8618 * @ent: pointer to pci_device entry 8619 * 8620 * This routine will probe for Qlogic 4xxx iSCSI host adapters. 8621 * It returns zero if successful. It also initializes all data necessary for 8622 * the driver. 8623 **/ 8624 static int qla4xxx_probe_adapter(struct pci_dev *pdev, 8625 const struct pci_device_id *ent) 8626 { 8627 int ret = -ENODEV, status; 8628 struct Scsi_Host *host; 8629 struct scsi_qla_host *ha; 8630 uint8_t init_retry_count = 0; 8631 char buf[34]; 8632 struct qla4_8xxx_legacy_intr_set *nx_legacy_intr; 8633 uint32_t dev_state; 8634 8635 if (pci_enable_device(pdev)) 8636 return -1; 8637 8638 host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0); 8639 if (host == NULL) { 8640 printk(KERN_WARNING 8641 "qla4xxx: Couldn't allocate host from scsi layer!\n"); 8642 goto probe_disable_device; 8643 } 8644 8645 /* Clear our data area */ 8646 ha = to_qla_host(host); 8647 memset(ha, 0, sizeof(*ha)); 8648 8649 /* Save the information from PCI BIOS. */ 8650 ha->pdev = pdev; 8651 ha->host = host; 8652 ha->host_no = host->host_no; 8653 ha->func_num = PCI_FUNC(ha->pdev->devfn); 8654 8655 pci_enable_pcie_error_reporting(pdev); 8656 8657 /* Setup Runtime configurable options */ 8658 if (is_qla8022(ha)) { 8659 ha->isp_ops = &qla4_82xx_isp_ops; 8660 ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl; 8661 ha->qdr_sn_window = -1; 8662 ha->ddr_mn_window = -1; 8663 ha->curr_window = 255; 8664 nx_legacy_intr = &legacy_intr[ha->func_num]; 8665 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit; 8666 ha->nx_legacy_intr.tgt_status_reg = 8667 nx_legacy_intr->tgt_status_reg; 8668 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg; 8669 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; 8670 } else if (is_qla8032(ha) || is_qla8042(ha)) { 8671 ha->isp_ops = &qla4_83xx_isp_ops; 8672 ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl; 8673 } else { 8674 ha->isp_ops = &qla4xxx_isp_ops; 8675 } 8676 8677 if (is_qla80XX(ha)) { 8678 rwlock_init(&ha->hw_lock); 8679 ha->pf_bit = ha->func_num << 16; 8680 /* Set EEH reset type to fundamental if required by hba */ 8681 pdev->needs_freset = 1; 8682 } 8683 8684 /* Configure PCI I/O space. */ 8685 ret = ha->isp_ops->iospace_config(ha); 8686 if (ret) 8687 goto probe_failed_ioconfig; 8688 8689 ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n", 8690 pdev->device, pdev->irq, ha->reg); 8691 8692 qla4xxx_config_dma_addressing(ha); 8693 8694 /* Initialize lists and spinlocks. */ 8695 INIT_LIST_HEAD(&ha->free_srb_q); 8696 8697 mutex_init(&ha->mbox_sem); 8698 mutex_init(&ha->chap_sem); 8699 init_completion(&ha->mbx_intr_comp); 8700 init_completion(&ha->disable_acb_comp); 8701 init_completion(&ha->idc_comp); 8702 init_completion(&ha->link_up_comp); 8703 8704 spin_lock_init(&ha->hardware_lock); 8705 spin_lock_init(&ha->work_lock); 8706 8707 /* Initialize work list */ 8708 INIT_LIST_HEAD(&ha->work_list); 8709 8710 /* Allocate dma buffers */ 8711 if (qla4xxx_mem_alloc(ha)) { 8712 ql4_printk(KERN_WARNING, ha, 8713 "[ERROR] Failed to allocate memory for adapter\n"); 8714 8715 ret = -ENOMEM; 8716 goto probe_failed; 8717 } 8718 8719 host->cmd_per_lun = 3; 8720 host->max_channel = 0; 8721 host->max_lun = MAX_LUNS - 1; 8722 host->max_id = MAX_TARGETS; 8723 host->max_cmd_len = IOCB_MAX_CDB_LEN; 8724 host->can_queue = MAX_SRBS ; 8725 host->transportt = qla4xxx_scsi_transport; 8726 8727 pci_set_drvdata(pdev, ha); 8728 8729 ret = scsi_add_host(host, &pdev->dev); 8730 if (ret) 8731 goto probe_failed; 8732 8733 if (is_qla80XX(ha)) 8734 qla4_8xxx_get_flash_info(ha); 8735 8736 if (is_qla8032(ha) || is_qla8042(ha)) { 8737 qla4_83xx_read_reset_template(ha); 8738 /* 8739 * NOTE: If ql4dontresethba==1, set IDC_CTRL DONTRESET_BIT0. 8740 * If DONRESET_BIT0 is set, drivers should not set dev_state 8741 * to NEED_RESET. But if NEED_RESET is set, drivers should 8742 * should honor the reset. 8743 */ 8744 if (ql4xdontresethba == 1) 8745 qla4_83xx_set_idc_dontreset(ha); 8746 } 8747 8748 /* 8749 * Initialize the Host adapter request/response queues and 8750 * firmware 8751 * NOTE: interrupts enabled upon successful completion 8752 */ 8753 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER); 8754 8755 /* Dont retry adapter initialization if IRQ allocation failed */ 8756 if (is_qla80XX(ha) && (status == QLA_ERROR)) 8757 goto skip_retry_init; 8758 8759 while ((!test_bit(AF_ONLINE, &ha->flags)) && 8760 init_retry_count++ < MAX_INIT_RETRIES) { 8761 8762 if (is_qla80XX(ha)) { 8763 ha->isp_ops->idc_lock(ha); 8764 dev_state = qla4_8xxx_rd_direct(ha, 8765 QLA8XXX_CRB_DEV_STATE); 8766 ha->isp_ops->idc_unlock(ha); 8767 if (dev_state == QLA8XXX_DEV_FAILED) { 8768 ql4_printk(KERN_WARNING, ha, "%s: don't retry " 8769 "initialize adapter. H/W is in failed state\n", 8770 __func__); 8771 break; 8772 } 8773 } 8774 DEBUG2(printk("scsi: %s: retrying adapter initialization " 8775 "(%d)\n", __func__, init_retry_count)); 8776 8777 if (ha->isp_ops->reset_chip(ha) == QLA_ERROR) 8778 continue; 8779 8780 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER); 8781 if (is_qla80XX(ha) && (status == QLA_ERROR)) { 8782 if (qla4_8xxx_check_init_adapter_retry(ha) == QLA_ERROR) 8783 goto skip_retry_init; 8784 } 8785 } 8786 8787 skip_retry_init: 8788 if (!test_bit(AF_ONLINE, &ha->flags)) { 8789 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n"); 8790 8791 if ((is_qla8022(ha) && ql4xdontresethba) || 8792 ((is_qla8032(ha) || is_qla8042(ha)) && 8793 qla4_83xx_idc_dontreset(ha))) { 8794 /* Put the device in failed state. */ 8795 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n")); 8796 ha->isp_ops->idc_lock(ha); 8797 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 8798 QLA8XXX_DEV_FAILED); 8799 ha->isp_ops->idc_unlock(ha); 8800 } 8801 ret = -ENODEV; 8802 goto remove_host; 8803 } 8804 8805 /* Startup the kernel thread for this host adapter. */ 8806 DEBUG2(printk("scsi: %s: Starting kernel thread for " 8807 "qla4xxx_dpc\n", __func__)); 8808 sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no); 8809 ha->dpc_thread = create_singlethread_workqueue(buf); 8810 if (!ha->dpc_thread) { 8811 ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n"); 8812 ret = -ENODEV; 8813 goto remove_host; 8814 } 8815 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc); 8816 8817 ha->task_wq = alloc_workqueue("qla4xxx_%lu_task", WQ_MEM_RECLAIM, 1, 8818 ha->host_no); 8819 if (!ha->task_wq) { 8820 ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n"); 8821 ret = -ENODEV; 8822 goto remove_host; 8823 } 8824 8825 /* 8826 * For ISP-8XXX, request_irqs is called in qla4_8xxx_load_risc 8827 * (which is called indirectly by qla4xxx_initialize_adapter), 8828 * so that irqs will be registered after crbinit but before 8829 * mbx_intr_enable. 8830 */ 8831 if (is_qla40XX(ha)) { 8832 ret = qla4xxx_request_irqs(ha); 8833 if (ret) { 8834 ql4_printk(KERN_WARNING, ha, "Failed to reserve " 8835 "interrupt %d already in use.\n", pdev->irq); 8836 goto remove_host; 8837 } 8838 } 8839 8840 pci_save_state(ha->pdev); 8841 ha->isp_ops->enable_intrs(ha); 8842 8843 /* Start timer thread. */ 8844 qla4xxx_start_timer(ha, 1); 8845 8846 set_bit(AF_INIT_DONE, &ha->flags); 8847 8848 qla4_8xxx_alloc_sysfs_attr(ha); 8849 8850 printk(KERN_INFO 8851 " QLogic iSCSI HBA Driver version: %s\n" 8852 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", 8853 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev), 8854 ha->host_no, ha->fw_info.fw_major, ha->fw_info.fw_minor, 8855 ha->fw_info.fw_patch, ha->fw_info.fw_build); 8856 8857 /* Set the driver version */ 8858 if (is_qla80XX(ha)) 8859 qla4_8xxx_set_param(ha, SET_DRVR_VERSION); 8860 8861 if (qla4xxx_setup_boot_info(ha)) 8862 ql4_printk(KERN_ERR, ha, 8863 "%s: No iSCSI boot target configured\n", __func__); 8864 8865 set_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags); 8866 /* Perform the build ddb list and login to each */ 8867 qla4xxx_build_ddb_list(ha, INIT_ADAPTER); 8868 iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb); 8869 qla4xxx_wait_login_resp_boot_tgt(ha); 8870 8871 qla4xxx_create_chap_list(ha); 8872 8873 qla4xxx_create_ifaces(ha); 8874 return 0; 8875 8876 remove_host: 8877 scsi_remove_host(ha->host); 8878 8879 probe_failed: 8880 qla4xxx_free_adapter(ha); 8881 8882 probe_failed_ioconfig: 8883 pci_disable_pcie_error_reporting(pdev); 8884 scsi_host_put(ha->host); 8885 8886 probe_disable_device: 8887 pci_disable_device(pdev); 8888 8889 return ret; 8890 } 8891 8892 /** 8893 * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize 8894 * @ha: pointer to adapter structure 8895 * 8896 * Mark the other ISP-4xxx port to indicate that the driver is being removed, 8897 * so that the other port will not re-initialize while in the process of 8898 * removing the ha due to driver unload or hba hotplug. 8899 **/ 8900 static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha) 8901 { 8902 struct scsi_qla_host *other_ha = NULL; 8903 struct pci_dev *other_pdev = NULL; 8904 int fn = ISP4XXX_PCI_FN_2; 8905 8906 /*iscsi function numbers for ISP4xxx is 1 and 3*/ 8907 if (PCI_FUNC(ha->pdev->devfn) & BIT_1) 8908 fn = ISP4XXX_PCI_FN_1; 8909 8910 other_pdev = 8911 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus), 8912 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), 8913 fn)); 8914 8915 /* Get other_ha if other_pdev is valid and state is enable*/ 8916 if (other_pdev) { 8917 if (atomic_read(&other_pdev->enable_cnt)) { 8918 other_ha = pci_get_drvdata(other_pdev); 8919 if (other_ha) { 8920 set_bit(AF_HA_REMOVAL, &other_ha->flags); 8921 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: " 8922 "Prevent %s reinit\n", __func__, 8923 dev_name(&other_ha->pdev->dev))); 8924 } 8925 } 8926 pci_dev_put(other_pdev); 8927 } 8928 } 8929 8930 static void qla4xxx_destroy_ddb(struct scsi_qla_host *ha, 8931 struct ddb_entry *ddb_entry) 8932 { 8933 struct dev_db_entry *fw_ddb_entry = NULL; 8934 dma_addr_t fw_ddb_entry_dma; 8935 unsigned long wtime; 8936 uint32_t ddb_state; 8937 int options; 8938 int status; 8939 8940 options = LOGOUT_OPTION_CLOSE_SESSION; 8941 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) { 8942 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__); 8943 goto clear_ddb; 8944 } 8945 8946 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 8947 &fw_ddb_entry_dma, GFP_KERNEL); 8948 if (!fw_ddb_entry) { 8949 ql4_printk(KERN_ERR, ha, 8950 "%s: Unable to allocate dma buffer\n", __func__); 8951 goto clear_ddb; 8952 } 8953 8954 wtime = jiffies + (HZ * LOGOUT_TOV); 8955 do { 8956 status = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 8957 fw_ddb_entry, fw_ddb_entry_dma, 8958 NULL, NULL, &ddb_state, NULL, 8959 NULL, NULL); 8960 if (status == QLA_ERROR) 8961 goto free_ddb; 8962 8963 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || 8964 (ddb_state == DDB_DS_SESSION_FAILED)) 8965 goto free_ddb; 8966 8967 schedule_timeout_uninterruptible(HZ); 8968 } while ((time_after(wtime, jiffies))); 8969 8970 free_ddb: 8971 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 8972 fw_ddb_entry, fw_ddb_entry_dma); 8973 clear_ddb: 8974 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); 8975 } 8976 8977 static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha) 8978 { 8979 struct ddb_entry *ddb_entry; 8980 int idx; 8981 8982 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { 8983 8984 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 8985 if ((ddb_entry != NULL) && 8986 (ddb_entry->ddb_type == FLASH_DDB)) { 8987 8988 qla4xxx_destroy_ddb(ha, ddb_entry); 8989 /* 8990 * we have decremented the reference count of the driver 8991 * when we setup the session to have the driver unload 8992 * to be seamless without actually destroying the 8993 * session 8994 **/ 8995 try_module_get(qla4xxx_iscsi_transport.owner); 8996 iscsi_destroy_endpoint(ddb_entry->conn->ep); 8997 qla4xxx_free_ddb(ha, ddb_entry); 8998 iscsi_session_teardown(ddb_entry->sess); 8999 } 9000 } 9001 } 9002 /** 9003 * qla4xxx_remove_adapter - callback function to remove adapter. 9004 * @pdev: PCI device pointer 9005 **/ 9006 static void qla4xxx_remove_adapter(struct pci_dev *pdev) 9007 { 9008 struct scsi_qla_host *ha; 9009 9010 /* 9011 * If the PCI device is disabled then it means probe_adapter had 9012 * failed and resources already cleaned up on probe_adapter exit. 9013 */ 9014 if (!pci_is_enabled(pdev)) 9015 return; 9016 9017 ha = pci_get_drvdata(pdev); 9018 9019 if (is_qla40XX(ha)) 9020 qla4xxx_prevent_other_port_reinit(ha); 9021 9022 /* destroy iface from sysfs */ 9023 qla4xxx_destroy_ifaces(ha); 9024 9025 if ((!ql4xdisablesysfsboot) && ha->boot_kset) 9026 iscsi_boot_destroy_kset(ha->boot_kset); 9027 9028 qla4xxx_destroy_fw_ddb_session(ha); 9029 qla4_8xxx_free_sysfs_attr(ha); 9030 9031 qla4xxx_sysfs_ddb_remove(ha); 9032 scsi_remove_host(ha->host); 9033 9034 qla4xxx_free_adapter(ha); 9035 9036 scsi_host_put(ha->host); 9037 9038 pci_disable_pcie_error_reporting(pdev); 9039 pci_disable_device(pdev); 9040 } 9041 9042 /** 9043 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method. 9044 * @ha: HA context 9045 */ 9046 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha) 9047 { 9048 /* Update our PCI device dma_mask for full 64 bit mask */ 9049 if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) { 9050 dev_dbg(&ha->pdev->dev, 9051 "Failed to set 64 bit PCI consistent mask; " 9052 "using 32 bit.\n"); 9053 dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(32)); 9054 } 9055 } 9056 9057 static int qla4xxx_slave_alloc(struct scsi_device *sdev) 9058 { 9059 struct iscsi_cls_session *cls_sess; 9060 struct iscsi_session *sess; 9061 struct ddb_entry *ddb; 9062 int queue_depth = QL4_DEF_QDEPTH; 9063 9064 cls_sess = starget_to_session(sdev->sdev_target); 9065 sess = cls_sess->dd_data; 9066 ddb = sess->dd_data; 9067 9068 sdev->hostdata = ddb; 9069 9070 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU) 9071 queue_depth = ql4xmaxqdepth; 9072 9073 scsi_change_queue_depth(sdev, queue_depth); 9074 return 0; 9075 } 9076 9077 /** 9078 * qla4xxx_del_from_active_array - returns an active srb 9079 * @ha: Pointer to host adapter structure. 9080 * @index: index into the active_array 9081 * 9082 * This routine removes and returns the srb at the specified index 9083 **/ 9084 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha, 9085 uint32_t index) 9086 { 9087 struct srb *srb = NULL; 9088 struct scsi_cmnd *cmd = NULL; 9089 9090 cmd = scsi_host_find_tag(ha->host, index); 9091 if (!cmd) 9092 return srb; 9093 9094 srb = (struct srb *)CMD_SP(cmd); 9095 if (!srb) 9096 return srb; 9097 9098 /* update counters */ 9099 if (srb->flags & SRB_DMA_VALID) { 9100 ha->iocb_cnt -= srb->iocb_cnt; 9101 if (srb->cmd) 9102 srb->cmd->host_scribble = 9103 (unsigned char *)(unsigned long) MAX_SRBS; 9104 } 9105 return srb; 9106 } 9107 9108 /** 9109 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware 9110 * @ha: Pointer to host adapter structure. 9111 * @cmd: Scsi Command to wait on. 9112 * 9113 * This routine waits for the command to be returned by the Firmware 9114 * for some max time. 9115 **/ 9116 static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha, 9117 struct scsi_cmnd *cmd) 9118 { 9119 int done = 0; 9120 struct srb *rp; 9121 uint32_t max_wait_time = EH_WAIT_CMD_TOV; 9122 int ret = SUCCESS; 9123 9124 /* Dont wait on command if PCI error is being handled 9125 * by PCI AER driver 9126 */ 9127 if (unlikely(pci_channel_offline(ha->pdev)) || 9128 (test_bit(AF_EEH_BUSY, &ha->flags))) { 9129 ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n", 9130 ha->host_no, __func__); 9131 return ret; 9132 } 9133 9134 do { 9135 /* Checking to see if its returned to OS */ 9136 rp = (struct srb *) CMD_SP(cmd); 9137 if (rp == NULL) { 9138 done++; 9139 break; 9140 } 9141 9142 msleep(2000); 9143 } while (max_wait_time--); 9144 9145 return done; 9146 } 9147 9148 /** 9149 * qla4xxx_wait_for_hba_online - waits for HBA to come online 9150 * @ha: Pointer to host adapter structure 9151 **/ 9152 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha) 9153 { 9154 unsigned long wait_online; 9155 9156 wait_online = jiffies + (HBA_ONLINE_TOV * HZ); 9157 while (time_before(jiffies, wait_online)) { 9158 9159 if (adapter_up(ha)) 9160 return QLA_SUCCESS; 9161 9162 msleep(2000); 9163 } 9164 9165 return QLA_ERROR; 9166 } 9167 9168 /** 9169 * qla4xxx_eh_wait_for_commands - wait for active cmds to finish. 9170 * @ha: pointer to HBA 9171 * @stgt: pointer to SCSI target 9172 * @sdev: pointer to SCSI device 9173 * 9174 * This function waits for all outstanding commands to a lun to complete. It 9175 * returns 0 if all pending commands are returned and 1 otherwise. 9176 **/ 9177 static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha, 9178 struct scsi_target *stgt, 9179 struct scsi_device *sdev) 9180 { 9181 int cnt; 9182 int status = 0; 9183 struct scsi_cmnd *cmd; 9184 9185 /* 9186 * Waiting for all commands for the designated target or dev 9187 * in the active array 9188 */ 9189 for (cnt = 0; cnt < ha->host->can_queue; cnt++) { 9190 cmd = scsi_host_find_tag(ha->host, cnt); 9191 if (cmd && stgt == scsi_target(cmd->device) && 9192 (!sdev || sdev == cmd->device)) { 9193 if (!qla4xxx_eh_wait_on_command(ha, cmd)) { 9194 status++; 9195 break; 9196 } 9197 } 9198 } 9199 return status; 9200 } 9201 9202 /** 9203 * qla4xxx_eh_abort - callback for abort task. 9204 * @cmd: Pointer to Linux's SCSI command structure 9205 * 9206 * This routine is called by the Linux OS to abort the specified 9207 * command. 9208 **/ 9209 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd) 9210 { 9211 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 9212 unsigned int id = cmd->device->id; 9213 uint64_t lun = cmd->device->lun; 9214 unsigned long flags; 9215 struct srb *srb = NULL; 9216 int ret = SUCCESS; 9217 int wait = 0; 9218 int rval; 9219 9220 ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n", 9221 ha->host_no, id, lun, cmd, cmd->cmnd[0]); 9222 9223 rval = qla4xxx_isp_check_reg(ha); 9224 if (rval != QLA_SUCCESS) { 9225 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); 9226 return FAILED; 9227 } 9228 9229 spin_lock_irqsave(&ha->hardware_lock, flags); 9230 srb = (struct srb *) CMD_SP(cmd); 9231 if (!srb) { 9232 spin_unlock_irqrestore(&ha->hardware_lock, flags); 9233 ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Specified command has already completed.\n", 9234 ha->host_no, id, lun); 9235 return SUCCESS; 9236 } 9237 kref_get(&srb->srb_ref); 9238 spin_unlock_irqrestore(&ha->hardware_lock, flags); 9239 9240 if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) { 9241 DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx failed.\n", 9242 ha->host_no, id, lun)); 9243 ret = FAILED; 9244 } else { 9245 DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx success.\n", 9246 ha->host_no, id, lun)); 9247 wait = 1; 9248 } 9249 9250 kref_put(&srb->srb_ref, qla4xxx_srb_compl); 9251 9252 /* Wait for command to complete */ 9253 if (wait) { 9254 if (!qla4xxx_eh_wait_on_command(ha, cmd)) { 9255 DEBUG2(printk("scsi%ld:%d:%llu: Abort handler timed out\n", 9256 ha->host_no, id, lun)); 9257 ret = FAILED; 9258 } 9259 } 9260 9261 ql4_printk(KERN_INFO, ha, 9262 "scsi%ld:%d:%llu: Abort command - %s\n", 9263 ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed"); 9264 9265 return ret; 9266 } 9267 9268 /** 9269 * qla4xxx_eh_device_reset - callback for target reset. 9270 * @cmd: Pointer to Linux's SCSI command structure 9271 * 9272 * This routine is called by the Linux OS to reset all luns on the 9273 * specified target. 9274 **/ 9275 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd) 9276 { 9277 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 9278 struct ddb_entry *ddb_entry = cmd->device->hostdata; 9279 int ret = FAILED, stat; 9280 int rval; 9281 9282 if (!ddb_entry) 9283 return ret; 9284 9285 ret = iscsi_block_scsi_eh(cmd); 9286 if (ret) 9287 return ret; 9288 ret = FAILED; 9289 9290 ql4_printk(KERN_INFO, ha, 9291 "scsi%ld:%d:%d:%llu: DEVICE RESET ISSUED.\n", ha->host_no, 9292 cmd->device->channel, cmd->device->id, cmd->device->lun); 9293 9294 DEBUG2(printk(KERN_INFO 9295 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x," 9296 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no, 9297 cmd, jiffies, cmd->request->timeout / HZ, 9298 ha->dpc_flags, cmd->result, cmd->allowed)); 9299 9300 rval = qla4xxx_isp_check_reg(ha); 9301 if (rval != QLA_SUCCESS) { 9302 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); 9303 return FAILED; 9304 } 9305 9306 /* FIXME: wait for hba to go online */ 9307 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun); 9308 if (stat != QLA_SUCCESS) { 9309 ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat); 9310 goto eh_dev_reset_done; 9311 } 9312 9313 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device), 9314 cmd->device)) { 9315 ql4_printk(KERN_INFO, ha, 9316 "DEVICE RESET FAILED - waiting for " 9317 "commands.\n"); 9318 goto eh_dev_reset_done; 9319 } 9320 9321 /* Send marker. */ 9322 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun, 9323 MM_LUN_RESET) != QLA_SUCCESS) 9324 goto eh_dev_reset_done; 9325 9326 ql4_printk(KERN_INFO, ha, 9327 "scsi(%ld:%d:%d:%llu): DEVICE RESET SUCCEEDED.\n", 9328 ha->host_no, cmd->device->channel, cmd->device->id, 9329 cmd->device->lun); 9330 9331 ret = SUCCESS; 9332 9333 eh_dev_reset_done: 9334 9335 return ret; 9336 } 9337 9338 /** 9339 * qla4xxx_eh_target_reset - callback for target reset. 9340 * @cmd: Pointer to Linux's SCSI command structure 9341 * 9342 * This routine is called by the Linux OS to reset the target. 9343 **/ 9344 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd) 9345 { 9346 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 9347 struct ddb_entry *ddb_entry = cmd->device->hostdata; 9348 int stat, ret; 9349 int rval; 9350 9351 if (!ddb_entry) 9352 return FAILED; 9353 9354 ret = iscsi_block_scsi_eh(cmd); 9355 if (ret) 9356 return ret; 9357 9358 starget_printk(KERN_INFO, scsi_target(cmd->device), 9359 "WARM TARGET RESET ISSUED.\n"); 9360 9361 DEBUG2(printk(KERN_INFO 9362 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, " 9363 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n", 9364 ha->host_no, cmd, jiffies, cmd->request->timeout / HZ, 9365 ha->dpc_flags, cmd->result, cmd->allowed)); 9366 9367 rval = qla4xxx_isp_check_reg(ha); 9368 if (rval != QLA_SUCCESS) { 9369 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); 9370 return FAILED; 9371 } 9372 9373 stat = qla4xxx_reset_target(ha, ddb_entry); 9374 if (stat != QLA_SUCCESS) { 9375 starget_printk(KERN_INFO, scsi_target(cmd->device), 9376 "WARM TARGET RESET FAILED.\n"); 9377 return FAILED; 9378 } 9379 9380 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device), 9381 NULL)) { 9382 starget_printk(KERN_INFO, scsi_target(cmd->device), 9383 "WARM TARGET DEVICE RESET FAILED - " 9384 "waiting for commands.\n"); 9385 return FAILED; 9386 } 9387 9388 /* Send marker. */ 9389 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun, 9390 MM_TGT_WARM_RESET) != QLA_SUCCESS) { 9391 starget_printk(KERN_INFO, scsi_target(cmd->device), 9392 "WARM TARGET DEVICE RESET FAILED - " 9393 "marker iocb failed.\n"); 9394 return FAILED; 9395 } 9396 9397 starget_printk(KERN_INFO, scsi_target(cmd->device), 9398 "WARM TARGET RESET SUCCEEDED.\n"); 9399 return SUCCESS; 9400 } 9401 9402 /** 9403 * qla4xxx_is_eh_active - check if error handler is running 9404 * @shost: Pointer to SCSI Host struct 9405 * 9406 * This routine finds that if reset host is called in EH 9407 * scenario or from some application like sg_reset 9408 **/ 9409 static int qla4xxx_is_eh_active(struct Scsi_Host *shost) 9410 { 9411 if (shost->shost_state == SHOST_RECOVERY) 9412 return 1; 9413 return 0; 9414 } 9415 9416 /** 9417 * qla4xxx_eh_host_reset - kernel callback 9418 * @cmd: Pointer to Linux's SCSI command structure 9419 * 9420 * This routine is invoked by the Linux kernel to perform fatal error 9421 * recovery on the specified adapter. 9422 **/ 9423 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd) 9424 { 9425 int return_status = FAILED; 9426 struct scsi_qla_host *ha; 9427 int rval; 9428 9429 ha = to_qla_host(cmd->device->host); 9430 9431 rval = qla4xxx_isp_check_reg(ha); 9432 if (rval != QLA_SUCCESS) { 9433 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); 9434 return FAILED; 9435 } 9436 9437 if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba) 9438 qla4_83xx_set_idc_dontreset(ha); 9439 9440 /* 9441 * For ISP8324 and ISP8042, if IDC_CTRL DONTRESET_BIT0 is set by other 9442 * protocol drivers, we should not set device_state to NEED_RESET 9443 */ 9444 if (ql4xdontresethba || 9445 ((is_qla8032(ha) || is_qla8042(ha)) && 9446 qla4_83xx_idc_dontreset(ha))) { 9447 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", 9448 ha->host_no, __func__)); 9449 9450 /* Clear outstanding srb in queues */ 9451 if (qla4xxx_is_eh_active(cmd->device->host)) 9452 qla4xxx_abort_active_cmds(ha, DID_ABORT << 16); 9453 9454 return FAILED; 9455 } 9456 9457 ql4_printk(KERN_INFO, ha, 9458 "scsi(%ld:%d:%d:%llu): HOST RESET ISSUED.\n", ha->host_no, 9459 cmd->device->channel, cmd->device->id, cmd->device->lun); 9460 9461 if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) { 9462 DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter " 9463 "DEAD.\n", ha->host_no, cmd->device->channel, 9464 __func__)); 9465 9466 return FAILED; 9467 } 9468 9469 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 9470 if (is_qla80XX(ha)) 9471 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 9472 else 9473 set_bit(DPC_RESET_HA, &ha->dpc_flags); 9474 } 9475 9476 if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS) 9477 return_status = SUCCESS; 9478 9479 ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n", 9480 return_status == FAILED ? "FAILED" : "SUCCEEDED"); 9481 9482 return return_status; 9483 } 9484 9485 static int qla4xxx_context_reset(struct scsi_qla_host *ha) 9486 { 9487 uint32_t mbox_cmd[MBOX_REG_COUNT]; 9488 uint32_t mbox_sts[MBOX_REG_COUNT]; 9489 struct addr_ctrl_blk_def *acb = NULL; 9490 uint32_t acb_len = sizeof(struct addr_ctrl_blk_def); 9491 int rval = QLA_SUCCESS; 9492 dma_addr_t acb_dma; 9493 9494 acb = dma_alloc_coherent(&ha->pdev->dev, 9495 sizeof(struct addr_ctrl_blk_def), 9496 &acb_dma, GFP_KERNEL); 9497 if (!acb) { 9498 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", 9499 __func__); 9500 rval = -ENOMEM; 9501 goto exit_port_reset; 9502 } 9503 9504 memset(acb, 0, acb_len); 9505 9506 rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len); 9507 if (rval != QLA_SUCCESS) { 9508 rval = -EIO; 9509 goto exit_free_acb; 9510 } 9511 9512 rval = qla4xxx_disable_acb(ha); 9513 if (rval != QLA_SUCCESS) { 9514 rval = -EIO; 9515 goto exit_free_acb; 9516 } 9517 9518 wait_for_completion_timeout(&ha->disable_acb_comp, 9519 DISABLE_ACB_TOV * HZ); 9520 9521 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma); 9522 if (rval != QLA_SUCCESS) { 9523 rval = -EIO; 9524 goto exit_free_acb; 9525 } 9526 9527 exit_free_acb: 9528 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def), 9529 acb, acb_dma); 9530 exit_port_reset: 9531 DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__, 9532 rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED")); 9533 return rval; 9534 } 9535 9536 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type) 9537 { 9538 struct scsi_qla_host *ha = to_qla_host(shost); 9539 int rval = QLA_SUCCESS; 9540 uint32_t idc_ctrl; 9541 9542 if (ql4xdontresethba) { 9543 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n", 9544 __func__)); 9545 rval = -EPERM; 9546 goto exit_host_reset; 9547 } 9548 9549 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) 9550 goto recover_adapter; 9551 9552 switch (reset_type) { 9553 case SCSI_ADAPTER_RESET: 9554 set_bit(DPC_RESET_HA, &ha->dpc_flags); 9555 break; 9556 case SCSI_FIRMWARE_RESET: 9557 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 9558 if (is_qla80XX(ha)) 9559 /* set firmware context reset */ 9560 set_bit(DPC_RESET_HA_FW_CONTEXT, 9561 &ha->dpc_flags); 9562 else { 9563 rval = qla4xxx_context_reset(ha); 9564 goto exit_host_reset; 9565 } 9566 } 9567 break; 9568 } 9569 9570 recover_adapter: 9571 /* For ISP8324 and ISP8042 set graceful reset bit in IDC_DRV_CTRL if 9572 * reset is issued by application */ 9573 if ((is_qla8032(ha) || is_qla8042(ha)) && 9574 test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 9575 idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); 9576 qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, 9577 (idc_ctrl | GRACEFUL_RESET_BIT1)); 9578 } 9579 9580 rval = qla4xxx_recover_adapter(ha); 9581 if (rval != QLA_SUCCESS) { 9582 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n", 9583 __func__)); 9584 rval = -EIO; 9585 } 9586 9587 exit_host_reset: 9588 return rval; 9589 } 9590 9591 /* PCI AER driver recovers from all correctable errors w/o 9592 * driver intervention. For uncorrectable errors PCI AER 9593 * driver calls the following device driver's callbacks 9594 * 9595 * - Fatal Errors - link_reset 9596 * - Non-Fatal Errors - driver's error_detected() which 9597 * returns CAN_RECOVER, NEED_RESET or DISCONNECT. 9598 * 9599 * PCI AER driver calls 9600 * CAN_RECOVER - driver's mmio_enabled(), mmio_enabled() 9601 * returns RECOVERED or NEED_RESET if fw_hung 9602 * NEED_RESET - driver's slot_reset() 9603 * DISCONNECT - device is dead & cannot recover 9604 * RECOVERED - driver's resume() 9605 */ 9606 static pci_ers_result_t 9607 qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 9608 { 9609 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9610 9611 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n", 9612 ha->host_no, __func__, state); 9613 9614 if (!is_aer_supported(ha)) 9615 return PCI_ERS_RESULT_NONE; 9616 9617 switch (state) { 9618 case pci_channel_io_normal: 9619 clear_bit(AF_EEH_BUSY, &ha->flags); 9620 return PCI_ERS_RESULT_CAN_RECOVER; 9621 case pci_channel_io_frozen: 9622 set_bit(AF_EEH_BUSY, &ha->flags); 9623 qla4xxx_mailbox_premature_completion(ha); 9624 qla4xxx_free_irqs(ha); 9625 pci_disable_device(pdev); 9626 /* Return back all IOs */ 9627 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 9628 return PCI_ERS_RESULT_NEED_RESET; 9629 case pci_channel_io_perm_failure: 9630 set_bit(AF_EEH_BUSY, &ha->flags); 9631 set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags); 9632 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); 9633 return PCI_ERS_RESULT_DISCONNECT; 9634 } 9635 return PCI_ERS_RESULT_NEED_RESET; 9636 } 9637 9638 /** 9639 * qla4xxx_pci_mmio_enabled() gets called if 9640 * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER 9641 * and read/write to the device still works. 9642 * @pdev: PCI device pointer 9643 **/ 9644 static pci_ers_result_t 9645 qla4xxx_pci_mmio_enabled(struct pci_dev *pdev) 9646 { 9647 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9648 9649 if (!is_aer_supported(ha)) 9650 return PCI_ERS_RESULT_NONE; 9651 9652 return PCI_ERS_RESULT_RECOVERED; 9653 } 9654 9655 static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha) 9656 { 9657 uint32_t rval = QLA_ERROR; 9658 int fn; 9659 struct pci_dev *other_pdev = NULL; 9660 9661 ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__); 9662 9663 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 9664 9665 if (test_bit(AF_ONLINE, &ha->flags)) { 9666 clear_bit(AF_ONLINE, &ha->flags); 9667 clear_bit(AF_LINK_UP, &ha->flags); 9668 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session); 9669 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 9670 } 9671 9672 fn = PCI_FUNC(ha->pdev->devfn); 9673 if (is_qla8022(ha)) { 9674 while (fn > 0) { 9675 fn--; 9676 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at func %x\n", 9677 ha->host_no, __func__, fn); 9678 /* Get the pci device given the domain, bus, 9679 * slot/function number */ 9680 other_pdev = pci_get_domain_bus_and_slot( 9681 pci_domain_nr(ha->pdev->bus), 9682 ha->pdev->bus->number, 9683 PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), 9684 fn)); 9685 9686 if (!other_pdev) 9687 continue; 9688 9689 if (atomic_read(&other_pdev->enable_cnt)) { 9690 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI func in enabled state%x\n", 9691 ha->host_no, __func__, fn); 9692 pci_dev_put(other_pdev); 9693 break; 9694 } 9695 pci_dev_put(other_pdev); 9696 } 9697 } else { 9698 /* this case is meant for ISP83xx/ISP84xx only */ 9699 if (qla4_83xx_can_perform_reset(ha)) { 9700 /* reset fn as iSCSI is going to perform the reset */ 9701 fn = 0; 9702 } 9703 } 9704 9705 /* The first function on the card, the reset owner will 9706 * start & initialize the firmware. The other functions 9707 * on the card will reset the firmware context 9708 */ 9709 if (!fn) { 9710 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset " 9711 "0x%x is the owner\n", ha->host_no, __func__, 9712 ha->pdev->devfn); 9713 9714 ha->isp_ops->idc_lock(ha); 9715 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9716 QLA8XXX_DEV_COLD); 9717 ha->isp_ops->idc_unlock(ha); 9718 9719 rval = qla4_8xxx_update_idc_reg(ha); 9720 if (rval == QLA_ERROR) { 9721 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n", 9722 ha->host_no, __func__); 9723 ha->isp_ops->idc_lock(ha); 9724 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9725 QLA8XXX_DEV_FAILED); 9726 ha->isp_ops->idc_unlock(ha); 9727 goto exit_error_recovery; 9728 } 9729 9730 clear_bit(AF_FW_RECOVERY, &ha->flags); 9731 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); 9732 9733 if (rval != QLA_SUCCESS) { 9734 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: " 9735 "FAILED\n", ha->host_no, __func__); 9736 qla4xxx_free_irqs(ha); 9737 ha->isp_ops->idc_lock(ha); 9738 qla4_8xxx_clear_drv_active(ha); 9739 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9740 QLA8XXX_DEV_FAILED); 9741 ha->isp_ops->idc_unlock(ha); 9742 } else { 9743 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: " 9744 "READY\n", ha->host_no, __func__); 9745 ha->isp_ops->idc_lock(ha); 9746 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9747 QLA8XXX_DEV_READY); 9748 /* Clear driver state register */ 9749 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0); 9750 qla4_8xxx_set_drv_active(ha); 9751 ha->isp_ops->idc_unlock(ha); 9752 ha->isp_ops->enable_intrs(ha); 9753 } 9754 } else { 9755 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not " 9756 "the reset owner\n", ha->host_no, __func__, 9757 ha->pdev->devfn); 9758 if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) == 9759 QLA8XXX_DEV_READY)) { 9760 clear_bit(AF_FW_RECOVERY, &ha->flags); 9761 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); 9762 if (rval == QLA_SUCCESS) 9763 ha->isp_ops->enable_intrs(ha); 9764 else 9765 qla4xxx_free_irqs(ha); 9766 9767 ha->isp_ops->idc_lock(ha); 9768 qla4_8xxx_set_drv_active(ha); 9769 ha->isp_ops->idc_unlock(ha); 9770 } 9771 } 9772 exit_error_recovery: 9773 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 9774 return rval; 9775 } 9776 9777 static pci_ers_result_t 9778 qla4xxx_pci_slot_reset(struct pci_dev *pdev) 9779 { 9780 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; 9781 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9782 int rc; 9783 9784 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n", 9785 ha->host_no, __func__); 9786 9787 if (!is_aer_supported(ha)) 9788 return PCI_ERS_RESULT_NONE; 9789 9790 /* Restore the saved state of PCIe device - 9791 * BAR registers, PCI Config space, PCIX, MSI, 9792 * IOV states 9793 */ 9794 pci_restore_state(pdev); 9795 9796 /* pci_restore_state() clears the saved_state flag of the device 9797 * save restored state which resets saved_state flag 9798 */ 9799 pci_save_state(pdev); 9800 9801 /* Initialize device or resume if in suspended state */ 9802 rc = pci_enable_device(pdev); 9803 if (rc) { 9804 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable " 9805 "device after reset\n", ha->host_no, __func__); 9806 goto exit_slot_reset; 9807 } 9808 9809 ha->isp_ops->disable_intrs(ha); 9810 9811 if (is_qla80XX(ha)) { 9812 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) { 9813 ret = PCI_ERS_RESULT_RECOVERED; 9814 goto exit_slot_reset; 9815 } else 9816 goto exit_slot_reset; 9817 } 9818 9819 exit_slot_reset: 9820 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n" 9821 "device after reset\n", ha->host_no, __func__, ret); 9822 return ret; 9823 } 9824 9825 static void 9826 qla4xxx_pci_resume(struct pci_dev *pdev) 9827 { 9828 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9829 int ret; 9830 9831 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n", 9832 ha->host_no, __func__); 9833 9834 ret = qla4xxx_wait_for_hba_online(ha); 9835 if (ret != QLA_SUCCESS) { 9836 ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to " 9837 "resume I/O from slot/link_reset\n", ha->host_no, 9838 __func__); 9839 } 9840 9841 clear_bit(AF_EEH_BUSY, &ha->flags); 9842 } 9843 9844 static const struct pci_error_handlers qla4xxx_err_handler = { 9845 .error_detected = qla4xxx_pci_error_detected, 9846 .mmio_enabled = qla4xxx_pci_mmio_enabled, 9847 .slot_reset = qla4xxx_pci_slot_reset, 9848 .resume = qla4xxx_pci_resume, 9849 }; 9850 9851 static struct pci_device_id qla4xxx_pci_tbl[] = { 9852 { 9853 .vendor = PCI_VENDOR_ID_QLOGIC, 9854 .device = PCI_DEVICE_ID_QLOGIC_ISP4010, 9855 .subvendor = PCI_ANY_ID, 9856 .subdevice = PCI_ANY_ID, 9857 }, 9858 { 9859 .vendor = PCI_VENDOR_ID_QLOGIC, 9860 .device = PCI_DEVICE_ID_QLOGIC_ISP4022, 9861 .subvendor = PCI_ANY_ID, 9862 .subdevice = PCI_ANY_ID, 9863 }, 9864 { 9865 .vendor = PCI_VENDOR_ID_QLOGIC, 9866 .device = PCI_DEVICE_ID_QLOGIC_ISP4032, 9867 .subvendor = PCI_ANY_ID, 9868 .subdevice = PCI_ANY_ID, 9869 }, 9870 { 9871 .vendor = PCI_VENDOR_ID_QLOGIC, 9872 .device = PCI_DEVICE_ID_QLOGIC_ISP8022, 9873 .subvendor = PCI_ANY_ID, 9874 .subdevice = PCI_ANY_ID, 9875 }, 9876 { 9877 .vendor = PCI_VENDOR_ID_QLOGIC, 9878 .device = PCI_DEVICE_ID_QLOGIC_ISP8324, 9879 .subvendor = PCI_ANY_ID, 9880 .subdevice = PCI_ANY_ID, 9881 }, 9882 { 9883 .vendor = PCI_VENDOR_ID_QLOGIC, 9884 .device = PCI_DEVICE_ID_QLOGIC_ISP8042, 9885 .subvendor = PCI_ANY_ID, 9886 .subdevice = PCI_ANY_ID, 9887 }, 9888 {0, 0}, 9889 }; 9890 MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl); 9891 9892 static struct pci_driver qla4xxx_pci_driver = { 9893 .name = DRIVER_NAME, 9894 .id_table = qla4xxx_pci_tbl, 9895 .probe = qla4xxx_probe_adapter, 9896 .remove = qla4xxx_remove_adapter, 9897 .err_handler = &qla4xxx_err_handler, 9898 }; 9899 9900 static int __init qla4xxx_module_init(void) 9901 { 9902 int ret; 9903 9904 if (ql4xqfulltracking) 9905 qla4xxx_driver_template.track_queue_depth = 1; 9906 9907 /* Allocate cache for SRBs. */ 9908 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0, 9909 SLAB_HWCACHE_ALIGN, NULL); 9910 if (srb_cachep == NULL) { 9911 printk(KERN_ERR 9912 "%s: Unable to allocate SRB cache..." 9913 "Failing load!\n", DRIVER_NAME); 9914 ret = -ENOMEM; 9915 goto no_srp_cache; 9916 } 9917 9918 /* Derive version string. */ 9919 strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION); 9920 if (ql4xextended_error_logging) 9921 strcat(qla4xxx_version_str, "-debug"); 9922 9923 qla4xxx_scsi_transport = 9924 iscsi_register_transport(&qla4xxx_iscsi_transport); 9925 if (!qla4xxx_scsi_transport){ 9926 ret = -ENODEV; 9927 goto release_srb_cache; 9928 } 9929 9930 ret = pci_register_driver(&qla4xxx_pci_driver); 9931 if (ret) 9932 goto unregister_transport; 9933 9934 printk(KERN_INFO "QLogic iSCSI HBA Driver\n"); 9935 return 0; 9936 9937 unregister_transport: 9938 iscsi_unregister_transport(&qla4xxx_iscsi_transport); 9939 release_srb_cache: 9940 kmem_cache_destroy(srb_cachep); 9941 no_srp_cache: 9942 return ret; 9943 } 9944 9945 static void __exit qla4xxx_module_exit(void) 9946 { 9947 pci_unregister_driver(&qla4xxx_pci_driver); 9948 iscsi_unregister_transport(&qla4xxx_iscsi_transport); 9949 kmem_cache_destroy(srb_cachep); 9950 } 9951 9952 module_init(qla4xxx_module_init); 9953 module_exit(qla4xxx_module_exit); 9954 9955 MODULE_AUTHOR("QLogic Corporation"); 9956 MODULE_DESCRIPTION("QLogic iSCSI HBA Driver"); 9957 MODULE_LICENSE("GPL"); 9958 MODULE_VERSION(QLA4XXX_DRIVER_VERSION); 9959