/*
 * QLogic iSCSI HBA Driver
 * Copyright (c)   2003-2013 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/iscsi_boot_sysfs.h>
#include <linux/inet.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>

#include "ql4_def.h"
#include "ql4_version.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
#include "ql4_83xx.h"

/*
 * Driver version
 */
static char qla4xxx_version_str[40];

/*
 * SRB allocation cache
 */
static struct kmem_cache *srb_cachep;

/*
 * Module parameter information and variables
 */
static int ql4xdisablesysfsboot = 1;
module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdisablesysfsboot,
		 " Set to disable exporting boot targets to sysfs.\n"
		 "\t\t 0 - Export boot targets\n"
		 "\t\t 1 - Do not export boot targets (Default)");

int ql4xdontresethba;
module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdontresethba,
		 " Don't reset the HBA for driver recovery.\n"
		 "\t\t 0 - It will reset HBA (Default)\n"
		 "\t\t 1 - It will NOT reset HBA");

int ql4xextended_error_logging;
module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xextended_error_logging,
		 " Option to enable extended error logging.\n"
		 "\t\t 0 - no logging (Default)\n"
		 "\t\t 2 - debug logging");

int ql4xenablemsix = 1;
module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql4xenablemsix,
		 " Set to enable MSI or MSI-X interrupt mechanism.\n"
		 "\t\t 0 = enable INTx interrupt mechanism.\n"
		 "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n"
		 "\t\t 2 = enable MSI interrupt mechanism.");

#define QL4_DEF_QDEPTH 32
static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xmaxqdepth,
		 " Maximum queue depth to report for target devices.\n"
		 "\t\t Default: 32.");

static int ql4xqfulltracking = 1;
module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xqfulltracking,
		 " Enable or disable dynamic tracking and adjustment of\n"
		 "\t\t scsi device queue depth.\n"
		 "\t\t 0 - Disable.\n"
		 "\t\t 1 - Enable. (Default)");

static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
MODULE_PARM_DESC(ql4xsess_recovery_tmo,
		" Target Session Recovery Timeout.\n"
		"\t\t Default: 120 sec.");

int ql4xmdcapmask = 0;
module_param(ql4xmdcapmask, int, S_IRUGO);
MODULE_PARM_DESC(ql4xmdcapmask,
		 " Set the Minidump driver capture mask level.\n"
		 "\t\t Default is 0 (firmware default capture mask)\n"
		 "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF");

int ql4xenablemd = 1;
module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xenablemd,
		 " Set to enable minidump.\n"
		 "\t\t 0 - disable minidump\n"
		 "\t\t 1 - enable minidump (Default)");

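/*
 * Usage sketch (hypothetical values): the parameters above can be set at
 * module load time, and those registered with S_IWUSR can also be changed
 * later through /sys/module/qla4xxx/parameters/<name>, e.g.:
 *
 *	modprobe qla4xxx ql4xextended_error_logging=2 ql4xmaxqdepth=64
 *	echo 1 > /sys/module/qla4xxx/parameters/ql4xdontresethba
 */
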
(Default)"); 79 80 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO; 81 module_param(ql4xsess_recovery_tmo, int, S_IRUGO); 82 MODULE_PARM_DESC(ql4xsess_recovery_tmo, 83 " Target Session Recovery Timeout.\n" 84 "\t\t Default: 120 sec."); 85 86 int ql4xmdcapmask = 0; 87 module_param(ql4xmdcapmask, int, S_IRUGO); 88 MODULE_PARM_DESC(ql4xmdcapmask, 89 " Set the Minidump driver capture mask level.\n" 90 "\t\t Default is 0 (firmware default capture mask)\n" 91 "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF"); 92 93 int ql4xenablemd = 1; 94 module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR); 95 MODULE_PARM_DESC(ql4xenablemd, 96 " Set to enable minidump.\n" 97 "\t\t 0 - disable minidump\n" 98 "\t\t 1 - enable minidump (Default)"); 99 100 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha); 101 /* 102 * SCSI host template entry points 103 */ 104 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha); 105 106 /* 107 * iSCSI template entry points 108 */ 109 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess, 110 enum iscsi_param param, char *buf); 111 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn, 112 enum iscsi_param param, char *buf); 113 static int qla4xxx_host_get_param(struct Scsi_Host *shost, 114 enum iscsi_host_param param, char *buf); 115 static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, 116 uint32_t len); 117 static int qla4xxx_get_iface_param(struct iscsi_iface *iface, 118 enum iscsi_param_type param_type, 119 int param, char *buf); 120 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc); 121 static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost, 122 struct sockaddr *dst_addr, 123 int non_blocking); 124 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms); 125 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep); 126 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep, 127 enum iscsi_param param, char *buf); 128 static int qla4xxx_conn_start(struct iscsi_cls_conn *conn); 129 static struct iscsi_cls_conn * 130 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx); 131 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session, 132 struct iscsi_cls_conn *cls_conn, 133 uint64_t transport_fd, int is_leading); 134 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn); 135 static struct iscsi_cls_session * 136 qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, 137 uint16_t qdepth, uint32_t initial_cmdsn); 138 static void qla4xxx_session_destroy(struct iscsi_cls_session *sess); 139 static void qla4xxx_task_work(struct work_struct *wdata); 140 static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t); 141 static int qla4xxx_task_xmit(struct iscsi_task *); 142 static void qla4xxx_task_cleanup(struct iscsi_task *); 143 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session); 144 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn, 145 struct iscsi_stats *stats); 146 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num, 147 uint32_t iface_type, uint32_t payload_size, 148 uint32_t pid, struct sockaddr *dst_addr); 149 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, 150 uint32_t *num_entries, char *buf); 151 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx); 152 static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, 153 int len); 154 static int 

/*
 * SCSI host template entry points
 */
static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
static int qla4xxx_slave_alloc(struct scsi_device *device);
static int qla4xxx_slave_configure(struct scsi_device *device);
static void qla4xxx_slave_destroy(struct scsi_device *sdev);
static umode_t qla4_attr_is_visible(int param_type, int param);
static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
				      int reason);

/*
 * iSCSI Flash DDB sysfs entry points
 */
static int
qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
			    struct iscsi_bus_flash_conn *fnode_conn,
			    void *data, int len);
static int
qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
			    int param, char *buf);
static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
				 int len);
static int
qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess);
static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
				   struct iscsi_bus_flash_conn *fnode_conn);
static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
				    struct iscsi_bus_flash_conn *fnode_conn);
static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess);

static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
    QLA82XX_LEGACY_INTR_CONFIG;

static struct scsi_host_template qla4xxx_driver_template = {
	.module			= THIS_MODULE,
	.name			= DRIVER_NAME,
	.proc_name		= DRIVER_NAME,
	.queuecommand		= qla4xxx_queuecommand,

	.eh_abort_handler	= qla4xxx_eh_abort,
	.eh_device_reset_handler = qla4xxx_eh_device_reset,
	.eh_target_reset_handler = qla4xxx_eh_target_reset,
	.eh_host_reset_handler	= qla4xxx_eh_host_reset,
	.eh_timed_out		= qla4xxx_eh_cmd_timed_out,

	.slave_configure	= qla4xxx_slave_configure,
	.slave_alloc		= qla4xxx_slave_alloc,
	.slave_destroy		= qla4xxx_slave_destroy,
	.change_queue_depth	= qla4xxx_change_queue_depth,

	.this_id		= -1,
	.cmd_per_lun		= 3,
	.use_clustering		= ENABLE_CLUSTERING,
	.sg_tablesize		= SG_ALL,

	.max_sectors		= 0xFFFF,
	.shost_attrs		= qla4xxx_host_attrs,
	.host_reset		= qla4xxx_host_reset,
	.vendor_id		= SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
};

static struct iscsi_transport qla4xxx_iscsi_transport = {
	.owner			= THIS_MODULE,
	.name			= DRIVER_NAME,
	.caps			= CAP_TEXT_NEGO |
				  CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
				  CAP_DATADGST | CAP_LOGIN_OFFLOAD |
				  CAP_MULTI_R2T,
	.attr_is_visible	= qla4_attr_is_visible,
	.create_session		= qla4xxx_session_create,
	.destroy_session	= qla4xxx_session_destroy,
	.start_conn		= qla4xxx_conn_start,
	.create_conn		= qla4xxx_conn_create,
	.bind_conn		= qla4xxx_conn_bind,
	.stop_conn		= iscsi_conn_stop,
	.destroy_conn		= qla4xxx_conn_destroy,
	.set_param		= iscsi_set_param,
	.get_conn_param		= qla4xxx_conn_get_param,
	.get_session_param	= qla4xxx_session_get_param,
	.get_ep_param		= qla4xxx_get_ep_param,
	.ep_connect		= qla4xxx_ep_connect,
	.ep_poll		= qla4xxx_ep_poll,
	.ep_disconnect		= qla4xxx_ep_disconnect,
	.get_stats		= qla4xxx_conn_get_stats,
	.send_pdu		= iscsi_conn_send_pdu,
	.xmit_task		= qla4xxx_task_xmit,
	.cleanup_task		= qla4xxx_task_cleanup,
	.alloc_pdu		= qla4xxx_alloc_pdu,

	.get_host_param		= qla4xxx_host_get_param,
	.set_iface_param	= qla4xxx_iface_set_param,
	.get_iface_param	= qla4xxx_get_iface_param,
	.bsg_request		= qla4xxx_bsg_request,
	.send_ping		= qla4xxx_send_ping,
	.get_chap		= qla4xxx_get_chap_list,
	.delete_chap		= qla4xxx_delete_chap,
	.set_chap		= qla4xxx_set_chap_entry,
	.get_flashnode_param	= qla4xxx_sysfs_ddb_get_param,
	.set_flashnode_param	= qla4xxx_sysfs_ddb_set_param,
	.new_flashnode		= qla4xxx_sysfs_ddb_add,
	.del_flashnode		= qla4xxx_sysfs_ddb_delete,
	.login_flashnode	= qla4xxx_sysfs_ddb_login,
	.logout_flashnode	= qla4xxx_sysfs_ddb_logout,
	.logout_flashnode_sid	= qla4xxx_sysfs_ddb_logout_sid,
	.get_host_stats		= qla4xxx_get_host_stats,
};

static struct scsi_transport_template *qla4xxx_scsi_transport;

static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
			     uint32_t iface_type, uint32_t payload_size,
			     uint32_t pid, struct sockaddr *dst_addr)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct sockaddr_in *addr;
	struct sockaddr_in6 *addr6;
	uint32_t options = 0;
	uint8_t ipaddr[IPv6_ADDR_LEN];
	int rval;

	memset(ipaddr, 0, IPv6_ADDR_LEN);
	/* IPv4 to IPv4 */
	if ((iface_type == ISCSI_IFACE_TYPE_IPV4) &&
	    (dst_addr->sa_family == AF_INET)) {
		addr = (struct sockaddr_in *)dst_addr;
		memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN);
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 "
				  "dest: %pI4\n", __func__,
				  &ha->ip_config.ip_address, ipaddr));
		rval = qla4xxx_ping_iocb(ha, options, payload_size, pid,
					 ipaddr);
		if (rval)
			rval = -EINVAL;
	} else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) &&
		   (dst_addr->sa_family == AF_INET6)) {
		/* IPv6 to IPv6 */
		addr6 = (struct sockaddr_in6 *)dst_addr;
		memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN);

		options |= PING_IPV6_PROTOCOL_ENABLE;

		/* Ping using LinkLocal address */
		if ((iface_num == 0) || (iface_num == 1)) {
			DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping "
					  "src: %pI6 dest: %pI6\n", __func__,
					  &ha->ip_config.ipv6_link_local_addr,
					  ipaddr));
			options |= PING_IPV6_LINKLOCAL_ADDR;
			rval = qla4xxx_ping_iocb(ha, options, payload_size,
						 pid, ipaddr);
		} else {
			ql4_printk(KERN_WARNING, ha, "%s: iface num = %d "
				   "not supported\n", __func__, iface_num);
			rval = -ENOSYS;
			goto exit_send_ping;
		}

		/*
		 * If ping using LinkLocal address fails, try ping using
		 * IPv6 address
		 */
		if (rval != QLA_SUCCESS) {
			options &= ~PING_IPV6_LINKLOCAL_ADDR;
			if (iface_num == 0) {
				options |= PING_IPV6_ADDR0;
				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
						  "Ping src: %pI6 "
						  "dest: %pI6\n", __func__,
						  &ha->ip_config.ipv6_addr0,
						  ipaddr));
			} else if (iface_num == 1) {
				options |= PING_IPV6_ADDR1;
				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
						  "Ping src: %pI6 "
						  "dest: %pI6\n", __func__,
						  &ha->ip_config.ipv6_addr1,
						  ipaddr));
			}
			rval = qla4xxx_ping_iocb(ha, options, payload_size,
						 pid, ipaddr);
			if (rval)
				rval = -EINVAL;
		}
	} else
		rval = -ENOSYS;
exit_send_ping:
	return rval;
}

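/*
 * Note (descriptive comment, not from the original source):
 * qla4_attr_is_visible() is this driver's iscsi_transport ->attr_is_visible
 * callback; it decides which transport-class sysfs attributes are exposed
 * (read-only, S_IRUGO) for each parameter type. Anything not listed in the
 * switches below stays hidden from userspace.
 */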
static umode_t qla4_attr_is_visible(int param_type, int param)
{
	switch (param_type) {
	case ISCSI_HOST_PARAM:
		switch (param) {
		case ISCSI_HOST_PARAM_HWADDRESS:
		case ISCSI_HOST_PARAM_IPADDRESS:
		case ISCSI_HOST_PARAM_INITIATOR_NAME:
		case ISCSI_HOST_PARAM_PORT_STATE:
		case ISCSI_HOST_PARAM_PORT_SPEED:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_PARAM:
		switch (param) {
		case ISCSI_PARAM_PERSISTENT_ADDRESS:
		case ISCSI_PARAM_PERSISTENT_PORT:
		case ISCSI_PARAM_CONN_ADDRESS:
		case ISCSI_PARAM_CONN_PORT:
		case ISCSI_PARAM_TARGET_NAME:
		case ISCSI_PARAM_TPGT:
		case ISCSI_PARAM_TARGET_ALIAS:
		case ISCSI_PARAM_MAX_BURST:
		case ISCSI_PARAM_MAX_R2T:
		case ISCSI_PARAM_FIRST_BURST:
		case ISCSI_PARAM_MAX_RECV_DLENGTH:
		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		case ISCSI_PARAM_IFACE_NAME:
		case ISCSI_PARAM_CHAP_OUT_IDX:
		case ISCSI_PARAM_CHAP_IN_IDX:
		case ISCSI_PARAM_USERNAME:
		case ISCSI_PARAM_PASSWORD:
		case ISCSI_PARAM_USERNAME_IN:
		case ISCSI_PARAM_PASSWORD_IN:
		case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
		case ISCSI_PARAM_DISCOVERY_SESS:
		case ISCSI_PARAM_PORTAL_TYPE:
		case ISCSI_PARAM_CHAP_AUTH_EN:
		case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
		case ISCSI_PARAM_BIDI_CHAP_EN:
		case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
		case ISCSI_PARAM_DEF_TIME2WAIT:
		case ISCSI_PARAM_DEF_TIME2RETAIN:
		case ISCSI_PARAM_HDRDGST_EN:
		case ISCSI_PARAM_DATADGST_EN:
		case ISCSI_PARAM_INITIAL_R2T_EN:
		case ISCSI_PARAM_IMM_DATA_EN:
		case ISCSI_PARAM_PDU_INORDER_EN:
		case ISCSI_PARAM_DATASEQ_INORDER_EN:
		case ISCSI_PARAM_MAX_SEGMENT_SIZE:
		case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
		case ISCSI_PARAM_TCP_WSF_DISABLE:
		case ISCSI_PARAM_TCP_NAGLE_DISABLE:
		case ISCSI_PARAM_TCP_TIMER_SCALE:
		case ISCSI_PARAM_TCP_TIMESTAMP_EN:
		case ISCSI_PARAM_TCP_XMIT_WSF:
		case ISCSI_PARAM_TCP_RECV_WSF:
		case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
		case ISCSI_PARAM_IPV4_TOS:
		case ISCSI_PARAM_IPV6_TC:
		case ISCSI_PARAM_IPV6_FLOW_LABEL:
		case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
		case ISCSI_PARAM_KEEPALIVE_TMO:
		case ISCSI_PARAM_LOCAL_PORT:
		case ISCSI_PARAM_ISID:
		case ISCSI_PARAM_TSID:
		case ISCSI_PARAM_DEF_TASKMGMT_TMO:
		case ISCSI_PARAM_ERL:
		case ISCSI_PARAM_STATSN:
		case ISCSI_PARAM_EXP_STATSN:
		case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
		case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
		case ISCSI_PARAM_LOCAL_IPADDR:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_NET_PARAM:
		switch (param) {
		case ISCSI_NET_PARAM_IPV4_ADDR:
		case ISCSI_NET_PARAM_IPV4_SUBNET:
		case ISCSI_NET_PARAM_IPV4_GW:
		case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
		case ISCSI_NET_PARAM_IFACE_ENABLE:
		case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
		case ISCSI_NET_PARAM_IPV6_ADDR:
		case ISCSI_NET_PARAM_IPV6_ROUTER:
		case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
		case ISCSI_NET_PARAM_VLAN_ID:
		case ISCSI_NET_PARAM_VLAN_PRIORITY:
		case ISCSI_NET_PARAM_VLAN_ENABLED:
		case ISCSI_NET_PARAM_MTU:
		case ISCSI_NET_PARAM_PORT:
		case ISCSI_NET_PARAM_IPADDR_STATE:
		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE:
		case ISCSI_NET_PARAM_IPV6_ROUTER_STATE:
		case ISCSI_NET_PARAM_DELAYED_ACK_EN:
		case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
		case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
		case ISCSI_NET_PARAM_TCP_WSF:
		case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
		case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
		case ISCSI_NET_PARAM_CACHE_ID:
		case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
		case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
		case ISCSI_NET_PARAM_IPV4_TOS_EN:
		case ISCSI_NET_PARAM_IPV4_TOS:
		case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
		case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
		case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
		case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
		case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
		case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
		case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
		case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
		case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
		case ISCSI_NET_PARAM_REDIRECT_EN:
		case ISCSI_NET_PARAM_IPV4_TTL:
		case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
		case ISCSI_NET_PARAM_IPV6_MLD_EN:
		case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
		case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
		case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
		case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
		case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
		case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
		case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
		case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_IFACE_PARAM:
		switch (param) {
		case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
		case ISCSI_IFACE_PARAM_HDRDGST_EN:
		case ISCSI_IFACE_PARAM_DATADGST_EN:
		case ISCSI_IFACE_PARAM_IMM_DATA_EN:
		case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
		case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
		case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
		case ISCSI_IFACE_PARAM_ERL:
		case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
		case ISCSI_IFACE_PARAM_FIRST_BURST:
		case ISCSI_IFACE_PARAM_MAX_R2T:
		case ISCSI_IFACE_PARAM_MAX_BURST:
		case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
		case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
		case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
		case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
		case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
		case ISCSI_IFACE_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_FLASHNODE_PARAM:
		switch (param) {
		case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
		case ISCSI_FLASHNODE_PORTAL_TYPE:
		case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
		case ISCSI_FLASHNODE_DISCOVERY_SESS:
		case ISCSI_FLASHNODE_ENTRY_EN:
		case ISCSI_FLASHNODE_HDR_DGST_EN:
		case ISCSI_FLASHNODE_DATA_DGST_EN:
		case ISCSI_FLASHNODE_IMM_DATA_EN:
		case ISCSI_FLASHNODE_INITIAL_R2T_EN:
		case ISCSI_FLASHNODE_DATASEQ_INORDER:
		case ISCSI_FLASHNODE_PDU_INORDER:
		case ISCSI_FLASHNODE_CHAP_AUTH_EN:
		case ISCSI_FLASHNODE_SNACK_REQ_EN:
		case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
		case ISCSI_FLASHNODE_BIDI_CHAP_EN:
		case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
		case ISCSI_FLASHNODE_ERL:
		case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
		case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
		case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
		case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
		case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
		case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
		case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
		case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
		case ISCSI_FLASHNODE_FIRST_BURST:
		case ISCSI_FLASHNODE_DEF_TIME2WAIT:
		case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
		case ISCSI_FLASHNODE_MAX_R2T:
		case ISCSI_FLASHNODE_KEEPALIVE_TMO:
		case ISCSI_FLASHNODE_ISID:
		case ISCSI_FLASHNODE_TSID:
		case ISCSI_FLASHNODE_PORT:
		case ISCSI_FLASHNODE_MAX_BURST:
		case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
		case ISCSI_FLASHNODE_IPADDR:
		case ISCSI_FLASHNODE_ALIAS:
		case ISCSI_FLASHNODE_REDIRECT_IPADDR:
		case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
		case ISCSI_FLASHNODE_LOCAL_PORT:
		case ISCSI_FLASHNODE_IPV4_TOS:
		case ISCSI_FLASHNODE_IPV6_TC:
		case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
		case ISCSI_FLASHNODE_NAME:
		case ISCSI_FLASHNODE_TPGT:
		case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
		case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
		case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
		case ISCSI_FLASHNODE_TCP_XMIT_WSF:
		case ISCSI_FLASHNODE_TCP_RECV_WSF:
		case ISCSI_FLASHNODE_CHAP_OUT_IDX:
		case ISCSI_FLASHNODE_USERNAME:
		case ISCSI_FLASHNODE_PASSWORD:
		case ISCSI_FLASHNODE_STATSN:
		case ISCSI_FLASHNODE_EXP_STATSN:
		case ISCSI_FLASHNODE_IS_BOOT_TGT:
			return S_IRUGO;
		default:
			return 0;
		}
	}

	return 0;
}

/**
 * qla4xxx_create_chap_list - Create CHAP list from FLASH
 * @ha: pointer to adapter structure
 *
 * Read flash and make a list of CHAP entries. During login, when a CHAP entry
 * is received it is checked against this list. If the entry exists, the CHAP
 * entry index is set in the DDB. If the CHAP entry does not exist in this
 * list, a new entry is added to the CHAP table in FLASH and the index
 * obtained is used in the DDB.
 **/
static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
{
	int rval = 0;
	uint8_t *chap_flash_data = NULL;
	uint32_t offset;
	dma_addr_t chap_dma;
	uint32_t chap_size = 0;

	if (is_qla40XX(ha))
		chap_size = MAX_CHAP_ENTRIES_40XX *
			    sizeof(struct ql4_chap_table);
	else	/* Single region contains CHAP info for both
		 * ports which is divided into half for each port.
		 */
		chap_size = ha->hw.flt_chap_size / 2;

	chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
					     &chap_dma, GFP_KERNEL);
	if (!chap_flash_data) {
		ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
		return;
	}

	if (is_qla40XX(ha)) {
		offset = FLASH_CHAP_OFFSET;
	} else {
		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
		if (ha->port_num == 1)
			offset += chap_size;
	}

	rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
	if (rval != QLA_SUCCESS)
		goto exit_chap_list;

	if (ha->chap_list == NULL)
		ha->chap_list = vmalloc(chap_size);
	if (ha->chap_list == NULL) {
		ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
		goto exit_chap_list;
	}

	memset(ha->chap_list, 0, chap_size);
	memcpy(ha->chap_list, chap_flash_data, chap_size);

exit_chap_list:
	dma_free_coherent(&ha->pdev->dev, chap_size, chap_flash_data, chap_dma);
}

static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha,
				     int16_t chap_index,
				     struct ql4_chap_table **chap_entry)
{
	int rval = QLA_ERROR;
	int max_chap_entries;

	if (!ha->chap_list) {
		ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
		rval = QLA_ERROR;
		goto exit_get_chap;
	}

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	if (chap_index > max_chap_entries) {
		ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
		rval = QLA_ERROR;
		goto exit_get_chap;
	}

	*chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index;
	if ((*chap_entry)->cookie !=
	     __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
		rval = QLA_ERROR;
		*chap_entry = NULL;
	} else {
		rval = QLA_SUCCESS;
	}

exit_get_chap:
	return rval;
}

/**
 * qla4xxx_find_free_chap_index - Find the first free chap index
 * @ha: pointer to adapter structure
 * @chap_index: CHAP index to be returned
 *
 * Find the first free chap index available in the chap table
 *
 * Note: Caller should acquire the chap lock before getting here.
 **/
static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha,
					uint16_t *chap_index)
{
	int i, rval;
	int free_index = -1;
	int max_chap_entries = 0;
	struct ql4_chap_table *chap_table;

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	if (!ha->chap_list) {
		ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
		rval = QLA_ERROR;
		goto exit_find_chap;
	}

	for (i = 0; i < max_chap_entries; i++) {
		chap_table = (struct ql4_chap_table *)ha->chap_list + i;

		if ((chap_table->cookie !=
		    __constant_cpu_to_le16(CHAP_VALID_COOKIE)) &&
		   (i > MAX_RESRV_CHAP_IDX)) {
			free_index = i;
			break;
		}
	}

	if (free_index != -1) {
		*chap_index = free_index;
		rval = QLA_SUCCESS;
	} else {
		rval = QLA_ERROR;
	}

exit_find_chap:
	return rval;
}

static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
				 uint32_t *num_entries, char *buf)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct ql4_chap_table *chap_table;
	struct iscsi_chap_rec *chap_rec;
	int max_chap_entries = 0;
	int valid_chap_entries = 0;
	int ret = 0, i;

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n",
		   __func__, *num_entries, chap_tbl_idx);

	if (!buf) {
		ret = -ENOMEM;
		goto exit_get_chap_list;
	}

	qla4xxx_create_chap_list(ha);

	chap_rec = (struct iscsi_chap_rec *) buf;
	mutex_lock(&ha->chap_sem);
	for (i = chap_tbl_idx; i < max_chap_entries; i++) {
		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
		if (chap_table->cookie !=
		    __constant_cpu_to_le16(CHAP_VALID_COOKIE))
			continue;

		chap_rec->chap_tbl_idx = i;
		strlcpy(chap_rec->username, chap_table->name,
			ISCSI_CHAP_AUTH_NAME_MAX_LEN);
		strlcpy(chap_rec->password, chap_table->secret,
			QL4_CHAP_MAX_SECRET_LEN);
		chap_rec->password_length = chap_table->secret_len;

		if (chap_table->flags & BIT_7) /* local */
			chap_rec->chap_type = CHAP_TYPE_OUT;

		if (chap_table->flags & BIT_6) /* peer */
			chap_rec->chap_type = CHAP_TYPE_IN;

		chap_rec++;

		valid_chap_entries++;
		if (valid_chap_entries == *num_entries)
			break;
		else
			continue;
	}
	mutex_unlock(&ha->chap_sem);

exit_get_chap_list:
	ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n",
		   __func__, valid_chap_entries);
	*num_entries = valid_chap_entries;
	return ret;
}

static int __qla4xxx_is_chap_active(struct device *dev, void *data)
{
	int ret = 0;
	uint16_t *chap_tbl_idx = (uint16_t *) data;
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;

	if (!iscsi_is_session_dev(dev))
		goto exit_is_chap_active;

	cls_session = iscsi_dev_to_session(dev);
	sess = cls_session->dd_data;
	ddb_entry = sess->dd_data;

	if (iscsi_session_chkready(cls_session))
		goto exit_is_chap_active;

	if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
		ret = 1;

exit_is_chap_active:
	return ret;
}

static int qla4xxx_is_chap_active(struct Scsi_Host *shost,
				  uint16_t chap_tbl_idx)
{
	int ret = 0;

	ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx,
				    __qla4xxx_is_chap_active);

	return ret;
}

static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct ql4_chap_table *chap_table;
	dma_addr_t chap_dma;
	int max_chap_entries = 0;
	uint32_t offset = 0;
	uint32_t chap_size;
	int ret = 0;

	chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
	if (chap_table == NULL)
		return -ENOMEM;

	memset(chap_table, 0, sizeof(struct ql4_chap_table));

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	if (chap_tbl_idx > max_chap_entries) {
		ret = -EINVAL;
		goto exit_delete_chap;
	}

	/* Check if chap index is in use.
	 * If chap is in use don't delete chap entry */
	ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
	if (ret) {
		ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot "
			   "delete from flash\n", chap_tbl_idx);
		ret = -EBUSY;
		goto exit_delete_chap;
	}

	chap_size = sizeof(struct ql4_chap_table);
	if (is_qla40XX(ha))
		offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
	else {
		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
		/* flt_chap_size is CHAP table size for both ports
		 * so divide it by 2 to calculate the offset for second port
		 */
		if (ha->port_num == 1)
			offset += (ha->hw.flt_chap_size / 2);
		offset += (chap_tbl_idx * chap_size);
	}

	ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
	if (ret != QLA_SUCCESS) {
		ret = -EINVAL;
		goto exit_delete_chap;
	}

	DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
			  __le16_to_cpu(chap_table->cookie)));

	if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
		ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
		goto exit_delete_chap;
	}

	chap_table->cookie = __constant_cpu_to_le16(0xFFFF);

	offset = FLASH_CHAP_OFFSET |
			(chap_tbl_idx * sizeof(struct ql4_chap_table));
	ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
				FLASH_OPT_RMW_COMMIT);
	if (ret == QLA_SUCCESS && ha->chap_list) {
		mutex_lock(&ha->chap_sem);
		/* Update ha chap_list cache */
		memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
		       chap_table, sizeof(struct ql4_chap_table));
		mutex_unlock(&ha->chap_sem);
	}
	if (ret != QLA_SUCCESS)
		ret = -EINVAL;

exit_delete_chap:
	dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
	return ret;
}

/**
 * qla4xxx_set_chap_entry - Make chap entry with given information
 * @shost: pointer to host
 * @data: chap info - credentials, index and type to make chap entry
 * @len: length of data
 *
 * Add or update chap entry with the given information
 **/
static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct iscsi_chap_rec chap_rec;
	struct ql4_chap_table *chap_entry = NULL;
	struct iscsi_param_info *param_info;
	struct nlattr *attr;
	int max_chap_entries = 0;
	int type;
	int rem = len;
	int rc = 0;
	int size;

	memset(&chap_rec, 0, sizeof(chap_rec));

	nla_for_each_attr(attr, data, len, rem) {
		param_info = nla_data(attr);

		switch (param_info->param) {
		case ISCSI_CHAP_PARAM_INDEX:
			chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value;
			break;
		case ISCSI_CHAP_PARAM_CHAP_TYPE:
			chap_rec.chap_type = param_info->value[0];
			break;
		case ISCSI_CHAP_PARAM_USERNAME:
			size = min_t(size_t, sizeof(chap_rec.username),
				     param_info->len);
			memcpy(chap_rec.username, param_info->value, size);
			break;
		case ISCSI_CHAP_PARAM_PASSWORD:
			size = min_t(size_t, sizeof(chap_rec.password),
				     param_info->len);
			memcpy(chap_rec.password, param_info->value, size);
			break;
		case ISCSI_CHAP_PARAM_PASSWORD_LEN:
			chap_rec.password_length = param_info->value[0];
			break;
		default:
			ql4_printk(KERN_ERR, ha,
				   "%s: No such sysfs attribute\n", __func__);
			rc = -ENOSYS;
			goto exit_set_chap;
		}
	}

	if (chap_rec.chap_type == CHAP_TYPE_IN)
		type = BIDI_CHAP;
	else
		type = LOCAL_CHAP;

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	mutex_lock(&ha->chap_sem);
	if (chap_rec.chap_tbl_idx < max_chap_entries) {
		rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx,
					       &chap_entry);
		if (!rc) {
			if (!(type == qla4xxx_get_chap_type(chap_entry))) {
				ql4_printk(KERN_INFO, ha,
					   "Type mismatch for CHAP entry %d\n",
					   chap_rec.chap_tbl_idx);
				rc = -EINVAL;
				goto exit_unlock_chap;
			}

			/* If chap index is in use then don't modify it */
			rc = qla4xxx_is_chap_active(shost,
						    chap_rec.chap_tbl_idx);
			if (rc) {
				ql4_printk(KERN_INFO, ha,
					   "CHAP entry %d is in use\n",
					   chap_rec.chap_tbl_idx);
				rc = -EBUSY;
				goto exit_unlock_chap;
			}
		}
	} else {
		rc = qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx);
		if (rc) {
			ql4_printk(KERN_INFO, ha, "CHAP entry not available\n");
			rc = -EBUSY;
			goto exit_unlock_chap;
		}
	}

	rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password,
			      chap_rec.chap_tbl_idx, type);

exit_unlock_chap:
	mutex_unlock(&ha->chap_sem);

exit_set_chap:
	return rc;
}


static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct iscsi_offload_host_stats *host_stats = NULL;
	int host_stats_size;
	int ret = 0;
	int ddb_idx = 0;
	struct ql_iscsi_stats *ql_iscsi_stats = NULL;
	int stats_size;
	dma_addr_t iscsi_stats_dma;

	DEBUG2(ql4_printk(KERN_INFO, ha, "Func: %s\n", __func__));

	host_stats_size = sizeof(struct iscsi_offload_host_stats);

	if (host_stats_size != len) {
		ql4_printk(KERN_INFO, ha, "%s: host_stats size mismatch expected = %d, is = %d\n",
			   __func__, len, host_stats_size);
		ret = -EINVAL;
		goto exit_host_stats;
	}
	host_stats = (struct iscsi_offload_host_stats *)buf;

	if (!buf) {
		ret = -ENOMEM;
		goto exit_host_stats;
	}

	stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));

	ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
					    &iscsi_stats_dma, GFP_KERNEL);
	if (!ql_iscsi_stats) {
		ql4_printk(KERN_ERR, ha,
			   "Unable to allocate memory for iscsi stats\n");
		ret = -ENOMEM;
		goto exit_host_stats;
	}

	ret = qla4xxx_get_mgmt_data(ha, ddb_idx, stats_size,
				    iscsi_stats_dma);
	if (ret != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha,
			   "Unable to retrieve iscsi stats\n");
		ret = -EIO;
		goto exit_host_stats;
	}
	host_stats->mactx_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_frames);
	host_stats->mactx_bytes = le64_to_cpu(ql_iscsi_stats->mac_tx_bytes);
	host_stats->mactx_multicast_frames =
			le64_to_cpu(ql_iscsi_stats->mac_tx_multicast_frames);
	host_stats->mactx_broadcast_frames =
			le64_to_cpu(ql_iscsi_stats->mac_tx_broadcast_frames);
	host_stats->mactx_pause_frames =
			le64_to_cpu(ql_iscsi_stats->mac_tx_pause_frames);
	host_stats->mactx_control_frames =
			le64_to_cpu(ql_iscsi_stats->mac_tx_control_frames);
	host_stats->mactx_deferral =
			le64_to_cpu(ql_iscsi_stats->mac_tx_deferral);
	host_stats->mactx_excess_deferral =
			le64_to_cpu(ql_iscsi_stats->mac_tx_excess_deferral);
	host_stats->mactx_late_collision =
			le64_to_cpu(ql_iscsi_stats->mac_tx_late_collision);
	host_stats->mactx_abort = le64_to_cpu(ql_iscsi_stats->mac_tx_abort);
	host_stats->mactx_single_collision =
			le64_to_cpu(ql_iscsi_stats->mac_tx_single_collision);
	host_stats->mactx_multiple_collision =
			le64_to_cpu(ql_iscsi_stats->mac_tx_multiple_collision);
	host_stats->mactx_collision =
			le64_to_cpu(ql_iscsi_stats->mac_tx_collision);
	host_stats->mactx_frames_dropped =
			le64_to_cpu(ql_iscsi_stats->mac_tx_frames_dropped);
	host_stats->mactx_jumbo_frames =
			le64_to_cpu(ql_iscsi_stats->mac_tx_jumbo_frames);
	host_stats->macrx_frames = le64_to_cpu(ql_iscsi_stats->mac_rx_frames);
	host_stats->macrx_bytes = le64_to_cpu(ql_iscsi_stats->mac_rx_bytes);
	host_stats->macrx_unknown_control_frames =
		le64_to_cpu(ql_iscsi_stats->mac_rx_unknown_control_frames);
	host_stats->macrx_pause_frames =
			le64_to_cpu(ql_iscsi_stats->mac_rx_pause_frames);
	host_stats->macrx_control_frames =
			le64_to_cpu(ql_iscsi_stats->mac_rx_control_frames);
	host_stats->macrx_dribble =
			le64_to_cpu(ql_iscsi_stats->mac_rx_dribble);
	host_stats->macrx_frame_length_error =
			le64_to_cpu(ql_iscsi_stats->mac_rx_frame_length_error);
	host_stats->macrx_jabber = le64_to_cpu(ql_iscsi_stats->mac_rx_jabber);
	host_stats->macrx_carrier_sense_error =
		le64_to_cpu(ql_iscsi_stats->mac_rx_carrier_sense_error);
	host_stats->macrx_frame_discarded =
			le64_to_cpu(ql_iscsi_stats->mac_rx_frame_discarded);
	host_stats->macrx_frames_dropped =
			le64_to_cpu(ql_iscsi_stats->mac_rx_frames_dropped);
	host_stats->mac_crc_error = le64_to_cpu(ql_iscsi_stats->mac_crc_error);
	host_stats->mac_encoding_error =
			le64_to_cpu(ql_iscsi_stats->mac_encoding_error);
	host_stats->macrx_length_error_large =
			le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_large);
	host_stats->macrx_length_error_small =
			le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_small);
	host_stats->macrx_multicast_frames =
			le64_to_cpu(ql_iscsi_stats->mac_rx_multicast_frames);
	host_stats->macrx_broadcast_frames =
			le64_to_cpu(ql_iscsi_stats->mac_rx_broadcast_frames);
	host_stats->iptx_packets = le64_to_cpu(ql_iscsi_stats->ip_tx_packets);
	host_stats->iptx_bytes = le64_to_cpu(ql_iscsi_stats->ip_tx_bytes);
	host_stats->iptx_fragments =
			le64_to_cpu(ql_iscsi_stats->ip_tx_fragments);
	host_stats->iprx_packets = le64_to_cpu(ql_iscsi_stats->ip_rx_packets);
	host_stats->iprx_bytes = le64_to_cpu(ql_iscsi_stats->ip_rx_bytes);
	host_stats->iprx_fragments =
			le64_to_cpu(ql_iscsi_stats->ip_rx_fragments);
	host_stats->ip_datagram_reassembly =
			le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly);
	host_stats->ip_invalid_address_error =
			le64_to_cpu(ql_iscsi_stats->ip_invalid_address_error);
	host_stats->ip_error_packets =
			le64_to_cpu(ql_iscsi_stats->ip_error_packets);
	host_stats->ip_fragrx_overlap =
			le64_to_cpu(ql_iscsi_stats->ip_fragrx_overlap);
	host_stats->ip_fragrx_outoforder =
			le64_to_cpu(ql_iscsi_stats->ip_fragrx_outoforder);
	host_stats->ip_datagram_reassembly_timeout =
		le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly_timeout);
	host_stats->ipv6tx_packets =
			le64_to_cpu(ql_iscsi_stats->ipv6_tx_packets);
	host_stats->ipv6tx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_tx_bytes);
	host_stats->ipv6tx_fragments =
			le64_to_cpu(ql_iscsi_stats->ipv6_tx_fragments);
	host_stats->ipv6rx_packets =
			le64_to_cpu(ql_iscsi_stats->ipv6_rx_packets);
	host_stats->ipv6rx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_rx_bytes);
	host_stats->ipv6rx_fragments =
			le64_to_cpu(ql_iscsi_stats->ipv6_rx_fragments);
	host_stats->ipv6_datagram_reassembly =
			le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly);
	host_stats->ipv6_invalid_address_error =
		le64_to_cpu(ql_iscsi_stats->ipv6_invalid_address_error);
	host_stats->ipv6_error_packets =
			le64_to_cpu(ql_iscsi_stats->ipv6_error_packets);
	host_stats->ipv6_fragrx_overlap =
			le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_overlap);
	host_stats->ipv6_fragrx_outoforder =
			le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_outoforder);
	host_stats->ipv6_datagram_reassembly_timeout =
		le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly_timeout);
	host_stats->tcptx_segments =
			le64_to_cpu(ql_iscsi_stats->tcp_tx_segments);
	host_stats->tcptx_bytes = le64_to_cpu(ql_iscsi_stats->tcp_tx_bytes);
	host_stats->tcprx_segments =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_segments);
	host_stats->tcprx_byte = le64_to_cpu(ql_iscsi_stats->tcp_rx_byte);
	host_stats->tcp_duplicate_ack_retx =
			le64_to_cpu(ql_iscsi_stats->tcp_duplicate_ack_retx);
	host_stats->tcp_retx_timer_expired =
			le64_to_cpu(ql_iscsi_stats->tcp_retx_timer_expired);
	host_stats->tcprx_duplicate_ack =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_duplicate_ack);
	host_stats->tcprx_pure_ackr =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_pure_ackr);
	host_stats->tcptx_delayed_ack =
			le64_to_cpu(ql_iscsi_stats->tcp_tx_delayed_ack);
	host_stats->tcptx_pure_ack =
			le64_to_cpu(ql_iscsi_stats->tcp_tx_pure_ack);
	host_stats->tcprx_segment_error =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_error);
	host_stats->tcprx_segment_outoforder =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_outoforder);
	host_stats->tcprx_window_probe =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_window_probe);
	host_stats->tcprx_window_update =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_window_update);
	host_stats->tcptx_window_probe_persist =
		le64_to_cpu(ql_iscsi_stats->tcp_tx_window_probe_persist);
	host_stats->ecc_error_correction =
			le64_to_cpu(ql_iscsi_stats->ecc_error_correction);
	host_stats->iscsi_pdu_tx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_tx);
	host_stats->iscsi_data_bytes_tx =
			le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_tx);
	host_stats->iscsi_pdu_rx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_rx);
	host_stats->iscsi_data_bytes_rx =
			le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_rx);
	host_stats->iscsi_io_completed =
			le64_to_cpu(ql_iscsi_stats->iscsi_io_completed);
	host_stats->iscsi_unexpected_io_rx =
			le64_to_cpu(ql_iscsi_stats->iscsi_unexpected_io_rx);
	host_stats->iscsi_format_error =
			le64_to_cpu(ql_iscsi_stats->iscsi_format_error);
	host_stats->iscsi_hdr_digest_error =
			le64_to_cpu(ql_iscsi_stats->iscsi_hdr_digest_error);
	host_stats->iscsi_data_digest_error =
			le64_to_cpu(ql_iscsi_stats->iscsi_data_digest_error);
	host_stats->iscsi_sequence_error =
			le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error);
exit_host_stats:
	if (ql_iscsi_stats)
		/* free with the same size used for dma_alloc_coherent() */
		dma_free_coherent(&ha->pdev->dev, stats_size,
				  ql_iscsi_stats, iscsi_stats_dma);

	ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n",
		   __func__);
	return ret;
}

static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
				   enum iscsi_param_type param_type,
				   int param, char *buf)
{
	struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
	struct scsi_qla_host *ha = to_qla_host(shost);
	int ival;
	char *pval = NULL;
	int len = -ENOSYS;

	if (param_type == ISCSI_NET_PARAM) {
		switch (param) {
		case ISCSI_NET_PARAM_IPV4_ADDR:
			len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
			break;
		case ISCSI_NET_PARAM_IPV4_SUBNET:
			len = sprintf(buf, "%pI4\n",
				      &ha->ip_config.subnet_mask);
			break;
		case ISCSI_NET_PARAM_IPV4_GW:
			len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
			break;
		case ISCSI_NET_PARAM_IFACE_ENABLE:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
				OP_STATE(ha->ip_config.ipv4_options,
					 IPOPT_IPV4_PROTOCOL_ENABLE, pval);
			} else {
				OP_STATE(ha->ip_config.ipv6_options,
					 IPV6_OPT_IPV6_PROTOCOL_ENABLE, pval);
			}

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
			len = sprintf(buf, "%s\n",
				      (ha->ip_config.tcp_options &
				       TCPOPT_DHCP_ENABLE) ?
				      "dhcp" : "static");
			break;
		case ISCSI_NET_PARAM_IPV6_ADDR:
			if (iface->iface_num == 0)
				len = sprintf(buf, "%pI6\n",
					      &ha->ip_config.ipv6_addr0);
			if (iface->iface_num == 1)
				len = sprintf(buf, "%pI6\n",
					      &ha->ip_config.ipv6_addr1);
			break;
		case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
			len = sprintf(buf, "%pI6\n",
				      &ha->ip_config.ipv6_link_local_addr);
			break;
		case ISCSI_NET_PARAM_IPV6_ROUTER:
			len = sprintf(buf, "%pI6\n",
				      &ha->ip_config.ipv6_default_router_addr);
			break;
		case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
			pval = (ha->ip_config.ipv6_addl_options &
				IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
			       "nd" : "static";

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
			pval = (ha->ip_config.ipv6_addl_options &
				IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
1287 "auto" : "static"; 1288 1289 len = sprintf(buf, "%s\n", pval); 1290 break; 1291 case ISCSI_NET_PARAM_VLAN_ID: 1292 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1293 ival = ha->ip_config.ipv4_vlan_tag & 1294 ISCSI_MAX_VLAN_ID; 1295 else 1296 ival = ha->ip_config.ipv6_vlan_tag & 1297 ISCSI_MAX_VLAN_ID; 1298 1299 len = sprintf(buf, "%d\n", ival); 1300 break; 1301 case ISCSI_NET_PARAM_VLAN_PRIORITY: 1302 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1303 ival = (ha->ip_config.ipv4_vlan_tag >> 13) & 1304 ISCSI_MAX_VLAN_PRIORITY; 1305 else 1306 ival = (ha->ip_config.ipv6_vlan_tag >> 13) & 1307 ISCSI_MAX_VLAN_PRIORITY; 1308 1309 len = sprintf(buf, "%d\n", ival); 1310 break; 1311 case ISCSI_NET_PARAM_VLAN_ENABLED: 1312 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1313 OP_STATE(ha->ip_config.ipv4_options, 1314 IPOPT_VLAN_TAGGING_ENABLE, pval); 1315 } else { 1316 OP_STATE(ha->ip_config.ipv6_options, 1317 IPV6_OPT_VLAN_TAGGING_ENABLE, pval); 1318 } 1319 len = sprintf(buf, "%s\n", pval); 1320 break; 1321 case ISCSI_NET_PARAM_MTU: 1322 len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size); 1323 break; 1324 case ISCSI_NET_PARAM_PORT: 1325 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1326 len = sprintf(buf, "%d\n", 1327 ha->ip_config.ipv4_port); 1328 else 1329 len = sprintf(buf, "%d\n", 1330 ha->ip_config.ipv6_port); 1331 break; 1332 case ISCSI_NET_PARAM_IPADDR_STATE: 1333 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1334 pval = iscsi_get_ipaddress_state_name( 1335 ha->ip_config.ipv4_addr_state); 1336 } else { 1337 if (iface->iface_num == 0) 1338 pval = iscsi_get_ipaddress_state_name( 1339 ha->ip_config.ipv6_addr0_state); 1340 else if (iface->iface_num == 1) 1341 pval = iscsi_get_ipaddress_state_name( 1342 ha->ip_config.ipv6_addr1_state); 1343 } 1344 1345 len = sprintf(buf, "%s\n", pval); 1346 break; 1347 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE: 1348 pval = iscsi_get_ipaddress_state_name( 1349 ha->ip_config.ipv6_link_local_state); 1350 len = sprintf(buf, "%s\n", pval); 1351 break; 1352 case ISCSI_NET_PARAM_IPV6_ROUTER_STATE: 1353 pval = iscsi_get_router_state_name( 1354 ha->ip_config.ipv6_default_router_state); 1355 len = sprintf(buf, "%s\n", pval); 1356 break; 1357 case ISCSI_NET_PARAM_DELAYED_ACK_EN: 1358 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1359 OP_STATE(~ha->ip_config.tcp_options, 1360 TCPOPT_DELAYED_ACK_DISABLE, pval); 1361 } else { 1362 OP_STATE(~ha->ip_config.ipv6_tcp_options, 1363 IPV6_TCPOPT_DELAYED_ACK_DISABLE, pval); 1364 } 1365 len = sprintf(buf, "%s\n", pval); 1366 break; 1367 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: 1368 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1369 OP_STATE(~ha->ip_config.tcp_options, 1370 TCPOPT_NAGLE_ALGO_DISABLE, pval); 1371 } else { 1372 OP_STATE(~ha->ip_config.ipv6_tcp_options, 1373 IPV6_TCPOPT_NAGLE_ALGO_DISABLE, pval); 1374 } 1375 len = sprintf(buf, "%s\n", pval); 1376 break; 1377 case ISCSI_NET_PARAM_TCP_WSF_DISABLE: 1378 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1379 OP_STATE(~ha->ip_config.tcp_options, 1380 TCPOPT_WINDOW_SCALE_DISABLE, pval); 1381 } else { 1382 OP_STATE(~ha->ip_config.ipv6_tcp_options, 1383 IPV6_TCPOPT_WINDOW_SCALE_DISABLE, 1384 pval); 1385 } 1386 len = sprintf(buf, "%s\n", pval); 1387 break; 1388 case ISCSI_NET_PARAM_TCP_WSF: 1389 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1390 len = sprintf(buf, "%d\n", 1391 ha->ip_config.tcp_wsf); 1392 else 1393 len = sprintf(buf, "%d\n", 1394 ha->ip_config.ipv6_tcp_wsf); 1395 break; 1396 case ISCSI_NET_PARAM_TCP_TIMER_SCALE: 1397 if 
				ival = (ha->ip_config.tcp_options &
					TCPOPT_TIMER_SCALE) >> 1;
			else
				ival = (ha->ip_config.ipv6_tcp_options &
					IPV6_TCPOPT_TIMER_SCALE) >> 1;

			len = sprintf(buf, "%d\n", ival);
			break;
		case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
				OP_STATE(ha->ip_config.tcp_options,
					 TCPOPT_TIMESTAMP_ENABLE, pval);
			} else {
				OP_STATE(ha->ip_config.ipv6_tcp_options,
					 IPV6_TCPOPT_TIMESTAMP_EN, pval);
			}
			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_CACHE_ID:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
				len = sprintf(buf, "%d\n",
					      ha->ip_config.ipv4_cache_id);
			else
				len = sprintf(buf, "%d\n",
					      ha->ip_config.ipv6_cache_id);
			break;
		case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
			OP_STATE(ha->ip_config.tcp_options,
				 TCPOPT_DNS_SERVER_IP_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
			OP_STATE(ha->ip_config.tcp_options,
				 TCPOPT_SLP_DA_INFO_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_TOS_EN:
			OP_STATE(ha->ip_config.ipv4_options,
				 IPOPT_IPV4_TOS_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_TOS:
			len = sprintf(buf, "%d\n", ha->ip_config.ipv4_tos);
			break;
		case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
			OP_STATE(ha->ip_config.ipv4_options,
				 IPOPT_GRAT_ARP_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
			OP_STATE(ha->ip_config.ipv4_options, IPOPT_ALT_CID_EN,
				 pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
			pval = (ha->ip_config.ipv4_alt_cid_len) ?
			       (char *)ha->ip_config.ipv4_alt_cid : "";

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
			OP_STATE(ha->ip_config.ipv4_options,
				 IPOPT_REQ_VID_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
			OP_STATE(ha->ip_config.ipv4_options,
				 IPOPT_USE_VID_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
			pval = (ha->ip_config.ipv4_vid_len) ?
			       (char *)ha->ip_config.ipv4_vid : "";

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
			OP_STATE(ha->ip_config.ipv4_options,
				 IPOPT_LEARN_IQN_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
			OP_STATE(~ha->ip_config.ipv4_options,
				 IPOPT_FRAGMENTATION_DISABLE, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
			OP_STATE(ha->ip_config.ipv4_options,
				 IPOPT_IN_FORWARD_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_REDIRECT_EN:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
				OP_STATE(ha->ip_config.ipv4_options,
					 IPOPT_ARP_REDIRECT_EN, pval);
			} else {
				OP_STATE(ha->ip_config.ipv6_options,
					 IPV6_OPT_REDIRECT_EN, pval);
			}
			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_TTL:
			len = sprintf(buf, "%d\n", ha->ip_config.ipv4_ttl);
			break;
		case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
			OP_STATE(ha->ip_config.ipv6_options,
				 IPV6_OPT_GRAT_NEIGHBOR_ADV_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV6_MLD_EN:
			OP_STATE(ha->ip_config.ipv6_addl_options,
				 IPV6_ADDOPT_MLD_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
			len = sprintf(buf, "%u\n", ha->ip_config.ipv6_flow_lbl);
			break;
		case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
			len = sprintf(buf, "%d\n",
				      ha->ip_config.ipv6_traffic_class);
			break;
		case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
			len = sprintf(buf, "%d\n",
				      ha->ip_config.ipv6_hop_limit);
			break;
		case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
			len = sprintf(buf, "%d\n",
				      ha->ip_config.ipv6_nd_reach_time);
			break;
		case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
			len = sprintf(buf, "%d\n",
				      ha->ip_config.ipv6_nd_rexmit_timer);
			break;
		case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
			len = sprintf(buf, "%d\n",
				      ha->ip_config.ipv6_nd_stale_timeout);
			break;
		case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
			len = sprintf(buf, "%d\n",
				      ha->ip_config.ipv6_dup_addr_detect_count);
			break;
		case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
			len = sprintf(buf, "%d\n",
				      ha->ip_config.ipv6_gw_advrt_mtu);
			break;
		default:
			len = -ENOSYS;
		}
	} else if (param_type == ISCSI_IFACE_PARAM) {
		switch (param) {
		case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
			len = sprintf(buf, "%d\n", ha->ip_config.def_timeout);
			break;
		case ISCSI_IFACE_PARAM_HDRDGST_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_HEADER_DIGEST_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_DATADGST_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_DATA_DIGEST_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_IMM_DATA_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_IMMEDIATE_DATA_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_INITIAL_R2T_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_DATA_SEQ_INORDER_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_DATA_PDU_INORDER_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_ERL:
			len = sprintf(buf, "%d\n",
				      (ha->ip_config.iscsi_options &
				       ISCSIOPTS_ERL));
			break;
		case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
			len = sprintf(buf, "%u\n",
				      ha->ip_config.iscsi_max_pdu_size *
				      BYTE_UNITS);
			break;
		case ISCSI_IFACE_PARAM_FIRST_BURST:
			len = sprintf(buf, "%u\n",
				      ha->ip_config.iscsi_first_burst_len *
				      BYTE_UNITS);
			break;
		case ISCSI_IFACE_PARAM_MAX_R2T:
			len = sprintf(buf, "%d\n",
				      ha->ip_config.iscsi_max_outstnd_r2t);
			break;
		case ISCSI_IFACE_PARAM_MAX_BURST:
			len = sprintf(buf, "%u\n",
				      ha->ip_config.iscsi_max_burst_len *
				      BYTE_UNITS);
			break;
		case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_CHAP_AUTH_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_BIDI_CHAP_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_DISCOVERY_AUTH_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_DISCOVERY_LOGOUT_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_STRICT_LOGIN_COMP_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_INITIATOR_NAME:
			len = sprintf(buf, "%s\n", ha->ip_config.iscsi_name);
			break;
		default:
			len = -ENOSYS;
		}
	}

	return len;
}

static struct iscsi_endpoint *
qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
		   int non_blocking)
{
	int ret;
	struct iscsi_endpoint *ep;
	struct qla_endpoint *qla_ep;
	struct scsi_qla_host *ha;
	struct sockaddr_in *addr;
	struct sockaddr_in6 *addr6;

	if (!shost) {
		ret = -ENXIO;
		pr_err("%s: shost is NULL\n", __func__);
		return ERR_PTR(ret);
	}

	ha = iscsi_host_priv(shost);
	ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
	if (!ep) {
		ret = -ENOMEM;
		return ERR_PTR(ret);
	}

	qla_ep = ep->dd_data;
	memset(qla_ep, 0, sizeof(struct qla_endpoint));
	if (dst_addr->sa_family == AF_INET) {
		memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
		addr = (struct sockaddr_in *)&qla_ep->dst_addr;
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
				  (char *)&addr->sin_addr));
	} else if (dst_addr->sa_family == AF_INET6) {
		memcpy(&qla_ep->dst_addr, dst_addr,
		       sizeof(struct sockaddr_in6));
		addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
				  (char *)&addr6->sin6_addr));
	} else {
		ql4_printk(KERN_WARNING, ha, "%s: Invalid endpoint\n",
			   __func__);
	}

	qla_ep->host = shost;

	return ep;
}

static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
*ep, int timeout_ms) 1712 { 1713 struct qla_endpoint *qla_ep; 1714 struct scsi_qla_host *ha; 1715 int ret = 0; 1716 1717 qla_ep = ep->dd_data; 1718 ha = to_qla_host(qla_ep->host); 1719 DEBUG2(pr_info_ratelimited("%s: host: %ld\n", __func__, ha->host_no)); 1720 1721 if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags)) 1722 ret = 1; 1723 1724 return ret; 1725 } 1726 1727 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep) 1728 { 1729 struct qla_endpoint *qla_ep; 1730 struct scsi_qla_host *ha; 1731 1732 qla_ep = ep->dd_data; 1733 ha = to_qla_host(qla_ep->host); 1734 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, 1735 ha->host_no)); 1736 iscsi_destroy_endpoint(ep); 1737 } 1738 1739 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep, 1740 enum iscsi_param param, 1741 char *buf) 1742 { 1743 struct qla_endpoint *qla_ep = ep->dd_data; 1744 struct sockaddr *dst_addr; 1745 struct scsi_qla_host *ha; 1746 1747 if (!qla_ep) 1748 return -ENOTCONN; 1749 1750 ha = to_qla_host(qla_ep->host); 1751 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, 1752 ha->host_no)); 1753 1754 switch (param) { 1755 case ISCSI_PARAM_CONN_PORT: 1756 case ISCSI_PARAM_CONN_ADDRESS: 1757 dst_addr = (struct sockaddr *)&qla_ep->dst_addr; 1758 if (!dst_addr) 1759 return -ENOTCONN; 1760 1761 return iscsi_conn_get_addr_param((struct sockaddr_storage *) 1762 &qla_ep->dst_addr, param, buf); 1763 default: 1764 return -ENOSYS; 1765 } 1766 } 1767 1768 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn, 1769 struct iscsi_stats *stats) 1770 { 1771 struct iscsi_session *sess; 1772 struct iscsi_cls_session *cls_sess; 1773 struct ddb_entry *ddb_entry; 1774 struct scsi_qla_host *ha; 1775 struct ql_iscsi_stats *ql_iscsi_stats; 1776 int stats_size; 1777 int ret; 1778 dma_addr_t iscsi_stats_dma; 1779 1780 cls_sess = iscsi_conn_to_session(cls_conn); 1781 sess = cls_sess->dd_data; 1782 ddb_entry = sess->dd_data; 1783 ha = ddb_entry->ha; 1784 1785 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, 1786 ha->host_no)); 1787 stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats)); 1788 /* Allocate memory */ 1789 ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size, 1790 &iscsi_stats_dma, GFP_KERNEL); 1791 if (!ql_iscsi_stats) { 1792 ql4_printk(KERN_ERR, ha, 1793 "Unable to allocate memory for iscsi stats\n"); 1794 goto exit_get_stats; 1795 } 1796 1797 ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size, 1798 iscsi_stats_dma); 1799 if (ret != QLA_SUCCESS) { 1800 ql4_printk(KERN_ERR, ha, 1801 "Unable to retrieve iscsi stats\n"); 1802 goto free_stats; 1803 } 1804 1805 /* octets */ 1806 stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets); 1807 stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets); 1808 /* xmit pdus */ 1809 stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus); 1810 stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus); 1811 stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus); 1812 stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus); 1813 stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus); 1814 stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus); 1815 stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus); 1816 stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus); 1817 /* recv pdus */ 1818 stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus); 1819 stats->scsirsp_pdus = 
le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus); 1820 stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus); 1821 stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus); 1822 stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus); 1823 stats->logoutrsp_pdus = 1824 le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus); 1825 stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus); 1826 stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus); 1827 stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus); 1828 1829 free_stats: 1830 dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats, 1831 iscsi_stats_dma); 1832 exit_get_stats: 1833 return; 1834 } 1835 1836 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc) 1837 { 1838 struct iscsi_cls_session *session; 1839 struct iscsi_session *sess; 1840 unsigned long flags; 1841 enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED; 1842 1843 session = starget_to_session(scsi_target(sc->device)); 1844 sess = session->dd_data; 1845 1846 spin_lock_irqsave(&session->lock, flags); 1847 if (session->state == ISCSI_SESSION_FAILED) 1848 ret = BLK_EH_RESET_TIMER; 1849 spin_unlock_irqrestore(&session->lock, flags); 1850 1851 return ret; 1852 } 1853 1854 static void qla4xxx_set_port_speed(struct Scsi_Host *shost) 1855 { 1856 struct scsi_qla_host *ha = to_qla_host(shost); 1857 struct iscsi_cls_host *ihost = shost->shost_data; 1858 uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN; 1859 1860 qla4xxx_get_firmware_state(ha); 1861 1862 switch (ha->addl_fw_state & 0x0F00) { 1863 case FW_ADDSTATE_LINK_SPEED_10MBPS: 1864 speed = ISCSI_PORT_SPEED_10MBPS; 1865 break; 1866 case FW_ADDSTATE_LINK_SPEED_100MBPS: 1867 speed = ISCSI_PORT_SPEED_100MBPS; 1868 break; 1869 case FW_ADDSTATE_LINK_SPEED_1GBPS: 1870 speed = ISCSI_PORT_SPEED_1GBPS; 1871 break; 1872 case FW_ADDSTATE_LINK_SPEED_10GBPS: 1873 speed = ISCSI_PORT_SPEED_10GBPS; 1874 break; 1875 } 1876 ihost->port_speed = speed; 1877 } 1878 1879 static void qla4xxx_set_port_state(struct Scsi_Host *shost) 1880 { 1881 struct scsi_qla_host *ha = to_qla_host(shost); 1882 struct iscsi_cls_host *ihost = shost->shost_data; 1883 uint32_t state = ISCSI_PORT_STATE_DOWN; 1884 1885 if (test_bit(AF_LINK_UP, &ha->flags)) 1886 state = ISCSI_PORT_STATE_UP; 1887 1888 ihost->port_state = state; 1889 } 1890 1891 static int qla4xxx_host_get_param(struct Scsi_Host *shost, 1892 enum iscsi_host_param param, char *buf) 1893 { 1894 struct scsi_qla_host *ha = to_qla_host(shost); 1895 int len; 1896 1897 switch (param) { 1898 case ISCSI_HOST_PARAM_HWADDRESS: 1899 len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN); 1900 break; 1901 case ISCSI_HOST_PARAM_IPADDRESS: 1902 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address); 1903 break; 1904 case ISCSI_HOST_PARAM_INITIATOR_NAME: 1905 len = sprintf(buf, "%s\n", ha->name_string); 1906 break; 1907 case ISCSI_HOST_PARAM_PORT_STATE: 1908 qla4xxx_set_port_state(shost); 1909 len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost)); 1910 break; 1911 case ISCSI_HOST_PARAM_PORT_SPEED: 1912 qla4xxx_set_port_speed(shost); 1913 len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost)); 1914 break; 1915 default: 1916 return -ENOSYS; 1917 } 1918 1919 return len; 1920 } 1921 1922 static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha) 1923 { 1924 if (ha->iface_ipv4) 1925 return; 1926 1927 /* IPv4 */ 1928 ha->iface_ipv4 = iscsi_create_iface(ha->host, 1929 &qla4xxx_iscsi_transport, 1930 ISCSI_IFACE_TYPE_IPV4, 0, 0); 1931 if 
(!ha->iface_ipv4) 1932 ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI " 1933 "iface0.\n"); 1934 } 1935 1936 static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha) 1937 { 1938 if (!ha->iface_ipv6_0) 1939 /* IPv6 iface-0 */ 1940 ha->iface_ipv6_0 = iscsi_create_iface(ha->host, 1941 &qla4xxx_iscsi_transport, 1942 ISCSI_IFACE_TYPE_IPV6, 0, 1943 0); 1944 if (!ha->iface_ipv6_0) 1945 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI " 1946 "iface0.\n"); 1947 1948 if (!ha->iface_ipv6_1) 1949 /* IPv6 iface-1 */ 1950 ha->iface_ipv6_1 = iscsi_create_iface(ha->host, 1951 &qla4xxx_iscsi_transport, 1952 ISCSI_IFACE_TYPE_IPV6, 1, 1953 0); 1954 if (!ha->iface_ipv6_1) 1955 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI " 1956 "iface1.\n"); 1957 } 1958 1959 static void qla4xxx_create_ifaces(struct scsi_qla_host *ha) 1960 { 1961 if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE) 1962 qla4xxx_create_ipv4_iface(ha); 1963 1964 if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE) 1965 qla4xxx_create_ipv6_iface(ha); 1966 } 1967 1968 static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha) 1969 { 1970 if (ha->iface_ipv4) { 1971 iscsi_destroy_iface(ha->iface_ipv4); 1972 ha->iface_ipv4 = NULL; 1973 } 1974 } 1975 1976 static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha) 1977 { 1978 if (ha->iface_ipv6_0) { 1979 iscsi_destroy_iface(ha->iface_ipv6_0); 1980 ha->iface_ipv6_0 = NULL; 1981 } 1982 if (ha->iface_ipv6_1) { 1983 iscsi_destroy_iface(ha->iface_ipv6_1); 1984 ha->iface_ipv6_1 = NULL; 1985 } 1986 } 1987 1988 static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha) 1989 { 1990 qla4xxx_destroy_ipv4_iface(ha); 1991 qla4xxx_destroy_ipv6_iface(ha); 1992 } 1993 1994 static void qla4xxx_set_ipv6(struct scsi_qla_host *ha, 1995 struct iscsi_iface_param_info *iface_param, 1996 struct addr_ctrl_blk *init_fw_cb) 1997 { 1998 /* 1999 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg. 2000 * iface_num 1 is valid only for IPv6 Addr. 
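 * Settings that are valid only for iface_num 0 are skipped below
 * whenever an odd interface number is passed in, i.e. when
 * (iface_param->iface_num & 0x1) is set.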
2001 */ 2002 switch (iface_param->param) { 2003 case ISCSI_NET_PARAM_IPV6_ADDR: 2004 if (iface_param->iface_num & 0x1) 2005 /* IPv6 Addr 1 */ 2006 memcpy(init_fw_cb->ipv6_addr1, iface_param->value, 2007 sizeof(init_fw_cb->ipv6_addr1)); 2008 else 2009 /* IPv6 Addr 0 */ 2010 memcpy(init_fw_cb->ipv6_addr0, iface_param->value, 2011 sizeof(init_fw_cb->ipv6_addr0)); 2012 break; 2013 case ISCSI_NET_PARAM_IPV6_LINKLOCAL: 2014 if (iface_param->iface_num & 0x1) 2015 break; 2016 memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8], 2017 sizeof(init_fw_cb->ipv6_if_id)); 2018 break; 2019 case ISCSI_NET_PARAM_IPV6_ROUTER: 2020 if (iface_param->iface_num & 0x1) 2021 break; 2022 memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value, 2023 sizeof(init_fw_cb->ipv6_dflt_rtr_addr)); 2024 break; 2025 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: 2026 /* Autocfg applies to even interface */ 2027 if (iface_param->iface_num & 0x1) 2028 break; 2029 2030 if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE) 2031 init_fw_cb->ipv6_addtl_opts &= 2032 cpu_to_le16( 2033 ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE); 2034 else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE) 2035 init_fw_cb->ipv6_addtl_opts |= 2036 cpu_to_le16( 2037 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE); 2038 else 2039 ql4_printk(KERN_ERR, ha, 2040 "Invalid autocfg setting for IPv6 addr\n"); 2041 break; 2042 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: 2043 /* Autocfg applies to even interface */ 2044 if (iface_param->iface_num & 0x1) 2045 break; 2046 2047 if (iface_param->value[0] == 2048 ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE) 2049 init_fw_cb->ipv6_addtl_opts |= cpu_to_le16( 2050 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR); 2051 else if (iface_param->value[0] == 2052 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE) 2053 init_fw_cb->ipv6_addtl_opts &= cpu_to_le16( 2054 ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR); 2055 else 2056 ql4_printk(KERN_ERR, ha, 2057 "Invalid autocfg setting for IPv6 linklocal addr\n"); 2058 break; 2059 case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG: 2060 /* Autocfg applies to even interface */ 2061 if (iface_param->iface_num & 0x1) 2062 break; 2063 2064 if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE) 2065 memset(init_fw_cb->ipv6_dflt_rtr_addr, 0, 2066 sizeof(init_fw_cb->ipv6_dflt_rtr_addr)); 2067 break; 2068 case ISCSI_NET_PARAM_IFACE_ENABLE: 2069 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) { 2070 init_fw_cb->ipv6_opts |= 2071 cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE); 2072 qla4xxx_create_ipv6_iface(ha); 2073 } else { 2074 init_fw_cb->ipv6_opts &= 2075 cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE & 2076 0xFFFF); 2077 qla4xxx_destroy_ipv6_iface(ha); 2078 } 2079 break; 2080 case ISCSI_NET_PARAM_VLAN_TAG: 2081 if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag)) 2082 break; 2083 init_fw_cb->ipv6_vlan_tag = 2084 cpu_to_be16(*(uint16_t *)iface_param->value); 2085 break; 2086 case ISCSI_NET_PARAM_VLAN_ENABLED: 2087 if (iface_param->value[0] == ISCSI_VLAN_ENABLE) 2088 init_fw_cb->ipv6_opts |= 2089 cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE); 2090 else 2091 init_fw_cb->ipv6_opts &= 2092 cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE); 2093 break; 2094 case ISCSI_NET_PARAM_MTU: 2095 init_fw_cb->eth_mtu_size = 2096 cpu_to_le16(*(uint16_t *)iface_param->value); 2097 break; 2098 case ISCSI_NET_PARAM_PORT: 2099 /* Autocfg applies to even interface */ 2100 if (iface_param->iface_num & 0x1) 2101 break; 2102 2103 init_fw_cb->ipv6_port = 2104 cpu_to_le16(*(uint16_t *)iface_param->value); 2105 break; 2106 case 
ISCSI_NET_PARAM_DELAYED_ACK_EN: 2107 if (iface_param->iface_num & 0x1) 2108 break; 2109 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2110 init_fw_cb->ipv6_tcp_opts |= 2111 cpu_to_le16(IPV6_TCPOPT_DELAYED_ACK_DISABLE); 2112 else 2113 init_fw_cb->ipv6_tcp_opts &= 2114 cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE & 2115 0xFFFF); 2116 break; 2117 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: 2118 if (iface_param->iface_num & 0x1) 2119 break; 2120 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2121 init_fw_cb->ipv6_tcp_opts |= 2122 cpu_to_le16(IPV6_TCPOPT_NAGLE_ALGO_DISABLE); 2123 else 2124 init_fw_cb->ipv6_tcp_opts &= 2125 cpu_to_le16(~IPV6_TCPOPT_NAGLE_ALGO_DISABLE); 2126 break; 2127 case ISCSI_NET_PARAM_TCP_WSF_DISABLE: 2128 if (iface_param->iface_num & 0x1) 2129 break; 2130 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2131 init_fw_cb->ipv6_tcp_opts |= 2132 cpu_to_le16(IPV6_TCPOPT_WINDOW_SCALE_DISABLE); 2133 else 2134 init_fw_cb->ipv6_tcp_opts &= 2135 cpu_to_le16(~IPV6_TCPOPT_WINDOW_SCALE_DISABLE); 2136 break; 2137 case ISCSI_NET_PARAM_TCP_WSF: 2138 if (iface_param->iface_num & 0x1) 2139 break; 2140 init_fw_cb->ipv6_tcp_wsf = iface_param->value[0]; 2141 break; 2142 case ISCSI_NET_PARAM_TCP_TIMER_SCALE: 2143 if (iface_param->iface_num & 0x1) 2144 break; 2145 init_fw_cb->ipv6_tcp_opts &= 2146 cpu_to_le16(~IPV6_TCPOPT_TIMER_SCALE); 2147 init_fw_cb->ipv6_tcp_opts |= 2148 cpu_to_le16((iface_param->value[0] << 1) & 2149 IPV6_TCPOPT_TIMER_SCALE); 2150 break; 2151 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: 2152 if (iface_param->iface_num & 0x1) 2153 break; 2154 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2155 init_fw_cb->ipv6_tcp_opts |= 2156 cpu_to_le16(IPV6_TCPOPT_TIMESTAMP_EN); 2157 else 2158 init_fw_cb->ipv6_tcp_opts &= 2159 cpu_to_le16(~IPV6_TCPOPT_TIMESTAMP_EN); 2160 break; 2161 case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: 2162 if (iface_param->iface_num & 0x1) 2163 break; 2164 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2165 init_fw_cb->ipv6_opts |= 2166 cpu_to_le16(IPV6_OPT_GRAT_NEIGHBOR_ADV_EN); 2167 else 2168 init_fw_cb->ipv6_opts &= 2169 cpu_to_le16(~IPV6_OPT_GRAT_NEIGHBOR_ADV_EN); 2170 break; 2171 case ISCSI_NET_PARAM_REDIRECT_EN: 2172 if (iface_param->iface_num & 0x1) 2173 break; 2174 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2175 init_fw_cb->ipv6_opts |= 2176 cpu_to_le16(IPV6_OPT_REDIRECT_EN); 2177 else 2178 init_fw_cb->ipv6_opts &= 2179 cpu_to_le16(~IPV6_OPT_REDIRECT_EN); 2180 break; 2181 case ISCSI_NET_PARAM_IPV6_MLD_EN: 2182 if (iface_param->iface_num & 0x1) 2183 break; 2184 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2185 init_fw_cb->ipv6_addtl_opts |= 2186 cpu_to_le16(IPV6_ADDOPT_MLD_EN); 2187 else 2188 init_fw_cb->ipv6_addtl_opts &= 2189 cpu_to_le16(~IPV6_ADDOPT_MLD_EN); 2190 break; 2191 case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: 2192 if (iface_param->iface_num & 0x1) 2193 break; 2194 init_fw_cb->ipv6_flow_lbl = 2195 cpu_to_le16(*(uint16_t *)iface_param->value); 2196 break; 2197 case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: 2198 if (iface_param->iface_num & 0x1) 2199 break; 2200 init_fw_cb->ipv6_traffic_class = iface_param->value[0]; 2201 break; 2202 case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: 2203 if (iface_param->iface_num & 0x1) 2204 break; 2205 init_fw_cb->ipv6_hop_limit = iface_param->value[0]; 2206 break; 2207 case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: 2208 if (iface_param->iface_num & 0x1) 2209 break; 2210 init_fw_cb->ipv6_nd_reach_time = 2211 cpu_to_le32(*(uint32_t *)iface_param->value); 2212 break; 2213 case 
ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: 2214 if (iface_param->iface_num & 0x1) 2215 break; 2216 init_fw_cb->ipv6_nd_rexmit_timer = 2217 cpu_to_le32(*(uint32_t *)iface_param->value); 2218 break; 2219 case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: 2220 if (iface_param->iface_num & 0x1) 2221 break; 2222 init_fw_cb->ipv6_nd_stale_timeout = 2223 cpu_to_le32(*(uint32_t *)iface_param->value); 2224 break; 2225 case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: 2226 if (iface_param->iface_num & 0x1) 2227 break; 2228 init_fw_cb->ipv6_dup_addr_detect_count = iface_param->value[0]; 2229 break; 2230 case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: 2231 if (iface_param->iface_num & 0x1) 2232 break; 2233 init_fw_cb->ipv6_gw_advrt_mtu = 2234 cpu_to_le32(*(uint32_t *)iface_param->value); 2235 break; 2236 default: 2237 ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n", 2238 iface_param->param); 2239 break; 2240 } 2241 } 2242 2243 static void qla4xxx_set_ipv4(struct scsi_qla_host *ha, 2244 struct iscsi_iface_param_info *iface_param, 2245 struct addr_ctrl_blk *init_fw_cb) 2246 { 2247 switch (iface_param->param) { 2248 case ISCSI_NET_PARAM_IPV4_ADDR: 2249 memcpy(init_fw_cb->ipv4_addr, iface_param->value, 2250 sizeof(init_fw_cb->ipv4_addr)); 2251 break; 2252 case ISCSI_NET_PARAM_IPV4_SUBNET: 2253 memcpy(init_fw_cb->ipv4_subnet, iface_param->value, 2254 sizeof(init_fw_cb->ipv4_subnet)); 2255 break; 2256 case ISCSI_NET_PARAM_IPV4_GW: 2257 memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value, 2258 sizeof(init_fw_cb->ipv4_gw_addr)); 2259 break; 2260 case ISCSI_NET_PARAM_IPV4_BOOTPROTO: 2261 if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP) 2262 init_fw_cb->ipv4_tcp_opts |= 2263 cpu_to_le16(TCPOPT_DHCP_ENABLE); 2264 else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC) 2265 init_fw_cb->ipv4_tcp_opts &= 2266 cpu_to_le16(~TCPOPT_DHCP_ENABLE); 2267 else 2268 ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n"); 2269 break; 2270 case ISCSI_NET_PARAM_IFACE_ENABLE: 2271 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) { 2272 init_fw_cb->ipv4_ip_opts |= 2273 cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE); 2274 qla4xxx_create_ipv4_iface(ha); 2275 } else { 2276 init_fw_cb->ipv4_ip_opts &= 2277 cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE & 2278 0xFFFF); 2279 qla4xxx_destroy_ipv4_iface(ha); 2280 } 2281 break; 2282 case ISCSI_NET_PARAM_VLAN_TAG: 2283 if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag)) 2284 break; 2285 init_fw_cb->ipv4_vlan_tag = 2286 cpu_to_be16(*(uint16_t *)iface_param->value); 2287 break; 2288 case ISCSI_NET_PARAM_VLAN_ENABLED: 2289 if (iface_param->value[0] == ISCSI_VLAN_ENABLE) 2290 init_fw_cb->ipv4_ip_opts |= 2291 cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE); 2292 else 2293 init_fw_cb->ipv4_ip_opts &= 2294 cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE); 2295 break; 2296 case ISCSI_NET_PARAM_MTU: 2297 init_fw_cb->eth_mtu_size = 2298 cpu_to_le16(*(uint16_t *)iface_param->value); 2299 break; 2300 case ISCSI_NET_PARAM_PORT: 2301 init_fw_cb->ipv4_port = 2302 cpu_to_le16(*(uint16_t *)iface_param->value); 2303 break; 2304 case ISCSI_NET_PARAM_DELAYED_ACK_EN: 2305 if (iface_param->iface_num & 0x1) 2306 break; 2307 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2308 init_fw_cb->ipv4_tcp_opts |= 2309 cpu_to_le16(TCPOPT_DELAYED_ACK_DISABLE); 2310 else 2311 init_fw_cb->ipv4_tcp_opts &= 2312 cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE & 2313 0xFFFF); 2314 break; 2315 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: 2316 if (iface_param->iface_num & 0x1) 2317 break; 2318 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2319 init_fw_cb->ipv4_tcp_opts 
|= 2320 cpu_to_le16(TCPOPT_NAGLE_ALGO_DISABLE); 2321 else 2322 init_fw_cb->ipv4_tcp_opts &= 2323 cpu_to_le16(~TCPOPT_NAGLE_ALGO_DISABLE); 2324 break; 2325 case ISCSI_NET_PARAM_TCP_WSF_DISABLE: 2326 if (iface_param->iface_num & 0x1) 2327 break; 2328 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2329 init_fw_cb->ipv4_tcp_opts |= 2330 cpu_to_le16(TCPOPT_WINDOW_SCALE_DISABLE); 2331 else 2332 init_fw_cb->ipv4_tcp_opts &= 2333 cpu_to_le16(~TCPOPT_WINDOW_SCALE_DISABLE); 2334 break; 2335 case ISCSI_NET_PARAM_TCP_WSF: 2336 if (iface_param->iface_num & 0x1) 2337 break; 2338 init_fw_cb->ipv4_tcp_wsf = iface_param->value[0]; 2339 break; 2340 case ISCSI_NET_PARAM_TCP_TIMER_SCALE: 2341 if (iface_param->iface_num & 0x1) 2342 break; 2343 init_fw_cb->ipv4_tcp_opts &= cpu_to_le16(~TCPOPT_TIMER_SCALE); 2344 init_fw_cb->ipv4_tcp_opts |= 2345 cpu_to_le16((iface_param->value[0] << 1) & 2346 TCPOPT_TIMER_SCALE); 2347 break; 2348 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: 2349 if (iface_param->iface_num & 0x1) 2350 break; 2351 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2352 init_fw_cb->ipv4_tcp_opts |= 2353 cpu_to_le16(TCPOPT_TIMESTAMP_ENABLE); 2354 else 2355 init_fw_cb->ipv4_tcp_opts &= 2356 cpu_to_le16(~TCPOPT_TIMESTAMP_ENABLE); 2357 break; 2358 case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: 2359 if (iface_param->iface_num & 0x1) 2360 break; 2361 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2362 init_fw_cb->ipv4_tcp_opts |= 2363 cpu_to_le16(TCPOPT_DNS_SERVER_IP_EN); 2364 else 2365 init_fw_cb->ipv4_tcp_opts &= 2366 cpu_to_le16(~TCPOPT_DNS_SERVER_IP_EN); 2367 break; 2368 case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: 2369 if (iface_param->iface_num & 0x1) 2370 break; 2371 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2372 init_fw_cb->ipv4_tcp_opts |= 2373 cpu_to_le16(TCPOPT_SLP_DA_INFO_EN); 2374 else 2375 init_fw_cb->ipv4_tcp_opts &= 2376 cpu_to_le16(~TCPOPT_SLP_DA_INFO_EN); 2377 break; 2378 case ISCSI_NET_PARAM_IPV4_TOS_EN: 2379 if (iface_param->iface_num & 0x1) 2380 break; 2381 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2382 init_fw_cb->ipv4_ip_opts |= 2383 cpu_to_le16(IPOPT_IPV4_TOS_EN); 2384 else 2385 init_fw_cb->ipv4_ip_opts &= 2386 cpu_to_le16(~IPOPT_IPV4_TOS_EN); 2387 break; 2388 case ISCSI_NET_PARAM_IPV4_TOS: 2389 if (iface_param->iface_num & 0x1) 2390 break; 2391 init_fw_cb->ipv4_tos = iface_param->value[0]; 2392 break; 2393 case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: 2394 if (iface_param->iface_num & 0x1) 2395 break; 2396 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2397 init_fw_cb->ipv4_ip_opts |= 2398 cpu_to_le16(IPOPT_GRAT_ARP_EN); 2399 else 2400 init_fw_cb->ipv4_ip_opts &= 2401 cpu_to_le16(~IPOPT_GRAT_ARP_EN); 2402 break; 2403 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: 2404 if (iface_param->iface_num & 0x1) 2405 break; 2406 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2407 init_fw_cb->ipv4_ip_opts |= 2408 cpu_to_le16(IPOPT_ALT_CID_EN); 2409 else 2410 init_fw_cb->ipv4_ip_opts &= 2411 cpu_to_le16(~IPOPT_ALT_CID_EN); 2412 break; 2413 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: 2414 if (iface_param->iface_num & 0x1) 2415 break; 2416 memcpy(init_fw_cb->ipv4_dhcp_alt_cid, iface_param->value, 2417 (sizeof(init_fw_cb->ipv4_dhcp_alt_cid) - 1)); 2418 init_fw_cb->ipv4_dhcp_alt_cid_len = 2419 strlen(init_fw_cb->ipv4_dhcp_alt_cid); 2420 break; 2421 case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: 2422 if (iface_param->iface_num & 0x1) 2423 break; 2424 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2425 init_fw_cb->ipv4_ip_opts |= 2426 
cpu_to_le16(IPOPT_REQ_VID_EN); 2427 else 2428 init_fw_cb->ipv4_ip_opts &= 2429 cpu_to_le16(~IPOPT_REQ_VID_EN); 2430 break; 2431 case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: 2432 if (iface_param->iface_num & 0x1) 2433 break; 2434 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2435 init_fw_cb->ipv4_ip_opts |= 2436 cpu_to_le16(IPOPT_USE_VID_EN); 2437 else 2438 init_fw_cb->ipv4_ip_opts &= 2439 cpu_to_le16(~IPOPT_USE_VID_EN); 2440 break; 2441 case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: 2442 if (iface_param->iface_num & 0x1) 2443 break; 2444 memcpy(init_fw_cb->ipv4_dhcp_vid, iface_param->value, 2445 (sizeof(init_fw_cb->ipv4_dhcp_vid) - 1)); 2446 init_fw_cb->ipv4_dhcp_vid_len = 2447 strlen(init_fw_cb->ipv4_dhcp_vid); 2448 break; 2449 case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: 2450 if (iface_param->iface_num & 0x1) 2451 break; 2452 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2453 init_fw_cb->ipv4_ip_opts |= 2454 cpu_to_le16(IPOPT_LEARN_IQN_EN); 2455 else 2456 init_fw_cb->ipv4_ip_opts &= 2457 cpu_to_le16(~IPOPT_LEARN_IQN_EN); 2458 break; 2459 case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: 2460 if (iface_param->iface_num & 0x1) 2461 break; 2462 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2463 init_fw_cb->ipv4_ip_opts |= 2464 cpu_to_le16(IPOPT_FRAGMENTATION_DISABLE); 2465 else 2466 init_fw_cb->ipv4_ip_opts &= 2467 cpu_to_le16(~IPOPT_FRAGMENTATION_DISABLE); 2468 break; 2469 case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: 2470 if (iface_param->iface_num & 0x1) 2471 break; 2472 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2473 init_fw_cb->ipv4_ip_opts |= 2474 cpu_to_le16(IPOPT_IN_FORWARD_EN); 2475 else 2476 init_fw_cb->ipv4_ip_opts &= 2477 cpu_to_le16(~IPOPT_IN_FORWARD_EN); 2478 break; 2479 case ISCSI_NET_PARAM_REDIRECT_EN: 2480 if (iface_param->iface_num & 0x1) 2481 break; 2482 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2483 init_fw_cb->ipv4_ip_opts |= 2484 cpu_to_le16(IPOPT_ARP_REDIRECT_EN); 2485 else 2486 init_fw_cb->ipv4_ip_opts &= 2487 cpu_to_le16(~IPOPT_ARP_REDIRECT_EN); 2488 break; 2489 case ISCSI_NET_PARAM_IPV4_TTL: 2490 if (iface_param->iface_num & 0x1) 2491 break; 2492 init_fw_cb->ipv4_ttl = iface_param->value[0]; 2493 break; 2494 default: 2495 ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n", 2496 iface_param->param); 2497 break; 2498 } 2499 } 2500 2501 static void qla4xxx_set_iscsi_param(struct scsi_qla_host *ha, 2502 struct iscsi_iface_param_info *iface_param, 2503 struct addr_ctrl_blk *init_fw_cb) 2504 { 2505 switch (iface_param->param) { 2506 case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: 2507 if (iface_param->iface_num & 0x1) 2508 break; 2509 init_fw_cb->def_timeout = 2510 cpu_to_le16(*(uint16_t *)iface_param->value); 2511 break; 2512 case ISCSI_IFACE_PARAM_HDRDGST_EN: 2513 if (iface_param->iface_num & 0x1) 2514 break; 2515 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2516 init_fw_cb->iscsi_opts |= 2517 cpu_to_le16(ISCSIOPTS_HEADER_DIGEST_EN); 2518 else 2519 init_fw_cb->iscsi_opts &= 2520 cpu_to_le16(~ISCSIOPTS_HEADER_DIGEST_EN); 2521 break; 2522 case ISCSI_IFACE_PARAM_DATADGST_EN: 2523 if (iface_param->iface_num & 0x1) 2524 break; 2525 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2526 init_fw_cb->iscsi_opts |= 2527 cpu_to_le16(ISCSIOPTS_DATA_DIGEST_EN); 2528 else 2529 init_fw_cb->iscsi_opts &= 2530 cpu_to_le16(~ISCSIOPTS_DATA_DIGEST_EN); 2531 break; 2532 case ISCSI_IFACE_PARAM_IMM_DATA_EN: 2533 if (iface_param->iface_num & 0x1) 2534 break; 2535 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2536 init_fw_cb->iscsi_opts |= 2537 
cpu_to_le16(ISCSIOPTS_IMMEDIATE_DATA_EN); 2538 else 2539 init_fw_cb->iscsi_opts &= 2540 cpu_to_le16(~ISCSIOPTS_IMMEDIATE_DATA_EN); 2541 break; 2542 case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: 2543 if (iface_param->iface_num & 0x1) 2544 break; 2545 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2546 init_fw_cb->iscsi_opts |= 2547 cpu_to_le16(ISCSIOPTS_INITIAL_R2T_EN); 2548 else 2549 init_fw_cb->iscsi_opts &= 2550 cpu_to_le16(~ISCSIOPTS_INITIAL_R2T_EN); 2551 break; 2552 case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: 2553 if (iface_param->iface_num & 0x1) 2554 break; 2555 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2556 init_fw_cb->iscsi_opts |= 2557 cpu_to_le16(ISCSIOPTS_DATA_SEQ_INORDER_EN); 2558 else 2559 init_fw_cb->iscsi_opts &= 2560 cpu_to_le16(~ISCSIOPTS_DATA_SEQ_INORDER_EN); 2561 break; 2562 case ISCSI_IFACE_PARAM_PDU_INORDER_EN: 2563 if (iface_param->iface_num & 0x1) 2564 break; 2565 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2566 init_fw_cb->iscsi_opts |= 2567 cpu_to_le16(ISCSIOPTS_DATA_PDU_INORDER_EN); 2568 else 2569 init_fw_cb->iscsi_opts &= 2570 cpu_to_le16(~ISCSIOPTS_DATA_PDU_INORDER_EN); 2571 break; 2572 case ISCSI_IFACE_PARAM_ERL: 2573 if (iface_param->iface_num & 0x1) 2574 break; 2575 init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_ERL); 2576 init_fw_cb->iscsi_opts |= cpu_to_le16(iface_param->value[0] & 2577 ISCSIOPTS_ERL); 2578 break; 2579 case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: 2580 if (iface_param->iface_num & 0x1) 2581 break; 2582 init_fw_cb->iscsi_max_pdu_size = 2583 cpu_to_le32(*(uint32_t *)iface_param->value) / 2584 BYTE_UNITS; 2585 break; 2586 case ISCSI_IFACE_PARAM_FIRST_BURST: 2587 if (iface_param->iface_num & 0x1) 2588 break; 2589 init_fw_cb->iscsi_fburst_len = 2590 cpu_to_le32(*(uint32_t *)iface_param->value) / 2591 BYTE_UNITS; 2592 break; 2593 case ISCSI_IFACE_PARAM_MAX_R2T: 2594 if (iface_param->iface_num & 0x1) 2595 break; 2596 init_fw_cb->iscsi_max_outstnd_r2t = 2597 cpu_to_le16(*(uint16_t *)iface_param->value); 2598 break; 2599 case ISCSI_IFACE_PARAM_MAX_BURST: 2600 if (iface_param->iface_num & 0x1) 2601 break; 2602 init_fw_cb->iscsi_max_burst_len = 2603 cpu_to_le32(*(uint32_t *)iface_param->value) / 2604 BYTE_UNITS; 2605 break; 2606 case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: 2607 if (iface_param->iface_num & 0x1) 2608 break; 2609 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2610 init_fw_cb->iscsi_opts |= 2611 cpu_to_le16(ISCSIOPTS_CHAP_AUTH_EN); 2612 else 2613 init_fw_cb->iscsi_opts &= 2614 cpu_to_le16(~ISCSIOPTS_CHAP_AUTH_EN); 2615 break; 2616 case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: 2617 if (iface_param->iface_num & 0x1) 2618 break; 2619 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2620 init_fw_cb->iscsi_opts |= 2621 cpu_to_le16(ISCSIOPTS_BIDI_CHAP_EN); 2622 else 2623 init_fw_cb->iscsi_opts &= 2624 cpu_to_le16(~ISCSIOPTS_BIDI_CHAP_EN); 2625 break; 2626 case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: 2627 if (iface_param->iface_num & 0x1) 2628 break; 2629 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2630 init_fw_cb->iscsi_opts |= 2631 cpu_to_le16(ISCSIOPTS_DISCOVERY_AUTH_EN); 2632 else 2633 init_fw_cb->iscsi_opts &= 2634 cpu_to_le16(~ISCSIOPTS_DISCOVERY_AUTH_EN); 2635 break; 2636 case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: 2637 if (iface_param->iface_num & 0x1) 2638 break; 2639 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2640 init_fw_cb->iscsi_opts |= 2641 cpu_to_le16(ISCSIOPTS_DISCOVERY_LOGOUT_EN); 2642 else 2643 init_fw_cb->iscsi_opts &= 2644 cpu_to_le16(~ISCSIOPTS_DISCOVERY_LOGOUT_EN); 2645 break; 2646 case 
ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: 2647 if (iface_param->iface_num & 0x1) 2648 break; 2649 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2650 init_fw_cb->iscsi_opts |= 2651 cpu_to_le16(ISCSIOPTS_STRICT_LOGIN_COMP_EN); 2652 else 2653 init_fw_cb->iscsi_opts &= 2654 cpu_to_le16(~ISCSIOPTS_STRICT_LOGIN_COMP_EN); 2655 break; 2656 default: 2657 ql4_printk(KERN_ERR, ha, "Unknown iscsi param = %d\n", 2658 iface_param->param); 2659 break; 2660 } 2661 } 2662 2663 static void 2664 qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb) 2665 { 2666 struct addr_ctrl_blk_def *acb; 2667 acb = (struct addr_ctrl_blk_def *)init_fw_cb; 2668 memset(acb->reserved1, 0, sizeof(acb->reserved1)); 2669 memset(acb->reserved2, 0, sizeof(acb->reserved2)); 2670 memset(acb->reserved3, 0, sizeof(acb->reserved3)); 2671 memset(acb->reserved4, 0, sizeof(acb->reserved4)); 2672 memset(acb->reserved5, 0, sizeof(acb->reserved5)); 2673 memset(acb->reserved6, 0, sizeof(acb->reserved6)); 2674 memset(acb->reserved7, 0, sizeof(acb->reserved7)); 2675 memset(acb->reserved8, 0, sizeof(acb->reserved8)); 2676 memset(acb->reserved9, 0, sizeof(acb->reserved9)); 2677 memset(acb->reserved10, 0, sizeof(acb->reserved10)); 2678 memset(acb->reserved11, 0, sizeof(acb->reserved11)); 2679 memset(acb->reserved12, 0, sizeof(acb->reserved12)); 2680 memset(acb->reserved13, 0, sizeof(acb->reserved13)); 2681 memset(acb->reserved14, 0, sizeof(acb->reserved14)); 2682 memset(acb->reserved15, 0, sizeof(acb->reserved15)); 2683 } 2684 2685 static int 2686 qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len) 2687 { 2688 struct scsi_qla_host *ha = to_qla_host(shost); 2689 int rval = 0; 2690 struct iscsi_iface_param_info *iface_param = NULL; 2691 struct addr_ctrl_blk *init_fw_cb = NULL; 2692 dma_addr_t init_fw_cb_dma; 2693 uint32_t mbox_cmd[MBOX_REG_COUNT]; 2694 uint32_t mbox_sts[MBOX_REG_COUNT]; 2695 uint32_t rem = len; 2696 struct nlattr *attr; 2697 2698 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, 2699 sizeof(struct addr_ctrl_blk), 2700 &init_fw_cb_dma, GFP_KERNEL); 2701 if (!init_fw_cb) { 2702 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n", 2703 __func__); 2704 return -ENOMEM; 2705 } 2706 2707 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk)); 2708 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 2709 memset(&mbox_sts, 0, sizeof(mbox_sts)); 2710 2711 if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) { 2712 ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__); 2713 rval = -EIO; 2714 goto exit_init_fw_cb; 2715 } 2716 2717 nla_for_each_attr(attr, data, len, rem) { 2718 iface_param = nla_data(attr); 2719 2720 if (iface_param->param_type == ISCSI_NET_PARAM) { 2721 switch (iface_param->iface_type) { 2722 case ISCSI_IFACE_TYPE_IPV4: 2723 switch (iface_param->iface_num) { 2724 case 0: 2725 qla4xxx_set_ipv4(ha, iface_param, 2726 init_fw_cb); 2727 break; 2728 default: 2729 /* Cannot have more than one IPv4 interface */ 2730 ql4_printk(KERN_ERR, ha, 2731 "Invalid IPv4 iface number = %d\n", 2732 iface_param->iface_num); 2733 break; 2734 } 2735 break; 2736 case ISCSI_IFACE_TYPE_IPV6: 2737 switch (iface_param->iface_num) { 2738 case 0: 2739 case 1: 2740 qla4xxx_set_ipv6(ha, iface_param, 2741 init_fw_cb); 2742 break; 2743 default: 2744 /* Cannot have more than two IPv6 interface */ 2745 ql4_printk(KERN_ERR, ha, 2746 "Invalid IPv6 iface number = %d\n", 2747 iface_param->iface_num); 2748 break; 2749 } 2750 break; 2751 default: 2752 ql4_printk(KERN_ERR, ha, 2753 "Invalid iface type\n"); 2754 break; 2755 } 2756 
} else if (iface_param->param_type == ISCSI_IFACE_PARAM) { 2757 qla4xxx_set_iscsi_param(ha, iface_param, 2758 init_fw_cb); 2759 } else { 2760 continue; 2761 } 2762 } 2763 2764 init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A); 2765 2766 rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB, 2767 sizeof(struct addr_ctrl_blk), 2768 FLASH_OPT_RMW_COMMIT); 2769 if (rval != QLA_SUCCESS) { 2770 ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n", 2771 __func__); 2772 rval = -EIO; 2773 goto exit_init_fw_cb; 2774 } 2775 2776 rval = qla4xxx_disable_acb(ha); 2777 if (rval != QLA_SUCCESS) { 2778 ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n", 2779 __func__); 2780 rval = -EIO; 2781 goto exit_init_fw_cb; 2782 } 2783 2784 wait_for_completion_timeout(&ha->disable_acb_comp, 2785 DISABLE_ACB_TOV * HZ); 2786 2787 qla4xxx_initcb_to_acb(init_fw_cb); 2788 2789 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma); 2790 if (rval != QLA_SUCCESS) { 2791 ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n", 2792 __func__); 2793 rval = -EIO; 2794 goto exit_init_fw_cb; 2795 } 2796 2797 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk)); 2798 qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb, 2799 init_fw_cb_dma); 2800 2801 exit_init_fw_cb: 2802 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), 2803 init_fw_cb, init_fw_cb_dma); 2804 2805 return rval; 2806 } 2807 2808 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess, 2809 enum iscsi_param param, char *buf) 2810 { 2811 struct iscsi_session *sess = cls_sess->dd_data; 2812 struct ddb_entry *ddb_entry = sess->dd_data; 2813 struct scsi_qla_host *ha = ddb_entry->ha; 2814 struct iscsi_cls_conn *cls_conn = ddb_entry->conn; 2815 struct ql4_chap_table chap_tbl; 2816 int rval, len; 2817 uint16_t idx; 2818 2819 memset(&chap_tbl, 0, sizeof(chap_tbl)); 2820 switch (param) { 2821 case ISCSI_PARAM_CHAP_IN_IDX: 2822 rval = qla4xxx_get_chap_index(ha, sess->username_in, 2823 sess->password_in, BIDI_CHAP, 2824 &idx); 2825 if (rval) 2826 len = sprintf(buf, "\n"); 2827 else 2828 len = sprintf(buf, "%hu\n", idx); 2829 break; 2830 case ISCSI_PARAM_CHAP_OUT_IDX: 2831 if (ddb_entry->ddb_type == FLASH_DDB) { 2832 if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) { 2833 idx = ddb_entry->chap_tbl_idx; 2834 rval = QLA_SUCCESS; 2835 } else { 2836 rval = QLA_ERROR; 2837 } 2838 } else { 2839 rval = qla4xxx_get_chap_index(ha, sess->username, 2840 sess->password, 2841 LOCAL_CHAP, &idx); 2842 } 2843 if (rval) 2844 len = sprintf(buf, "\n"); 2845 else 2846 len = sprintf(buf, "%hu\n", idx); 2847 break; 2848 case ISCSI_PARAM_USERNAME: 2849 case ISCSI_PARAM_PASSWORD: 2850 /* First, populate session username and password for FLASH DDB, 2851 * if not already done. This happens when session login fails 2852 * for a FLASH DDB. 
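 * The credentials are read from the flash CHAP table entry at
 * ddb_entry->chap_tbl_idx and cached in the session via
 * iscsi_set_param() before falling through to the generic
 * iscsi_session_get_param() handler.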
2853 */ 2854 if (ddb_entry->ddb_type == FLASH_DDB && 2855 ddb_entry->chap_tbl_idx != INVALID_ENTRY && 2856 !sess->username && !sess->password) { 2857 idx = ddb_entry->chap_tbl_idx; 2858 rval = qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, 2859 chap_tbl.secret, 2860 idx); 2861 if (!rval) { 2862 iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, 2863 (char *)chap_tbl.name, 2864 strlen((char *)chap_tbl.name)); 2865 iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, 2866 (char *)chap_tbl.secret, 2867 chap_tbl.secret_len); 2868 } 2869 } 2870 /* allow fall-through */ 2871 default: 2872 return iscsi_session_get_param(cls_sess, param, buf); 2873 } 2874 2875 return len; 2876 } 2877 2878 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn, 2879 enum iscsi_param param, char *buf) 2880 { 2881 struct iscsi_conn *conn; 2882 struct qla_conn *qla_conn; 2883 struct sockaddr *dst_addr; 2884 2885 conn = cls_conn->dd_data; 2886 qla_conn = conn->dd_data; 2887 dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr; 2888 2889 switch (param) { 2890 case ISCSI_PARAM_CONN_PORT: 2891 case ISCSI_PARAM_CONN_ADDRESS: 2892 return iscsi_conn_get_addr_param((struct sockaddr_storage *) 2893 dst_addr, param, buf); 2894 default: 2895 return iscsi_conn_get_param(cls_conn, param, buf); 2896 } 2897 } 2898 2899 int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index) 2900 { 2901 uint32_t mbx_sts = 0; 2902 uint16_t tmp_ddb_index; 2903 int ret; 2904 2905 get_ddb_index: 2906 tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES); 2907 2908 if (tmp_ddb_index >= MAX_DDB_ENTRIES) { 2909 DEBUG2(ql4_printk(KERN_INFO, ha, 2910 "Free DDB index not available\n")); 2911 ret = QLA_ERROR; 2912 goto exit_get_ddb_index; 2913 } 2914 2915 if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map)) 2916 goto get_ddb_index; 2917 2918 DEBUG2(ql4_printk(KERN_INFO, ha, 2919 "Found a free DDB index at %d\n", tmp_ddb_index)); 2920 ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts); 2921 if (ret == QLA_ERROR) { 2922 if (mbx_sts == MBOX_STS_COMMAND_ERROR) { 2923 ql4_printk(KERN_INFO, ha, 2924 "DDB index = %d not available trying next\n", 2925 tmp_ddb_index); 2926 goto get_ddb_index; 2927 } 2928 DEBUG2(ql4_printk(KERN_INFO, ha, 2929 "Free FW DDB not available\n")); 2930 } 2931 2932 *ddb_index = tmp_ddb_index; 2933 2934 exit_get_ddb_index: 2935 return ret; 2936 } 2937 2938 static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha, 2939 struct ddb_entry *ddb_entry, 2940 char *existing_ipaddr, 2941 char *user_ipaddr) 2942 { 2943 uint8_t dst_ipaddr[IPv6_ADDR_LEN]; 2944 char formatted_ipaddr[DDB_IPADDR_LEN]; 2945 int status = QLA_SUCCESS, ret = 0; 2946 2947 if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) { 2948 ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr, 2949 '\0', NULL); 2950 if (ret == 0) { 2951 status = QLA_ERROR; 2952 goto out_match; 2953 } 2954 ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr); 2955 } else { 2956 ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr, 2957 '\0', NULL); 2958 if (ret == 0) { 2959 status = QLA_ERROR; 2960 goto out_match; 2961 } 2962 ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr); 2963 } 2964 2965 if (strcmp(existing_ipaddr, formatted_ipaddr)) 2966 status = QLA_ERROR; 2967 2968 out_match: 2969 return status; 2970 } 2971 2972 static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha, 2973 struct iscsi_cls_conn *cls_conn) 2974 { 2975 int idx = 0, max_ddbs, rval; 2976 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn); 
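/*
 * Walk every firmware DDB index and look for a flash DDB whose target
 * name, portal address and port match the session being established.
 * A match means the firmware already owns an equivalent session; the
 * caller (qla4xxx_conn_start) uses this to avoid a duplicate login.
 */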
2977 struct iscsi_session *sess, *existing_sess; 2978 struct iscsi_conn *conn, *existing_conn; 2979 struct ddb_entry *ddb_entry; 2980 2981 sess = cls_sess->dd_data; 2982 conn = cls_conn->dd_data; 2983 2984 if (sess->targetname == NULL || 2985 conn->persistent_address == NULL || 2986 conn->persistent_port == 0) 2987 return QLA_ERROR; 2988 2989 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : 2990 MAX_DEV_DB_ENTRIES; 2991 2992 for (idx = 0; idx < max_ddbs; idx++) { 2993 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 2994 if (ddb_entry == NULL) 2995 continue; 2996 2997 if (ddb_entry->ddb_type != FLASH_DDB) 2998 continue; 2999 3000 existing_sess = ddb_entry->sess->dd_data; 3001 existing_conn = ddb_entry->conn->dd_data; 3002 3003 if (existing_sess->targetname == NULL || 3004 existing_conn->persistent_address == NULL || 3005 existing_conn->persistent_port == 0) 3006 continue; 3007 3008 DEBUG2(ql4_printk(KERN_INFO, ha, 3009 "IQN = %s User IQN = %s\n", 3010 existing_sess->targetname, 3011 sess->targetname)); 3012 3013 DEBUG2(ql4_printk(KERN_INFO, ha, 3014 "IP = %s User IP = %s\n", 3015 existing_conn->persistent_address, 3016 conn->persistent_address)); 3017 3018 DEBUG2(ql4_printk(KERN_INFO, ha, 3019 "Port = %d User Port = %d\n", 3020 existing_conn->persistent_port, 3021 conn->persistent_port)); 3022 3023 if (strcmp(existing_sess->targetname, sess->targetname)) 3024 continue; 3025 rval = qla4xxx_match_ipaddress(ha, ddb_entry, 3026 existing_conn->persistent_address, 3027 conn->persistent_address); 3028 if (rval == QLA_ERROR) 3029 continue; 3030 if (existing_conn->persistent_port != conn->persistent_port) 3031 continue; 3032 break; 3033 } 3034 3035 if (idx == max_ddbs) 3036 return QLA_ERROR; 3037 3038 DEBUG2(ql4_printk(KERN_INFO, ha, 3039 "Match found in fwdb sessions\n")); 3040 return QLA_SUCCESS; 3041 } 3042 3043 static struct iscsi_cls_session * 3044 qla4xxx_session_create(struct iscsi_endpoint *ep, 3045 uint16_t cmds_max, uint16_t qdepth, 3046 uint32_t initial_cmdsn) 3047 { 3048 struct iscsi_cls_session *cls_sess; 3049 struct scsi_qla_host *ha; 3050 struct qla_endpoint *qla_ep; 3051 struct ddb_entry *ddb_entry; 3052 uint16_t ddb_index; 3053 struct iscsi_session *sess; 3054 struct sockaddr *dst_addr; 3055 int ret; 3056 3057 if (!ep) { 3058 printk(KERN_ERR "qla4xxx: missing ep.\n"); 3059 return NULL; 3060 } 3061 3062 qla_ep = ep->dd_data; 3063 dst_addr = (struct sockaddr *)&qla_ep->dst_addr; 3064 ha = to_qla_host(qla_ep->host); 3065 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, 3066 ha->host_no)); 3067 3068 ret = qla4xxx_get_ddb_index(ha, &ddb_index); 3069 if (ret == QLA_ERROR) 3070 return NULL; 3071 3072 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host, 3073 cmds_max, sizeof(struct ddb_entry), 3074 sizeof(struct ql4_task_data), 3075 initial_cmdsn, ddb_index); 3076 if (!cls_sess) 3077 return NULL; 3078 3079 sess = cls_sess->dd_data; 3080 ddb_entry = sess->dd_data; 3081 ddb_entry->fw_ddb_index = ddb_index; 3082 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE; 3083 ddb_entry->ha = ha; 3084 ddb_entry->sess = cls_sess; 3085 ddb_entry->unblock_sess = qla4xxx_unblock_ddb; 3086 ddb_entry->ddb_change = qla4xxx_ddb_change; 3087 clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags); 3088 cls_sess->recovery_tmo = ql4xsess_recovery_tmo; 3089 ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry; 3090 ha->tot_ddbs++; 3091 3092 return cls_sess; 3093 } 3094 3095 static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess) 3096 { 3097 
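/*
 * Wait up to LOGOUT_TOV seconds for the firmware DDB to reach
 * NO_CONNECTION_ACTIVE or SESSION_FAILED, then clear the DDB entry,
 * free the driver's ddb_entry under the hardware lock and tear down
 * the iSCSI class session.
 */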
struct iscsi_session *sess; 3098 struct ddb_entry *ddb_entry; 3099 struct scsi_qla_host *ha; 3100 unsigned long flags, wtime; 3101 struct dev_db_entry *fw_ddb_entry = NULL; 3102 dma_addr_t fw_ddb_entry_dma; 3103 uint32_t ddb_state; 3104 int ret; 3105 3106 sess = cls_sess->dd_data; 3107 ddb_entry = sess->dd_data; 3108 ha = ddb_entry->ha; 3109 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, 3110 ha->host_no)); 3111 3112 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3113 &fw_ddb_entry_dma, GFP_KERNEL); 3114 if (!fw_ddb_entry) { 3115 ql4_printk(KERN_ERR, ha, 3116 "%s: Unable to allocate dma buffer\n", __func__); 3117 goto destroy_session; 3118 } 3119 3120 wtime = jiffies + (HZ * LOGOUT_TOV); 3121 do { 3122 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 3123 fw_ddb_entry, fw_ddb_entry_dma, 3124 NULL, NULL, &ddb_state, NULL, 3125 NULL, NULL); 3126 if (ret == QLA_ERROR) 3127 goto destroy_session; 3128 3129 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || 3130 (ddb_state == DDB_DS_SESSION_FAILED)) 3131 goto destroy_session; 3132 3133 schedule_timeout_uninterruptible(HZ); 3134 } while ((time_after(wtime, jiffies))); 3135 3136 destroy_session: 3137 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); 3138 if (test_and_clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags)) 3139 clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map); 3140 spin_lock_irqsave(&ha->hardware_lock, flags); 3141 qla4xxx_free_ddb(ha, ddb_entry); 3142 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3143 3144 iscsi_session_teardown(cls_sess); 3145 3146 if (fw_ddb_entry) 3147 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3148 fw_ddb_entry, fw_ddb_entry_dma); 3149 } 3150 3151 static struct iscsi_cls_conn * 3152 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx) 3153 { 3154 struct iscsi_cls_conn *cls_conn; 3155 struct iscsi_session *sess; 3156 struct ddb_entry *ddb_entry; 3157 struct scsi_qla_host *ha; 3158 3159 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), 3160 conn_idx); 3161 if (!cls_conn) { 3162 pr_info("%s: Can not create connection for conn_idx = %u\n", 3163 __func__, conn_idx); 3164 return NULL; 3165 } 3166 3167 sess = cls_sess->dd_data; 3168 ddb_entry = sess->dd_data; 3169 ddb_entry->conn = cls_conn; 3170 3171 ha = ddb_entry->ha; 3172 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: conn_idx = %u\n", __func__, 3173 conn_idx)); 3174 return cls_conn; 3175 } 3176 3177 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session, 3178 struct iscsi_cls_conn *cls_conn, 3179 uint64_t transport_fd, int is_leading) 3180 { 3181 struct iscsi_conn *conn; 3182 struct qla_conn *qla_conn; 3183 struct iscsi_endpoint *ep; 3184 struct ddb_entry *ddb_entry; 3185 struct scsi_qla_host *ha; 3186 struct iscsi_session *sess; 3187 3188 sess = cls_session->dd_data; 3189 ddb_entry = sess->dd_data; 3190 ha = ddb_entry->ha; 3191 3192 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__, 3193 cls_session->sid, cls_conn->cid)); 3194 3195 if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) 3196 return -EINVAL; 3197 ep = iscsi_lookup_endpoint(transport_fd); 3198 conn = cls_conn->dd_data; 3199 qla_conn = conn->dd_data; 3200 qla_conn->qla_ep = ep->dd_data; 3201 return 0; 3202 } 3203 3204 static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn) 3205 { 3206 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn); 3207 struct iscsi_session *sess; 3208 struct ddb_entry *ddb_entry; 3209 struct scsi_qla_host *ha; 3210 struct 
dev_db_entry *fw_ddb_entry = NULL; 3211 dma_addr_t fw_ddb_entry_dma; 3212 uint32_t mbx_sts = 0; 3213 int ret = 0; 3214 int status = QLA_SUCCESS; 3215 3216 sess = cls_sess->dd_data; 3217 ddb_entry = sess->dd_data; 3218 ha = ddb_entry->ha; 3219 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__, 3220 cls_sess->sid, cls_conn->cid)); 3221 3222 /* Check if we have matching FW DDB, if yes then do not 3223 * login to this target. This could cause target to logout previous 3224 * connection 3225 */ 3226 ret = qla4xxx_match_fwdb_session(ha, cls_conn); 3227 if (ret == QLA_SUCCESS) { 3228 ql4_printk(KERN_INFO, ha, 3229 "Session already exist in FW.\n"); 3230 ret = -EEXIST; 3231 goto exit_conn_start; 3232 } 3233 3234 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3235 &fw_ddb_entry_dma, GFP_KERNEL); 3236 if (!fw_ddb_entry) { 3237 ql4_printk(KERN_ERR, ha, 3238 "%s: Unable to allocate dma buffer\n", __func__); 3239 ret = -ENOMEM; 3240 goto exit_conn_start; 3241 } 3242 3243 ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts); 3244 if (ret) { 3245 /* If iscsid is stopped and started then no need to do 3246 * set param again since ddb state will be already 3247 * active and FW does not allow set ddb to an 3248 * active session. 3249 */ 3250 if (mbx_sts) 3251 if (ddb_entry->fw_ddb_device_state == 3252 DDB_DS_SESSION_ACTIVE) { 3253 ddb_entry->unblock_sess(ddb_entry->sess); 3254 goto exit_set_param; 3255 } 3256 3257 ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n", 3258 __func__, ddb_entry->fw_ddb_index); 3259 goto exit_conn_start; 3260 } 3261 3262 status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index); 3263 if (status == QLA_ERROR) { 3264 ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__, 3265 sess->targetname); 3266 ret = -EINVAL; 3267 goto exit_conn_start; 3268 } 3269 3270 if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE) 3271 ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS; 3272 3273 DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__, 3274 ddb_entry->fw_ddb_device_state)); 3275 3276 exit_set_param: 3277 ret = 0; 3278 3279 exit_conn_start: 3280 if (fw_ddb_entry) 3281 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3282 fw_ddb_entry, fw_ddb_entry_dma); 3283 return ret; 3284 } 3285 3286 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn) 3287 { 3288 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn); 3289 struct iscsi_session *sess; 3290 struct scsi_qla_host *ha; 3291 struct ddb_entry *ddb_entry; 3292 int options; 3293 3294 sess = cls_sess->dd_data; 3295 ddb_entry = sess->dd_data; 3296 ha = ddb_entry->ha; 3297 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: cid = %d\n", __func__, 3298 cls_conn->cid)); 3299 3300 options = LOGOUT_OPTION_CLOSE_SESSION; 3301 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) 3302 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__); 3303 } 3304 3305 static void qla4xxx_task_work(struct work_struct *wdata) 3306 { 3307 struct ql4_task_data *task_data; 3308 struct scsi_qla_host *ha; 3309 struct passthru_status *sts; 3310 struct iscsi_task *task; 3311 struct iscsi_hdr *hdr; 3312 uint8_t *data; 3313 uint32_t data_len; 3314 struct iscsi_conn *conn; 3315 int hdr_len; 3316 itt_t itt; 3317 3318 task_data = container_of(wdata, struct ql4_task_data, task_work); 3319 ha = task_data->ha; 3320 task = task_data->task; 3321 sts = &task_data->sts; 3322 hdr_len = sizeof(struct iscsi_hdr); 3323 3324 DEBUG3(printk(KERN_INFO 
"Status returned\n")); 3325 DEBUG3(qla4xxx_dump_buffer(sts, 64)); 3326 DEBUG3(printk(KERN_INFO "Response buffer")); 3327 DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64)); 3328 3329 conn = task->conn; 3330 3331 switch (sts->completionStatus) { 3332 case PASSTHRU_STATUS_COMPLETE: 3333 hdr = (struct iscsi_hdr *)task_data->resp_buffer; 3334 /* Assign back the itt in hdr, until we use the PREASSIGN_TAG */ 3335 itt = sts->handle; 3336 hdr->itt = itt; 3337 data = task_data->resp_buffer + hdr_len; 3338 data_len = task_data->resp_len - hdr_len; 3339 iscsi_complete_pdu(conn, hdr, data, data_len); 3340 break; 3341 default: 3342 ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n", 3343 sts->completionStatus); 3344 break; 3345 } 3346 return; 3347 } 3348 3349 static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode) 3350 { 3351 struct ql4_task_data *task_data; 3352 struct iscsi_session *sess; 3353 struct ddb_entry *ddb_entry; 3354 struct scsi_qla_host *ha; 3355 int hdr_len; 3356 3357 sess = task->conn->session; 3358 ddb_entry = sess->dd_data; 3359 ha = ddb_entry->ha; 3360 task_data = task->dd_data; 3361 memset(task_data, 0, sizeof(struct ql4_task_data)); 3362 3363 if (task->sc) { 3364 ql4_printk(KERN_INFO, ha, 3365 "%s: SCSI Commands not implemented\n", __func__); 3366 return -EINVAL; 3367 } 3368 3369 hdr_len = sizeof(struct iscsi_hdr); 3370 task_data->ha = ha; 3371 task_data->task = task; 3372 3373 if (task->data_count) { 3374 task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data, 3375 task->data_count, 3376 PCI_DMA_TODEVICE); 3377 } 3378 3379 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n", 3380 __func__, task->conn->max_recv_dlength, hdr_len)); 3381 3382 task_data->resp_len = task->conn->max_recv_dlength + hdr_len; 3383 task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev, 3384 task_data->resp_len, 3385 &task_data->resp_dma, 3386 GFP_ATOMIC); 3387 if (!task_data->resp_buffer) 3388 goto exit_alloc_pdu; 3389 3390 task_data->req_len = task->data_count + hdr_len; 3391 task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev, 3392 task_data->req_len, 3393 &task_data->req_dma, 3394 GFP_ATOMIC); 3395 if (!task_data->req_buffer) 3396 goto exit_alloc_pdu; 3397 3398 task->hdr = task_data->req_buffer; 3399 3400 INIT_WORK(&task_data->task_work, qla4xxx_task_work); 3401 3402 return 0; 3403 3404 exit_alloc_pdu: 3405 if (task_data->resp_buffer) 3406 dma_free_coherent(&ha->pdev->dev, task_data->resp_len, 3407 task_data->resp_buffer, task_data->resp_dma); 3408 3409 if (task_data->req_buffer) 3410 dma_free_coherent(&ha->pdev->dev, task_data->req_len, 3411 task_data->req_buffer, task_data->req_dma); 3412 return -ENOMEM; 3413 } 3414 3415 static void qla4xxx_task_cleanup(struct iscsi_task *task) 3416 { 3417 struct ql4_task_data *task_data; 3418 struct iscsi_session *sess; 3419 struct ddb_entry *ddb_entry; 3420 struct scsi_qla_host *ha; 3421 int hdr_len; 3422 3423 hdr_len = sizeof(struct iscsi_hdr); 3424 sess = task->conn->session; 3425 ddb_entry = sess->dd_data; 3426 ha = ddb_entry->ha; 3427 task_data = task->dd_data; 3428 3429 if (task->data_count) { 3430 dma_unmap_single(&ha->pdev->dev, task_data->data_dma, 3431 task->data_count, PCI_DMA_TODEVICE); 3432 } 3433 3434 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n", 3435 __func__, task->conn->max_recv_dlength, hdr_len)); 3436 3437 dma_free_coherent(&ha->pdev->dev, task_data->resp_len, 3438 task_data->resp_buffer, task_data->resp_dma); 3439 dma_free_coherent(&ha->pdev->dev, 
task_data->req_len, 3440 task_data->req_buffer, task_data->req_dma); 3441 return; 3442 } 3443 3444 static int qla4xxx_task_xmit(struct iscsi_task *task) 3445 { 3446 struct scsi_cmnd *sc = task->sc; 3447 struct iscsi_session *sess = task->conn->session; 3448 struct ddb_entry *ddb_entry = sess->dd_data; 3449 struct scsi_qla_host *ha = ddb_entry->ha; 3450 3451 if (!sc) 3452 return qla4xxx_send_passthru0(task); 3453 3454 ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n", 3455 __func__); 3456 return -ENOSYS; 3457 } 3458 3459 static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess, 3460 struct iscsi_bus_flash_conn *conn, 3461 struct dev_db_entry *fw_ddb_entry) 3462 { 3463 unsigned long options = 0; 3464 int rc = 0; 3465 3466 options = le16_to_cpu(fw_ddb_entry->options); 3467 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options); 3468 if (test_bit(OPT_IPV6_DEVICE, &options)) { 3469 rc = iscsi_switch_str_param(&sess->portal_type, 3470 PORTAL_TYPE_IPV6); 3471 if (rc) 3472 goto exit_copy; 3473 } else { 3474 rc = iscsi_switch_str_param(&sess->portal_type, 3475 PORTAL_TYPE_IPV4); 3476 if (rc) 3477 goto exit_copy; 3478 } 3479 3480 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE, 3481 &options); 3482 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options); 3483 sess->entry_state = test_bit(OPT_ENTRY_STATE, &options); 3484 3485 options = le16_to_cpu(fw_ddb_entry->iscsi_options); 3486 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options); 3487 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options); 3488 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options); 3489 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options); 3490 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER, 3491 &options); 3492 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options); 3493 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options); 3494 conn->snack_req_en = test_bit(ISCSIOPT_SNACK_REQ_EN, &options); 3495 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN, 3496 &options); 3497 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options); 3498 sess->discovery_auth_optional = 3499 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options); 3500 if (test_bit(ISCSIOPT_ERL1, &options)) 3501 sess->erl |= BIT_1; 3502 if (test_bit(ISCSIOPT_ERL0, &options)) 3503 sess->erl |= BIT_0; 3504 3505 options = le16_to_cpu(fw_ddb_entry->tcp_options); 3506 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options); 3507 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options); 3508 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options); 3509 if (test_bit(TCPOPT_TIMER_SCALE3, &options)) 3510 conn->tcp_timer_scale |= BIT_3; 3511 if (test_bit(TCPOPT_TIMER_SCALE2, &options)) 3512 conn->tcp_timer_scale |= BIT_2; 3513 if (test_bit(TCPOPT_TIMER_SCALE1, &options)) 3514 conn->tcp_timer_scale |= BIT_1; 3515 3516 conn->tcp_timer_scale >>= 1; 3517 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options); 3518 3519 options = le16_to_cpu(fw_ddb_entry->ip_options); 3520 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options); 3521 3522 conn->max_recv_dlength = BYTE_UNITS * 3523 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); 3524 conn->max_xmit_dlength = BYTE_UNITS * 3525 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); 3526 sess->first_burst = BYTE_UNITS * 3527 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); 3528 sess->max_burst = 
BYTE_UNITS * 3529 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); 3530 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); 3531 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); 3532 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); 3533 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); 3534 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss); 3535 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf; 3536 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf; 3537 conn->ipv6_flow_label = le16_to_cpu(fw_ddb_entry->ipv6_flow_lbl); 3538 conn->keepalive_timeout = le16_to_cpu(fw_ddb_entry->ka_timeout); 3539 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port); 3540 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn); 3541 conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn); 3542 sess->discovery_parent_idx = le16_to_cpu(fw_ddb_entry->ddb_link); 3543 sess->discovery_parent_type = le16_to_cpu(fw_ddb_entry->ddb_link); 3544 sess->chap_out_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 3545 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid); 3546 3547 sess->default_taskmgmt_timeout = 3548 le16_to_cpu(fw_ddb_entry->def_timeout); 3549 conn->port = le16_to_cpu(fw_ddb_entry->port); 3550 3551 options = le16_to_cpu(fw_ddb_entry->options); 3552 conn->ipaddress = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL); 3553 if (!conn->ipaddress) { 3554 rc = -ENOMEM; 3555 goto exit_copy; 3556 } 3557 3558 conn->redirect_ipaddr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL); 3559 if (!conn->redirect_ipaddr) { 3560 rc = -ENOMEM; 3561 goto exit_copy; 3562 } 3563 3564 memcpy(conn->ipaddress, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN); 3565 memcpy(conn->redirect_ipaddr, fw_ddb_entry->tgt_addr, IPv6_ADDR_LEN); 3566 3567 if (test_bit(OPT_IPV6_DEVICE, &options)) { 3568 conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos; 3569 3570 conn->link_local_ipv6_addr = kmemdup( 3571 fw_ddb_entry->link_local_ipv6_addr, 3572 IPv6_ADDR_LEN, GFP_KERNEL); 3573 if (!conn->link_local_ipv6_addr) { 3574 rc = -ENOMEM; 3575 goto exit_copy; 3576 } 3577 } else { 3578 conn->ipv4_tos = fw_ddb_entry->ipv4_tos; 3579 } 3580 3581 if (fw_ddb_entry->iscsi_name[0]) { 3582 rc = iscsi_switch_str_param(&sess->targetname, 3583 (char *)fw_ddb_entry->iscsi_name); 3584 if (rc) 3585 goto exit_copy; 3586 } 3587 3588 if (fw_ddb_entry->iscsi_alias[0]) { 3589 rc = iscsi_switch_str_param(&sess->targetalias, 3590 (char *)fw_ddb_entry->iscsi_alias); 3591 if (rc) 3592 goto exit_copy; 3593 } 3594 3595 COPY_ISID(sess->isid, fw_ddb_entry->isid); 3596 3597 exit_copy: 3598 return rc; 3599 } 3600 3601 static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess, 3602 struct iscsi_bus_flash_conn *conn, 3603 struct dev_db_entry *fw_ddb_entry) 3604 { 3605 uint16_t options; 3606 int rc = 0; 3607 3608 options = le16_to_cpu(fw_ddb_entry->options); 3609 SET_BITVAL(conn->is_fw_assigned_ipv6, options, BIT_11); 3610 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4)) 3611 options |= BIT_8; 3612 else 3613 options &= ~BIT_8; 3614 3615 SET_BITVAL(sess->auto_snd_tgt_disable, options, BIT_6); 3616 SET_BITVAL(sess->discovery_sess, options, BIT_4); 3617 SET_BITVAL(sess->entry_state, options, BIT_3); 3618 fw_ddb_entry->options = cpu_to_le16(options); 3619 3620 options = le16_to_cpu(fw_ddb_entry->iscsi_options); 3621 SET_BITVAL(conn->hdrdgst_en, options, BIT_13); 3622 SET_BITVAL(conn->datadgst_en, options, BIT_12); 3623 SET_BITVAL(sess->imm_data_en, options, BIT_11); 3624 SET_BITVAL(sess->initial_r2t_en, options, BIT_10); 3625 SET_BITVAL(sess->dataseq_inorder_en, 
options, BIT_9); 3626 SET_BITVAL(sess->pdu_inorder_en, options, BIT_8); 3627 SET_BITVAL(sess->chap_auth_en, options, BIT_7); 3628 SET_BITVAL(conn->snack_req_en, options, BIT_6); 3629 SET_BITVAL(sess->discovery_logout_en, options, BIT_5); 3630 SET_BITVAL(sess->bidi_chap_en, options, BIT_4); 3631 SET_BITVAL(sess->discovery_auth_optional, options, BIT_3); 3632 SET_BITVAL(sess->erl & BIT_1, options, BIT_1); 3633 SET_BITVAL(sess->erl & BIT_0, options, BIT_0); 3634 fw_ddb_entry->iscsi_options = cpu_to_le16(options); 3635 3636 options = le16_to_cpu(fw_ddb_entry->tcp_options); 3637 SET_BITVAL(conn->tcp_timestamp_stat, options, BIT_6); 3638 SET_BITVAL(conn->tcp_nagle_disable, options, BIT_5); 3639 SET_BITVAL(conn->tcp_wsf_disable, options, BIT_4); 3640 SET_BITVAL(conn->tcp_timer_scale & BIT_2, options, BIT_3); 3641 SET_BITVAL(conn->tcp_timer_scale & BIT_1, options, BIT_2); 3642 SET_BITVAL(conn->tcp_timer_scale & BIT_0, options, BIT_1); 3643 SET_BITVAL(conn->tcp_timestamp_en, options, BIT_0); 3644 fw_ddb_entry->tcp_options = cpu_to_le16(options); 3645 3646 options = le16_to_cpu(fw_ddb_entry->ip_options); 3647 SET_BITVAL(conn->fragment_disable, options, BIT_4); 3648 fw_ddb_entry->ip_options = cpu_to_le16(options); 3649 3650 fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t); 3651 fw_ddb_entry->iscsi_max_rcv_data_seg_len = 3652 cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS); 3653 fw_ddb_entry->iscsi_max_snd_data_seg_len = 3654 cpu_to_le16(conn->max_xmit_dlength / BYTE_UNITS); 3655 fw_ddb_entry->iscsi_first_burst_len = 3656 cpu_to_le16(sess->first_burst / BYTE_UNITS); 3657 fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16(sess->max_burst / 3658 BYTE_UNITS); 3659 fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait); 3660 fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain); 3661 fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt); 3662 fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size); 3663 fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf); 3664 fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf); 3665 fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label); 3666 fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout); 3667 fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port); 3668 fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn); 3669 fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn); 3670 fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_idx); 3671 fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx); 3672 fw_ddb_entry->tsid = cpu_to_le16(sess->tsid); 3673 fw_ddb_entry->port = cpu_to_le16(conn->port); 3674 fw_ddb_entry->def_timeout = 3675 cpu_to_le16(sess->default_taskmgmt_timeout); 3676 3677 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4)) 3678 fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class; 3679 else 3680 fw_ddb_entry->ipv4_tos = conn->ipv4_tos; 3681 3682 if (conn->ipaddress) 3683 memcpy(fw_ddb_entry->ip_addr, conn->ipaddress, 3684 sizeof(fw_ddb_entry->ip_addr)); 3685 3686 if (conn->redirect_ipaddr) 3687 memcpy(fw_ddb_entry->tgt_addr, conn->redirect_ipaddr, 3688 sizeof(fw_ddb_entry->tgt_addr)); 3689 3690 if (conn->link_local_ipv6_addr) 3691 memcpy(fw_ddb_entry->link_local_ipv6_addr, 3692 conn->link_local_ipv6_addr, 3693 sizeof(fw_ddb_entry->link_local_ipv6_addr)); 3694 3695 if (sess->targetname) 3696 memcpy(fw_ddb_entry->iscsi_name, sess->targetname, 3697 sizeof(fw_ddb_entry->iscsi_name)); 3698 3699 if (sess->targetalias) 3700 memcpy(fw_ddb_entry->iscsi_alias, 
sess->targetalias, 3701 sizeof(fw_ddb_entry->iscsi_alias)); 3702 3703 COPY_ISID(fw_ddb_entry->isid, sess->isid); 3704 3705 return rc; 3706 } 3707 3708 static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn, 3709 struct iscsi_session *sess, 3710 struct dev_db_entry *fw_ddb_entry) 3711 { 3712 unsigned long options = 0; 3713 uint16_t ddb_link; 3714 uint16_t disc_parent; 3715 char ip_addr[DDB_IPADDR_LEN]; 3716 3717 options = le16_to_cpu(fw_ddb_entry->options); 3718 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options); 3719 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE, 3720 &options); 3721 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options); 3722 3723 options = le16_to_cpu(fw_ddb_entry->iscsi_options); 3724 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options); 3725 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options); 3726 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options); 3727 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options); 3728 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER, 3729 &options); 3730 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options); 3731 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options); 3732 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN, 3733 &options); 3734 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options); 3735 sess->discovery_auth_optional = 3736 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options); 3737 if (test_bit(ISCSIOPT_ERL1, &options)) 3738 sess->erl |= BIT_1; 3739 if (test_bit(ISCSIOPT_ERL0, &options)) 3740 sess->erl |= BIT_0; 3741 3742 options = le16_to_cpu(fw_ddb_entry->tcp_options); 3743 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options); 3744 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options); 3745 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options); 3746 if (test_bit(TCPOPT_TIMER_SCALE3, &options)) 3747 conn->tcp_timer_scale |= BIT_3; 3748 if (test_bit(TCPOPT_TIMER_SCALE2, &options)) 3749 conn->tcp_timer_scale |= BIT_2; 3750 if (test_bit(TCPOPT_TIMER_SCALE1, &options)) 3751 conn->tcp_timer_scale |= BIT_1; 3752 3753 conn->tcp_timer_scale >>= 1; 3754 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options); 3755 3756 options = le16_to_cpu(fw_ddb_entry->ip_options); 3757 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options); 3758 3759 conn->max_recv_dlength = BYTE_UNITS * 3760 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); 3761 conn->max_xmit_dlength = BYTE_UNITS * 3762 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); 3763 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); 3764 sess->first_burst = BYTE_UNITS * 3765 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); 3766 sess->max_burst = BYTE_UNITS * 3767 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); 3768 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); 3769 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); 3770 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); 3771 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss); 3772 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf; 3773 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf; 3774 conn->ipv4_tos = fw_ddb_entry->ipv4_tos; 3775 conn->keepalive_tmo = le16_to_cpu(fw_ddb_entry->ka_timeout); 3776 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port); 3777 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn); 3778 conn->exp_statsn = 
le32_to_cpu(fw_ddb_entry->exp_stat_sn); 3779 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid); 3780 COPY_ISID(sess->isid, fw_ddb_entry->isid); 3781 3782 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 3783 if (ddb_link == DDB_ISNS) 3784 disc_parent = ISCSI_DISC_PARENT_ISNS; 3785 else if (ddb_link == DDB_NO_LINK) 3786 disc_parent = ISCSI_DISC_PARENT_UNKNOWN; 3787 else if (ddb_link < MAX_DDB_ENTRIES) 3788 disc_parent = ISCSI_DISC_PARENT_SENDTGT; 3789 else 3790 disc_parent = ISCSI_DISC_PARENT_UNKNOWN; 3791 3792 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 3793 iscsi_get_discovery_parent_name(disc_parent), 0); 3794 3795 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS, 3796 (char *)fw_ddb_entry->iscsi_alias, 0); 3797 3798 options = le16_to_cpu(fw_ddb_entry->options); 3799 if (options & DDB_OPT_IPV6_DEVICE) { 3800 memset(ip_addr, 0, sizeof(ip_addr)); 3801 sprintf(ip_addr, "%pI6", fw_ddb_entry->link_local_ipv6_addr); 3802 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_LOCAL_IPADDR, 3803 (char *)ip_addr, 0); 3804 } 3805 } 3806 3807 static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha, 3808 struct dev_db_entry *fw_ddb_entry, 3809 struct iscsi_cls_session *cls_sess, 3810 struct iscsi_cls_conn *cls_conn) 3811 { 3812 int buflen = 0; 3813 struct iscsi_session *sess; 3814 struct ddb_entry *ddb_entry; 3815 struct ql4_chap_table chap_tbl; 3816 struct iscsi_conn *conn; 3817 char ip_addr[DDB_IPADDR_LEN]; 3818 uint16_t options = 0; 3819 3820 sess = cls_sess->dd_data; 3821 ddb_entry = sess->dd_data; 3822 conn = cls_conn->dd_data; 3823 memset(&chap_tbl, 0, sizeof(chap_tbl)); 3824 3825 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 3826 3827 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry); 3828 3829 sess->def_taskmgmt_tmo = le16_to_cpu(fw_ddb_entry->def_timeout); 3830 conn->persistent_port = le16_to_cpu(fw_ddb_entry->port); 3831 3832 memset(ip_addr, 0, sizeof(ip_addr)); 3833 options = le16_to_cpu(fw_ddb_entry->options); 3834 if (options & DDB_OPT_IPV6_DEVICE) { 3835 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv6", 4); 3836 3837 memset(ip_addr, 0, sizeof(ip_addr)); 3838 sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr); 3839 } else { 3840 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv4", 4); 3841 sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr); 3842 } 3843 3844 iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS, 3845 (char *)ip_addr, buflen); 3846 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME, 3847 (char *)fw_ddb_entry->iscsi_name, buflen); 3848 iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME, 3849 (char *)ha->name_string, buflen); 3850 3851 if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) { 3852 if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, 3853 chap_tbl.secret, 3854 ddb_entry->chap_tbl_idx)) { 3855 iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, 3856 (char *)chap_tbl.name, 3857 strlen((char *)chap_tbl.name)); 3858 iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, 3859 (char *)chap_tbl.secret, 3860 chap_tbl.secret_len); 3861 } 3862 } 3863 } 3864 3865 void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha, 3866 struct ddb_entry *ddb_entry) 3867 { 3868 struct iscsi_cls_session *cls_sess; 3869 struct iscsi_cls_conn *cls_conn; 3870 uint32_t ddb_state; 3871 dma_addr_t fw_ddb_entry_dma; 3872 struct dev_db_entry *fw_ddb_entry; 3873 3874 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3875 &fw_ddb_entry_dma, GFP_KERNEL); 3876 if (!fw_ddb_entry) { 3877 ql4_printk(KERN_ERR, ha, 3878 
"%s: Unable to allocate dma buffer\n", __func__); 3879 goto exit_session_conn_fwddb_param; 3880 } 3881 3882 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, 3883 fw_ddb_entry_dma, NULL, NULL, &ddb_state, 3884 NULL, NULL, NULL) == QLA_ERROR) { 3885 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " 3886 "get_ddb_entry for fw_ddb_index %d\n", 3887 ha->host_no, __func__, 3888 ddb_entry->fw_ddb_index)); 3889 goto exit_session_conn_fwddb_param; 3890 } 3891 3892 cls_sess = ddb_entry->sess; 3893 3894 cls_conn = ddb_entry->conn; 3895 3896 /* Update params */ 3897 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); 3898 3899 exit_session_conn_fwddb_param: 3900 if (fw_ddb_entry) 3901 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3902 fw_ddb_entry, fw_ddb_entry_dma); 3903 } 3904 3905 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha, 3906 struct ddb_entry *ddb_entry) 3907 { 3908 struct iscsi_cls_session *cls_sess; 3909 struct iscsi_cls_conn *cls_conn; 3910 struct iscsi_session *sess; 3911 struct iscsi_conn *conn; 3912 uint32_t ddb_state; 3913 dma_addr_t fw_ddb_entry_dma; 3914 struct dev_db_entry *fw_ddb_entry; 3915 3916 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3917 &fw_ddb_entry_dma, GFP_KERNEL); 3918 if (!fw_ddb_entry) { 3919 ql4_printk(KERN_ERR, ha, 3920 "%s: Unable to allocate dma buffer\n", __func__); 3921 goto exit_session_conn_param; 3922 } 3923 3924 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, 3925 fw_ddb_entry_dma, NULL, NULL, &ddb_state, 3926 NULL, NULL, NULL) == QLA_ERROR) { 3927 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " 3928 "get_ddb_entry for fw_ddb_index %d\n", 3929 ha->host_no, __func__, 3930 ddb_entry->fw_ddb_index)); 3931 goto exit_session_conn_param; 3932 } 3933 3934 cls_sess = ddb_entry->sess; 3935 sess = cls_sess->dd_data; 3936 3937 cls_conn = ddb_entry->conn; 3938 conn = cls_conn->dd_data; 3939 3940 /* Update timers after login */ 3941 ddb_entry->default_relogin_timeout = 3942 (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) && 3943 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ? 
3944 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV; 3945 ddb_entry->default_time2wait = 3946 le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); 3947 3948 /* Update params */ 3949 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 3950 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry); 3951 3952 memcpy(sess->initiatorname, ha->name_string, 3953 min(sizeof(ha->name_string), sizeof(sess->initiatorname))); 3954 3955 exit_session_conn_param: 3956 if (fw_ddb_entry) 3957 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3958 fw_ddb_entry, fw_ddb_entry_dma); 3959 } 3960 3961 /* 3962 * Timer routines 3963 */ 3964 3965 static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func, 3966 unsigned long interval) 3967 { 3968 DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n", 3969 __func__, ha->host->host_no)); 3970 init_timer(&ha->timer); 3971 ha->timer.expires = jiffies + interval * HZ; 3972 ha->timer.data = (unsigned long)ha; 3973 ha->timer.function = (void (*)(unsigned long))func; 3974 add_timer(&ha->timer); 3975 ha->timer_active = 1; 3976 } 3977 3978 static void qla4xxx_stop_timer(struct scsi_qla_host *ha) 3979 { 3980 del_timer_sync(&ha->timer); 3981 ha->timer_active = 0; 3982 } 3983 3984 /** 3985 * qla4xxx_mark_device_missing - blocks the session 3986 * @cls_session: Pointer to the session to be blocked 3988 * 3989 * This routine marks a device missing and closes its connection. 3990 **/ 3991 void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session) 3992 { 3993 iscsi_block_session(cls_session); 3994 } 3995 3996 /** 3997 * qla4xxx_mark_all_devices_missing - mark all devices as missing. 3998 * @ha: Pointer to host adapter structure. 3999 * 4000 * This routine marks all devices missing by blocking their sessions. 4001 **/ 4002 void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha) 4003 { 4004 iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing); 4005 } 4006 4007 static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha, 4008 struct ddb_entry *ddb_entry, 4009 struct scsi_cmnd *cmd) 4010 { 4011 struct srb *srb; 4012 4013 srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC); 4014 if (!srb) 4015 return srb; 4016 4017 kref_init(&srb->srb_ref); 4018 srb->ha = ha; 4019 srb->ddb = ddb_entry; 4020 srb->cmd = cmd; 4021 srb->flags = 0; 4022 CMD_SP(cmd) = (void *)srb; 4023 4024 return srb; 4025 } 4026 4027 static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb) 4028 { 4029 struct scsi_cmnd *cmd = srb->cmd; 4030 4031 if (srb->flags & SRB_DMA_VALID) { 4032 scsi_dma_unmap(cmd); 4033 srb->flags &= ~SRB_DMA_VALID; 4034 } 4035 CMD_SP(cmd) = NULL; 4036 } 4037 4038 void qla4xxx_srb_compl(struct kref *ref) 4039 { 4040 struct srb *srb = container_of(ref, struct srb, srb_ref); 4041 struct scsi_cmnd *cmd = srb->cmd; 4042 struct scsi_qla_host *ha = srb->ha; 4043 4044 qla4xxx_srb_free_dma(ha, srb); 4045 4046 mempool_free(srb, ha->srb_mempool); 4047 4048 cmd->scsi_done(cmd); 4049 } 4050 4051 /** 4052 * qla4xxx_queuecommand - scsi layer issues scsi command to driver. 4053 * @host: scsi host 4054 * @cmd: Pointer to Linux's SCSI command structure 4055 * 4056 * Remarks: 4057 * This routine is invoked by Linux to send a SCSI command to the driver.
4058 * The mid-level driver tries to ensure that queuecommand never gets 4059 * invoked concurrently with itself or the interrupt handler (although 4060 * the interrupt handler may call this routine as part of request- 4061 * completion handling). Unfortunately, it sometimes calls the scheduler 4062 * in interrupt context which is a big NO! NO!. 4063 **/ 4064 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) 4065 { 4066 struct scsi_qla_host *ha = to_qla_host(host); 4067 struct ddb_entry *ddb_entry = cmd->device->hostdata; 4068 struct iscsi_cls_session *sess = ddb_entry->sess; 4069 struct srb *srb; 4070 int rval; 4071 4072 if (test_bit(AF_EEH_BUSY, &ha->flags)) { 4073 if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags)) 4074 cmd->result = DID_NO_CONNECT << 16; 4075 else 4076 cmd->result = DID_REQUEUE << 16; 4077 goto qc_fail_command; 4078 } 4079 4080 if (!sess) { 4081 cmd->result = DID_IMM_RETRY << 16; 4082 goto qc_fail_command; 4083 } 4084 4085 rval = iscsi_session_chkready(sess); 4086 if (rval) { 4087 cmd->result = rval; 4088 goto qc_fail_command; 4089 } 4090 4091 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 4092 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || 4093 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 4094 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) || 4095 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || 4096 !test_bit(AF_ONLINE, &ha->flags) || 4097 !test_bit(AF_LINK_UP, &ha->flags) || 4098 test_bit(AF_LOOPBACK, &ha->flags) || 4099 test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) || 4100 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags) || 4101 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) 4102 goto qc_host_busy; 4103 4104 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd); 4105 if (!srb) 4106 goto qc_host_busy; 4107 4108 rval = qla4xxx_send_command_to_isp(ha, srb); 4109 if (rval != QLA_SUCCESS) 4110 goto qc_host_busy_free_sp; 4111 4112 return 0; 4113 4114 qc_host_busy_free_sp: 4115 qla4xxx_srb_free_dma(ha, srb); 4116 mempool_free(srb, ha->srb_mempool); 4117 4118 qc_host_busy: 4119 return SCSI_MLQUEUE_HOST_BUSY; 4120 4121 qc_fail_command: 4122 cmd->scsi_done(cmd); 4123 4124 return 0; 4125 } 4126 4127 /** 4128 * qla4xxx_mem_free - frees memory allocated to adapter 4129 * @ha: Pointer to host adapter structure. 4130 * 4131 * Frees memory previously allocated by qla4xxx_mem_alloc 4132 **/ 4133 static void qla4xxx_mem_free(struct scsi_qla_host *ha) 4134 { 4135 if (ha->queues) 4136 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues, 4137 ha->queues_dma); 4138 4139 if (ha->fw_dump) 4140 vfree(ha->fw_dump); 4141 4142 ha->queues_len = 0; 4143 ha->queues = NULL; 4144 ha->queues_dma = 0; 4145 ha->request_ring = NULL; 4146 ha->request_dma = 0; 4147 ha->response_ring = NULL; 4148 ha->response_dma = 0; 4149 ha->shadow_regs = NULL; 4150 ha->shadow_regs_dma = 0; 4151 ha->fw_dump = NULL; 4152 ha->fw_dump_size = 0; 4153 4154 /* Free srb pool.
*/ 4155 if (ha->srb_mempool) 4156 mempool_destroy(ha->srb_mempool); 4157 4158 ha->srb_mempool = NULL; 4159 4160 if (ha->chap_dma_pool) 4161 dma_pool_destroy(ha->chap_dma_pool); 4162 4163 if (ha->chap_list) 4164 vfree(ha->chap_list); 4165 ha->chap_list = NULL; 4166 4167 if (ha->fw_ddb_dma_pool) 4168 dma_pool_destroy(ha->fw_ddb_dma_pool); 4169 4170 /* release io space registers */ 4171 if (is_qla8022(ha)) { 4172 if (ha->nx_pcibase) 4173 iounmap( 4174 (struct device_reg_82xx __iomem *)ha->nx_pcibase); 4175 } else if (is_qla8032(ha) || is_qla8042(ha)) { 4176 if (ha->nx_pcibase) 4177 iounmap( 4178 (struct device_reg_83xx __iomem *)ha->nx_pcibase); 4179 } else if (ha->reg) { 4180 iounmap(ha->reg); 4181 } 4182 4183 if (ha->reset_tmplt.buff) 4184 vfree(ha->reset_tmplt.buff); 4185 4186 pci_release_regions(ha->pdev); 4187 } 4188 4189 /** 4190 * qla4xxx_mem_alloc - allocates memory for use by adapter. 4191 * @ha: Pointer to host adapter structure 4192 * 4193 * Allocates DMA memory for request and response queues. Also allocates memory 4194 * for srbs. 4195 **/ 4196 static int qla4xxx_mem_alloc(struct scsi_qla_host *ha) 4197 { 4198 unsigned long align; 4199 4200 /* Allocate contiguous block of DMA memory for queues. */ 4201 ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) + 4202 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) + 4203 sizeof(struct shadow_regs) + 4204 MEM_ALIGN_VALUE + 4205 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 4206 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len, 4207 &ha->queues_dma, GFP_KERNEL); 4208 if (ha->queues == NULL) { 4209 ql4_printk(KERN_WARNING, ha, 4210 "Memory Allocation failed - queues.\n"); 4211 4212 goto mem_alloc_error_exit; 4213 } 4214 memset(ha->queues, 0, ha->queues_len); 4215 4216 /* 4217 * As per RISC alignment requirements -- the bus-address must be a 4218 * multiple of the request-ring size (in bytes). 4219 */ 4220 align = 0; 4221 if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1)) 4222 align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma & 4223 (MEM_ALIGN_VALUE - 1)); 4224 4225 /* Update request and response queue pointers. */ 4226 ha->request_dma = ha->queues_dma + align; 4227 ha->request_ring = (struct queue_entry *) (ha->queues + align); 4228 ha->response_dma = ha->queues_dma + align + 4229 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE); 4230 ha->response_ring = (struct queue_entry *) (ha->queues + align + 4231 (REQUEST_QUEUE_DEPTH * 4232 QUEUE_SIZE)); 4233 ha->shadow_regs_dma = ha->queues_dma + align + 4234 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) + 4235 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE); 4236 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align + 4237 (REQUEST_QUEUE_DEPTH * 4238 QUEUE_SIZE) + 4239 (RESPONSE_QUEUE_DEPTH * 4240 QUEUE_SIZE)); 4241 4242 /* Allocate memory for srb pool. 
*/ 4243 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab, 4244 mempool_free_slab, srb_cachep); 4245 if (ha->srb_mempool == NULL) { 4246 ql4_printk(KERN_WARNING, ha, 4247 "Memory Allocation failed - SRB Pool.\n"); 4248 4249 goto mem_alloc_error_exit; 4250 } 4251 4252 ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev, 4253 CHAP_DMA_BLOCK_SIZE, 8, 0); 4254 4255 if (ha->chap_dma_pool == NULL) { 4256 ql4_printk(KERN_WARNING, ha, 4257 "%s: chap_dma_pool allocation failed..\n", __func__); 4258 goto mem_alloc_error_exit; 4259 } 4260 4261 ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev, 4262 DDB_DMA_BLOCK_SIZE, 8, 0); 4263 4264 if (ha->fw_ddb_dma_pool == NULL) { 4265 ql4_printk(KERN_WARNING, ha, 4266 "%s: fw_ddb_dma_pool allocation failed..\n", 4267 __func__); 4268 goto mem_alloc_error_exit; 4269 } 4270 4271 return QLA_SUCCESS; 4272 4273 mem_alloc_error_exit: 4274 qla4xxx_mem_free(ha); 4275 return QLA_ERROR; 4276 } 4277 4278 /** 4279 * qla4_8xxx_check_temp - Check the ISP82XX temperature. 4280 * @ha: adapter block pointer. 4281 * 4282 * Note: The caller should not hold the idc lock. 4283 **/ 4284 static int qla4_8xxx_check_temp(struct scsi_qla_host *ha) 4285 { 4286 uint32_t temp, temp_state, temp_val; 4287 int status = QLA_SUCCESS; 4288 4289 temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE); 4290 4291 temp_state = qla82xx_get_temp_state(temp); 4292 temp_val = qla82xx_get_temp_val(temp); 4293 4294 if (temp_state == QLA82XX_TEMP_PANIC) { 4295 ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C" 4296 " exceeds maximum allowed. Hardware has been shut" 4297 " down.\n", temp_val); 4298 status = QLA_ERROR; 4299 } else if (temp_state == QLA82XX_TEMP_WARN) { 4300 if (ha->temperature == QLA82XX_TEMP_NORMAL) 4301 ql4_printk(KERN_WARNING, ha, "Device temperature %d" 4302 " degrees C exceeds operating range." 4303 " Immediate action needed.\n", temp_val); 4304 } else { 4305 if (ha->temperature == QLA82XX_TEMP_WARN) 4306 ql4_printk(KERN_INFO, ha, "Device temperature is" 4307 " now %d degrees C in normal range.\n", 4308 temp_val); 4309 } 4310 ha->temperature = temp_state; 4311 return status; 4312 } 4313 4314 /** 4315 * qla4_8xxx_check_fw_alive - Check firmware health 4316 * @ha: Pointer to host adapter structure. 
4317 * 4318 * Context: Interrupt 4319 **/ 4320 static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha) 4321 { 4322 uint32_t fw_heartbeat_counter; 4323 int status = QLA_SUCCESS; 4324 4325 fw_heartbeat_counter = qla4_8xxx_rd_direct(ha, 4326 QLA8XXX_PEG_ALIVE_COUNTER); 4327 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */ 4328 if (fw_heartbeat_counter == 0xffffffff) { 4329 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen " 4330 "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n", 4331 ha->host_no, __func__)); 4332 return status; 4333 } 4334 4335 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) { 4336 ha->seconds_since_last_heartbeat++; 4337 /* FW not alive after 2 seconds */ 4338 if (ha->seconds_since_last_heartbeat == 2) { 4339 ha->seconds_since_last_heartbeat = 0; 4340 qla4_8xxx_dump_peg_reg(ha); 4341 status = QLA_ERROR; 4342 } 4343 } else 4344 ha->seconds_since_last_heartbeat = 0; 4345 4346 ha->fw_heartbeat_counter = fw_heartbeat_counter; 4347 return status; 4348 } 4349 4350 static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha) 4351 { 4352 uint32_t halt_status; 4353 int halt_status_unrecoverable = 0; 4354 4355 halt_status = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1); 4356 4357 if (is_qla8022(ha)) { 4358 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", 4359 __func__); 4360 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, 4361 CRB_NIU_XG_PAUSE_CTL_P0 | 4362 CRB_NIU_XG_PAUSE_CTL_P1); 4363 4364 if (QLA82XX_FWERROR_CODE(halt_status) == 0x67) 4365 ql4_printk(KERN_ERR, ha, "%s: Firmware aborted with error code 0x00006700. Device is being reset\n", 4366 __func__); 4367 if (halt_status & HALT_STATUS_UNRECOVERABLE) 4368 halt_status_unrecoverable = 1; 4369 } else if (is_qla8032(ha) || is_qla8042(ha)) { 4370 if (halt_status & QLA83XX_HALT_STATUS_FW_RESET) 4371 ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n", 4372 __func__); 4373 else if (halt_status & QLA83XX_HALT_STATUS_UNRECOVERABLE) 4374 halt_status_unrecoverable = 1; 4375 } 4376 4377 /* 4378 * Since we cannot change dev_state in interrupt context, 4379 * set appropriate DPC flag then wakeup DPC 4380 */ 4381 if (halt_status_unrecoverable) { 4382 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags); 4383 } else { 4384 ql4_printk(KERN_INFO, ha, "%s: detect abort needed!\n", 4385 __func__); 4386 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4387 } 4388 qla4xxx_mailbox_premature_completion(ha); 4389 qla4xxx_wake_dpc(ha); 4390 } 4391 4392 /** 4393 * qla4_8xxx_watchdog - Poll dev state 4394 * @ha: Pointer to host adapter structure. 
4395 * 4396 * Context: Interrupt 4397 **/ 4398 void qla4_8xxx_watchdog(struct scsi_qla_host *ha) 4399 { 4400 uint32_t dev_state; 4401 uint32_t idc_ctrl; 4402 4403 if (is_qla8032(ha) && 4404 (qla4_83xx_is_detached(ha) == QLA_SUCCESS)) 4405 WARN_ONCE(1, "%s: iSCSI function %d marked invisible\n", 4406 __func__, ha->func_num); 4407 4408 /* don't poll if reset is going on */ 4409 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || 4410 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 4411 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) { 4412 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE); 4413 4414 if (qla4_8xxx_check_temp(ha)) { 4415 if (is_qla8022(ha)) { 4416 ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n"); 4417 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, 4418 CRB_NIU_XG_PAUSE_CTL_P0 | 4419 CRB_NIU_XG_PAUSE_CTL_P1); 4420 } 4421 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags); 4422 qla4xxx_wake_dpc(ha); 4423 } else if (dev_state == QLA8XXX_DEV_NEED_RESET && 4424 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 4425 4426 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n", 4427 __func__); 4428 4429 if (is_qla8032(ha) || is_qla8042(ha)) { 4430 idc_ctrl = qla4_83xx_rd_reg(ha, 4431 QLA83XX_IDC_DRV_CTRL); 4432 if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) { 4433 ql4_printk(KERN_INFO, ha, "%s: Graceful reset bit is not set\n", 4434 __func__); 4435 qla4xxx_mailbox_premature_completion( 4436 ha); 4437 } 4438 } 4439 4440 if ((is_qla8032(ha) || is_qla8042(ha)) || 4441 (is_qla8022(ha) && !ql4xdontresethba)) { 4442 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4443 qla4xxx_wake_dpc(ha); 4444 } 4445 } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT && 4446 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { 4447 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n", 4448 __func__); 4449 set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags); 4450 qla4xxx_wake_dpc(ha); 4451 } else { 4452 /* Check firmware health */ 4453 if (qla4_8xxx_check_fw_alive(ha)) 4454 qla4_8xxx_process_fw_error(ha); 4455 } 4456 } 4457 } 4458 4459 static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) 4460 { 4461 struct iscsi_session *sess; 4462 struct ddb_entry *ddb_entry; 4463 struct scsi_qla_host *ha; 4464 4465 sess = cls_sess->dd_data; 4466 ddb_entry = sess->dd_data; 4467 ha = ddb_entry->ha; 4468 4469 if (!(ddb_entry->ddb_type == FLASH_DDB)) 4470 return; 4471 4472 if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) && 4473 !iscsi_is_session_online(cls_sess)) { 4474 if (atomic_read(&ddb_entry->retry_relogin_timer) != 4475 INVALID_ENTRY) { 4476 if (atomic_read(&ddb_entry->retry_relogin_timer) == 4477 0) { 4478 atomic_set(&ddb_entry->retry_relogin_timer, 4479 INVALID_ENTRY); 4480 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); 4481 set_bit(DF_RELOGIN, &ddb_entry->flags); 4482 DEBUG2(ql4_printk(KERN_INFO, ha, 4483 "%s: index [%d] login device\n", 4484 __func__, ddb_entry->fw_ddb_index)); 4485 } else 4486 atomic_dec(&ddb_entry->retry_relogin_timer); 4487 } 4488 } 4489 4490 /* Wait for relogin to timeout */ 4491 if (atomic_read(&ddb_entry->relogin_timer) && 4492 (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) { 4493 /* 4494 * If the relogin times out and the device is 4495 * still NOT ONLINE then try and relogin again. 
4496 */ 4497 if (!iscsi_is_session_online(cls_sess)) { 4498 /* Reset retry relogin timer */ 4499 atomic_inc(&ddb_entry->relogin_retry_count); 4500 DEBUG2(ql4_printk(KERN_INFO, ha, 4501 "%s: index[%d] relogin timed out-retrying" 4502 " relogin (%d), retry (%d)\n", __func__, 4503 ddb_entry->fw_ddb_index, 4504 atomic_read(&ddb_entry->relogin_retry_count), 4505 ddb_entry->default_time2wait + 4)); 4506 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); 4507 atomic_set(&ddb_entry->retry_relogin_timer, 4508 ddb_entry->default_time2wait + 4); 4509 } 4510 } 4511 } 4512 4513 /** 4514 * qla4xxx_timer - checks every second for work to do. 4515 * @ha: Pointer to host adapter structure. 4516 **/ 4517 static void qla4xxx_timer(struct scsi_qla_host *ha) 4518 { 4519 int start_dpc = 0; 4520 uint16_t w; 4521 4522 iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb); 4523 4524 /* If we are in the middle of AER/EEH processing 4525 * skip any processing and reschedule the timer 4526 */ 4527 if (test_bit(AF_EEH_BUSY, &ha->flags)) { 4528 mod_timer(&ha->timer, jiffies + HZ); 4529 return; 4530 } 4531 4532 /* Hardware read to trigger an EEH error during mailbox waits. */ 4533 if (!pci_channel_offline(ha->pdev)) 4534 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 4535 4536 if (is_qla80XX(ha)) 4537 qla4_8xxx_watchdog(ha); 4538 4539 if (is_qla40XX(ha)) { 4540 /* Check for heartbeat interval. */ 4541 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE && 4542 ha->heartbeat_interval != 0) { 4543 ha->seconds_since_last_heartbeat++; 4544 if (ha->seconds_since_last_heartbeat > 4545 ha->heartbeat_interval + 2) 4546 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4547 } 4548 } 4549 4550 /* Process any deferred work. */ 4551 if (!list_empty(&ha->work_list)) 4552 start_dpc++; 4553 4554 /* Wakeup the dpc routine for this adapter, if needed. */ 4555 if (start_dpc || 4556 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 4557 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) || 4558 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) || 4559 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) || 4560 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 4561 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) || 4562 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) || 4563 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) || 4564 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || 4565 test_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags) || 4566 test_bit(DPC_AEN, &ha->dpc_flags)) { 4567 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine" 4568 " - dpc flags = 0x%lx\n", 4569 ha->host_no, __func__, ha->dpc_flags)); 4570 qla4xxx_wake_dpc(ha); 4571 } 4572 4573 /* Reschedule timer thread to call us back in one second */ 4574 mod_timer(&ha->timer, jiffies + HZ); 4575 4576 DEBUG2(ha->seconds_since_last_intr++); 4577 } 4578 4579 /** 4580 * qla4xxx_cmd_wait - waits for all outstanding commands to complete 4581 * @ha: Pointer to host adapter structure. 4582 * 4583 * This routine stalls the driver until all outstanding commands are returned. 4584 * Caller must release the Hardware Lock prior to calling this routine. 
4585 **/ 4586 static int qla4xxx_cmd_wait(struct scsi_qla_host *ha) 4587 { 4588 uint32_t index = 0; 4589 unsigned long flags; 4590 struct scsi_cmnd *cmd; 4591 unsigned long wtime; 4592 uint32_t wtmo; 4593 4594 if (is_qla40XX(ha)) 4595 wtmo = WAIT_CMD_TOV; 4596 else 4597 wtmo = ha->nx_reset_timeout / 2; 4598 4599 wtime = jiffies + (wtmo * HZ); 4600 4601 DEBUG2(ql4_printk(KERN_INFO, ha, 4602 "Wait up to %u seconds for cmds to complete\n", 4603 wtmo)); 4604 4605 while (!time_after_eq(jiffies, wtime)) { 4606 spin_lock_irqsave(&ha->hardware_lock, flags); 4607 /* Find a command that hasn't completed. */ 4608 for (index = 0; index < ha->host->can_queue; index++) { 4609 cmd = scsi_host_find_tag(ha->host, index); 4610 /* 4611 * We cannot just check if the index is valid, 4612 * becase if we are run from the scsi eh, then 4613 * the scsi/block layer is going to prevent 4614 * the tag from being released. 4615 */ 4616 if (cmd != NULL && CMD_SP(cmd)) 4617 break; 4618 } 4619 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4620 4621 /* If No Commands are pending, wait is complete */ 4622 if (index == ha->host->can_queue) 4623 return QLA_SUCCESS; 4624 4625 msleep(1000); 4626 } 4627 /* If we timed out on waiting for commands to come back 4628 * return ERROR. */ 4629 return QLA_ERROR; 4630 } 4631 4632 int qla4xxx_hw_reset(struct scsi_qla_host *ha) 4633 { 4634 uint32_t ctrl_status; 4635 unsigned long flags = 0; 4636 4637 DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__)); 4638 4639 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS) 4640 return QLA_ERROR; 4641 4642 spin_lock_irqsave(&ha->hardware_lock, flags); 4643 4644 /* 4645 * If the SCSI Reset Interrupt bit is set, clear it. 4646 * Otherwise, the Soft Reset won't work. 4647 */ 4648 ctrl_status = readw(&ha->reg->ctrl_status); 4649 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) 4650 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status); 4651 4652 /* Issue Soft Reset */ 4653 writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status); 4654 readl(&ha->reg->ctrl_status); 4655 4656 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4657 return QLA_SUCCESS; 4658 } 4659 4660 /** 4661 * qla4xxx_soft_reset - performs soft reset. 4662 * @ha: Pointer to host adapter structure. 
4663 **/ 4664 int qla4xxx_soft_reset(struct scsi_qla_host *ha) 4665 { 4666 uint32_t max_wait_time; 4667 unsigned long flags = 0; 4668 int status; 4669 uint32_t ctrl_status; 4670 4671 status = qla4xxx_hw_reset(ha); 4672 if (status != QLA_SUCCESS) 4673 return status; 4674 4675 status = QLA_ERROR; 4676 /* Wait until the Network Reset Intr bit is cleared */ 4677 max_wait_time = RESET_INTR_TOV; 4678 do { 4679 spin_lock_irqsave(&ha->hardware_lock, flags); 4680 ctrl_status = readw(&ha->reg->ctrl_status); 4681 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4682 4683 if ((ctrl_status & CSR_NET_RESET_INTR) == 0) 4684 break; 4685 4686 msleep(1000); 4687 } while ((--max_wait_time)); 4688 4689 if ((ctrl_status & CSR_NET_RESET_INTR) != 0) { 4690 DEBUG2(printk(KERN_WARNING 4691 "scsi%ld: Network Reset Intr not cleared by " 4692 "Network function, clearing it now!\n", 4693 ha->host_no)); 4694 spin_lock_irqsave(&ha->hardware_lock, flags); 4695 writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status); 4696 readl(&ha->reg->ctrl_status); 4697 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4698 } 4699 4700 /* Wait until the firmware tells us the Soft Reset is done */ 4701 max_wait_time = SOFT_RESET_TOV; 4702 do { 4703 spin_lock_irqsave(&ha->hardware_lock, flags); 4704 ctrl_status = readw(&ha->reg->ctrl_status); 4705 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4706 4707 if ((ctrl_status & CSR_SOFT_RESET) == 0) { 4708 status = QLA_SUCCESS; 4709 break; 4710 } 4711 4712 msleep(1000); 4713 } while ((--max_wait_time)); 4714 4715 /* 4716 * Also, make sure that the SCSI Reset Interrupt bit has been cleared 4717 * after the soft reset has taken place. 4718 */ 4719 spin_lock_irqsave(&ha->hardware_lock, flags); 4720 ctrl_status = readw(&ha->reg->ctrl_status); 4721 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) { 4722 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status); 4723 readl(&ha->reg->ctrl_status); 4724 } 4725 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4726 4727 /* If soft reset fails then most probably the bios on other 4728 * function is also enabled. 4729 * Since the initialization is sequential the other fn 4730 * wont be able to acknowledge the soft reset. 4731 * Issue a force soft reset to workaround this scenario. 4732 */ 4733 if (max_wait_time == 0) { 4734 /* Issue Force Soft Reset */ 4735 spin_lock_irqsave(&ha->hardware_lock, flags); 4736 writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status); 4737 readl(&ha->reg->ctrl_status); 4738 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4739 /* Wait until the firmware tells us the Soft Reset is done */ 4740 max_wait_time = SOFT_RESET_TOV; 4741 do { 4742 spin_lock_irqsave(&ha->hardware_lock, flags); 4743 ctrl_status = readw(&ha->reg->ctrl_status); 4744 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4745 4746 if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) { 4747 status = QLA_SUCCESS; 4748 break; 4749 } 4750 4751 msleep(1000); 4752 } while ((--max_wait_time)); 4753 } 4754 4755 return status; 4756 } 4757 4758 /** 4759 * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S. 4760 * @ha: Pointer to host adapter structure. 4761 * @res: returned scsi status 4762 * 4763 * This routine is called just prior to a HARD RESET to return all 4764 * outstanding commands back to the Operating System. 4765 * Caller should make sure that the following locks are released 4766 * before this calling routine: Hardware lock, and io_request_lock. 
4767 **/ 4768 static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res) 4769 { 4770 struct srb *srb; 4771 int i; 4772 unsigned long flags; 4773 4774 spin_lock_irqsave(&ha->hardware_lock, flags); 4775 for (i = 0; i < ha->host->can_queue; i++) { 4776 srb = qla4xxx_del_from_active_array(ha, i); 4777 if (srb != NULL) { 4778 srb->cmd->result = res; 4779 kref_put(&srb->srb_ref, qla4xxx_srb_compl); 4780 } 4781 } 4782 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4783 } 4784 4785 void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha) 4786 { 4787 clear_bit(AF_ONLINE, &ha->flags); 4788 4789 /* Disable the board */ 4790 ql4_printk(KERN_INFO, ha, "Disabling the board\n"); 4791 4792 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); 4793 qla4xxx_mark_all_devices_missing(ha); 4794 clear_bit(AF_INIT_DONE, &ha->flags); 4795 } 4796 4797 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session) 4798 { 4799 struct iscsi_session *sess; 4800 struct ddb_entry *ddb_entry; 4801 4802 sess = cls_session->dd_data; 4803 ddb_entry = sess->dd_data; 4804 ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED; 4805 4806 if (ddb_entry->ddb_type == FLASH_DDB) 4807 iscsi_block_session(ddb_entry->sess); 4808 else 4809 iscsi_session_failure(cls_session->dd_data, 4810 ISCSI_ERR_CONN_FAILED); 4811 } 4812 4813 /** 4814 * qla4xxx_recover_adapter - recovers adapter after a fatal error 4815 * @ha: Pointer to host adapter structure. 4816 **/ 4817 static int qla4xxx_recover_adapter(struct scsi_qla_host *ha) 4818 { 4819 int status = QLA_ERROR; 4820 uint8_t reset_chip = 0; 4821 uint32_t dev_state; 4822 unsigned long wait; 4823 4824 /* Stall incoming I/O until we are done */ 4825 scsi_block_requests(ha->host); 4826 clear_bit(AF_ONLINE, &ha->flags); 4827 clear_bit(AF_LINK_UP, &ha->flags); 4828 4829 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__)); 4830 4831 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 4832 4833 if ((is_qla8032(ha) || is_qla8042(ha)) && 4834 !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) { 4835 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", 4836 __func__); 4837 /* disable pause frame for ISP83xx */ 4838 qla4_83xx_disable_pause(ha); 4839 } 4840 4841 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session); 4842 4843 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) 4844 reset_chip = 1; 4845 4846 /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific) 4847 * do not reset adapter, jump to initialize_adapter */ 4848 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 4849 status = QLA_SUCCESS; 4850 goto recover_ha_init_adapter; 4851 } 4852 4853 /* For the ISP-8xxx adapter, issue a stop_firmware if invoked 4854 * from eh_host_reset or ioctl module */ 4855 if (is_qla80XX(ha) && !reset_chip && 4856 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) { 4857 4858 DEBUG2(ql4_printk(KERN_INFO, ha, 4859 "scsi%ld: %s - Performing stop_firmware...\n", 4860 ha->host_no, __func__)); 4861 status = ha->isp_ops->reset_firmware(ha); 4862 if (status == QLA_SUCCESS) { 4863 ha->isp_ops->disable_intrs(ha); 4864 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 4865 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 4866 } else { 4867 /* If the stop_firmware fails then 4868 * reset the entire chip */ 4869 reset_chip = 1; 4870 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 4871 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4872 } 4873 } 4874 4875 /* Issue full chip reset if recovering from a catastrophic error, 4876 * or if stop_firmware fails for ISP-8xxx. 
4877 * This is the default case for ISP-4xxx */ 4878 if (is_qla40XX(ha) || reset_chip) { 4879 if (is_qla40XX(ha)) 4880 goto chip_reset; 4881 4882 /* Check if 8XXX firmware is alive or not 4883 * We may have arrived here from NEED_RESET 4884 * detection only */ 4885 if (test_bit(AF_FW_RECOVERY, &ha->flags)) 4886 goto chip_reset; 4887 4888 wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ); 4889 while (time_before(jiffies, wait)) { 4890 if (qla4_8xxx_check_fw_alive(ha)) { 4891 qla4xxx_mailbox_premature_completion(ha); 4892 break; 4893 } 4894 4895 set_current_state(TASK_UNINTERRUPTIBLE); 4896 schedule_timeout(HZ); 4897 } 4898 chip_reset: 4899 if (!test_bit(AF_FW_RECOVERY, &ha->flags)) 4900 qla4xxx_cmd_wait(ha); 4901 4902 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 4903 DEBUG2(ql4_printk(KERN_INFO, ha, 4904 "scsi%ld: %s - Performing chip reset..\n", 4905 ha->host_no, __func__)); 4906 status = ha->isp_ops->reset_chip(ha); 4907 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 4908 } 4909 4910 /* Flush any pending ddb changed AENs */ 4911 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 4912 4913 recover_ha_init_adapter: 4914 /* Upon successful firmware/chip reset, re-initialize the adapter */ 4915 if (status == QLA_SUCCESS) { 4916 /* For ISP-4xxx, force function 1 to always initialize 4917 * before function 3 to prevent both funcions from 4918 * stepping on top of the other */ 4919 if (is_qla40XX(ha) && (ha->mac_index == 3)) 4920 ssleep(6); 4921 4922 /* NOTE: AF_ONLINE flag set upon successful completion of 4923 * qla4xxx_initialize_adapter */ 4924 status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); 4925 if (is_qla80XX(ha) && (status == QLA_ERROR)) { 4926 status = qla4_8xxx_check_init_adapter_retry(ha); 4927 if (status == QLA_ERROR) { 4928 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Don't retry recover adapter\n", 4929 ha->host_no, __func__); 4930 qla4xxx_dead_adapter_cleanup(ha); 4931 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 4932 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 4933 clear_bit(DPC_RESET_HA_FW_CONTEXT, 4934 &ha->dpc_flags); 4935 goto exit_recover; 4936 } 4937 } 4938 } 4939 4940 /* Retry failed adapter initialization, if necessary 4941 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific) 4942 * case to prevent ping-pong resets between functions */ 4943 if (!test_bit(AF_ONLINE, &ha->flags) && 4944 !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 4945 /* Adapter initialization failed, see if we can retry 4946 * resetting the ha. 4947 * Since we don't want to block the DPC for too long 4948 * with multiple resets in the same thread, 4949 * utilize DPC to retry */ 4950 if (is_qla80XX(ha)) { 4951 ha->isp_ops->idc_lock(ha); 4952 dev_state = qla4_8xxx_rd_direct(ha, 4953 QLA8XXX_CRB_DEV_STATE); 4954 ha->isp_ops->idc_unlock(ha); 4955 if (dev_state == QLA8XXX_DEV_FAILED) { 4956 ql4_printk(KERN_INFO, ha, "%s: don't retry " 4957 "recover adapter. 
H/W is in Failed " 4958 "state\n", __func__); 4959 qla4xxx_dead_adapter_cleanup(ha); 4960 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 4961 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 4962 clear_bit(DPC_RESET_HA_FW_CONTEXT, 4963 &ha->dpc_flags); 4964 status = QLA_ERROR; 4965 4966 goto exit_recover; 4967 } 4968 } 4969 4970 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) { 4971 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES; 4972 DEBUG2(printk("scsi%ld: recover adapter - retrying " 4973 "(%d) more times\n", ha->host_no, 4974 ha->retry_reset_ha_cnt)); 4975 set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 4976 status = QLA_ERROR; 4977 } else { 4978 if (ha->retry_reset_ha_cnt > 0) { 4979 /* Schedule another Reset HA--DPC will retry */ 4980 ha->retry_reset_ha_cnt--; 4981 DEBUG2(printk("scsi%ld: recover adapter - " 4982 "retry remaining %d\n", 4983 ha->host_no, 4984 ha->retry_reset_ha_cnt)); 4985 status = QLA_ERROR; 4986 } 4987 4988 if (ha->retry_reset_ha_cnt == 0) { 4989 /* Recover adapter retries have been exhausted. 4990 * Adapter DEAD */ 4991 DEBUG2(printk("scsi%ld: recover adapter " 4992 "failed - board disabled\n", 4993 ha->host_no)); 4994 qla4xxx_dead_adapter_cleanup(ha); 4995 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 4996 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 4997 clear_bit(DPC_RESET_HA_FW_CONTEXT, 4998 &ha->dpc_flags); 4999 status = QLA_ERROR; 5000 } 5001 } 5002 } else { 5003 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 5004 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 5005 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 5006 } 5007 5008 exit_recover: 5009 ha->adapter_error_count++; 5010 5011 if (test_bit(AF_ONLINE, &ha->flags)) 5012 ha->isp_ops->enable_intrs(ha); 5013 5014 scsi_unblock_requests(ha->host); 5015 5016 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 5017 DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no, 5018 status == QLA_ERROR ? 
"FAILED" : "SUCCEEDED")); 5019 5020 return status; 5021 } 5022 5023 static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session) 5024 { 5025 struct iscsi_session *sess; 5026 struct ddb_entry *ddb_entry; 5027 struct scsi_qla_host *ha; 5028 5029 sess = cls_session->dd_data; 5030 ddb_entry = sess->dd_data; 5031 ha = ddb_entry->ha; 5032 if (!iscsi_is_session_online(cls_session)) { 5033 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { 5034 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5035 " unblock session\n", ha->host_no, __func__, 5036 ddb_entry->fw_ddb_index); 5037 iscsi_unblock_session(ddb_entry->sess); 5038 } else { 5039 /* Trigger relogin */ 5040 if (ddb_entry->ddb_type == FLASH_DDB) { 5041 if (!(test_bit(DF_RELOGIN, &ddb_entry->flags) || 5042 test_bit(DF_DISABLE_RELOGIN, 5043 &ddb_entry->flags))) 5044 qla4xxx_arm_relogin_timer(ddb_entry); 5045 } else 5046 iscsi_session_failure(cls_session->dd_data, 5047 ISCSI_ERR_CONN_FAILED); 5048 } 5049 } 5050 } 5051 5052 int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session) 5053 { 5054 struct iscsi_session *sess; 5055 struct ddb_entry *ddb_entry; 5056 struct scsi_qla_host *ha; 5057 5058 sess = cls_session->dd_data; 5059 ddb_entry = sess->dd_data; 5060 ha = ddb_entry->ha; 5061 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5062 " unblock session\n", ha->host_no, __func__, 5063 ddb_entry->fw_ddb_index); 5064 5065 iscsi_unblock_session(ddb_entry->sess); 5066 5067 /* Start scan target */ 5068 if (test_bit(AF_ONLINE, &ha->flags)) { 5069 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5070 " start scan\n", ha->host_no, __func__, 5071 ddb_entry->fw_ddb_index); 5072 scsi_queue_work(ha->host, &ddb_entry->sess->scan_work); 5073 } 5074 return QLA_SUCCESS; 5075 } 5076 5077 int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session) 5078 { 5079 struct iscsi_session *sess; 5080 struct ddb_entry *ddb_entry; 5081 struct scsi_qla_host *ha; 5082 int status = QLA_SUCCESS; 5083 5084 sess = cls_session->dd_data; 5085 ddb_entry = sess->dd_data; 5086 ha = ddb_entry->ha; 5087 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5088 " unblock user space session\n", ha->host_no, __func__, 5089 ddb_entry->fw_ddb_index); 5090 5091 if (!iscsi_is_session_online(cls_session)) { 5092 iscsi_conn_start(ddb_entry->conn); 5093 iscsi_conn_login_event(ddb_entry->conn, 5094 ISCSI_CONN_STATE_LOGGED_IN); 5095 } else { 5096 ql4_printk(KERN_INFO, ha, 5097 "scsi%ld: %s: ddb[%d] session [%d] already logged in\n", 5098 ha->host_no, __func__, ddb_entry->fw_ddb_index, 5099 cls_session->sid); 5100 status = QLA_ERROR; 5101 } 5102 5103 return status; 5104 } 5105 5106 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha) 5107 { 5108 iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices); 5109 } 5110 5111 static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) 5112 { 5113 uint16_t relogin_timer; 5114 struct iscsi_session *sess; 5115 struct ddb_entry *ddb_entry; 5116 struct scsi_qla_host *ha; 5117 5118 sess = cls_sess->dd_data; 5119 ddb_entry = sess->dd_data; 5120 ha = ddb_entry->ha; 5121 5122 relogin_timer = max(ddb_entry->default_relogin_timeout, 5123 (uint16_t)RELOGIN_TOV); 5124 atomic_set(&ddb_entry->relogin_timer, relogin_timer); 5125 5126 DEBUG2(ql4_printk(KERN_INFO, ha, 5127 "scsi%ld: Relogin index [%d]. 
TOV=%d\n", ha->host_no, 5128 ddb_entry->fw_ddb_index, relogin_timer)); 5129 5130 qla4xxx_login_flash_ddb(cls_sess); 5131 } 5132 5133 static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess) 5134 { 5135 struct iscsi_session *sess; 5136 struct ddb_entry *ddb_entry; 5137 struct scsi_qla_host *ha; 5138 5139 sess = cls_sess->dd_data; 5140 ddb_entry = sess->dd_data; 5141 ha = ddb_entry->ha; 5142 5143 if (!(ddb_entry->ddb_type == FLASH_DDB)) 5144 return; 5145 5146 if (test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags)) 5147 return; 5148 5149 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) && 5150 !iscsi_is_session_online(cls_sess)) { 5151 DEBUG2(ql4_printk(KERN_INFO, ha, 5152 "relogin issued\n")); 5153 qla4xxx_relogin_flash_ddb(cls_sess); 5154 } 5155 } 5156 5157 void qla4xxx_wake_dpc(struct scsi_qla_host *ha) 5158 { 5159 if (ha->dpc_thread) 5160 queue_work(ha->dpc_thread, &ha->dpc_work); 5161 } 5162 5163 static struct qla4_work_evt * 5164 qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size, 5165 enum qla4_work_type type) 5166 { 5167 struct qla4_work_evt *e; 5168 uint32_t size = sizeof(struct qla4_work_evt) + data_size; 5169 5170 e = kzalloc(size, GFP_ATOMIC); 5171 if (!e) 5172 return NULL; 5173 5174 INIT_LIST_HEAD(&e->list); 5175 e->type = type; 5176 return e; 5177 } 5178 5179 static void qla4xxx_post_work(struct scsi_qla_host *ha, 5180 struct qla4_work_evt *e) 5181 { 5182 unsigned long flags; 5183 5184 spin_lock_irqsave(&ha->work_lock, flags); 5185 list_add_tail(&e->list, &ha->work_list); 5186 spin_unlock_irqrestore(&ha->work_lock, flags); 5187 qla4xxx_wake_dpc(ha); 5188 } 5189 5190 int qla4xxx_post_aen_work(struct scsi_qla_host *ha, 5191 enum iscsi_host_event_code aen_code, 5192 uint32_t data_size, uint8_t *data) 5193 { 5194 struct qla4_work_evt *e; 5195 5196 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN); 5197 if (!e) 5198 return QLA_ERROR; 5199 5200 e->u.aen.code = aen_code; 5201 e->u.aen.data_size = data_size; 5202 memcpy(e->u.aen.data, data, data_size); 5203 5204 qla4xxx_post_work(ha, e); 5205 5206 return QLA_SUCCESS; 5207 } 5208 5209 int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha, 5210 uint32_t status, uint32_t pid, 5211 uint32_t data_size, uint8_t *data) 5212 { 5213 struct qla4_work_evt *e; 5214 5215 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS); 5216 if (!e) 5217 return QLA_ERROR; 5218 5219 e->u.ping.status = status; 5220 e->u.ping.pid = pid; 5221 e->u.ping.data_size = data_size; 5222 memcpy(e->u.ping.data, data, data_size); 5223 5224 qla4xxx_post_work(ha, e); 5225 5226 return QLA_SUCCESS; 5227 } 5228 5229 static void qla4xxx_do_work(struct scsi_qla_host *ha) 5230 { 5231 struct qla4_work_evt *e, *tmp; 5232 unsigned long flags; 5233 LIST_HEAD(work); 5234 5235 spin_lock_irqsave(&ha->work_lock, flags); 5236 list_splice_init(&ha->work_list, &work); 5237 spin_unlock_irqrestore(&ha->work_lock, flags); 5238 5239 list_for_each_entry_safe(e, tmp, &work, list) { 5240 list_del_init(&e->list); 5241 5242 switch (e->type) { 5243 case QLA4_EVENT_AEN: 5244 iscsi_post_host_event(ha->host_no, 5245 &qla4xxx_iscsi_transport, 5246 e->u.aen.code, 5247 e->u.aen.data_size, 5248 e->u.aen.data); 5249 break; 5250 case QLA4_EVENT_PING_STATUS: 5251 iscsi_ping_comp_event(ha->host_no, 5252 &qla4xxx_iscsi_transport, 5253 e->u.ping.status, 5254 e->u.ping.pid, 5255 e->u.ping.data_size, 5256 e->u.ping.data); 5257 break; 5258 default: 5259 ql4_printk(KERN_WARNING, ha, "event type: 0x%x not " 5260 "supported", e->type); 5261 } 5262 kfree(e); 5263 } 5264 } 5265 5266 
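/*
 * Note on deferred host events (summary of the helpers above): callers,
 * which may be in interrupt or atomic context, build a struct qla4_work_evt
 * with qla4xxx_alloc_work() (GFP_ATOMIC), fill in the type-specific union
 * member, and hand the event to qla4xxx_post_work(), which links it onto
 * ha->work_list under ha->work_lock and wakes the DPC.  qla4xxx_do_work()
 * then drains the list from DPC (process) context and dispatches on e->type.
 *
 * Illustrative sketch only, mirroring qla4xxx_post_aen_work() above; the
 * aen_code/data/data_size values are whatever the caller needs to report:
 *
 *	struct qla4_work_evt *e;
 *
 *	e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
 *	if (!e)
 *		return QLA_ERROR;
 *	e->u.aen.code = aen_code;
 *	e->u.aen.data_size = data_size;
 *	memcpy(e->u.aen.data, data, data_size);
 *	qla4xxx_post_work(ha, e);
 */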
/** 5267 * qla4xxx_do_dpc - dpc routine 5268 * @data: in our case pointer to adapter structure 5269 * 5270 * This routine is a task that is schedule by the interrupt handler 5271 * to perform the background processing for interrupts. We put it 5272 * on a task queue that is consumed whenever the scheduler runs; that's 5273 * so you can do anything (i.e. put the process to sleep etc). In fact, 5274 * the mid-level tries to sleep when it reaches the driver threshold 5275 * "host->can_queue". This can cause a panic if we were in our interrupt code. 5276 **/ 5277 static void qla4xxx_do_dpc(struct work_struct *work) 5278 { 5279 struct scsi_qla_host *ha = 5280 container_of(work, struct scsi_qla_host, dpc_work); 5281 int status = QLA_ERROR; 5282 5283 DEBUG2(ql4_printk(KERN_INFO, ha, 5284 "scsi%ld: %s: DPC handler waking up. flags = 0x%08lx, dpc_flags = 0x%08lx\n", 5285 ha->host_no, __func__, ha->flags, ha->dpc_flags)); 5286 5287 /* Initialization not yet finished. Don't do anything yet. */ 5288 if (!test_bit(AF_INIT_DONE, &ha->flags)) 5289 return; 5290 5291 if (test_bit(AF_EEH_BUSY, &ha->flags)) { 5292 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n", 5293 ha->host_no, __func__, ha->flags)); 5294 return; 5295 } 5296 5297 /* post events to application */ 5298 qla4xxx_do_work(ha); 5299 5300 if (is_qla80XX(ha)) { 5301 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) { 5302 if (is_qla8032(ha) || is_qla8042(ha)) { 5303 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", 5304 __func__); 5305 /* disable pause frame for ISP83xx */ 5306 qla4_83xx_disable_pause(ha); 5307 } 5308 5309 ha->isp_ops->idc_lock(ha); 5310 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 5311 QLA8XXX_DEV_FAILED); 5312 ha->isp_ops->idc_unlock(ha); 5313 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n"); 5314 qla4_8xxx_device_state_handler(ha); 5315 } 5316 5317 if (test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) { 5318 if (is_qla8042(ha)) { 5319 if (ha->idc_info.info2 & 5320 ENABLE_INTERNAL_LOOPBACK) { 5321 ql4_printk(KERN_INFO, ha, "%s: Disabling ACB\n", 5322 __func__); 5323 status = qla4_84xx_config_acb(ha, 5324 ACB_CONFIG_DISABLE); 5325 if (status != QLA_SUCCESS) { 5326 ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n", 5327 __func__); 5328 } 5329 } 5330 } 5331 qla4_83xx_post_idc_ack(ha); 5332 clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags); 5333 } 5334 5335 if (is_qla8042(ha) && 5336 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) { 5337 ql4_printk(KERN_INFO, ha, "%s: Restoring ACB\n", 5338 __func__); 5339 if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) != 5340 QLA_SUCCESS) { 5341 ql4_printk(KERN_INFO, ha, "%s: ACB config failed ", 5342 __func__); 5343 } 5344 clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags); 5345 } 5346 5347 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { 5348 qla4_8xxx_need_qsnt_handler(ha); 5349 } 5350 } 5351 5352 if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) && 5353 (test_bit(DPC_RESET_HA, &ha->dpc_flags) || 5354 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 5355 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) { 5356 if ((is_qla8022(ha) && ql4xdontresethba) || 5357 ((is_qla8032(ha) || is_qla8042(ha)) && 5358 qla4_83xx_idc_dontreset(ha))) { 5359 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", 5360 ha->host_no, __func__)); 5361 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 5362 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); 5363 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 5364 goto dpc_post_reset_ha; 5365 } 5366 if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) || 5367 
test_bit(DPC_RESET_HA, &ha->dpc_flags)) 5368 qla4xxx_recover_adapter(ha); 5369 5370 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 5371 uint8_t wait_time = RESET_INTR_TOV; 5372 5373 while ((readw(&ha->reg->ctrl_status) & 5374 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) { 5375 if (--wait_time == 0) 5376 break; 5377 msleep(1000); 5378 } 5379 if (wait_time == 0) 5380 DEBUG2(printk("scsi%ld: %s: SR|FSR " 5381 "bit not cleared-- resetting\n", 5382 ha->host_no, __func__)); 5383 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 5384 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) { 5385 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 5386 status = qla4xxx_recover_adapter(ha); 5387 } 5388 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); 5389 if (status == QLA_SUCCESS) 5390 ha->isp_ops->enable_intrs(ha); 5391 } 5392 } 5393 5394 dpc_post_reset_ha: 5395 /* ---- process AEN? --- */ 5396 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) 5397 qla4xxx_process_aen(ha, PROCESS_ALL_AENS); 5398 5399 /* ---- Get DHCP IP Address? --- */ 5400 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags)) 5401 qla4xxx_get_dhcp_ip_address(ha); 5402 5403 /* ---- relogin device? --- */ 5404 if (adapter_up(ha) && 5405 test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) { 5406 iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin); 5407 } 5408 5409 /* ---- link change? --- */ 5410 if (!test_bit(AF_LOOPBACK, &ha->flags) && 5411 test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) { 5412 if (!test_bit(AF_LINK_UP, &ha->flags)) { 5413 /* ---- link down? --- */ 5414 qla4xxx_mark_all_devices_missing(ha); 5415 } else { 5416 /* ---- link up? --- * 5417 * F/W will auto login to all devices ONLY ONCE after 5418 * link up during driver initialization and runtime 5419 * fatal error recovery. Therefore, the driver must 5420 * manually relogin to devices when recovering from 5421 * connection failures, logouts, expired KATO, etc. */ 5422 if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) { 5423 qla4xxx_build_ddb_list(ha, ha->is_reset); 5424 iscsi_host_for_each_session(ha->host, 5425 qla4xxx_login_flash_ddb); 5426 } else 5427 qla4xxx_relogin_all_devices(ha); 5428 } 5429 } 5430 if (test_and_clear_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags)) { 5431 if (qla4xxx_sysfs_ddb_export(ha)) 5432 ql4_printk(KERN_ERR, ha, "%s: Error exporting ddb to sysfs\n", 5433 __func__); 5434 } 5435 } 5436 5437 /** 5438 * qla4xxx_free_adapter - release the adapter 5439 * @ha: pointer to adapter structure 5440 **/ 5441 static void qla4xxx_free_adapter(struct scsi_qla_host *ha) 5442 { 5443 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); 5444 5445 /* Turn-off interrupts on the card. 
*/ 5446 ha->isp_ops->disable_intrs(ha); 5447 5448 if (is_qla40XX(ha)) { 5449 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR), 5450 &ha->reg->ctrl_status); 5451 readl(&ha->reg->ctrl_status); 5452 } else if (is_qla8022(ha)) { 5453 writel(0, &ha->qla4_82xx_reg->host_int); 5454 readl(&ha->qla4_82xx_reg->host_int); 5455 } else if (is_qla8032(ha) || is_qla8042(ha)) { 5456 writel(0, &ha->qla4_83xx_reg->risc_intr); 5457 readl(&ha->qla4_83xx_reg->risc_intr); 5458 } 5459 5460 /* Remove timer thread, if present */ 5461 if (ha->timer_active) 5462 qla4xxx_stop_timer(ha); 5463 5464 /* Kill the kernel thread for this host */ 5465 if (ha->dpc_thread) 5466 destroy_workqueue(ha->dpc_thread); 5467 5468 /* Kill the kernel thread for this host */ 5469 if (ha->task_wq) 5470 destroy_workqueue(ha->task_wq); 5471 5472 /* Put firmware in known state */ 5473 ha->isp_ops->reset_firmware(ha); 5474 5475 if (is_qla80XX(ha)) { 5476 ha->isp_ops->idc_lock(ha); 5477 qla4_8xxx_clear_drv_active(ha); 5478 ha->isp_ops->idc_unlock(ha); 5479 } 5480 5481 /* Detach interrupts */ 5482 qla4xxx_free_irqs(ha); 5483 5484 /* free extra memory */ 5485 qla4xxx_mem_free(ha); 5486 } 5487 5488 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha) 5489 { 5490 int status = 0; 5491 unsigned long mem_base, mem_len, db_base, db_len; 5492 struct pci_dev *pdev = ha->pdev; 5493 5494 status = pci_request_regions(pdev, DRIVER_NAME); 5495 if (status) { 5496 printk(KERN_WARNING 5497 "scsi(%ld) Failed to reserve PIO regions (%s) " 5498 "status=%d\n", ha->host_no, pci_name(pdev), status); 5499 goto iospace_error_exit; 5500 } 5501 5502 DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n", 5503 __func__, pdev->revision)); 5504 ha->revision_id = pdev->revision; 5505 5506 /* remap phys address */ 5507 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ 5508 mem_len = pci_resource_len(pdev, 0); 5509 DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n", 5510 __func__, mem_base, mem_len)); 5511 5512 /* mapping of pcibase pointer */ 5513 ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len); 5514 if (!ha->nx_pcibase) { 5515 printk(KERN_ERR 5516 "cannot remap MMIO (%s), aborting\n", pci_name(pdev)); 5517 pci_release_regions(ha->pdev); 5518 goto iospace_error_exit; 5519 } 5520 5521 /* Mapping of IO base pointer, door bell read and write pointer */ 5522 5523 /* mapping of IO base pointer */ 5524 if (is_qla8022(ha)) { 5525 ha->qla4_82xx_reg = (struct device_reg_82xx __iomem *) 5526 ((uint8_t *)ha->nx_pcibase + 0xbc000 + 5527 (ha->pdev->devfn << 11)); 5528 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 : 5529 QLA82XX_CAM_RAM_DB2); 5530 } else if (is_qla8032(ha) || is_qla8042(ha)) { 5531 ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *) 5532 ((uint8_t *)ha->nx_pcibase); 5533 } 5534 5535 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */ 5536 db_len = pci_resource_len(pdev, 4); 5537 5538 return 0; 5539 iospace_error_exit: 5540 return -ENOMEM; 5541 } 5542 5543 /*** 5544 * qla4xxx_iospace_config - maps registers 5545 * @ha: pointer to adapter structure 5546 * 5547 * This routines maps HBA's registers from the pci address space 5548 * into the kernel virtual address space for memory mapped i/o. 
5549 **/ 5550 int qla4xxx_iospace_config(struct scsi_qla_host *ha) 5551 { 5552 unsigned long pio, pio_len, pio_flags; 5553 unsigned long mmio, mmio_len, mmio_flags; 5554 5555 pio = pci_resource_start(ha->pdev, 0); 5556 pio_len = pci_resource_len(ha->pdev, 0); 5557 pio_flags = pci_resource_flags(ha->pdev, 0); 5558 if (pio_flags & IORESOURCE_IO) { 5559 if (pio_len < MIN_IOBASE_LEN) { 5560 ql4_printk(KERN_WARNING, ha, 5561 "Invalid PCI I/O region size\n"); 5562 pio = 0; 5563 } 5564 } else { 5565 ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n"); 5566 pio = 0; 5567 } 5568 5569 /* Use MMIO operations for all accesses. */ 5570 mmio = pci_resource_start(ha->pdev, 1); 5571 mmio_len = pci_resource_len(ha->pdev, 1); 5572 mmio_flags = pci_resource_flags(ha->pdev, 1); 5573 5574 if (!(mmio_flags & IORESOURCE_MEM)) { 5575 ql4_printk(KERN_ERR, ha, 5576 "region #0 not an MMIO resource, aborting\n"); 5577 5578 goto iospace_error_exit; 5579 } 5580 5581 if (mmio_len < MIN_IOBASE_LEN) { 5582 ql4_printk(KERN_ERR, ha, 5583 "Invalid PCI mem region size, aborting\n"); 5584 goto iospace_error_exit; 5585 } 5586 5587 if (pci_request_regions(ha->pdev, DRIVER_NAME)) { 5588 ql4_printk(KERN_WARNING, ha, 5589 "Failed to reserve PIO/MMIO regions\n"); 5590 5591 goto iospace_error_exit; 5592 } 5593 5594 ha->pio_address = pio; 5595 ha->pio_length = pio_len; 5596 ha->reg = ioremap(mmio, MIN_IOBASE_LEN); 5597 if (!ha->reg) { 5598 ql4_printk(KERN_ERR, ha, 5599 "cannot remap MMIO, aborting\n"); 5600 5601 goto iospace_error_exit; 5602 } 5603 5604 return 0; 5605 5606 iospace_error_exit: 5607 return -ENOMEM; 5608 } 5609 5610 static struct isp_operations qla4xxx_isp_ops = { 5611 .iospace_config = qla4xxx_iospace_config, 5612 .pci_config = qla4xxx_pci_config, 5613 .disable_intrs = qla4xxx_disable_intrs, 5614 .enable_intrs = qla4xxx_enable_intrs, 5615 .start_firmware = qla4xxx_start_firmware, 5616 .intr_handler = qla4xxx_intr_handler, 5617 .interrupt_service_routine = qla4xxx_interrupt_service_routine, 5618 .reset_chip = qla4xxx_soft_reset, 5619 .reset_firmware = qla4xxx_hw_reset, 5620 .queue_iocb = qla4xxx_queue_iocb, 5621 .complete_iocb = qla4xxx_complete_iocb, 5622 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out, 5623 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in, 5624 .get_sys_info = qla4xxx_get_sys_info, 5625 .queue_mailbox_command = qla4xxx_queue_mbox_cmd, 5626 .process_mailbox_interrupt = qla4xxx_process_mbox_intr, 5627 }; 5628 5629 static struct isp_operations qla4_82xx_isp_ops = { 5630 .iospace_config = qla4_8xxx_iospace_config, 5631 .pci_config = qla4_8xxx_pci_config, 5632 .disable_intrs = qla4_82xx_disable_intrs, 5633 .enable_intrs = qla4_82xx_enable_intrs, 5634 .start_firmware = qla4_8xxx_load_risc, 5635 .restart_firmware = qla4_82xx_try_start_fw, 5636 .intr_handler = qla4_82xx_intr_handler, 5637 .interrupt_service_routine = qla4_82xx_interrupt_service_routine, 5638 .need_reset = qla4_8xxx_need_reset, 5639 .reset_chip = qla4_82xx_isp_reset, 5640 .reset_firmware = qla4_8xxx_stop_firmware, 5641 .queue_iocb = qla4_82xx_queue_iocb, 5642 .complete_iocb = qla4_82xx_complete_iocb, 5643 .rd_shdw_req_q_out = qla4_82xx_rd_shdw_req_q_out, 5644 .rd_shdw_rsp_q_in = qla4_82xx_rd_shdw_rsp_q_in, 5645 .get_sys_info = qla4_8xxx_get_sys_info, 5646 .rd_reg_direct = qla4_82xx_rd_32, 5647 .wr_reg_direct = qla4_82xx_wr_32, 5648 .rd_reg_indirect = qla4_82xx_md_rd_32, 5649 .wr_reg_indirect = qla4_82xx_md_wr_32, 5650 .idc_lock = qla4_82xx_idc_lock, 5651 .idc_unlock = qla4_82xx_idc_unlock, 5652 .rom_lock_recovery = 
qla4_82xx_rom_lock_recovery, 5653 .queue_mailbox_command = qla4_82xx_queue_mbox_cmd, 5654 .process_mailbox_interrupt = qla4_82xx_process_mbox_intr, 5655 }; 5656 5657 static struct isp_operations qla4_83xx_isp_ops = { 5658 .iospace_config = qla4_8xxx_iospace_config, 5659 .pci_config = qla4_8xxx_pci_config, 5660 .disable_intrs = qla4_83xx_disable_intrs, 5661 .enable_intrs = qla4_83xx_enable_intrs, 5662 .start_firmware = qla4_8xxx_load_risc, 5663 .restart_firmware = qla4_83xx_start_firmware, 5664 .intr_handler = qla4_83xx_intr_handler, 5665 .interrupt_service_routine = qla4_83xx_interrupt_service_routine, 5666 .need_reset = qla4_8xxx_need_reset, 5667 .reset_chip = qla4_83xx_isp_reset, 5668 .reset_firmware = qla4_8xxx_stop_firmware, 5669 .queue_iocb = qla4_83xx_queue_iocb, 5670 .complete_iocb = qla4_83xx_complete_iocb, 5671 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out, 5672 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in, 5673 .get_sys_info = qla4_8xxx_get_sys_info, 5674 .rd_reg_direct = qla4_83xx_rd_reg, 5675 .wr_reg_direct = qla4_83xx_wr_reg, 5676 .rd_reg_indirect = qla4_83xx_rd_reg_indirect, 5677 .wr_reg_indirect = qla4_83xx_wr_reg_indirect, 5678 .idc_lock = qla4_83xx_drv_lock, 5679 .idc_unlock = qla4_83xx_drv_unlock, 5680 .rom_lock_recovery = qla4_83xx_rom_lock_recovery, 5681 .queue_mailbox_command = qla4_83xx_queue_mbox_cmd, 5682 .process_mailbox_interrupt = qla4_83xx_process_mbox_intr, 5683 }; 5684 5685 uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha) 5686 { 5687 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out); 5688 } 5689 5690 uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha) 5691 { 5692 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out)); 5693 } 5694 5695 uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) 5696 { 5697 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in); 5698 } 5699 5700 uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) 5701 { 5702 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in)); 5703 } 5704 5705 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf) 5706 { 5707 struct scsi_qla_host *ha = data; 5708 char *str = buf; 5709 int rc; 5710 5711 switch (type) { 5712 case ISCSI_BOOT_ETH_FLAGS: 5713 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT); 5714 break; 5715 case ISCSI_BOOT_ETH_INDEX: 5716 rc = sprintf(str, "0\n"); 5717 break; 5718 case ISCSI_BOOT_ETH_MAC: 5719 rc = sysfs_format_mac(str, ha->my_mac, 5720 MAC_ADDR_LEN); 5721 break; 5722 default: 5723 rc = -ENOSYS; 5724 break; 5725 } 5726 return rc; 5727 } 5728 5729 static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type) 5730 { 5731 int rc; 5732 5733 switch (type) { 5734 case ISCSI_BOOT_ETH_FLAGS: 5735 case ISCSI_BOOT_ETH_MAC: 5736 case ISCSI_BOOT_ETH_INDEX: 5737 rc = S_IRUGO; 5738 break; 5739 default: 5740 rc = 0; 5741 break; 5742 } 5743 return rc; 5744 } 5745 5746 static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf) 5747 { 5748 struct scsi_qla_host *ha = data; 5749 char *str = buf; 5750 int rc; 5751 5752 switch (type) { 5753 case ISCSI_BOOT_INI_INITIATOR_NAME: 5754 rc = sprintf(str, "%s\n", ha->name_string); 5755 break; 5756 default: 5757 rc = -ENOSYS; 5758 break; 5759 } 5760 return rc; 5761 } 5762 5763 static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type) 5764 { 5765 int rc; 5766 5767 switch (type) { 5768 case ISCSI_BOOT_INI_INITIATOR_NAME: 5769 rc = S_IRUGO; 5770 break; 5771 default: 5772 rc = 0; 5773 break; 5774 } 5775 return rc; 5776 } 5777 5778 static ssize_t 
5779 qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type, 5780 char *buf) 5781 { 5782 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0]; 5783 char *str = buf; 5784 int rc; 5785 5786 switch (type) { 5787 case ISCSI_BOOT_TGT_NAME: 5788 rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name); 5789 break; 5790 case ISCSI_BOOT_TGT_IP_ADDR: 5791 if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1) 5792 rc = sprintf(buf, "%pI4\n", 5793 &boot_conn->dest_ipaddr.ip_address); 5794 else 5795 rc = sprintf(str, "%pI6\n", 5796 &boot_conn->dest_ipaddr.ip_address); 5797 break; 5798 case ISCSI_BOOT_TGT_PORT: 5799 rc = sprintf(str, "%d\n", boot_conn->dest_port); 5800 break; 5801 case ISCSI_BOOT_TGT_CHAP_NAME: 5802 rc = sprintf(str, "%.*s\n", 5803 boot_conn->chap.target_chap_name_length, 5804 (char *)&boot_conn->chap.target_chap_name); 5805 break; 5806 case ISCSI_BOOT_TGT_CHAP_SECRET: 5807 rc = sprintf(str, "%.*s\n", 5808 boot_conn->chap.target_secret_length, 5809 (char *)&boot_conn->chap.target_secret); 5810 break; 5811 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 5812 rc = sprintf(str, "%.*s\n", 5813 boot_conn->chap.intr_chap_name_length, 5814 (char *)&boot_conn->chap.intr_chap_name); 5815 break; 5816 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 5817 rc = sprintf(str, "%.*s\n", 5818 boot_conn->chap.intr_secret_length, 5819 (char *)&boot_conn->chap.intr_secret); 5820 break; 5821 case ISCSI_BOOT_TGT_FLAGS: 5822 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT); 5823 break; 5824 case ISCSI_BOOT_TGT_NIC_ASSOC: 5825 rc = sprintf(str, "0\n"); 5826 break; 5827 default: 5828 rc = -ENOSYS; 5829 break; 5830 } 5831 return rc; 5832 } 5833 5834 static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf) 5835 { 5836 struct scsi_qla_host *ha = data; 5837 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess); 5838 5839 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf); 5840 } 5841 5842 static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf) 5843 { 5844 struct scsi_qla_host *ha = data; 5845 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess); 5846 5847 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf); 5848 } 5849 5850 static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type) 5851 { 5852 int rc; 5853 5854 switch (type) { 5855 case ISCSI_BOOT_TGT_NAME: 5856 case ISCSI_BOOT_TGT_IP_ADDR: 5857 case ISCSI_BOOT_TGT_PORT: 5858 case ISCSI_BOOT_TGT_CHAP_NAME: 5859 case ISCSI_BOOT_TGT_CHAP_SECRET: 5860 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 5861 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 5862 case ISCSI_BOOT_TGT_NIC_ASSOC: 5863 case ISCSI_BOOT_TGT_FLAGS: 5864 rc = S_IRUGO; 5865 break; 5866 default: 5867 rc = 0; 5868 break; 5869 } 5870 return rc; 5871 } 5872 5873 static void qla4xxx_boot_release(void *data) 5874 { 5875 struct scsi_qla_host *ha = data; 5876 5877 scsi_host_put(ha->host); 5878 } 5879 5880 static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[]) 5881 { 5882 dma_addr_t buf_dma; 5883 uint32_t addr, pri_addr, sec_addr; 5884 uint32_t offset; 5885 uint16_t func_num; 5886 uint8_t val; 5887 uint8_t *buf = NULL; 5888 size_t size = 13 * sizeof(uint8_t); 5889 int ret = QLA_SUCCESS; 5890 5891 func_num = PCI_FUNC(ha->pdev->devfn); 5892 5893 ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n", 5894 __func__, ha->pdev->device, func_num); 5895 5896 if (is_qla40XX(ha)) { 5897 if (func_num == 1) { 5898 addr = NVRAM_PORT0_BOOT_MODE; 5899 pri_addr = NVRAM_PORT0_BOOT_PRI_TGT; 5900 sec_addr = 
NVRAM_PORT0_BOOT_SEC_TGT; 5901 } else if (func_num == 3) { 5902 addr = NVRAM_PORT1_BOOT_MODE; 5903 pri_addr = NVRAM_PORT1_BOOT_PRI_TGT; 5904 sec_addr = NVRAM_PORT1_BOOT_SEC_TGT; 5905 } else { 5906 ret = QLA_ERROR; 5907 goto exit_boot_info; 5908 } 5909 5910 /* Check Boot Mode */ 5911 val = rd_nvram_byte(ha, addr); 5912 if (!(val & 0x07)) { 5913 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot " 5914 "options : 0x%x\n", __func__, val)); 5915 ret = QLA_ERROR; 5916 goto exit_boot_info; 5917 } 5918 5919 /* get primary valid target index */ 5920 val = rd_nvram_byte(ha, pri_addr); 5921 if (val & BIT_7) 5922 ddb_index[0] = (val & 0x7f); 5923 5924 /* get secondary valid target index */ 5925 val = rd_nvram_byte(ha, sec_addr); 5926 if (val & BIT_7) 5927 ddb_index[1] = (val & 0x7f); 5928 5929 } else if (is_qla80XX(ha)) { 5930 buf = dma_alloc_coherent(&ha->pdev->dev, size, 5931 &buf_dma, GFP_KERNEL); 5932 if (!buf) { 5933 DEBUG2(ql4_printk(KERN_ERR, ha, 5934 "%s: Unable to allocate dma buffer\n", 5935 __func__)); 5936 ret = QLA_ERROR; 5937 goto exit_boot_info; 5938 } 5939 5940 if (ha->port_num == 0) 5941 offset = BOOT_PARAM_OFFSET_PORT0; 5942 else if (ha->port_num == 1) 5943 offset = BOOT_PARAM_OFFSET_PORT1; 5944 else { 5945 ret = QLA_ERROR; 5946 goto exit_boot_info_free; 5947 } 5948 addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) + 5949 offset; 5950 if (qla4xxx_get_flash(ha, buf_dma, addr, 5951 13 * sizeof(uint8_t)) != QLA_SUCCESS) { 5952 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash" 5953 " failed\n", ha->host_no, __func__)); 5954 ret = QLA_ERROR; 5955 goto exit_boot_info_free; 5956 } 5957 /* Check Boot Mode */ 5958 if (!(buf[1] & 0x07)) { 5959 DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options" 5960 " : 0x%x\n", buf[1])); 5961 ret = QLA_ERROR; 5962 goto exit_boot_info_free; 5963 } 5964 5965 /* get primary valid target index */ 5966 if (buf[2] & BIT_7) 5967 ddb_index[0] = buf[2] & 0x7f; 5968 5969 /* get secondary valid target index */ 5970 if (buf[11] & BIT_7) 5971 ddb_index[1] = buf[11] & 0x7f; 5972 } else { 5973 ret = QLA_ERROR; 5974 goto exit_boot_info; 5975 } 5976 5977 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary" 5978 " target ID %d\n", __func__, ddb_index[0], 5979 ddb_index[1])); 5980 5981 exit_boot_info_free: 5982 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma); 5983 exit_boot_info: 5984 ha->pri_ddb_idx = ddb_index[0]; 5985 ha->sec_ddb_idx = ddb_index[1]; 5986 return ret; 5987 } 5988 5989 /** 5990 * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password 5991 * @ha: pointer to adapter structure 5992 * @username: CHAP username to be returned 5993 * @password: CHAP password to be returned 5994 * 5995 * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP 5996 * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/. 5997 * So from the CHAP cache find the first BIDI CHAP entry and set it 5998 * to the boot record in sysfs. 
5999 **/ 6000 static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username, 6001 char *password) 6002 { 6003 int i, ret = -EINVAL; 6004 int max_chap_entries = 0; 6005 struct ql4_chap_table *chap_table; 6006 6007 if (is_qla80XX(ha)) 6008 max_chap_entries = (ha->hw.flt_chap_size / 2) / 6009 sizeof(struct ql4_chap_table); 6010 else 6011 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 6012 6013 if (!ha->chap_list) { 6014 ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n"); 6015 return ret; 6016 } 6017 6018 mutex_lock(&ha->chap_sem); 6019 for (i = 0; i < max_chap_entries; i++) { 6020 chap_table = (struct ql4_chap_table *)ha->chap_list + i; 6021 if (chap_table->cookie != 6022 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) { 6023 continue; 6024 } 6025 6026 if (chap_table->flags & BIT_7) /* local */ 6027 continue; 6028 6029 if (!(chap_table->flags & BIT_6)) /* Not BIDI */ 6030 continue; 6031 6032 strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); 6033 strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); 6034 ret = 0; 6035 break; 6036 } 6037 mutex_unlock(&ha->chap_sem); 6038 6039 return ret; 6040 } 6041 6042 6043 static int qla4xxx_get_boot_target(struct scsi_qla_host *ha, 6044 struct ql4_boot_session_info *boot_sess, 6045 uint16_t ddb_index) 6046 { 6047 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0]; 6048 struct dev_db_entry *fw_ddb_entry; 6049 dma_addr_t fw_ddb_entry_dma; 6050 uint16_t idx; 6051 uint16_t options; 6052 int ret = QLA_SUCCESS; 6053 6054 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 6055 &fw_ddb_entry_dma, GFP_KERNEL); 6056 if (!fw_ddb_entry) { 6057 DEBUG2(ql4_printk(KERN_ERR, ha, 6058 "%s: Unable to allocate dma buffer.\n", 6059 __func__)); 6060 ret = QLA_ERROR; 6061 return ret; 6062 } 6063 6064 if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry, 6065 fw_ddb_entry_dma, ddb_index)) { 6066 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at " 6067 "index [%d]\n", __func__, ddb_index)); 6068 ret = QLA_ERROR; 6069 goto exit_boot_target; 6070 } 6071 6072 /* Update target name and IP from DDB */ 6073 memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name, 6074 min(sizeof(boot_sess->target_name), 6075 sizeof(fw_ddb_entry->iscsi_name))); 6076 6077 options = le16_to_cpu(fw_ddb_entry->options); 6078 if (options & DDB_OPT_IPV6_DEVICE) { 6079 memcpy(&boot_conn->dest_ipaddr.ip_address, 6080 &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN); 6081 } else { 6082 boot_conn->dest_ipaddr.ip_type = 0x1; 6083 memcpy(&boot_conn->dest_ipaddr.ip_address, 6084 &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN); 6085 } 6086 6087 boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port); 6088 6089 /* update chap information */ 6090 idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 6091 6092 if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) { 6093 6094 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n")); 6095 6096 ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap. 
6097 target_chap_name, 6098 (char *)&boot_conn->chap.target_secret, 6099 idx); 6100 if (ret) { 6101 ql4_printk(KERN_ERR, ha, "Failed to set chap\n"); 6102 ret = QLA_ERROR; 6103 goto exit_boot_target; 6104 } 6105 6106 boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN; 6107 boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN; 6108 } 6109 6110 if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) { 6111 6112 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n")); 6113 6114 ret = qla4xxx_get_bidi_chap(ha, 6115 (char *)&boot_conn->chap.intr_chap_name, 6116 (char *)&boot_conn->chap.intr_secret); 6117 6118 if (ret) { 6119 ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n"); 6120 ret = QLA_ERROR; 6121 goto exit_boot_target; 6122 } 6123 6124 boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN; 6125 boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN; 6126 } 6127 6128 exit_boot_target: 6129 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 6130 fw_ddb_entry, fw_ddb_entry_dma); 6131 return ret; 6132 } 6133 6134 static int qla4xxx_get_boot_info(struct scsi_qla_host *ha) 6135 { 6136 uint16_t ddb_index[2]; 6137 int ret = QLA_ERROR; 6138 int rval; 6139 6140 memset(ddb_index, 0, sizeof(ddb_index)); 6141 ddb_index[0] = 0xffff; 6142 ddb_index[1] = 0xffff; 6143 ret = get_fw_boot_info(ha, ddb_index); 6144 if (ret != QLA_SUCCESS) { 6145 DEBUG2(ql4_printk(KERN_INFO, ha, 6146 "%s: No boot target configured.\n", __func__)); 6147 return ret; 6148 } 6149 6150 if (ql4xdisablesysfsboot) 6151 return QLA_SUCCESS; 6152 6153 if (ddb_index[0] == 0xffff) 6154 goto sec_target; 6155 6156 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess), 6157 ddb_index[0]); 6158 if (rval != QLA_SUCCESS) { 6159 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not " 6160 "configured\n", __func__)); 6161 } else 6162 ret = QLA_SUCCESS; 6163 6164 sec_target: 6165 if (ddb_index[1] == 0xffff) 6166 goto exit_get_boot_info; 6167 6168 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess), 6169 ddb_index[1]); 6170 if (rval != QLA_SUCCESS) { 6171 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not" 6172 " configured\n", __func__)); 6173 } else 6174 ret = QLA_SUCCESS; 6175 6176 exit_get_boot_info: 6177 return ret; 6178 } 6179 6180 static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha) 6181 { 6182 struct iscsi_boot_kobj *boot_kobj; 6183 6184 if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS) 6185 return QLA_ERROR; 6186 6187 if (ql4xdisablesysfsboot) { 6188 ql4_printk(KERN_INFO, ha, 6189 "%s: syfsboot disabled - driver will trigger login " 6190 "and publish session for discovery .\n", __func__); 6191 return QLA_SUCCESS; 6192 } 6193 6194 6195 ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no); 6196 if (!ha->boot_kset) 6197 goto kset_free; 6198 6199 if (!scsi_host_get(ha->host)) 6200 goto kset_free; 6201 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha, 6202 qla4xxx_show_boot_tgt_pri_info, 6203 qla4xxx_tgt_get_attr_visibility, 6204 qla4xxx_boot_release); 6205 if (!boot_kobj) 6206 goto put_host; 6207 6208 if (!scsi_host_get(ha->host)) 6209 goto kset_free; 6210 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha, 6211 qla4xxx_show_boot_tgt_sec_info, 6212 qla4xxx_tgt_get_attr_visibility, 6213 qla4xxx_boot_release); 6214 if (!boot_kobj) 6215 goto put_host; 6216 6217 if (!scsi_host_get(ha->host)) 6218 goto kset_free; 6219 boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha, 6220 qla4xxx_show_boot_ini_info, 6221 
qla4xxx_ini_get_attr_visibility, 6222 qla4xxx_boot_release); 6223 if (!boot_kobj) 6224 goto put_host; 6225 6226 if (!scsi_host_get(ha->host)) 6227 goto kset_free; 6228 boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha, 6229 qla4xxx_show_boot_eth_info, 6230 qla4xxx_eth_get_attr_visibility, 6231 qla4xxx_boot_release); 6232 if (!boot_kobj) 6233 goto put_host; 6234 6235 return QLA_SUCCESS; 6236 6237 put_host: 6238 scsi_host_put(ha->host); 6239 kset_free: 6240 iscsi_boot_destroy_kset(ha->boot_kset); 6241 return -ENOMEM; 6242 } 6243 6244 6245 static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry, 6246 struct ql4_tuple_ddb *tddb) 6247 { 6248 struct scsi_qla_host *ha; 6249 struct iscsi_cls_session *cls_sess; 6250 struct iscsi_cls_conn *cls_conn; 6251 struct iscsi_session *sess; 6252 struct iscsi_conn *conn; 6253 6254 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); 6255 ha = ddb_entry->ha; 6256 cls_sess = ddb_entry->sess; 6257 sess = cls_sess->dd_data; 6258 cls_conn = ddb_entry->conn; 6259 conn = cls_conn->dd_data; 6260 6261 tddb->tpgt = sess->tpgt; 6262 tddb->port = conn->persistent_port; 6263 strlcpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE); 6264 strlcpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN); 6265 } 6266 6267 static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry, 6268 struct ql4_tuple_ddb *tddb, 6269 uint8_t *flash_isid) 6270 { 6271 uint16_t options = 0; 6272 6273 tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); 6274 memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0], 6275 min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name))); 6276 6277 options = le16_to_cpu(fw_ddb_entry->options); 6278 if (options & DDB_OPT_IPV6_DEVICE) 6279 sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr); 6280 else 6281 sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr); 6282 6283 tddb->port = le16_to_cpu(fw_ddb_entry->port); 6284 6285 if (flash_isid == NULL) 6286 memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0], 6287 sizeof(tddb->isid)); 6288 else 6289 memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid)); 6290 } 6291 6292 static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha, 6293 struct ql4_tuple_ddb *old_tddb, 6294 struct ql4_tuple_ddb *new_tddb, 6295 uint8_t is_isid_compare) 6296 { 6297 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name)) 6298 return QLA_ERROR; 6299 6300 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr)) 6301 return QLA_ERROR; 6302 6303 if (old_tddb->port != new_tddb->port) 6304 return QLA_ERROR; 6305 6306 /* For multi sessions, driver generates the ISID, so do not compare 6307 * ISID in reset path since it would be a comparison between the 6308 * driver generated ISID and firmware generated ISID. This could 6309 * lead to adding duplicated DDBs in the list as driver generated 6310 * ISID would not match firmware generated ISID. 
6311 */ 6312 if (is_isid_compare) { 6313 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x" 6314 "%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n", 6315 __func__, old_tddb->isid[5], old_tddb->isid[4], 6316 old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1], 6317 old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4], 6318 new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1], 6319 new_tddb->isid[0])); 6320 6321 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0], 6322 sizeof(old_tddb->isid))) 6323 return QLA_ERROR; 6324 } 6325 6326 DEBUG2(ql4_printk(KERN_INFO, ha, 6327 "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]", 6328 old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr, 6329 old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt, 6330 new_tddb->ip_addr, new_tddb->iscsi_name)); 6331 6332 return QLA_SUCCESS; 6333 } 6334 6335 static int qla4xxx_is_session_exists(struct scsi_qla_host *ha, 6336 struct dev_db_entry *fw_ddb_entry, 6337 uint32_t *index) 6338 { 6339 struct ddb_entry *ddb_entry; 6340 struct ql4_tuple_ddb *fw_tddb = NULL; 6341 struct ql4_tuple_ddb *tmp_tddb = NULL; 6342 int idx; 6343 int ret = QLA_ERROR; 6344 6345 fw_tddb = vzalloc(sizeof(*fw_tddb)); 6346 if (!fw_tddb) { 6347 DEBUG2(ql4_printk(KERN_WARNING, ha, 6348 "Memory Allocation failed.\n")); 6349 ret = QLA_SUCCESS; 6350 goto exit_check; 6351 } 6352 6353 tmp_tddb = vzalloc(sizeof(*tmp_tddb)); 6354 if (!tmp_tddb) { 6355 DEBUG2(ql4_printk(KERN_WARNING, ha, 6356 "Memory Allocation failed.\n")); 6357 ret = QLA_SUCCESS; 6358 goto exit_check; 6359 } 6360 6361 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL); 6362 6363 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { 6364 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 6365 if (ddb_entry == NULL) 6366 continue; 6367 6368 qla4xxx_get_param_ddb(ddb_entry, tmp_tddb); 6369 if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) { 6370 ret = QLA_SUCCESS; /* found */ 6371 if (index != NULL) 6372 *index = idx; 6373 goto exit_check; 6374 } 6375 } 6376 6377 exit_check: 6378 if (fw_tddb) 6379 vfree(fw_tddb); 6380 if (tmp_tddb) 6381 vfree(tmp_tddb); 6382 return ret; 6383 } 6384 6385 /** 6386 * qla4xxx_check_existing_isid - check if target with same isid exist 6387 * in target list 6388 * @list_nt: list of target 6389 * @isid: isid to check 6390 * 6391 * This routine return QLA_SUCCESS if target with same isid exist 6392 **/ 6393 static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid) 6394 { 6395 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; 6396 struct dev_db_entry *fw_ddb_entry; 6397 6398 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 6399 fw_ddb_entry = &nt_ddb_idx->fw_ddb; 6400 6401 if (memcmp(&fw_ddb_entry->isid[0], &isid[0], 6402 sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) { 6403 return QLA_SUCCESS; 6404 } 6405 } 6406 return QLA_ERROR; 6407 } 6408 6409 /** 6410 * qla4xxx_update_isid - compare ddbs and updated isid 6411 * @ha: Pointer to host adapter structure. 6412 * @list_nt: list of nt target 6413 * @fw_ddb_entry: firmware ddb entry 6414 * 6415 * This routine update isid if ddbs have same iqn, same isid and 6416 * different IP addr. 6417 * Return QLA_SUCCESS if isid is updated. 
6418 **/ 6419 static int qla4xxx_update_isid(struct scsi_qla_host *ha, 6420 struct list_head *list_nt, 6421 struct dev_db_entry *fw_ddb_entry) 6422 { 6423 uint8_t base_value, i; 6424 6425 base_value = fw_ddb_entry->isid[1] & 0x1f; 6426 for (i = 0; i < 8; i++) { 6427 fw_ddb_entry->isid[1] = (base_value | (i << 5)); 6428 if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid)) 6429 break; 6430 } 6431 6432 if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid)) 6433 return QLA_ERROR; 6434 6435 return QLA_SUCCESS; 6436 } 6437 6438 /** 6439 * qla4xxx_should_update_isid - check if the ISID needs to be updated 6440 * @ha: Pointer to host adapter structure. 6441 * @old_tddb: ddb tuple 6442 * @new_tddb: ddb tuple 6443 * 6444 * Return QLA_SUCCESS if the tuples have a different IP address or port but 6445 * the same iqn and the same ISID 6446 **/ 6447 static int qla4xxx_should_update_isid(struct scsi_qla_host *ha, 6448 struct ql4_tuple_ddb *old_tddb, 6449 struct ql4_tuple_ddb *new_tddb) 6450 { 6451 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) { 6452 /* Same ip */ 6453 if (old_tddb->port == new_tddb->port) 6454 return QLA_ERROR; 6455 } 6456 6457 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name)) 6458 /* different iqn */ 6459 return QLA_ERROR; 6460 6461 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0], 6462 sizeof(old_tddb->isid))) 6463 /* different isid */ 6464 return QLA_ERROR; 6465 6466 return QLA_SUCCESS; 6467 } 6468 6469 /** 6470 * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt 6471 * @ha: Pointer to host adapter structure. 6472 * @list_nt: list of nt target. 6473 * @fw_ddb_entry: firmware ddb entry. 6474 * 6475 * This routine checks whether fw_ddb_entry already exists in list_nt to avoid 6476 * adding a duplicate DDB to list_nt. 6477 * Return QLA_SUCCESS if a duplicate DDB exists in list_nt. 6478 * Note: This function also updates the ISID of the DDB if required.
6479 **/ 6480 6481 static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha, 6482 struct list_head *list_nt, 6483 struct dev_db_entry *fw_ddb_entry) 6484 { 6485 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; 6486 struct ql4_tuple_ddb *fw_tddb = NULL; 6487 struct ql4_tuple_ddb *tmp_tddb = NULL; 6488 int rval, ret = QLA_ERROR; 6489 6490 fw_tddb = vzalloc(sizeof(*fw_tddb)); 6491 if (!fw_tddb) { 6492 DEBUG2(ql4_printk(KERN_WARNING, ha, 6493 "Memory Allocation failed.\n")); 6494 ret = QLA_SUCCESS; 6495 goto exit_check; 6496 } 6497 6498 tmp_tddb = vzalloc(sizeof(*tmp_tddb)); 6499 if (!tmp_tddb) { 6500 DEBUG2(ql4_printk(KERN_WARNING, ha, 6501 "Memory Allocation failed.\n")); 6502 ret = QLA_SUCCESS; 6503 goto exit_check; 6504 } 6505 6506 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL); 6507 6508 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 6509 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, 6510 nt_ddb_idx->flash_isid); 6511 ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true); 6512 /* found duplicate ddb */ 6513 if (ret == QLA_SUCCESS) 6514 goto exit_check; 6515 } 6516 6517 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 6518 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL); 6519 6520 ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb); 6521 if (ret == QLA_SUCCESS) { 6522 rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry); 6523 if (rval == QLA_SUCCESS) 6524 ret = QLA_ERROR; 6525 else 6526 ret = QLA_SUCCESS; 6527 6528 goto exit_check; 6529 } 6530 } 6531 6532 exit_check: 6533 if (fw_tddb) 6534 vfree(fw_tddb); 6535 if (tmp_tddb) 6536 vfree(tmp_tddb); 6537 return ret; 6538 } 6539 6540 static void qla4xxx_free_ddb_list(struct list_head *list_ddb) 6541 { 6542 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 6543 6544 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { 6545 list_del_init(&ddb_idx->list); 6546 vfree(ddb_idx); 6547 } 6548 } 6549 6550 static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha, 6551 struct dev_db_entry *fw_ddb_entry) 6552 { 6553 struct iscsi_endpoint *ep; 6554 struct sockaddr_in *addr; 6555 struct sockaddr_in6 *addr6; 6556 struct sockaddr *t_addr; 6557 struct sockaddr_storage *dst_addr; 6558 char *ip; 6559 6560 /* TODO: need to destroy on unload iscsi_endpoint*/ 6561 dst_addr = vmalloc(sizeof(*dst_addr)); 6562 if (!dst_addr) 6563 return NULL; 6564 6565 if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) { 6566 t_addr = (struct sockaddr *)dst_addr; 6567 t_addr->sa_family = AF_INET6; 6568 addr6 = (struct sockaddr_in6 *)dst_addr; 6569 ip = (char *)&addr6->sin6_addr; 6570 memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN); 6571 addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port)); 6572 6573 } else { 6574 t_addr = (struct sockaddr *)dst_addr; 6575 t_addr->sa_family = AF_INET; 6576 addr = (struct sockaddr_in *)dst_addr; 6577 ip = (char *)&addr->sin_addr; 6578 memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN); 6579 addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port)); 6580 } 6581 6582 ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0); 6583 vfree(dst_addr); 6584 return ep; 6585 } 6586 6587 static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx) 6588 { 6589 if (ql4xdisablesysfsboot) 6590 return QLA_SUCCESS; 6591 if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx) 6592 return QLA_ERROR; 6593 return QLA_SUCCESS; 6594 } 6595 6596 static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha, 6597 struct ddb_entry 
*ddb_entry, 6598 uint16_t idx) 6599 { 6600 uint16_t def_timeout; 6601 6602 ddb_entry->ddb_type = FLASH_DDB; 6603 ddb_entry->fw_ddb_index = INVALID_ENTRY; 6604 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE; 6605 ddb_entry->ha = ha; 6606 ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb; 6607 ddb_entry->ddb_change = qla4xxx_flash_ddb_change; 6608 ddb_entry->chap_tbl_idx = INVALID_ENTRY; 6609 6610 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); 6611 atomic_set(&ddb_entry->relogin_timer, 0); 6612 atomic_set(&ddb_entry->relogin_retry_count, 0); 6613 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout); 6614 ddb_entry->default_relogin_timeout = 6615 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ? 6616 def_timeout : LOGIN_TOV; 6617 ddb_entry->default_time2wait = 6618 le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait); 6619 6620 if (ql4xdisablesysfsboot && 6621 (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)) 6622 set_bit(DF_BOOT_TGT, &ddb_entry->flags); 6623 } 6624 6625 static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha) 6626 { 6627 uint32_t idx = 0; 6628 uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */ 6629 uint32_t sts[MBOX_REG_COUNT]; 6630 uint32_t ip_state; 6631 unsigned long wtime; 6632 int ret; 6633 6634 wtime = jiffies + (HZ * IP_CONFIG_TOV); 6635 do { 6636 for (idx = 0; idx < IP_ADDR_COUNT; idx++) { 6637 if (ip_idx[idx] == -1) 6638 continue; 6639 6640 ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts); 6641 6642 if (ret == QLA_ERROR) { 6643 ip_idx[idx] = -1; 6644 continue; 6645 } 6646 6647 ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT; 6648 6649 DEBUG2(ql4_printk(KERN_INFO, ha, 6650 "Waiting for IP state for idx = %d, state = 0x%x\n", 6651 ip_idx[idx], ip_state)); 6652 if (ip_state == IP_ADDRSTATE_UNCONFIGURED || 6653 ip_state == IP_ADDRSTATE_INVALID || 6654 ip_state == IP_ADDRSTATE_PREFERRED || 6655 ip_state == IP_ADDRSTATE_DEPRICATED || 6656 ip_state == IP_ADDRSTATE_DISABLING) 6657 ip_idx[idx] = -1; 6658 } 6659 6660 /* Break if all IP states checked */ 6661 if ((ip_idx[0] == -1) && 6662 (ip_idx[1] == -1) && 6663 (ip_idx[2] == -1) && 6664 (ip_idx[3] == -1)) 6665 break; 6666 schedule_timeout_uninterruptible(HZ); 6667 } while (time_after(wtime, jiffies)); 6668 } 6669 6670 static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry, 6671 struct dev_db_entry *flash_ddb_entry) 6672 { 6673 uint16_t options = 0; 6674 size_t ip_len = IP_ADDR_LEN; 6675 6676 options = le16_to_cpu(fw_ddb_entry->options); 6677 if (options & DDB_OPT_IPV6_DEVICE) 6678 ip_len = IPv6_ADDR_LEN; 6679 6680 if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len)) 6681 return QLA_ERROR; 6682 6683 if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0], 6684 sizeof(fw_ddb_entry->isid))) 6685 return QLA_ERROR; 6686 6687 if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port, 6688 sizeof(fw_ddb_entry->port))) 6689 return QLA_ERROR; 6690 6691 return QLA_SUCCESS; 6692 } 6693 6694 static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha, 6695 struct dev_db_entry *fw_ddb_entry, 6696 uint32_t fw_idx, uint32_t *flash_index) 6697 { 6698 struct dev_db_entry *flash_ddb_entry; 6699 dma_addr_t flash_ddb_entry_dma; 6700 uint32_t idx = 0; 6701 int max_ddbs; 6702 int ret = QLA_ERROR, status; 6703 6704 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 6705 MAX_DEV_DB_ENTRIES; 6706 6707 flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 6708 &flash_ddb_entry_dma); 6709 if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) { 6710 ql4_printk(KERN_ERR, ha, "Out of memory\n"); 6711 goto exit_find_st_idx; 6712 } 6713 6714 status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, 6715 flash_ddb_entry_dma, fw_idx); 6716 if (status == QLA_SUCCESS) { 6717 status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); 6718 if (status == QLA_SUCCESS) { 6719 *flash_index = fw_idx; 6720 ret = QLA_SUCCESS; 6721 goto exit_find_st_idx; 6722 } 6723 } 6724 6725 for (idx = 0; idx < max_ddbs; idx++) { 6726 status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, 6727 flash_ddb_entry_dma, idx); 6728 if (status == QLA_ERROR) 6729 continue; 6730 6731 status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); 6732 if (status == QLA_SUCCESS) { 6733 *flash_index = idx; 6734 ret = QLA_SUCCESS; 6735 goto exit_find_st_idx; 6736 } 6737 } 6738 6739 if (idx == max_ddbs) 6740 ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n", 6741 fw_idx); 6742 6743 exit_find_st_idx: 6744 if (flash_ddb_entry) 6745 dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry, 6746 flash_ddb_entry_dma); 6747 6748 return ret; 6749 } 6750 6751 static void qla4xxx_build_st_list(struct scsi_qla_host *ha, 6752 struct list_head *list_st) 6753 { 6754 struct qla_ddb_index *st_ddb_idx; 6755 int max_ddbs; 6756 int fw_idx_size; 6757 struct dev_db_entry *fw_ddb_entry; 6758 dma_addr_t fw_ddb_dma; 6759 int ret; 6760 uint32_t idx = 0, next_idx = 0; 6761 uint32_t state = 0, conn_err = 0; 6762 uint32_t flash_index = -1; 6763 uint16_t conn_id = 0; 6764 6765 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 6766 &fw_ddb_dma); 6767 if (fw_ddb_entry == NULL) { 6768 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 6769 goto exit_st_list; 6770 } 6771 6772 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 6773 MAX_DEV_DB_ENTRIES; 6774 fw_idx_size = sizeof(struct qla_ddb_index); 6775 6776 for (idx = 0; idx < max_ddbs; idx = next_idx) { 6777 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, 6778 NULL, &next_idx, &state, 6779 &conn_err, NULL, &conn_id); 6780 if (ret == QLA_ERROR) 6781 break; 6782 6783 /* Ignore DDB if invalid state (unassigned) */ 6784 if (state == DDB_DS_UNASSIGNED) 6785 goto continue_next_st; 6786 6787 /* Check if ST, add to the list_st */ 6788 if (strlen((char *) fw_ddb_entry->iscsi_name) != 0) 6789 goto continue_next_st; 6790 6791 st_ddb_idx = vzalloc(fw_idx_size); 6792 if (!st_ddb_idx) 6793 break; 6794 6795 ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx, 6796 &flash_index); 6797 if (ret == QLA_ERROR) { 6798 ql4_printk(KERN_ERR, ha, 6799 "No flash entry for ST at idx [%d]\n", idx); 6800 st_ddb_idx->flash_ddb_idx = idx; 6801 } else { 6802 ql4_printk(KERN_INFO, ha, 6803 "ST at idx [%d] is stored at flash [%d]\n", 6804 idx, flash_index); 6805 st_ddb_idx->flash_ddb_idx = flash_index; 6806 } 6807 6808 st_ddb_idx->fw_ddb_idx = idx; 6809 6810 list_add_tail(&st_ddb_idx->list, list_st); 6811 continue_next_st: 6812 if (next_idx == 0) 6813 break; 6814 } 6815 6816 exit_st_list: 6817 if (fw_ddb_entry) 6818 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 6819 } 6820 6821 /** 6822 * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list 6823 * @ha: pointer to adapter structure 6824 * @list_ddb: List from which failed ddb to be removed 6825 * 6826 * Iterate over the list of DDBs and find and remove DDBs that are either in 6827 * no connection active state or failed state 6828 **/ 6829 static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha, 6830 struct list_head *list_ddb) 6831 { 6832 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 6833 uint32_t next_idx = 0; 6834 uint32_t state = 0, conn_err = 0; 6835 int ret; 6836 6837 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { 6838 ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx, 6839 NULL, 0, NULL, &next_idx, &state, 6840 &conn_err, NULL, NULL); 6841 if (ret == QLA_ERROR) 6842 continue; 6843 6844 if (state == DDB_DS_NO_CONNECTION_ACTIVE || 6845 state == DDB_DS_SESSION_FAILED) { 6846 list_del_init(&ddb_idx->list); 6847 vfree(ddb_idx); 6848 } 6849 } 6850 } 6851 6852 static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha, 6853 struct ddb_entry *ddb_entry, 6854 struct dev_db_entry *fw_ddb_entry) 6855 { 6856 struct iscsi_cls_session *cls_sess; 6857 struct iscsi_session *sess; 6858 uint32_t max_ddbs = 0; 6859 uint16_t ddb_link = -1; 6860 6861 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 6862 MAX_DEV_DB_ENTRIES; 6863 6864 cls_sess = ddb_entry->sess; 6865 sess = cls_sess->dd_data; 6866 6867 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 6868 if (ddb_link < max_ddbs) 6869 sess->discovery_parent_idx = ddb_link; 6870 else 6871 sess->discovery_parent_idx = DDB_NO_LINK; 6872 } 6873 6874 static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha, 6875 struct dev_db_entry *fw_ddb_entry, 6876 int is_reset, uint16_t idx) 6877 { 6878 struct iscsi_cls_session *cls_sess; 6879 struct iscsi_session *sess; 6880 struct iscsi_cls_conn *cls_conn; 6881 struct iscsi_endpoint *ep; 6882 uint16_t cmds_max = 32; 6883 uint16_t conn_id = 0; 6884 uint32_t initial_cmdsn = 0; 6885 int ret = QLA_SUCCESS; 6886 6887 struct ddb_entry *ddb_entry = NULL; 6888 6889 /* Create session object, with INVALID_ENTRY, 6890 * the targer_id would get set when we issue the login 6891 */ 6892 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host, 6893 cmds_max, sizeof(struct ddb_entry), 6894 sizeof(struct ql4_task_data), 6895 initial_cmdsn, INVALID_ENTRY); 6896 if (!cls_sess) { 6897 ret = QLA_ERROR; 6898 goto exit_setup; 6899 } 6900 6901 /* 6902 * so calling module_put function to decrement the 6903 * reference count. 6904 **/ 6905 module_put(qla4xxx_iscsi_transport.owner); 6906 sess = cls_sess->dd_data; 6907 ddb_entry = sess->dd_data; 6908 ddb_entry->sess = cls_sess; 6909 6910 cls_sess->recovery_tmo = ql4xsess_recovery_tmo; 6911 memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry, 6912 sizeof(struct dev_db_entry)); 6913 6914 qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx); 6915 6916 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id); 6917 6918 if (!cls_conn) { 6919 ret = QLA_ERROR; 6920 goto exit_setup; 6921 } 6922 6923 ddb_entry->conn = cls_conn; 6924 6925 /* Setup ep, for displaying attributes in sysfs */ 6926 ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry); 6927 if (ep) { 6928 ep->conn = cls_conn; 6929 cls_conn->ep = ep; 6930 } else { 6931 DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n")); 6932 ret = QLA_ERROR; 6933 goto exit_setup; 6934 } 6935 6936 /* Update sess/conn params */ 6937 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); 6938 qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry); 6939 6940 if (is_reset == RESET_ADAPTER) { 6941 iscsi_block_session(cls_sess); 6942 /* Use the relogin path to discover new devices 6943 * by short-circuting the logic of setting 6944 * timer to relogin - instead set the flags 6945 * to initiate login right away. 
6946 */ 6947 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); 6948 set_bit(DF_RELOGIN, &ddb_entry->flags); 6949 } 6950 6951 exit_setup: 6952 return ret; 6953 } 6954 6955 static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha, 6956 struct list_head *list_ddb, 6957 struct dev_db_entry *fw_ddb_entry) 6958 { 6959 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 6960 uint16_t ddb_link; 6961 6962 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 6963 6964 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { 6965 if (ddb_idx->fw_ddb_idx == ddb_link) { 6966 DEBUG2(ql4_printk(KERN_INFO, ha, 6967 "Updating NT parent idx from [%d] to [%d]\n", 6968 ddb_link, ddb_idx->flash_ddb_idx)); 6969 fw_ddb_entry->ddb_link = 6970 cpu_to_le16(ddb_idx->flash_ddb_idx); 6971 return; 6972 } 6973 } 6974 } 6975 6976 static void qla4xxx_build_nt_list(struct scsi_qla_host *ha, 6977 struct list_head *list_nt, 6978 struct list_head *list_st, 6979 int is_reset) 6980 { 6981 struct dev_db_entry *fw_ddb_entry; 6982 struct ddb_entry *ddb_entry = NULL; 6983 dma_addr_t fw_ddb_dma; 6984 int max_ddbs; 6985 int fw_idx_size; 6986 int ret; 6987 uint32_t idx = 0, next_idx = 0; 6988 uint32_t state = 0, conn_err = 0; 6989 uint32_t ddb_idx = -1; 6990 uint16_t conn_id = 0; 6991 uint16_t ddb_link = -1; 6992 struct qla_ddb_index *nt_ddb_idx; 6993 6994 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 6995 &fw_ddb_dma); 6996 if (fw_ddb_entry == NULL) { 6997 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 6998 goto exit_nt_list; 6999 } 7000 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : 7001 MAX_DEV_DB_ENTRIES; 7002 fw_idx_size = sizeof(struct qla_ddb_index); 7003 7004 for (idx = 0; idx < max_ddbs; idx = next_idx) { 7005 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, 7006 NULL, &next_idx, &state, 7007 &conn_err, NULL, &conn_id); 7008 if (ret == QLA_ERROR) 7009 break; 7010 7011 if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS) 7012 goto continue_next_nt; 7013 7014 /* Check if NT, then add to list it */ 7015 if (strlen((char *) fw_ddb_entry->iscsi_name) == 0) 7016 goto continue_next_nt; 7017 7018 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 7019 if (ddb_link < max_ddbs) 7020 qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry); 7021 7022 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE || 7023 state == DDB_DS_SESSION_FAILED) && 7024 (is_reset == INIT_ADAPTER)) 7025 goto continue_next_nt; 7026 7027 DEBUG2(ql4_printk(KERN_INFO, ha, 7028 "Adding DDB to session = 0x%x\n", idx)); 7029 7030 if (is_reset == INIT_ADAPTER) { 7031 nt_ddb_idx = vmalloc(fw_idx_size); 7032 if (!nt_ddb_idx) 7033 break; 7034 7035 nt_ddb_idx->fw_ddb_idx = idx; 7036 7037 /* Copy original isid as it may get updated in function 7038 * qla4xxx_update_isid(). 
We need original isid in 7039 * function qla4xxx_compare_tuple_ddb to find duplicate 7040 * target */ 7041 memcpy(&nt_ddb_idx->flash_isid[0], 7042 &fw_ddb_entry->isid[0], 7043 sizeof(nt_ddb_idx->flash_isid)); 7044 7045 ret = qla4xxx_is_flash_ddb_exists(ha, list_nt, 7046 fw_ddb_entry); 7047 if (ret == QLA_SUCCESS) { 7048 /* free nt_ddb_idx and do not add to list_nt */ 7049 vfree(nt_ddb_idx); 7050 goto continue_next_nt; 7051 } 7052 7053 /* Copy updated isid */ 7054 memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry, 7055 sizeof(struct dev_db_entry)); 7056 7057 list_add_tail(&nt_ddb_idx->list, list_nt); 7058 } else if (is_reset == RESET_ADAPTER) { 7059 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, 7060 &ddb_idx); 7061 if (ret == QLA_SUCCESS) { 7062 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, 7063 ddb_idx); 7064 if (ddb_entry != NULL) 7065 qla4xxx_update_sess_disc_idx(ha, 7066 ddb_entry, 7067 fw_ddb_entry); 7068 goto continue_next_nt; 7069 } 7070 } 7071 7072 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx); 7073 if (ret == QLA_ERROR) 7074 goto exit_nt_list; 7075 7076 continue_next_nt: 7077 if (next_idx == 0) 7078 break; 7079 } 7080 7081 exit_nt_list: 7082 if (fw_ddb_entry) 7083 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 7084 } 7085 7086 static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha, 7087 struct list_head *list_nt, 7088 uint16_t target_id) 7089 { 7090 struct dev_db_entry *fw_ddb_entry; 7091 dma_addr_t fw_ddb_dma; 7092 int max_ddbs; 7093 int fw_idx_size; 7094 int ret; 7095 uint32_t idx = 0, next_idx = 0; 7096 uint32_t state = 0, conn_err = 0; 7097 uint16_t conn_id = 0; 7098 struct qla_ddb_index *nt_ddb_idx; 7099 7100 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 7101 &fw_ddb_dma); 7102 if (fw_ddb_entry == NULL) { 7103 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 7104 goto exit_new_nt_list; 7105 } 7106 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 7107 MAX_DEV_DB_ENTRIES; 7108 fw_idx_size = sizeof(struct qla_ddb_index); 7109 7110 for (idx = 0; idx < max_ddbs; idx = next_idx) { 7111 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, 7112 NULL, &next_idx, &state, 7113 &conn_err, NULL, &conn_id); 7114 if (ret == QLA_ERROR) 7115 break; 7116 7117 /* Check if NT, then add it to list */ 7118 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0) 7119 goto continue_next_new_nt; 7120 7121 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE)) 7122 goto continue_next_new_nt; 7123 7124 DEBUG2(ql4_printk(KERN_INFO, ha, 7125 "Adding DDB to session = 0x%x\n", idx)); 7126 7127 nt_ddb_idx = vmalloc(fw_idx_size); 7128 if (!nt_ddb_idx) 7129 break; 7130 7131 nt_ddb_idx->fw_ddb_idx = idx; 7132 7133 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL); 7134 if (ret == QLA_SUCCESS) { 7135 /* free nt_ddb_idx and do not add to list_nt */ 7136 vfree(nt_ddb_idx); 7137 goto continue_next_new_nt; 7138 } 7139 7140 if (target_id < max_ddbs) 7141 fw_ddb_entry->ddb_link = cpu_to_le16(target_id); 7142 7143 list_add_tail(&nt_ddb_idx->list, list_nt); 7144 7145 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, 7146 idx); 7147 if (ret == QLA_ERROR) 7148 goto exit_new_nt_list; 7149 7150 continue_next_new_nt: 7151 if (next_idx == 0) 7152 break; 7153 } 7154 7155 exit_new_nt_list: 7156 if (fw_ddb_entry) 7157 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 7158 } 7159 7160 /** 7161 * qla4xxx_sysfs_ddb_is_non_persistent - check for non-persistence of ddb entry 7162 * @dev: dev associated with the sysfs entry 7163 * @data: pointer to flashnode session object 7164 * 7165 * Returns: 7166 * 1: if flashnode entry is non-persistent 7167 * 0: if flashnode entry is persistent 7168 **/ 7169 static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data) 7170 { 7171 struct iscsi_bus_flash_session *fnode_sess; 7172 7173 if (!iscsi_flashnode_bus_match(dev, NULL)) 7174 return 0; 7175 7176 fnode_sess = iscsi_dev_to_flash_session(dev); 7177 7178 return (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT); 7179 } 7180 7181 /** 7182 * qla4xxx_sysfs_ddb_tgt_create - Create sysfs entry for target 7183 * @ha: pointer to host 7184 * @fw_ddb_entry: flash ddb data 7185 * @idx: target index 7186 * @user: if set then this call is made from userland else from kernel 7187 * 7188 * Returns: 7189 * On sucess: QLA_SUCCESS 7190 * On failure: QLA_ERROR 7191 * 7192 * This create separate sysfs entries for session and connection attributes of 7193 * the given fw ddb entry. 7194 * If this is invoked as a result of a userspace call then the entry is marked 7195 * as nonpersistent using flash_state field. 
7196 **/ 7197 static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, 7198 struct dev_db_entry *fw_ddb_entry, 7199 uint16_t *idx, int user) 7200 { 7201 struct iscsi_bus_flash_session *fnode_sess = NULL; 7202 struct iscsi_bus_flash_conn *fnode_conn = NULL; 7203 int rc = QLA_ERROR; 7204 7205 fnode_sess = iscsi_create_flashnode_sess(ha->host, *idx, 7206 &qla4xxx_iscsi_transport, 0); 7207 if (!fnode_sess) { 7208 ql4_printk(KERN_ERR, ha, 7209 "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n", 7210 __func__, *idx, ha->host_no); 7211 goto exit_tgt_create; 7212 } 7213 7214 fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess, 7215 &qla4xxx_iscsi_transport, 0); 7216 if (!fnode_conn) { 7217 ql4_printk(KERN_ERR, ha, 7218 "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n", 7219 __func__, *idx, ha->host_no); 7220 goto free_sess; 7221 } 7222 7223 if (user) { 7224 fnode_sess->flash_state = DEV_DB_NON_PERSISTENT; 7225 } else { 7226 fnode_sess->flash_state = DEV_DB_PERSISTENT; 7227 7228 if (*idx == ha->pri_ddb_idx || *idx == ha->sec_ddb_idx) 7229 fnode_sess->is_boot_target = 1; 7230 else 7231 fnode_sess->is_boot_target = 0; 7232 } 7233 7234 rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn, 7235 fw_ddb_entry); 7236 7237 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", 7238 __func__, fnode_sess->dev.kobj.name); 7239 7240 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", 7241 __func__, fnode_conn->dev.kobj.name); 7242 7243 return QLA_SUCCESS; 7244 7245 free_sess: 7246 iscsi_destroy_flashnode_sess(fnode_sess); 7247 7248 exit_tgt_create: 7249 return QLA_ERROR; 7250 } 7251 7252 /** 7253 * qla4xxx_sysfs_ddb_add - Add new ddb entry in flash 7254 * @shost: pointer to host 7255 * @buf: type of ddb entry (ipv4/ipv6) 7256 * @len: length of buf 7257 * 7258 * This creates new ddb entry in the flash by finding first free index and 7259 * storing default ddb there. And then create sysfs entry for the new ddb entry. 7260 **/ 7261 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf, 7262 int len) 7263 { 7264 struct scsi_qla_host *ha = to_qla_host(shost); 7265 struct dev_db_entry *fw_ddb_entry = NULL; 7266 dma_addr_t fw_ddb_entry_dma; 7267 struct device *dev; 7268 uint16_t idx = 0; 7269 uint16_t max_ddbs = 0; 7270 uint32_t options = 0; 7271 uint32_t rval = QLA_ERROR; 7272 7273 if (strncasecmp(PORTAL_TYPE_IPV4, buf, 4) && 7274 strncasecmp(PORTAL_TYPE_IPV6, buf, 4)) { 7275 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid portal type\n", 7276 __func__)); 7277 goto exit_ddb_add; 7278 } 7279 7280 max_ddbs = is_qla40XX(ha) ? 
MAX_PRST_DEV_DB_ENTRIES : 7281 MAX_DEV_DB_ENTRIES; 7282 7283 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7284 &fw_ddb_entry_dma, GFP_KERNEL); 7285 if (!fw_ddb_entry) { 7286 DEBUG2(ql4_printk(KERN_ERR, ha, 7287 "%s: Unable to allocate dma buffer\n", 7288 __func__)); 7289 goto exit_ddb_add; 7290 } 7291 7292 dev = iscsi_find_flashnode_sess(ha->host, NULL, 7293 qla4xxx_sysfs_ddb_is_non_persistent); 7294 if (dev) { 7295 ql4_printk(KERN_ERR, ha, 7296 "%s: A non-persistent entry %s found\n", 7297 __func__, dev->kobj.name); 7298 put_device(dev); 7299 goto exit_ddb_add; 7300 } 7301 7302 /* Index 0 and 1 are reserved for boot target entries */ 7303 for (idx = 2; idx < max_ddbs; idx++) { 7304 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, 7305 fw_ddb_entry_dma, idx)) 7306 break; 7307 } 7308 7309 if (idx == max_ddbs) 7310 goto exit_ddb_add; 7311 7312 if (!strncasecmp("ipv6", buf, 4)) 7313 options |= IPV6_DEFAULT_DDB_ENTRY; 7314 7315 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); 7316 if (rval == QLA_ERROR) 7317 goto exit_ddb_add; 7318 7319 rval = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 1); 7320 7321 exit_ddb_add: 7322 if (fw_ddb_entry) 7323 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7324 fw_ddb_entry, fw_ddb_entry_dma); 7325 if (rval == QLA_SUCCESS) 7326 return idx; 7327 else 7328 return -EIO; 7329 } 7330 7331 /** 7332 * qla4xxx_sysfs_ddb_apply - write the target ddb contents to Flash 7333 * @fnode_sess: pointer to session attrs of flash ddb entry 7334 * @fnode_conn: pointer to connection attrs of flash ddb entry 7335 * 7336 * This writes the contents of target ddb buffer to Flash with a valid cookie 7337 * value in order to make the ddb entry persistent. 7338 **/ 7339 static int qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess, 7340 struct iscsi_bus_flash_conn *fnode_conn) 7341 { 7342 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7343 struct scsi_qla_host *ha = to_qla_host(shost); 7344 uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO; 7345 struct dev_db_entry *fw_ddb_entry = NULL; 7346 dma_addr_t fw_ddb_entry_dma; 7347 uint32_t options = 0; 7348 int rval = 0; 7349 7350 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7351 &fw_ddb_entry_dma, GFP_KERNEL); 7352 if (!fw_ddb_entry) { 7353 DEBUG2(ql4_printk(KERN_ERR, ha, 7354 "%s: Unable to allocate dma buffer\n", 7355 __func__)); 7356 rval = -ENOMEM; 7357 goto exit_ddb_apply; 7358 } 7359 7360 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7361 options |= IPV6_DEFAULT_DDB_ENTRY; 7362 7363 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); 7364 if (rval == QLA_ERROR) 7365 goto exit_ddb_apply; 7366 7367 dev_db_start_offset += (fnode_sess->target_id * 7368 sizeof(*fw_ddb_entry)); 7369 7370 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); 7371 fw_ddb_entry->cookie = DDB_VALID_COOKIE; 7372 7373 rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, 7374 sizeof(*fw_ddb_entry), FLASH_OPT_RMW_COMMIT); 7375 7376 if (rval == QLA_SUCCESS) { 7377 fnode_sess->flash_state = DEV_DB_PERSISTENT; 7378 ql4_printk(KERN_INFO, ha, 7379 "%s: flash node %u of host %lu written to flash\n", 7380 __func__, fnode_sess->target_id, ha->host_no); 7381 } else { 7382 rval = -EIO; 7383 ql4_printk(KERN_ERR, ha, 7384 "%s: Error while writing flash node %u of host %lu to flash\n", 7385 __func__, fnode_sess->target_id, ha->host_no); 7386 } 7387 7388 exit_ddb_apply: 7389 if (fw_ddb_entry) 7390 
dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7391 fw_ddb_entry, fw_ddb_entry_dma); 7392 return rval; 7393 } 7394 7395 static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha, 7396 struct dev_db_entry *fw_ddb_entry, 7397 uint16_t idx) 7398 { 7399 struct dev_db_entry *ddb_entry = NULL; 7400 dma_addr_t ddb_entry_dma; 7401 unsigned long wtime; 7402 uint32_t mbx_sts = 0; 7403 uint32_t state = 0, conn_err = 0; 7404 uint16_t tmo = 0; 7405 int ret = 0; 7406 7407 ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ddb_entry), 7408 &ddb_entry_dma, GFP_KERNEL); 7409 if (!ddb_entry) { 7410 DEBUG2(ql4_printk(KERN_ERR, ha, 7411 "%s: Unable to allocate dma buffer\n", 7412 __func__)); 7413 return QLA_ERROR; 7414 } 7415 7416 memcpy(ddb_entry, fw_ddb_entry, sizeof(*ddb_entry)); 7417 7418 ret = qla4xxx_set_ddb_entry(ha, idx, ddb_entry_dma, &mbx_sts); 7419 if (ret != QLA_SUCCESS) { 7420 DEBUG2(ql4_printk(KERN_ERR, ha, 7421 "%s: Unable to set ddb entry for index %d\n", 7422 __func__, idx)); 7423 goto exit_ddb_conn_open; 7424 } 7425 7426 qla4xxx_conn_open(ha, idx); 7427 7428 /* To ensure that sendtargets is done, wait for at least 12 secs */ 7429 tmo = ((ha->def_timeout > LOGIN_TOV) && 7430 (ha->def_timeout < LOGIN_TOV * 10) ? 7431 ha->def_timeout : LOGIN_TOV); 7432 7433 DEBUG2(ql4_printk(KERN_INFO, ha, 7434 "Default time to wait for login to ddb %d\n", tmo)); 7435 7436 wtime = jiffies + (HZ * tmo); 7437 do { 7438 ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL, 7439 NULL, &state, &conn_err, NULL, 7440 NULL); 7441 if (ret == QLA_ERROR) 7442 continue; 7443 7444 if (state == DDB_DS_NO_CONNECTION_ACTIVE || 7445 state == DDB_DS_SESSION_FAILED) 7446 break; 7447 7448 schedule_timeout_uninterruptible(HZ / 10); 7449 } while (time_after(wtime, jiffies)); 7450 7451 exit_ddb_conn_open: 7452 if (ddb_entry) 7453 dma_free_coherent(&ha->pdev->dev, sizeof(*ddb_entry), 7454 ddb_entry, ddb_entry_dma); 7455 return ret; 7456 } 7457 7458 static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha, 7459 struct dev_db_entry *fw_ddb_entry, 7460 uint16_t target_id) 7461 { 7462 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 7463 struct list_head list_nt; 7464 uint16_t ddb_index; 7465 int ret = 0; 7466 7467 if (test_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags)) { 7468 ql4_printk(KERN_WARNING, ha, 7469 "%s: A discovery already in progress!\n", __func__); 7470 return QLA_ERROR; 7471 } 7472 7473 INIT_LIST_HEAD(&list_nt); 7474 7475 set_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags); 7476 7477 ret = qla4xxx_get_ddb_index(ha, &ddb_index); 7478 if (ret == QLA_ERROR) 7479 goto exit_login_st_clr_bit; 7480 7481 ret = qla4xxx_sysfs_ddb_conn_open(ha, fw_ddb_entry, ddb_index); 7482 if (ret == QLA_ERROR) 7483 goto exit_login_st; 7484 7485 qla4xxx_build_new_nt_list(ha, &list_nt, target_id); 7486 7487 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) { 7488 list_del_init(&ddb_idx->list); 7489 qla4xxx_clear_ddb_entry(ha, ddb_idx->fw_ddb_idx); 7490 vfree(ddb_idx); 7491 } 7492 7493 exit_login_st: 7494 if (qla4xxx_clear_ddb_entry(ha, ddb_index) == QLA_ERROR) { 7495 ql4_printk(KERN_ERR, ha, 7496 "Unable to clear DDB index = 0x%x\n", ddb_index); 7497 } 7498 7499 clear_bit(ddb_index, ha->ddb_idx_map); 7500 7501 exit_login_st_clr_bit: 7502 clear_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags); 7503 return ret; 7504 } 7505 7506 static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha, 7507 struct dev_db_entry *fw_ddb_entry, 7508 uint16_t idx) 7509 { 7510 int ret = QLA_ERROR; 7511 7512 ret = qla4xxx_is_session_exists(ha, 
fw_ddb_entry, NULL); 7513 if (ret != QLA_SUCCESS) 7514 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, 7515 idx); 7516 else 7517 ret = -EPERM; 7518 7519 return ret; 7520 } 7521 7522 /** 7523 * qla4xxx_sysfs_ddb_login - Login to the specified target 7524 * @fnode_sess: pointer to session attrs of flash ddb entry 7525 * @fnode_conn: pointer to connection attrs of flash ddb entry 7526 * 7527 * This logs in to the specified target 7528 **/ 7529 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess, 7530 struct iscsi_bus_flash_conn *fnode_conn) 7531 { 7532 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7533 struct scsi_qla_host *ha = to_qla_host(shost); 7534 struct dev_db_entry *fw_ddb_entry = NULL; 7535 dma_addr_t fw_ddb_entry_dma; 7536 uint32_t options = 0; 7537 int ret = 0; 7538 7539 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) { 7540 ql4_printk(KERN_ERR, ha, 7541 "%s: Target info is not persistent\n", __func__); 7542 ret = -EIO; 7543 goto exit_ddb_login; 7544 } 7545 7546 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7547 &fw_ddb_entry_dma, GFP_KERNEL); 7548 if (!fw_ddb_entry) { 7549 DEBUG2(ql4_printk(KERN_ERR, ha, 7550 "%s: Unable to allocate dma buffer\n", 7551 __func__)); 7552 ret = -ENOMEM; 7553 goto exit_ddb_login; 7554 } 7555 7556 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7557 options |= IPV6_DEFAULT_DDB_ENTRY; 7558 7559 ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); 7560 if (ret == QLA_ERROR) 7561 goto exit_ddb_login; 7562 7563 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); 7564 fw_ddb_entry->cookie = DDB_VALID_COOKIE; 7565 7566 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0) 7567 ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry, 7568 fnode_sess->target_id); 7569 else 7570 ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry, 7571 fnode_sess->target_id); 7572 7573 if (ret > 0) 7574 ret = -EIO; 7575 7576 exit_ddb_login: 7577 if (fw_ddb_entry) 7578 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7579 fw_ddb_entry, fw_ddb_entry_dma); 7580 return ret; 7581 } 7582 7583 /** 7584 * qla4xxx_sysfs_ddb_logout_sid - Logout session for the specified target 7585 * @cls_sess: pointer to session to be logged out 7586 * 7587 * This performs session log out from the specified target 7588 **/ 7589 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess) 7590 { 7591 struct iscsi_session *sess; 7592 struct ddb_entry *ddb_entry = NULL; 7593 struct scsi_qla_host *ha; 7594 struct dev_db_entry *fw_ddb_entry = NULL; 7595 dma_addr_t fw_ddb_entry_dma; 7596 unsigned long flags; 7597 unsigned long wtime; 7598 uint32_t ddb_state; 7599 int options; 7600 int ret = 0; 7601 7602 sess = cls_sess->dd_data; 7603 ddb_entry = sess->dd_data; 7604 ha = ddb_entry->ha; 7605 7606 if (ddb_entry->ddb_type != FLASH_DDB) { 7607 ql4_printk(KERN_ERR, ha, "%s: Not a flash node session\n", 7608 __func__); 7609 ret = -ENXIO; 7610 goto exit_ddb_logout; 7611 } 7612 7613 if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) { 7614 ql4_printk(KERN_ERR, ha, 7615 "%s: Logout from boot target entry is not permitted.\n", 7616 __func__); 7617 ret = -EPERM; 7618 goto exit_ddb_logout; 7619 } 7620 7621 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7622 &fw_ddb_entry_dma, GFP_KERNEL); 7623 if (!fw_ddb_entry) { 7624 ql4_printk(KERN_ERR, ha, 7625 "%s: Unable to allocate dma buffer\n", __func__); 7626 ret = -ENOMEM; 7627 goto exit_ddb_logout; 7628 } 7629 7630 if 
(test_and_set_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags)) 7631 goto ddb_logout_init; 7632 7633 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 7634 fw_ddb_entry, fw_ddb_entry_dma, 7635 NULL, NULL, &ddb_state, NULL, 7636 NULL, NULL); 7637 if (ret == QLA_ERROR) 7638 goto ddb_logout_init; 7639 7640 if (ddb_state == DDB_DS_SESSION_ACTIVE) 7641 goto ddb_logout_init; 7642 7643 /* wait until next relogin is triggered using DF_RELOGIN and 7644 * clear DF_RELOGIN to avoid invocation of further relogin 7645 */ 7646 wtime = jiffies + (HZ * RELOGIN_TOV); 7647 do { 7648 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags)) 7649 goto ddb_logout_init; 7650 7651 schedule_timeout_uninterruptible(HZ); 7652 } while ((time_after(wtime, jiffies))); 7653 7654 ddb_logout_init: 7655 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); 7656 atomic_set(&ddb_entry->relogin_timer, 0); 7657 7658 options = LOGOUT_OPTION_CLOSE_SESSION; 7659 qla4xxx_session_logout_ddb(ha, ddb_entry, options); 7660 7661 memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry)); 7662 wtime = jiffies + (HZ * LOGOUT_TOV); 7663 do { 7664 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 7665 fw_ddb_entry, fw_ddb_entry_dma, 7666 NULL, NULL, &ddb_state, NULL, 7667 NULL, NULL); 7668 if (ret == QLA_ERROR) 7669 goto ddb_logout_clr_sess; 7670 7671 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || 7672 (ddb_state == DDB_DS_SESSION_FAILED)) 7673 goto ddb_logout_clr_sess; 7674 7675 schedule_timeout_uninterruptible(HZ); 7676 } while ((time_after(wtime, jiffies))); 7677 7678 ddb_logout_clr_sess: 7679 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); 7680 /* 7681 * we have decremented the reference count of the driver 7682 * when we setup the session to have the driver unload 7683 * to be seamless without actually destroying the 7684 * session 7685 **/ 7686 try_module_get(qla4xxx_iscsi_transport.owner); 7687 iscsi_destroy_endpoint(ddb_entry->conn->ep); 7688 7689 spin_lock_irqsave(&ha->hardware_lock, flags); 7690 qla4xxx_free_ddb(ha, ddb_entry); 7691 clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map); 7692 spin_unlock_irqrestore(&ha->hardware_lock, flags); 7693 7694 iscsi_session_teardown(ddb_entry->sess); 7695 7696 clear_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags); 7697 ret = QLA_SUCCESS; 7698 7699 exit_ddb_logout: 7700 if (fw_ddb_entry) 7701 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7702 fw_ddb_entry, fw_ddb_entry_dma); 7703 return ret; 7704 } 7705 7706 /** 7707 * qla4xxx_sysfs_ddb_logout - Logout from the specified target 7708 * @fnode_sess: pointer to session attrs of flash ddb entry 7709 * @fnode_conn: pointer to connection attrs of flash ddb entry 7710 * 7711 * This performs log out from the specified target 7712 **/ 7713 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess, 7714 struct iscsi_bus_flash_conn *fnode_conn) 7715 { 7716 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7717 struct scsi_qla_host *ha = to_qla_host(shost); 7718 struct ql4_tuple_ddb *flash_tddb = NULL; 7719 struct ql4_tuple_ddb *tmp_tddb = NULL; 7720 struct dev_db_entry *fw_ddb_entry = NULL; 7721 struct ddb_entry *ddb_entry = NULL; 7722 dma_addr_t fw_ddb_dma; 7723 uint32_t next_idx = 0; 7724 uint32_t state = 0, conn_err = 0; 7725 uint16_t conn_id = 0; 7726 int idx, index; 7727 int status, ret = 0; 7728 7729 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 7730 &fw_ddb_dma); 7731 if (fw_ddb_entry == NULL) { 7732 ql4_printk(KERN_ERR, ha, "%s:Out of memory\n", __func__); 7733 ret = 
-ENOMEM; 7734 goto exit_ddb_logout; 7735 } 7736 7737 flash_tddb = vzalloc(sizeof(*flash_tddb)); 7738 if (!flash_tddb) { 7739 ql4_printk(KERN_WARNING, ha, 7740 "%s:Memory Allocation failed.\n", __func__); 7741 ret = -ENOMEM; 7742 goto exit_ddb_logout; 7743 } 7744 7745 tmp_tddb = vzalloc(sizeof(*tmp_tddb)); 7746 if (!tmp_tddb) { 7747 ql4_printk(KERN_WARNING, ha, 7748 "%s:Memory Allocation failed.\n", __func__); 7749 ret = -ENOMEM; 7750 goto exit_ddb_logout; 7751 } 7752 7753 if (!fnode_sess->targetname) { 7754 ql4_printk(KERN_ERR, ha, 7755 "%s:Cannot logout from SendTarget entry\n", 7756 __func__); 7757 ret = -EPERM; 7758 goto exit_ddb_logout; 7759 } 7760 7761 if (fnode_sess->is_boot_target) { 7762 ql4_printk(KERN_ERR, ha, 7763 "%s: Logout from boot target entry is not permitted.\n", 7764 __func__); 7765 ret = -EPERM; 7766 goto exit_ddb_logout; 7767 } 7768 7769 strlcpy(flash_tddb->iscsi_name, fnode_sess->targetname, 7770 ISCSI_NAME_SIZE); 7771 7772 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7773 sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress); 7774 else 7775 sprintf(flash_tddb->ip_addr, "%pI4", fnode_conn->ipaddress); 7776 7777 flash_tddb->tpgt = fnode_sess->tpgt; 7778 flash_tddb->port = fnode_conn->port; 7779 7780 COPY_ISID(flash_tddb->isid, fnode_sess->isid); 7781 7782 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { 7783 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 7784 if (ddb_entry == NULL) 7785 continue; 7786 7787 if (ddb_entry->ddb_type != FLASH_DDB) 7788 continue; 7789 7790 index = ddb_entry->sess->target_id; 7791 status = qla4xxx_get_fwddb_entry(ha, index, fw_ddb_entry, 7792 fw_ddb_dma, NULL, &next_idx, 7793 &state, &conn_err, NULL, 7794 &conn_id); 7795 if (status == QLA_ERROR) { 7796 ret = -ENOMEM; 7797 break; 7798 } 7799 7800 qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, NULL); 7801 7802 status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb, 7803 true); 7804 if (status == QLA_SUCCESS) { 7805 ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess); 7806 break; 7807 } 7808 } 7809 7810 if (idx == MAX_DDB_ENTRIES) 7811 ret = -ESRCH; 7812 7813 exit_ddb_logout: 7814 if (flash_tddb) 7815 vfree(flash_tddb); 7816 if (tmp_tddb) 7817 vfree(tmp_tddb); 7818 if (fw_ddb_entry) 7819 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 7820 7821 return ret; 7822 } 7823 7824 static int 7825 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, 7826 int param, char *buf) 7827 { 7828 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7829 struct scsi_qla_host *ha = to_qla_host(shost); 7830 struct iscsi_bus_flash_conn *fnode_conn; 7831 struct ql4_chap_table chap_tbl; 7832 struct device *dev; 7833 int parent_type; 7834 int rc = 0; 7835 7836 dev = iscsi_find_flashnode_conn(fnode_sess); 7837 if (!dev) 7838 return -EIO; 7839 7840 fnode_conn = iscsi_dev_to_flash_conn(dev); 7841 7842 switch (param) { 7843 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: 7844 rc = sprintf(buf, "%u\n", fnode_conn->is_fw_assigned_ipv6); 7845 break; 7846 case ISCSI_FLASHNODE_PORTAL_TYPE: 7847 rc = sprintf(buf, "%s\n", fnode_sess->portal_type); 7848 break; 7849 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: 7850 rc = sprintf(buf, "%u\n", fnode_sess->auto_snd_tgt_disable); 7851 break; 7852 case ISCSI_FLASHNODE_DISCOVERY_SESS: 7853 rc = sprintf(buf, "%u\n", fnode_sess->discovery_sess); 7854 break; 7855 case ISCSI_FLASHNODE_ENTRY_EN: 7856 rc = sprintf(buf, "%u\n", fnode_sess->entry_state); 7857 break; 7858 case ISCSI_FLASHNODE_HDR_DGST_EN: 7859 rc = 
sprintf(buf, "%u\n", fnode_conn->hdrdgst_en); 7860 break; 7861 case ISCSI_FLASHNODE_DATA_DGST_EN: 7862 rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en); 7863 break; 7864 case ISCSI_FLASHNODE_IMM_DATA_EN: 7865 rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en); 7866 break; 7867 case ISCSI_FLASHNODE_INITIAL_R2T_EN: 7868 rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en); 7869 break; 7870 case ISCSI_FLASHNODE_DATASEQ_INORDER: 7871 rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en); 7872 break; 7873 case ISCSI_FLASHNODE_PDU_INORDER: 7874 rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en); 7875 break; 7876 case ISCSI_FLASHNODE_CHAP_AUTH_EN: 7877 rc = sprintf(buf, "%u\n", fnode_sess->chap_auth_en); 7878 break; 7879 case ISCSI_FLASHNODE_SNACK_REQ_EN: 7880 rc = sprintf(buf, "%u\n", fnode_conn->snack_req_en); 7881 break; 7882 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: 7883 rc = sprintf(buf, "%u\n", fnode_sess->discovery_logout_en); 7884 break; 7885 case ISCSI_FLASHNODE_BIDI_CHAP_EN: 7886 rc = sprintf(buf, "%u\n", fnode_sess->bidi_chap_en); 7887 break; 7888 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: 7889 rc = sprintf(buf, "%u\n", fnode_sess->discovery_auth_optional); 7890 break; 7891 case ISCSI_FLASHNODE_ERL: 7892 rc = sprintf(buf, "%u\n", fnode_sess->erl); 7893 break; 7894 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: 7895 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_stat); 7896 break; 7897 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: 7898 rc = sprintf(buf, "%u\n", fnode_conn->tcp_nagle_disable); 7899 break; 7900 case ISCSI_FLASHNODE_TCP_WSF_DISABLE: 7901 rc = sprintf(buf, "%u\n", fnode_conn->tcp_wsf_disable); 7902 break; 7903 case ISCSI_FLASHNODE_TCP_TIMER_SCALE: 7904 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timer_scale); 7905 break; 7906 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: 7907 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_en); 7908 break; 7909 case ISCSI_FLASHNODE_IP_FRAG_DISABLE: 7910 rc = sprintf(buf, "%u\n", fnode_conn->fragment_disable); 7911 break; 7912 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: 7913 rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength); 7914 break; 7915 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: 7916 rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength); 7917 break; 7918 case ISCSI_FLASHNODE_FIRST_BURST: 7919 rc = sprintf(buf, "%u\n", fnode_sess->first_burst); 7920 break; 7921 case ISCSI_FLASHNODE_DEF_TIME2WAIT: 7922 rc = sprintf(buf, "%u\n", fnode_sess->time2wait); 7923 break; 7924 case ISCSI_FLASHNODE_DEF_TIME2RETAIN: 7925 rc = sprintf(buf, "%u\n", fnode_sess->time2retain); 7926 break; 7927 case ISCSI_FLASHNODE_MAX_R2T: 7928 rc = sprintf(buf, "%u\n", fnode_sess->max_r2t); 7929 break; 7930 case ISCSI_FLASHNODE_KEEPALIVE_TMO: 7931 rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout); 7932 break; 7933 case ISCSI_FLASHNODE_ISID: 7934 rc = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n", 7935 fnode_sess->isid[0], fnode_sess->isid[1], 7936 fnode_sess->isid[2], fnode_sess->isid[3], 7937 fnode_sess->isid[4], fnode_sess->isid[5]); 7938 break; 7939 case ISCSI_FLASHNODE_TSID: 7940 rc = sprintf(buf, "%u\n", fnode_sess->tsid); 7941 break; 7942 case ISCSI_FLASHNODE_PORT: 7943 rc = sprintf(buf, "%d\n", fnode_conn->port); 7944 break; 7945 case ISCSI_FLASHNODE_MAX_BURST: 7946 rc = sprintf(buf, "%u\n", fnode_sess->max_burst); 7947 break; 7948 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: 7949 rc = sprintf(buf, "%u\n", 7950 fnode_sess->default_taskmgmt_timeout); 7951 break; 7952 case ISCSI_FLASHNODE_IPADDR: 7953 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7954 rc 
= sprintf(buf, "%pI6\n", fnode_conn->ipaddress); 7955 else 7956 rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress); 7957 break; 7958 case ISCSI_FLASHNODE_ALIAS: 7959 if (fnode_sess->targetalias) 7960 rc = sprintf(buf, "%s\n", fnode_sess->targetalias); 7961 else 7962 rc = sprintf(buf, "\n"); 7963 break; 7964 case ISCSI_FLASHNODE_REDIRECT_IPADDR: 7965 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7966 rc = sprintf(buf, "%pI6\n", 7967 fnode_conn->redirect_ipaddr); 7968 else 7969 rc = sprintf(buf, "%pI4\n", 7970 fnode_conn->redirect_ipaddr); 7971 break; 7972 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: 7973 rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size); 7974 break; 7975 case ISCSI_FLASHNODE_LOCAL_PORT: 7976 rc = sprintf(buf, "%u\n", fnode_conn->local_port); 7977 break; 7978 case ISCSI_FLASHNODE_IPV4_TOS: 7979 rc = sprintf(buf, "%u\n", fnode_conn->ipv4_tos); 7980 break; 7981 case ISCSI_FLASHNODE_IPV6_TC: 7982 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7983 rc = sprintf(buf, "%u\n", 7984 fnode_conn->ipv6_traffic_class); 7985 else 7986 rc = sprintf(buf, "\n"); 7987 break; 7988 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: 7989 rc = sprintf(buf, "%u\n", fnode_conn->ipv6_flow_label); 7990 break; 7991 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: 7992 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7993 rc = sprintf(buf, "%pI6\n", 7994 fnode_conn->link_local_ipv6_addr); 7995 else 7996 rc = sprintf(buf, "\n"); 7997 break; 7998 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: 7999 rc = sprintf(buf, "%u\n", fnode_sess->discovery_parent_idx); 8000 break; 8001 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE: 8002 if (fnode_sess->discovery_parent_type == DDB_ISNS) 8003 parent_type = ISCSI_DISC_PARENT_ISNS; 8004 else if (fnode_sess->discovery_parent_type == DDB_NO_LINK) 8005 parent_type = ISCSI_DISC_PARENT_UNKNOWN; 8006 else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES) 8007 parent_type = ISCSI_DISC_PARENT_SENDTGT; 8008 else 8009 parent_type = ISCSI_DISC_PARENT_UNKNOWN; 8010 8011 rc = sprintf(buf, "%s\n", 8012 iscsi_get_discovery_parent_name(parent_type)); 8013 break; 8014 case ISCSI_FLASHNODE_NAME: 8015 if (fnode_sess->targetname) 8016 rc = sprintf(buf, "%s\n", fnode_sess->targetname); 8017 else 8018 rc = sprintf(buf, "\n"); 8019 break; 8020 case ISCSI_FLASHNODE_TPGT: 8021 rc = sprintf(buf, "%u\n", fnode_sess->tpgt); 8022 break; 8023 case ISCSI_FLASHNODE_TCP_XMIT_WSF: 8024 rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf); 8025 break; 8026 case ISCSI_FLASHNODE_TCP_RECV_WSF: 8027 rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf); 8028 break; 8029 case ISCSI_FLASHNODE_CHAP_OUT_IDX: 8030 rc = sprintf(buf, "%u\n", fnode_sess->chap_out_idx); 8031 break; 8032 case ISCSI_FLASHNODE_USERNAME: 8033 if (fnode_sess->chap_auth_en) { 8034 qla4xxx_get_uni_chap_at_index(ha, 8035 chap_tbl.name, 8036 chap_tbl.secret, 8037 fnode_sess->chap_out_idx); 8038 rc = sprintf(buf, "%s\n", chap_tbl.name); 8039 } else { 8040 rc = sprintf(buf, "\n"); 8041 } 8042 break; 8043 case ISCSI_FLASHNODE_PASSWORD: 8044 if (fnode_sess->chap_auth_en) { 8045 qla4xxx_get_uni_chap_at_index(ha, 8046 chap_tbl.name, 8047 chap_tbl.secret, 8048 fnode_sess->chap_out_idx); 8049 rc = sprintf(buf, "%s\n", chap_tbl.secret); 8050 } else { 8051 rc = sprintf(buf, "\n"); 8052 } 8053 break; 8054 case ISCSI_FLASHNODE_STATSN: 8055 rc = sprintf(buf, "%u\n", fnode_conn->statsn); 8056 break; 8057 case ISCSI_FLASHNODE_EXP_STATSN: 8058 rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn); 8059 break; 8060 case ISCSI_FLASHNODE_IS_BOOT_TGT: 
8061 rc = sprintf(buf, "%u\n", fnode_sess->is_boot_target); 8062 break; 8063 default: 8064 rc = -ENOSYS; 8065 break; 8066 } 8067 8068 put_device(dev); 8069 return rc; 8070 } 8071 8072 /** 8073 * qla4xxx_sysfs_ddb_set_param - Set parameter for firmware DDB entry 8074 * @fnode_sess: pointer to session attrs of flash ddb entry 8075 * @fnode_conn: pointer to connection attrs of flash ddb entry 8076 * @data: Parameters and their values to update 8077 * @len: len of data 8078 * 8079 * This sets the parameter of flash ddb entry and writes them to flash 8080 **/ 8081 static int 8082 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, 8083 struct iscsi_bus_flash_conn *fnode_conn, 8084 void *data, int len) 8085 { 8086 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 8087 struct scsi_qla_host *ha = to_qla_host(shost); 8088 struct iscsi_flashnode_param_info *fnode_param; 8089 struct ql4_chap_table chap_tbl; 8090 struct nlattr *attr; 8091 uint16_t chap_out_idx = INVALID_ENTRY; 8092 int rc = QLA_ERROR; 8093 uint32_t rem = len; 8094 8095 memset((void *)&chap_tbl, 0, sizeof(chap_tbl)); 8096 nla_for_each_attr(attr, data, len, rem) { 8097 fnode_param = nla_data(attr); 8098 8099 switch (fnode_param->param) { 8100 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: 8101 fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0]; 8102 break; 8103 case ISCSI_FLASHNODE_PORTAL_TYPE: 8104 memcpy(fnode_sess->portal_type, fnode_param->value, 8105 strlen(fnode_sess->portal_type)); 8106 break; 8107 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: 8108 fnode_sess->auto_snd_tgt_disable = 8109 fnode_param->value[0]; 8110 break; 8111 case ISCSI_FLASHNODE_DISCOVERY_SESS: 8112 fnode_sess->discovery_sess = fnode_param->value[0]; 8113 break; 8114 case ISCSI_FLASHNODE_ENTRY_EN: 8115 fnode_sess->entry_state = fnode_param->value[0]; 8116 break; 8117 case ISCSI_FLASHNODE_HDR_DGST_EN: 8118 fnode_conn->hdrdgst_en = fnode_param->value[0]; 8119 break; 8120 case ISCSI_FLASHNODE_DATA_DGST_EN: 8121 fnode_conn->datadgst_en = fnode_param->value[0]; 8122 break; 8123 case ISCSI_FLASHNODE_IMM_DATA_EN: 8124 fnode_sess->imm_data_en = fnode_param->value[0]; 8125 break; 8126 case ISCSI_FLASHNODE_INITIAL_R2T_EN: 8127 fnode_sess->initial_r2t_en = fnode_param->value[0]; 8128 break; 8129 case ISCSI_FLASHNODE_DATASEQ_INORDER: 8130 fnode_sess->dataseq_inorder_en = fnode_param->value[0]; 8131 break; 8132 case ISCSI_FLASHNODE_PDU_INORDER: 8133 fnode_sess->pdu_inorder_en = fnode_param->value[0]; 8134 break; 8135 case ISCSI_FLASHNODE_CHAP_AUTH_EN: 8136 fnode_sess->chap_auth_en = fnode_param->value[0]; 8137 /* Invalidate chap index if chap auth is disabled */ 8138 if (!fnode_sess->chap_auth_en) 8139 fnode_sess->chap_out_idx = INVALID_ENTRY; 8140 8141 break; 8142 case ISCSI_FLASHNODE_SNACK_REQ_EN: 8143 fnode_conn->snack_req_en = fnode_param->value[0]; 8144 break; 8145 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: 8146 fnode_sess->discovery_logout_en = fnode_param->value[0]; 8147 break; 8148 case ISCSI_FLASHNODE_BIDI_CHAP_EN: 8149 fnode_sess->bidi_chap_en = fnode_param->value[0]; 8150 break; 8151 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: 8152 fnode_sess->discovery_auth_optional = 8153 fnode_param->value[0]; 8154 break; 8155 case ISCSI_FLASHNODE_ERL: 8156 fnode_sess->erl = fnode_param->value[0]; 8157 break; 8158 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: 8159 fnode_conn->tcp_timestamp_stat = fnode_param->value[0]; 8160 break; 8161 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: 8162 fnode_conn->tcp_nagle_disable = fnode_param->value[0]; 8163 break; 
8164 case ISCSI_FLASHNODE_TCP_WSF_DISABLE: 8165 fnode_conn->tcp_wsf_disable = fnode_param->value[0]; 8166 break; 8167 case ISCSI_FLASHNODE_TCP_TIMER_SCALE: 8168 fnode_conn->tcp_timer_scale = fnode_param->value[0]; 8169 break; 8170 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: 8171 fnode_conn->tcp_timestamp_en = fnode_param->value[0]; 8172 break; 8173 case ISCSI_FLASHNODE_IP_FRAG_DISABLE: 8174 fnode_conn->fragment_disable = fnode_param->value[0]; 8175 break; 8176 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: 8177 fnode_conn->max_recv_dlength = 8178 *(unsigned *)fnode_param->value; 8179 break; 8180 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: 8181 fnode_conn->max_xmit_dlength = 8182 *(unsigned *)fnode_param->value; 8183 break; 8184 case ISCSI_FLASHNODE_FIRST_BURST: 8185 fnode_sess->first_burst = 8186 *(unsigned *)fnode_param->value; 8187 break; 8188 case ISCSI_FLASHNODE_DEF_TIME2WAIT: 8189 fnode_sess->time2wait = *(uint16_t *)fnode_param->value; 8190 break; 8191 case ISCSI_FLASHNODE_DEF_TIME2RETAIN: 8192 fnode_sess->time2retain = 8193 *(uint16_t *)fnode_param->value; 8194 break; 8195 case ISCSI_FLASHNODE_MAX_R2T: 8196 fnode_sess->max_r2t = 8197 *(uint16_t *)fnode_param->value; 8198 break; 8199 case ISCSI_FLASHNODE_KEEPALIVE_TMO: 8200 fnode_conn->keepalive_timeout = 8201 *(uint16_t *)fnode_param->value; 8202 break; 8203 case ISCSI_FLASHNODE_ISID: 8204 memcpy(fnode_sess->isid, fnode_param->value, 8205 sizeof(fnode_sess->isid)); 8206 break; 8207 case ISCSI_FLASHNODE_TSID: 8208 fnode_sess->tsid = *(uint16_t *)fnode_param->value; 8209 break; 8210 case ISCSI_FLASHNODE_PORT: 8211 fnode_conn->port = *(uint16_t *)fnode_param->value; 8212 break; 8213 case ISCSI_FLASHNODE_MAX_BURST: 8214 fnode_sess->max_burst = *(unsigned *)fnode_param->value; 8215 break; 8216 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: 8217 fnode_sess->default_taskmgmt_timeout = 8218 *(uint16_t *)fnode_param->value; 8219 break; 8220 case ISCSI_FLASHNODE_IPADDR: 8221 memcpy(fnode_conn->ipaddress, fnode_param->value, 8222 IPv6_ADDR_LEN); 8223 break; 8224 case ISCSI_FLASHNODE_ALIAS: 8225 rc = iscsi_switch_str_param(&fnode_sess->targetalias, 8226 (char *)fnode_param->value); 8227 break; 8228 case ISCSI_FLASHNODE_REDIRECT_IPADDR: 8229 memcpy(fnode_conn->redirect_ipaddr, fnode_param->value, 8230 IPv6_ADDR_LEN); 8231 break; 8232 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: 8233 fnode_conn->max_segment_size = 8234 *(unsigned *)fnode_param->value; 8235 break; 8236 case ISCSI_FLASHNODE_LOCAL_PORT: 8237 fnode_conn->local_port = 8238 *(uint16_t *)fnode_param->value; 8239 break; 8240 case ISCSI_FLASHNODE_IPV4_TOS: 8241 fnode_conn->ipv4_tos = fnode_param->value[0]; 8242 break; 8243 case ISCSI_FLASHNODE_IPV6_TC: 8244 fnode_conn->ipv6_traffic_class = fnode_param->value[0]; 8245 break; 8246 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: 8247 fnode_conn->ipv6_flow_label = fnode_param->value[0]; 8248 break; 8249 case ISCSI_FLASHNODE_NAME: 8250 rc = iscsi_switch_str_param(&fnode_sess->targetname, 8251 (char *)fnode_param->value); 8252 break; 8253 case ISCSI_FLASHNODE_TPGT: 8254 fnode_sess->tpgt = *(uint16_t *)fnode_param->value; 8255 break; 8256 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: 8257 memcpy(fnode_conn->link_local_ipv6_addr, 8258 fnode_param->value, IPv6_ADDR_LEN); 8259 break; 8260 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: 8261 fnode_sess->discovery_parent_idx = 8262 *(uint16_t *)fnode_param->value; 8263 break; 8264 case ISCSI_FLASHNODE_TCP_XMIT_WSF: 8265 fnode_conn->tcp_xmit_wsf = 8266 *(uint8_t *)fnode_param->value; 8267 break; 8268 case ISCSI_FLASHNODE_TCP_RECV_WSF: 8269 
fnode_conn->tcp_recv_wsf = 8270 *(uint8_t *)fnode_param->value; 8271 break; 8272 case ISCSI_FLASHNODE_STATSN: 8273 fnode_conn->statsn = *(uint32_t *)fnode_param->value; 8274 break; 8275 case ISCSI_FLASHNODE_EXP_STATSN: 8276 fnode_conn->exp_statsn = 8277 *(uint32_t *)fnode_param->value; 8278 break; 8279 case ISCSI_FLASHNODE_CHAP_OUT_IDX: 8280 chap_out_idx = *(uint16_t *)fnode_param->value; 8281 if (!qla4xxx_get_uni_chap_at_index(ha, 8282 chap_tbl.name, 8283 chap_tbl.secret, 8284 chap_out_idx)) { 8285 fnode_sess->chap_out_idx = chap_out_idx; 8286 /* Enable chap auth if chap index is valid */ 8287 fnode_sess->chap_auth_en = QL4_PARAM_ENABLE; 8288 } 8289 break; 8290 default: 8291 ql4_printk(KERN_ERR, ha, 8292 "%s: No such sysfs attribute\n", __func__); 8293 rc = -ENOSYS; 8294 goto exit_set_param; 8295 } 8296 } 8297 8298 rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn); 8299 8300 exit_set_param: 8301 return rc; 8302 } 8303 8304 /** 8305 * qla4xxx_sysfs_ddb_delete - Delete firmware DDB entry 8306 * @fnode_sess: pointer to session attrs of flash ddb entry 8307 * 8308 * This invalidates the flash ddb entry at the given index 8309 **/ 8310 static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess) 8311 { 8312 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 8313 struct scsi_qla_host *ha = to_qla_host(shost); 8314 uint32_t dev_db_start_offset; 8315 uint32_t dev_db_end_offset; 8316 struct dev_db_entry *fw_ddb_entry = NULL; 8317 dma_addr_t fw_ddb_entry_dma; 8318 uint16_t *ddb_cookie = NULL; 8319 size_t ddb_size = 0; 8320 void *pddb = NULL; 8321 int target_id; 8322 int rc = 0; 8323 8324 if (fnode_sess->is_boot_target) { 8325 rc = -EPERM; 8326 DEBUG2(ql4_printk(KERN_ERR, ha, 8327 "%s: Deletion of boot target entry is not permitted.\n", 8328 __func__)); 8329 goto exit_ddb_del; 8330 } 8331 8332 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) 8333 goto sysfs_ddb_del; 8334 8335 if (is_qla40XX(ha)) { 8336 dev_db_start_offset = FLASH_OFFSET_DB_INFO; 8337 dev_db_end_offset = FLASH_OFFSET_DB_END; 8338 dev_db_start_offset += (fnode_sess->target_id * 8339 sizeof(*fw_ddb_entry)); 8340 ddb_size = sizeof(*fw_ddb_entry); 8341 } else { 8342 dev_db_start_offset = FLASH_RAW_ACCESS_ADDR + 8343 (ha->hw.flt_region_ddb << 2); 8344 /* flt_ddb_size is DDB table size for both ports 8345 * so divide it by 2 to calculate the offset for second port 8346 */ 8347 if (ha->port_num == 1) 8348 dev_db_start_offset += (ha->hw.flt_ddb_size / 2); 8349 8350 dev_db_end_offset = dev_db_start_offset + 8351 (ha->hw.flt_ddb_size / 2); 8352 8353 dev_db_start_offset += (fnode_sess->target_id * 8354 sizeof(*fw_ddb_entry)); 8355 dev_db_start_offset += offsetof(struct dev_db_entry, cookie); 8356 8357 ddb_size = sizeof(*ddb_cookie); 8358 } 8359 8360 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: start offset=%u, end offset=%u\n", 8361 __func__, dev_db_start_offset, dev_db_end_offset)); 8362 8363 if (dev_db_start_offset > dev_db_end_offset) { 8364 rc = -EIO; 8365 DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %u\n", 8366 __func__, fnode_sess->target_id)); 8367 goto exit_ddb_del; 8368 } 8369 8370 pddb = dma_alloc_coherent(&ha->pdev->dev, ddb_size, 8371 &fw_ddb_entry_dma, GFP_KERNEL); 8372 if (!pddb) { 8373 rc = -ENOMEM; 8374 DEBUG2(ql4_printk(KERN_ERR, ha, 8375 "%s: Unable to allocate dma buffer\n", 8376 __func__)); 8377 goto exit_ddb_del; 8378 } 8379 8380 if (is_qla40XX(ha)) { 8381 fw_ddb_entry = pddb; 8382 memset(fw_ddb_entry, 0, ddb_size); 8383 ddb_cookie = &fw_ddb_entry->cookie; 8384 } else { 8385 
ddb_cookie = pddb; 8386 } 8387 8388 /* invalidate the cookie */ 8389 *ddb_cookie = 0xFFEE; 8390 qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, 8391 ddb_size, FLASH_OPT_RMW_COMMIT); 8392 8393 sysfs_ddb_del: 8394 target_id = fnode_sess->target_id; 8395 iscsi_destroy_flashnode_sess(fnode_sess); 8396 ql4_printk(KERN_INFO, ha, 8397 "%s: session and conn entries for flashnode %u of host %lu deleted\n", 8398 __func__, target_id, ha->host_no); 8399 exit_ddb_del: 8400 if (pddb) 8401 dma_free_coherent(&ha->pdev->dev, ddb_size, pddb, 8402 fw_ddb_entry_dma); 8403 return rc; 8404 } 8405 8406 /** 8407 * qla4xxx_sysfs_ddb_export - Create sysfs entries for firmware DDBs 8408 * @ha: pointer to adapter structure 8409 * 8410 * Export the firmware DDB for all send targets and normal targets to sysfs. 8411 **/ 8412 int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha) 8413 { 8414 struct dev_db_entry *fw_ddb_entry = NULL; 8415 dma_addr_t fw_ddb_entry_dma; 8416 uint16_t max_ddbs; 8417 uint16_t idx = 0; 8418 int ret = QLA_SUCCESS; 8419 8420 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, 8421 sizeof(*fw_ddb_entry), 8422 &fw_ddb_entry_dma, GFP_KERNEL); 8423 if (!fw_ddb_entry) { 8424 DEBUG2(ql4_printk(KERN_ERR, ha, 8425 "%s: Unable to allocate dma buffer\n", 8426 __func__)); 8427 return -ENOMEM; 8428 } 8429 8430 max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES : 8431 MAX_DEV_DB_ENTRIES; 8432 8433 for (idx = 0; idx < max_ddbs; idx++) { 8434 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma, 8435 idx)) 8436 continue; 8437 8438 ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 0); 8439 if (ret) { 8440 ret = -EIO; 8441 break; 8442 } 8443 } 8444 8445 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, 8446 fw_ddb_entry_dma); 8447 8448 return ret; 8449 } 8450 8451 static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha) 8452 { 8453 iscsi_destroy_all_flashnode(ha->host); 8454 } 8455 8456 /** 8457 * qla4xxx_build_ddb_list - Build ddb list and setup sessions 8458 * @ha: pointer to adapter structure 8459 * @is_reset: Is this init path or reset path 8460 * 8461 * Create a list of sendtargets (st) from firmware DDBs, issue send targets 8462 * using connection open, then create the list of normal targets (nt) 8463 * from firmware DDBs. Based on the list of nt setup session and connection 8464 * objects. 8465 **/ 8466 void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset) 8467 { 8468 uint16_t tmo = 0; 8469 struct list_head list_st, list_nt; 8470 struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp; 8471 unsigned long wtime; 8472 8473 if (!test_bit(AF_LINK_UP, &ha->flags)) { 8474 set_bit(AF_BUILD_DDB_LIST, &ha->flags); 8475 ha->is_reset = is_reset; 8476 return; 8477 } 8478 8479 INIT_LIST_HEAD(&list_st); 8480 INIT_LIST_HEAD(&list_nt); 8481 8482 qla4xxx_build_st_list(ha, &list_st); 8483 8484 /* Before issuing conn open mbox, ensure all IPs states are configured 8485 * Note, conn open fails if IPs are not configured 8486 */ 8487 qla4xxx_wait_for_ip_configuration(ha); 8488 8489 /* Go thru the STs and fire the sendtargets by issuing conn open mbx */ 8490 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) { 8491 qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx); 8492 } 8493 8494 /* Wait to ensure all sendtargets are done for min 12 sec wait */ 8495 tmo = ((ha->def_timeout > LOGIN_TOV) && 8496 (ha->def_timeout < LOGIN_TOV * 10) ? 
                                ha->def_timeout : LOGIN_TOV);

        DEBUG2(ql4_printk(KERN_INFO, ha,
                          "Default time to wait for build ddb %d\n", tmo));

        wtime = jiffies + (HZ * tmo);
        do {
                if (list_empty(&list_st))
                        break;

                qla4xxx_remove_failed_ddb(ha, &list_st);
                schedule_timeout_uninterruptible(HZ / 10);
        } while (time_after(wtime, jiffies));

        qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset);

        qla4xxx_free_ddb_list(&list_st);
        qla4xxx_free_ddb_list(&list_nt);

        qla4xxx_free_ddb_index(ha);
}

/**
 * qla4xxx_wait_login_resp_boot_tgt - Wait for iSCSI boot target login response
 * @ha: pointer to adapter structure
 *
 * When the boot entry is a normal iSCSI target, the DF_BOOT_TGT flag is set in
 * the DDB and we wait for the login response of the boot targets during probe.
 **/
static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha)
{
        struct ddb_entry *ddb_entry;
        struct dev_db_entry *fw_ddb_entry = NULL;
        dma_addr_t fw_ddb_entry_dma;
        unsigned long wtime;
        uint32_t ddb_state;
        int max_ddbs, idx, ret;

        max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
                                    MAX_DEV_DB_ENTRIES;

        fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
                                          &fw_ddb_entry_dma, GFP_KERNEL);
        if (!fw_ddb_entry) {
                ql4_printk(KERN_ERR, ha,
                           "%s: Unable to allocate dma buffer\n", __func__);
                goto exit_login_resp;
        }

        wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV);

        for (idx = 0; idx < max_ddbs; idx++) {
                ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
                if (ddb_entry == NULL)
                        continue;

                if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
                        DEBUG2(ql4_printk(KERN_INFO, ha,
                                          "%s: DDB index [%d]\n", __func__,
                                          ddb_entry->fw_ddb_index));
                        do {
                                ret = qla4xxx_get_fwddb_entry(ha,
                                                ddb_entry->fw_ddb_index,
                                                fw_ddb_entry, fw_ddb_entry_dma,
                                                NULL, NULL, &ddb_state, NULL,
                                                NULL, NULL);
                                if (ret == QLA_ERROR)
                                        goto exit_login_resp;

                                if ((ddb_state == DDB_DS_SESSION_ACTIVE) ||
                                    (ddb_state == DDB_DS_SESSION_FAILED))
                                        break;

                                schedule_timeout_uninterruptible(HZ);

                        } while ((time_after(wtime, jiffies)));

                        if (!time_after(wtime, jiffies)) {
                                DEBUG2(ql4_printk(KERN_INFO, ha,
                                                  "%s: Login response wait timer expired\n",
                                                  __func__));
                                goto exit_login_resp;
                        }
                }
        }

exit_login_resp:
        if (fw_ddb_entry)
                dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
                                  fw_ddb_entry, fw_ddb_entry_dma);
}

/**
 * qla4xxx_probe_adapter - callback function to probe HBA
 * @pdev: pointer to pci_dev structure
 * @ent: pointer to the matched pci_device_id entry
 *
 * This routine will probe for QLogic 4xxx iSCSI host adapters.
 * It returns zero if successful. It also initializes all data necessary for
 * the driver.
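 * On failure it returns a negative value and releases whatever resources were
 * acquired up to that point.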
8600 **/ 8601 static int qla4xxx_probe_adapter(struct pci_dev *pdev, 8602 const struct pci_device_id *ent) 8603 { 8604 int ret = -ENODEV, status; 8605 struct Scsi_Host *host; 8606 struct scsi_qla_host *ha; 8607 uint8_t init_retry_count = 0; 8608 char buf[34]; 8609 struct qla4_8xxx_legacy_intr_set *nx_legacy_intr; 8610 uint32_t dev_state; 8611 8612 if (pci_enable_device(pdev)) 8613 return -1; 8614 8615 host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0); 8616 if (host == NULL) { 8617 printk(KERN_WARNING 8618 "qla4xxx: Couldn't allocate host from scsi layer!\n"); 8619 goto probe_disable_device; 8620 } 8621 8622 /* Clear our data area */ 8623 ha = to_qla_host(host); 8624 memset(ha, 0, sizeof(*ha)); 8625 8626 /* Save the information from PCI BIOS. */ 8627 ha->pdev = pdev; 8628 ha->host = host; 8629 ha->host_no = host->host_no; 8630 ha->func_num = PCI_FUNC(ha->pdev->devfn); 8631 8632 pci_enable_pcie_error_reporting(pdev); 8633 8634 /* Setup Runtime configurable options */ 8635 if (is_qla8022(ha)) { 8636 ha->isp_ops = &qla4_82xx_isp_ops; 8637 ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl; 8638 ha->qdr_sn_window = -1; 8639 ha->ddr_mn_window = -1; 8640 ha->curr_window = 255; 8641 nx_legacy_intr = &legacy_intr[ha->func_num]; 8642 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit; 8643 ha->nx_legacy_intr.tgt_status_reg = 8644 nx_legacy_intr->tgt_status_reg; 8645 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg; 8646 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; 8647 } else if (is_qla8032(ha) || is_qla8042(ha)) { 8648 ha->isp_ops = &qla4_83xx_isp_ops; 8649 ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl; 8650 } else { 8651 ha->isp_ops = &qla4xxx_isp_ops; 8652 } 8653 8654 if (is_qla80XX(ha)) { 8655 rwlock_init(&ha->hw_lock); 8656 ha->pf_bit = ha->func_num << 16; 8657 /* Set EEH reset type to fundamental if required by hba */ 8658 pdev->needs_freset = 1; 8659 } 8660 8661 /* Configure PCI I/O space. */ 8662 ret = ha->isp_ops->iospace_config(ha); 8663 if (ret) 8664 goto probe_failed_ioconfig; 8665 8666 ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n", 8667 pdev->device, pdev->irq, ha->reg); 8668 8669 qla4xxx_config_dma_addressing(ha); 8670 8671 /* Initialize lists and spinlocks. 
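         * (the locks and lists set up here must be ready before interrupts
         * are requested and the DPC/timer work is started later in this
         * function)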
         */
        INIT_LIST_HEAD(&ha->free_srb_q);

        mutex_init(&ha->mbox_sem);
        mutex_init(&ha->chap_sem);
        init_completion(&ha->mbx_intr_comp);
        init_completion(&ha->disable_acb_comp);
        init_completion(&ha->idc_comp);
        init_completion(&ha->link_up_comp);

        spin_lock_init(&ha->hardware_lock);
        spin_lock_init(&ha->work_lock);

        /* Initialize work list */
        INIT_LIST_HEAD(&ha->work_list);

        /* Allocate dma buffers */
        if (qla4xxx_mem_alloc(ha)) {
                ql4_printk(KERN_WARNING, ha,
                           "[ERROR] Failed to allocate memory for adapter\n");

                ret = -ENOMEM;
                goto probe_failed;
        }

        host->cmd_per_lun = 3;
        host->max_channel = 0;
        host->max_lun = MAX_LUNS - 1;
        host->max_id = MAX_TARGETS;
        host->max_cmd_len = IOCB_MAX_CDB_LEN;
        host->can_queue = MAX_SRBS;
        host->transportt = qla4xxx_scsi_transport;

        ret = scsi_init_shared_tag_map(host, MAX_SRBS);
        if (ret) {
                ql4_printk(KERN_WARNING, ha,
                           "%s: scsi_init_shared_tag_map failed\n", __func__);
                goto probe_failed;
        }

        pci_set_drvdata(pdev, ha);

        ret = scsi_add_host(host, &pdev->dev);
        if (ret)
                goto probe_failed;

        if (is_qla80XX(ha))
                qla4_8xxx_get_flash_info(ha);

        if (is_qla8032(ha) || is_qla8042(ha)) {
                qla4_83xx_read_reset_template(ha);
                /*
                 * NOTE: If ql4xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
                 * If DONTRESET_BIT0 is set, drivers should not set dev_state
                 * to NEED_RESET. But if NEED_RESET is set, drivers should
                 * honor the reset.
                 */
                if (ql4xdontresethba == 1)
                        qla4_83xx_set_idc_dontreset(ha);
        }

        /*
         * Initialize the Host adapter request/response queues and
         * firmware
         * NOTE: interrupts enabled upon successful completion
         */
        status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);

        /* Don't retry adapter initialization if IRQ allocation failed */
        if (is_qla80XX(ha) && (status == QLA_ERROR))
                goto skip_retry_init;

        while ((!test_bit(AF_ONLINE, &ha->flags)) &&
            init_retry_count++ < MAX_INIT_RETRIES) {

                if (is_qla80XX(ha)) {
                        ha->isp_ops->idc_lock(ha);
                        dev_state = qla4_8xxx_rd_direct(ha,
                                                        QLA8XXX_CRB_DEV_STATE);
                        ha->isp_ops->idc_unlock(ha);
                        if (dev_state == QLA8XXX_DEV_FAILED) {
                                ql4_printk(KERN_WARNING, ha, "%s: don't retry "
                                           "initialize adapter. H/W is in failed state\n",
                                           __func__);
                                break;
                        }
                }
                DEBUG2(printk("scsi: %s: retrying adapter initialization "
                              "(%d)\n", __func__, init_retry_count));

                if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
                        continue;

                status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
                if (is_qla80XX(ha) && (status == QLA_ERROR)) {
                        if (qla4_8xxx_check_init_adapter_retry(ha) == QLA_ERROR)
                                goto skip_retry_init;
                }
        }

skip_retry_init:
        if (!test_bit(AF_ONLINE, &ha->flags)) {
                ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");

                if ((is_qla8022(ha) && ql4xdontresethba) ||
                    ((is_qla8032(ha) || is_qla8042(ha)) &&
                     qla4_83xx_idc_dontreset(ha))) {
                        /* Put the device in failed state. */
*/ 8780 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n")); 8781 ha->isp_ops->idc_lock(ha); 8782 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 8783 QLA8XXX_DEV_FAILED); 8784 ha->isp_ops->idc_unlock(ha); 8785 } 8786 ret = -ENODEV; 8787 goto remove_host; 8788 } 8789 8790 /* Startup the kernel thread for this host adapter. */ 8791 DEBUG2(printk("scsi: %s: Starting kernel thread for " 8792 "qla4xxx_dpc\n", __func__)); 8793 sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no); 8794 ha->dpc_thread = create_singlethread_workqueue(buf); 8795 if (!ha->dpc_thread) { 8796 ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n"); 8797 ret = -ENODEV; 8798 goto remove_host; 8799 } 8800 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc); 8801 8802 ha->task_wq = alloc_workqueue("qla4xxx_%lu_task", WQ_MEM_RECLAIM, 1, 8803 ha->host_no); 8804 if (!ha->task_wq) { 8805 ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n"); 8806 ret = -ENODEV; 8807 goto remove_host; 8808 } 8809 8810 /* 8811 * For ISP-8XXX, request_irqs is called in qla4_8xxx_load_risc 8812 * (which is called indirectly by qla4xxx_initialize_adapter), 8813 * so that irqs will be registered after crbinit but before 8814 * mbx_intr_enable. 8815 */ 8816 if (is_qla40XX(ha)) { 8817 ret = qla4xxx_request_irqs(ha); 8818 if (ret) { 8819 ql4_printk(KERN_WARNING, ha, "Failed to reserve " 8820 "interrupt %d already in use.\n", pdev->irq); 8821 goto remove_host; 8822 } 8823 } 8824 8825 pci_save_state(ha->pdev); 8826 ha->isp_ops->enable_intrs(ha); 8827 8828 /* Start timer thread. */ 8829 qla4xxx_start_timer(ha, qla4xxx_timer, 1); 8830 8831 set_bit(AF_INIT_DONE, &ha->flags); 8832 8833 qla4_8xxx_alloc_sysfs_attr(ha); 8834 8835 printk(KERN_INFO 8836 " QLogic iSCSI HBA Driver version: %s\n" 8837 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", 8838 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev), 8839 ha->host_no, ha->fw_info.fw_major, ha->fw_info.fw_minor, 8840 ha->fw_info.fw_patch, ha->fw_info.fw_build); 8841 8842 /* Set the driver version */ 8843 if (is_qla80XX(ha)) 8844 qla4_8xxx_set_param(ha, SET_DRVR_VERSION); 8845 8846 if (qla4xxx_setup_boot_info(ha)) 8847 ql4_printk(KERN_ERR, ha, 8848 "%s: No iSCSI boot target configured\n", __func__); 8849 8850 set_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags); 8851 /* Perform the build ddb list and login to each */ 8852 qla4xxx_build_ddb_list(ha, INIT_ADAPTER); 8853 iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb); 8854 qla4xxx_wait_login_resp_boot_tgt(ha); 8855 8856 qla4xxx_create_chap_list(ha); 8857 8858 qla4xxx_create_ifaces(ha); 8859 return 0; 8860 8861 remove_host: 8862 scsi_remove_host(ha->host); 8863 8864 probe_failed: 8865 qla4xxx_free_adapter(ha); 8866 8867 probe_failed_ioconfig: 8868 pci_disable_pcie_error_reporting(pdev); 8869 scsi_host_put(ha->host); 8870 8871 probe_disable_device: 8872 pci_disable_device(pdev); 8873 8874 return ret; 8875 } 8876 8877 /** 8878 * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize 8879 * @ha: pointer to adapter structure 8880 * 8881 * Mark the other ISP-4xxx port to indicate that the driver is being removed, 8882 * so that the other port will not re-initialize while in the process of 8883 * removing the ha due to driver unload or hba hotplug. 
 **/
static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
{
        struct scsi_qla_host *other_ha = NULL;
        struct pci_dev *other_pdev = NULL;
        int fn = ISP4XXX_PCI_FN_2;

        /* iSCSI function numbers for ISP4xxx are 1 and 3 */
        if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
                fn = ISP4XXX_PCI_FN_1;

        other_pdev =
                pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
                ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
                fn));

        /* Get other_ha if other_pdev is valid and its state is enabled */
        if (other_pdev) {
                if (atomic_read(&other_pdev->enable_cnt)) {
                        other_ha = pci_get_drvdata(other_pdev);
                        if (other_ha) {
                                set_bit(AF_HA_REMOVAL, &other_ha->flags);
                                DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
                                                  "Prevent %s reinit\n", __func__,
                                                  dev_name(&other_ha->pdev->dev)));
                        }
                }
                pci_dev_put(other_pdev);
        }
}

static void qla4xxx_destroy_ddb(struct scsi_qla_host *ha,
                                struct ddb_entry *ddb_entry)
{
        struct dev_db_entry *fw_ddb_entry = NULL;
        dma_addr_t fw_ddb_entry_dma;
        unsigned long wtime;
        uint32_t ddb_state;
        int options;
        int status;

        options = LOGOUT_OPTION_CLOSE_SESSION;
        if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) {
                ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
                goto clear_ddb;
        }

        fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
                                          &fw_ddb_entry_dma, GFP_KERNEL);
        if (!fw_ddb_entry) {
                ql4_printk(KERN_ERR, ha,
                           "%s: Unable to allocate dma buffer\n", __func__);
                goto clear_ddb;
        }

        wtime = jiffies + (HZ * LOGOUT_TOV);
        do {
                status = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
                                                 fw_ddb_entry, fw_ddb_entry_dma,
                                                 NULL, NULL, &ddb_state, NULL,
                                                 NULL, NULL);
                if (status == QLA_ERROR)
                        goto free_ddb;

                if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
                    (ddb_state == DDB_DS_SESSION_FAILED))
                        goto free_ddb;

                schedule_timeout_uninterruptible(HZ);
        } while ((time_after(wtime, jiffies)));

free_ddb:
        dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
                          fw_ddb_entry, fw_ddb_entry_dma);
clear_ddb:
        qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
}

static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
{
        struct ddb_entry *ddb_entry;
        int idx;

        for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {

                ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
                if ((ddb_entry != NULL) &&
                    (ddb_entry->ddb_type == FLASH_DDB)) {

                        qla4xxx_destroy_ddb(ha, ddb_entry);
                        /*
                         * We have decremented the reference count of the
                         * driver when we set up the session, so that the
                         * driver unload is seamless without actually
                         * destroying the session.
                         */
                        try_module_get(qla4xxx_iscsi_transport.owner);
                        iscsi_destroy_endpoint(ddb_entry->conn->ep);
                        qla4xxx_free_ddb(ha, ddb_entry);
                        iscsi_session_teardown(ddb_entry->sess);
                }
        }
}

/**
 * qla4xxx_remove_adapter - callback function to remove adapter.
 * @pdev: PCI device pointer
 **/
static void qla4xxx_remove_adapter(struct pci_dev *pdev)
{
        struct scsi_qla_host *ha;

        /*
         * If the PCI device is disabled then it means probe_adapter had
         * failed and resources were already cleaned up on probe_adapter exit.
8998 */ 8999 if (!pci_is_enabled(pdev)) 9000 return; 9001 9002 ha = pci_get_drvdata(pdev); 9003 9004 if (is_qla40XX(ha)) 9005 qla4xxx_prevent_other_port_reinit(ha); 9006 9007 /* destroy iface from sysfs */ 9008 qla4xxx_destroy_ifaces(ha); 9009 9010 if ((!ql4xdisablesysfsboot) && ha->boot_kset) 9011 iscsi_boot_destroy_kset(ha->boot_kset); 9012 9013 qla4xxx_destroy_fw_ddb_session(ha); 9014 qla4_8xxx_free_sysfs_attr(ha); 9015 9016 qla4xxx_sysfs_ddb_remove(ha); 9017 scsi_remove_host(ha->host); 9018 9019 qla4xxx_free_adapter(ha); 9020 9021 scsi_host_put(ha->host); 9022 9023 pci_disable_pcie_error_reporting(pdev); 9024 pci_disable_device(pdev); 9025 } 9026 9027 /** 9028 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method. 9029 * @ha: HA context 9030 * 9031 * At exit, the @ha's flags.enable_64bit_addressing set to indicated 9032 * supported addressing method. 9033 */ 9034 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha) 9035 { 9036 int retval; 9037 9038 /* Update our PCI device dma_mask for full 64 bit mask */ 9039 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) { 9040 if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) { 9041 dev_dbg(&ha->pdev->dev, 9042 "Failed to set 64 bit PCI consistent mask; " 9043 "using 32 bit.\n"); 9044 retval = pci_set_consistent_dma_mask(ha->pdev, 9045 DMA_BIT_MASK(32)); 9046 } 9047 } else 9048 retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32)); 9049 } 9050 9051 static int qla4xxx_slave_alloc(struct scsi_device *sdev) 9052 { 9053 struct iscsi_cls_session *cls_sess; 9054 struct iscsi_session *sess; 9055 struct ddb_entry *ddb; 9056 int queue_depth = QL4_DEF_QDEPTH; 9057 9058 cls_sess = starget_to_session(sdev->sdev_target); 9059 sess = cls_sess->dd_data; 9060 ddb = sess->dd_data; 9061 9062 sdev->hostdata = ddb; 9063 sdev->tagged_supported = 1; 9064 9065 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU) 9066 queue_depth = ql4xmaxqdepth; 9067 9068 scsi_activate_tcq(sdev, queue_depth); 9069 return 0; 9070 } 9071 9072 static int qla4xxx_slave_configure(struct scsi_device *sdev) 9073 { 9074 sdev->tagged_supported = 1; 9075 return 0; 9076 } 9077 9078 static void qla4xxx_slave_destroy(struct scsi_device *sdev) 9079 { 9080 scsi_deactivate_tcq(sdev, 1); 9081 } 9082 9083 static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth, 9084 int reason) 9085 { 9086 if (!ql4xqfulltracking) 9087 return -EOPNOTSUPP; 9088 9089 return iscsi_change_queue_depth(sdev, qdepth, reason); 9090 } 9091 9092 /** 9093 * qla4xxx_del_from_active_array - returns an active srb 9094 * @ha: Pointer to host adapter structure. 9095 * @index: index into the active_array 9096 * 9097 * This routine removes and returns the srb at the specified index 9098 **/ 9099 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha, 9100 uint32_t index) 9101 { 9102 struct srb *srb = NULL; 9103 struct scsi_cmnd *cmd = NULL; 9104 9105 cmd = scsi_host_find_tag(ha->host, index); 9106 if (!cmd) 9107 return srb; 9108 9109 srb = (struct srb *)CMD_SP(cmd); 9110 if (!srb) 9111 return srb; 9112 9113 /* update counters */ 9114 if (srb->flags & SRB_DMA_VALID) { 9115 ha->iocb_cnt -= srb->iocb_cnt; 9116 if (srb->cmd) 9117 srb->cmd->host_scribble = 9118 (unsigned char *)(unsigned long) MAX_SRBS; 9119 } 9120 return srb; 9121 } 9122 9123 /** 9124 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware 9125 * @ha: Pointer to host adapter structure. 9126 * @cmd: Scsi Command to wait on. 
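 *
 * Completion is detected by polling CMD_SP(cmd); each pass of the loop
 * below sleeps two seconds for up to EH_WAIT_CMD_TOV iterations, so the
 * effective timeout is roughly 2 * EH_WAIT_CMD_TOV seconds.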
 *
 * This routine waits for the command to be returned by the firmware
 * for up to a maximum wait time.
 **/
static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
                                      struct scsi_cmnd *cmd)
{
        int done = 0;
        struct srb *rp;
        uint32_t max_wait_time = EH_WAIT_CMD_TOV;
        int ret = SUCCESS;

        /* Don't wait on the command if a PCI error is being handled
         * by the PCI AER driver.
         */
        if (unlikely(pci_channel_offline(ha->pdev)) ||
            (test_bit(AF_EEH_BUSY, &ha->flags))) {
                ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
                           ha->host_no, __func__);
                return ret;
        }

        do {
                /* Check whether it has been returned to the OS. */
                rp = (struct srb *) CMD_SP(cmd);
                if (rp == NULL) {
                        done++;
                        break;
                }

                msleep(2000);
        } while (max_wait_time--);

        return done;
}

/**
 * qla4xxx_wait_for_hba_online - waits for HBA to come online
 * @ha: Pointer to host adapter structure
 **/
static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
{
        unsigned long wait_online;

        wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
        while (time_before(jiffies, wait_online)) {
                if (adapter_up(ha))
                        return QLA_SUCCESS;

                msleep(2000);
        }

        return QLA_ERROR;
}

/**
 * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
 * @ha: pointer to HBA
 * @stgt: pointer to the SCSI target to wait on
 * @sdev: pointer to the SCSI device to wait on (NULL means all devices of @stgt)
 *
 * This function waits for all outstanding commands to the designated target
 * or device to complete. It returns 0 if all pending commands are returned
 * and 1 otherwise.
 **/
static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
                                        struct scsi_target *stgt,
                                        struct scsi_device *sdev)
{
        int cnt;
        int status = 0;
        struct scsi_cmnd *cmd;

        /*
         * Waiting for all commands for the designated target or dev
         * in the active array
         */
        for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
                cmd = scsi_host_find_tag(ha->host, cnt);
                if (cmd && stgt == scsi_target(cmd->device) &&
                    (!sdev || sdev == cmd->device)) {
                        if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
                                status++;
                                break;
                        }
                }
        }
        return status;
}

/**
 * qla4xxx_eh_abort - callback for abort task.
 * @cmd: Pointer to Linux's SCSI command structure
 *
 * This routine is called by the Linux OS to abort the specified
 * command.
 **/
static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
{
        struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
        unsigned int id = cmd->device->id;
        uint64_t lun = cmd->device->lun;
        unsigned long flags;
        struct srb *srb = NULL;
        int ret = SUCCESS;
        int wait = 0;

        ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n",
                   ha->host_no, id, lun, cmd, cmd->cmnd[0]);

        spin_lock_irqsave(&ha->hardware_lock, flags);
        srb = (struct srb *) CMD_SP(cmd);
        if (!srb) {
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
                ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Specified command has already completed.\n",
                           ha->host_no, id, lun);
                return SUCCESS;
        }
        kref_get(&srb->srb_ref);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
                DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx failed.\n",
                              ha->host_no, id, lun));
                ret = FAILED;
        } else {
                DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx success.\n",
                              ha->host_no, id, lun));
                wait = 1;
        }

        kref_put(&srb->srb_ref, qla4xxx_srb_compl);

        /* Wait for the command to complete. */
        if (wait) {
                if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
                        DEBUG2(printk("scsi%ld:%d:%llu: Abort handler timed out\n",
                                      ha->host_no, id, lun));
                        ret = FAILED;
                }
        }

        ql4_printk(KERN_INFO, ha,
                   "scsi%ld:%d:%llu: Abort command - %s\n",
                   ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");

        return ret;
}

/**
 * qla4xxx_eh_device_reset - callback for device (LUN) reset.
 * @cmd: Pointer to Linux's SCSI command structure
 *
 * This routine is called by the Linux OS to reset the lun associated with
 * the specified device.
 **/
static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
        struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
        struct ddb_entry *ddb_entry = cmd->device->hostdata;
        int ret = FAILED, stat;

        if (!ddb_entry)
                return ret;

        ret = iscsi_block_scsi_eh(cmd);
        if (ret)
                return ret;
        ret = FAILED;

        ql4_printk(KERN_INFO, ha,
                   "scsi%ld:%d:%d:%llu: DEVICE RESET ISSUED.\n", ha->host_no,
                   cmd->device->channel, cmd->device->id, cmd->device->lun);

        DEBUG2(printk(KERN_INFO
                      "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
                      "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
                      cmd, jiffies, cmd->request->timeout / HZ,
                      ha->dpc_flags, cmd->result, cmd->allowed));

        /* FIXME: wait for hba to go online */
        stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
        if (stat != QLA_SUCCESS) {
                ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
                goto eh_dev_reset_done;
        }

        if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
                                         cmd->device)) {
                ql4_printk(KERN_INFO, ha,
                           "DEVICE RESET FAILED - waiting for "
                           "commands.\n");
                goto eh_dev_reset_done;
        }

        /* Send marker.
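         * A LUN-reset marker IOCB (modifier MM_LUN_RESET) is queued so the
         * firmware can resynchronize its command stream for this lun before
         * new I/O is issued (exact marker semantics are defined by the
         * firmware interface and only summarized here); the reset is
         * reported as successful only if the marker is accepted.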
*/ 9323 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun, 9324 MM_LUN_RESET) != QLA_SUCCESS) 9325 goto eh_dev_reset_done; 9326 9327 ql4_printk(KERN_INFO, ha, 9328 "scsi(%ld:%d:%d:%llu): DEVICE RESET SUCCEEDED.\n", 9329 ha->host_no, cmd->device->channel, cmd->device->id, 9330 cmd->device->lun); 9331 9332 ret = SUCCESS; 9333 9334 eh_dev_reset_done: 9335 9336 return ret; 9337 } 9338 9339 /** 9340 * qla4xxx_eh_target_reset - callback for target reset. 9341 * @cmd: Pointer to Linux's SCSI command structure 9342 * 9343 * This routine is called by the Linux OS to reset the target. 9344 **/ 9345 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd) 9346 { 9347 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 9348 struct ddb_entry *ddb_entry = cmd->device->hostdata; 9349 int stat, ret; 9350 9351 if (!ddb_entry) 9352 return FAILED; 9353 9354 ret = iscsi_block_scsi_eh(cmd); 9355 if (ret) 9356 return ret; 9357 9358 starget_printk(KERN_INFO, scsi_target(cmd->device), 9359 "WARM TARGET RESET ISSUED.\n"); 9360 9361 DEBUG2(printk(KERN_INFO 9362 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, " 9363 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n", 9364 ha->host_no, cmd, jiffies, cmd->request->timeout / HZ, 9365 ha->dpc_flags, cmd->result, cmd->allowed)); 9366 9367 stat = qla4xxx_reset_target(ha, ddb_entry); 9368 if (stat != QLA_SUCCESS) { 9369 starget_printk(KERN_INFO, scsi_target(cmd->device), 9370 "WARM TARGET RESET FAILED.\n"); 9371 return FAILED; 9372 } 9373 9374 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device), 9375 NULL)) { 9376 starget_printk(KERN_INFO, scsi_target(cmd->device), 9377 "WARM TARGET DEVICE RESET FAILED - " 9378 "waiting for commands.\n"); 9379 return FAILED; 9380 } 9381 9382 /* Send marker. */ 9383 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun, 9384 MM_TGT_WARM_RESET) != QLA_SUCCESS) { 9385 starget_printk(KERN_INFO, scsi_target(cmd->device), 9386 "WARM TARGET DEVICE RESET FAILED - " 9387 "marker iocb failed.\n"); 9388 return FAILED; 9389 } 9390 9391 starget_printk(KERN_INFO, scsi_target(cmd->device), 9392 "WARM TARGET RESET SUCCEEDED.\n"); 9393 return SUCCESS; 9394 } 9395 9396 /** 9397 * qla4xxx_is_eh_active - check if error handler is running 9398 * @shost: Pointer to SCSI Host struct 9399 * 9400 * This routine finds that if reset host is called in EH 9401 * scenario or from some application like sg_reset 9402 **/ 9403 static int qla4xxx_is_eh_active(struct Scsi_Host *shost) 9404 { 9405 if (shost->shost_state == SHOST_RECOVERY) 9406 return 1; 9407 return 0; 9408 } 9409 9410 /** 9411 * qla4xxx_eh_host_reset - kernel callback 9412 * @cmd: Pointer to Linux's SCSI command structure 9413 * 9414 * This routine is invoked by the Linux kernel to perform fatal error 9415 * recovery on the specified adapter. 
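 *
 * When ql4xdontresethba is set, or when another protocol driver owns the
 * IDC dont-reset bit on ISP8324/ISP8042, the handler below declines to
 * reset the HBA and, if invoked from the EH thread, aborts the
 * outstanding commands instead.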
9416 **/ 9417 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd) 9418 { 9419 int return_status = FAILED; 9420 struct scsi_qla_host *ha; 9421 9422 ha = to_qla_host(cmd->device->host); 9423 9424 if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba) 9425 qla4_83xx_set_idc_dontreset(ha); 9426 9427 /* 9428 * For ISP8324 and ISP8042, if IDC_CTRL DONTRESET_BIT0 is set by other 9429 * protocol drivers, we should not set device_state to NEED_RESET 9430 */ 9431 if (ql4xdontresethba || 9432 ((is_qla8032(ha) || is_qla8042(ha)) && 9433 qla4_83xx_idc_dontreset(ha))) { 9434 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", 9435 ha->host_no, __func__)); 9436 9437 /* Clear outstanding srb in queues */ 9438 if (qla4xxx_is_eh_active(cmd->device->host)) 9439 qla4xxx_abort_active_cmds(ha, DID_ABORT << 16); 9440 9441 return FAILED; 9442 } 9443 9444 ql4_printk(KERN_INFO, ha, 9445 "scsi(%ld:%d:%d:%llu): HOST RESET ISSUED.\n", ha->host_no, 9446 cmd->device->channel, cmd->device->id, cmd->device->lun); 9447 9448 if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) { 9449 DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter " 9450 "DEAD.\n", ha->host_no, cmd->device->channel, 9451 __func__)); 9452 9453 return FAILED; 9454 } 9455 9456 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 9457 if (is_qla80XX(ha)) 9458 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 9459 else 9460 set_bit(DPC_RESET_HA, &ha->dpc_flags); 9461 } 9462 9463 if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS) 9464 return_status = SUCCESS; 9465 9466 ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n", 9467 return_status == FAILED ? "FAILED" : "SUCCEEDED"); 9468 9469 return return_status; 9470 } 9471 9472 static int qla4xxx_context_reset(struct scsi_qla_host *ha) 9473 { 9474 uint32_t mbox_cmd[MBOX_REG_COUNT]; 9475 uint32_t mbox_sts[MBOX_REG_COUNT]; 9476 struct addr_ctrl_blk_def *acb = NULL; 9477 uint32_t acb_len = sizeof(struct addr_ctrl_blk_def); 9478 int rval = QLA_SUCCESS; 9479 dma_addr_t acb_dma; 9480 9481 acb = dma_alloc_coherent(&ha->pdev->dev, 9482 sizeof(struct addr_ctrl_blk_def), 9483 &acb_dma, GFP_KERNEL); 9484 if (!acb) { 9485 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", 9486 __func__); 9487 rval = -ENOMEM; 9488 goto exit_port_reset; 9489 } 9490 9491 memset(acb, 0, acb_len); 9492 9493 rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len); 9494 if (rval != QLA_SUCCESS) { 9495 rval = -EIO; 9496 goto exit_free_acb; 9497 } 9498 9499 rval = qla4xxx_disable_acb(ha); 9500 if (rval != QLA_SUCCESS) { 9501 rval = -EIO; 9502 goto exit_free_acb; 9503 } 9504 9505 wait_for_completion_timeout(&ha->disable_acb_comp, 9506 DISABLE_ACB_TOV * HZ); 9507 9508 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma); 9509 if (rval != QLA_SUCCESS) { 9510 rval = -EIO; 9511 goto exit_free_acb; 9512 } 9513 9514 exit_free_acb: 9515 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def), 9516 acb, acb_dma); 9517 exit_port_reset: 9518 DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__, 9519 rval == QLA_SUCCESS ? 
"SUCCEEDED" : "FAILED")); 9520 return rval; 9521 } 9522 9523 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type) 9524 { 9525 struct scsi_qla_host *ha = to_qla_host(shost); 9526 int rval = QLA_SUCCESS; 9527 uint32_t idc_ctrl; 9528 9529 if (ql4xdontresethba) { 9530 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n", 9531 __func__)); 9532 rval = -EPERM; 9533 goto exit_host_reset; 9534 } 9535 9536 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) 9537 goto recover_adapter; 9538 9539 switch (reset_type) { 9540 case SCSI_ADAPTER_RESET: 9541 set_bit(DPC_RESET_HA, &ha->dpc_flags); 9542 break; 9543 case SCSI_FIRMWARE_RESET: 9544 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 9545 if (is_qla80XX(ha)) 9546 /* set firmware context reset */ 9547 set_bit(DPC_RESET_HA_FW_CONTEXT, 9548 &ha->dpc_flags); 9549 else { 9550 rval = qla4xxx_context_reset(ha); 9551 goto exit_host_reset; 9552 } 9553 } 9554 break; 9555 } 9556 9557 recover_adapter: 9558 /* For ISP8324 and ISP8042 set graceful reset bit in IDC_DRV_CTRL if 9559 * reset is issued by application */ 9560 if ((is_qla8032(ha) || is_qla8042(ha)) && 9561 test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 9562 idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); 9563 qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, 9564 (idc_ctrl | GRACEFUL_RESET_BIT1)); 9565 } 9566 9567 rval = qla4xxx_recover_adapter(ha); 9568 if (rval != QLA_SUCCESS) { 9569 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n", 9570 __func__)); 9571 rval = -EIO; 9572 } 9573 9574 exit_host_reset: 9575 return rval; 9576 } 9577 9578 /* PCI AER driver recovers from all correctable errors w/o 9579 * driver intervention. For uncorrectable errors PCI AER 9580 * driver calls the following device driver's callbacks 9581 * 9582 * - Fatal Errors - link_reset 9583 * - Non-Fatal Errors - driver's pci_error_detected() which 9584 * returns CAN_RECOVER, NEED_RESET or DISCONNECT. 9585 * 9586 * PCI AER driver calls 9587 * CAN_RECOVER - driver's pci_mmio_enabled(), mmio_enabled 9588 * returns RECOVERED or NEED_RESET if fw_hung 9589 * NEED_RESET - driver's slot_reset() 9590 * DISCONNECT - device is dead & cannot recover 9591 * RECOVERED - driver's pci_resume() 9592 */ 9593 static pci_ers_result_t 9594 qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 9595 { 9596 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9597 9598 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n", 9599 ha->host_no, __func__, state); 9600 9601 if (!is_aer_supported(ha)) 9602 return PCI_ERS_RESULT_NONE; 9603 9604 switch (state) { 9605 case pci_channel_io_normal: 9606 clear_bit(AF_EEH_BUSY, &ha->flags); 9607 return PCI_ERS_RESULT_CAN_RECOVER; 9608 case pci_channel_io_frozen: 9609 set_bit(AF_EEH_BUSY, &ha->flags); 9610 qla4xxx_mailbox_premature_completion(ha); 9611 qla4xxx_free_irqs(ha); 9612 pci_disable_device(pdev); 9613 /* Return back all IOs */ 9614 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 9615 return PCI_ERS_RESULT_NEED_RESET; 9616 case pci_channel_io_perm_failure: 9617 set_bit(AF_EEH_BUSY, &ha->flags); 9618 set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags); 9619 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); 9620 return PCI_ERS_RESULT_DISCONNECT; 9621 } 9622 return PCI_ERS_RESULT_NEED_RESET; 9623 } 9624 9625 /** 9626 * qla4xxx_pci_mmio_enabled() gets called if 9627 * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER 9628 * and read/write to the device still works. 
9629 **/ 9630 static pci_ers_result_t 9631 qla4xxx_pci_mmio_enabled(struct pci_dev *pdev) 9632 { 9633 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9634 9635 if (!is_aer_supported(ha)) 9636 return PCI_ERS_RESULT_NONE; 9637 9638 return PCI_ERS_RESULT_RECOVERED; 9639 } 9640 9641 static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha) 9642 { 9643 uint32_t rval = QLA_ERROR; 9644 int fn; 9645 struct pci_dev *other_pdev = NULL; 9646 9647 ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__); 9648 9649 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 9650 9651 if (test_bit(AF_ONLINE, &ha->flags)) { 9652 clear_bit(AF_ONLINE, &ha->flags); 9653 clear_bit(AF_LINK_UP, &ha->flags); 9654 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session); 9655 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 9656 } 9657 9658 fn = PCI_FUNC(ha->pdev->devfn); 9659 if (is_qla8022(ha)) { 9660 while (fn > 0) { 9661 fn--; 9662 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at func %x\n", 9663 ha->host_no, __func__, fn); 9664 /* Get the pci device given the domain, bus, 9665 * slot/function number */ 9666 other_pdev = pci_get_domain_bus_and_slot( 9667 pci_domain_nr(ha->pdev->bus), 9668 ha->pdev->bus->number, 9669 PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), 9670 fn)); 9671 9672 if (!other_pdev) 9673 continue; 9674 9675 if (atomic_read(&other_pdev->enable_cnt)) { 9676 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI func in enabled state%x\n", 9677 ha->host_no, __func__, fn); 9678 pci_dev_put(other_pdev); 9679 break; 9680 } 9681 pci_dev_put(other_pdev); 9682 } 9683 } else { 9684 /* this case is meant for ISP83xx/ISP84xx only */ 9685 if (qla4_83xx_can_perform_reset(ha)) { 9686 /* reset fn as iSCSI is going to perform the reset */ 9687 fn = 0; 9688 } 9689 } 9690 9691 /* The first function on the card, the reset owner will 9692 * start & initialize the firmware. 
The other functions 9693 * on the card will reset the firmware context 9694 */ 9695 if (!fn) { 9696 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset " 9697 "0x%x is the owner\n", ha->host_no, __func__, 9698 ha->pdev->devfn); 9699 9700 ha->isp_ops->idc_lock(ha); 9701 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9702 QLA8XXX_DEV_COLD); 9703 ha->isp_ops->idc_unlock(ha); 9704 9705 rval = qla4_8xxx_update_idc_reg(ha); 9706 if (rval == QLA_ERROR) { 9707 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n", 9708 ha->host_no, __func__); 9709 ha->isp_ops->idc_lock(ha); 9710 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9711 QLA8XXX_DEV_FAILED); 9712 ha->isp_ops->idc_unlock(ha); 9713 goto exit_error_recovery; 9714 } 9715 9716 clear_bit(AF_FW_RECOVERY, &ha->flags); 9717 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); 9718 9719 if (rval != QLA_SUCCESS) { 9720 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: " 9721 "FAILED\n", ha->host_no, __func__); 9722 qla4xxx_free_irqs(ha); 9723 ha->isp_ops->idc_lock(ha); 9724 qla4_8xxx_clear_drv_active(ha); 9725 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9726 QLA8XXX_DEV_FAILED); 9727 ha->isp_ops->idc_unlock(ha); 9728 } else { 9729 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: " 9730 "READY\n", ha->host_no, __func__); 9731 ha->isp_ops->idc_lock(ha); 9732 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9733 QLA8XXX_DEV_READY); 9734 /* Clear driver state register */ 9735 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0); 9736 qla4_8xxx_set_drv_active(ha); 9737 ha->isp_ops->idc_unlock(ha); 9738 ha->isp_ops->enable_intrs(ha); 9739 } 9740 } else { 9741 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not " 9742 "the reset owner\n", ha->host_no, __func__, 9743 ha->pdev->devfn); 9744 if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) == 9745 QLA8XXX_DEV_READY)) { 9746 clear_bit(AF_FW_RECOVERY, &ha->flags); 9747 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); 9748 if (rval == QLA_SUCCESS) 9749 ha->isp_ops->enable_intrs(ha); 9750 else 9751 qla4xxx_free_irqs(ha); 9752 9753 ha->isp_ops->idc_lock(ha); 9754 qla4_8xxx_set_drv_active(ha); 9755 ha->isp_ops->idc_unlock(ha); 9756 } 9757 } 9758 exit_error_recovery: 9759 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 9760 return rval; 9761 } 9762 9763 static pci_ers_result_t 9764 qla4xxx_pci_slot_reset(struct pci_dev *pdev) 9765 { 9766 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; 9767 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9768 int rc; 9769 9770 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n", 9771 ha->host_no, __func__); 9772 9773 if (!is_aer_supported(ha)) 9774 return PCI_ERS_RESULT_NONE; 9775 9776 /* Restore the saved state of PCIe device - 9777 * BAR registers, PCI Config space, PCIX, MSI, 9778 * IOV states 9779 */ 9780 pci_restore_state(pdev); 9781 9782 /* pci_restore_state() clears the saved_state flag of the device 9783 * save restored state which resets saved_state flag 9784 */ 9785 pci_save_state(pdev); 9786 9787 /* Initialize device or resume if in suspended state */ 9788 rc = pci_enable_device(pdev); 9789 if (rc) { 9790 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable " 9791 "device after reset\n", ha->host_no, __func__); 9792 goto exit_slot_reset; 9793 } 9794 9795 ha->isp_ops->disable_intrs(ha); 9796 9797 if (is_qla80XX(ha)) { 9798 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) { 9799 ret = PCI_ERS_RESULT_RECOVERED; 9800 goto exit_slot_reset; 9801 } else 9802 goto exit_slot_reset; 9803 } 9804 9805 exit_slot_reset: 9806 ql4_printk(KERN_WARNING, 
ha, "scsi%ld: %s: Return=%x\n" 9807 "device after reset\n", ha->host_no, __func__, ret); 9808 return ret; 9809 } 9810 9811 static void 9812 qla4xxx_pci_resume(struct pci_dev *pdev) 9813 { 9814 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9815 int ret; 9816 9817 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n", 9818 ha->host_no, __func__); 9819 9820 ret = qla4xxx_wait_for_hba_online(ha); 9821 if (ret != QLA_SUCCESS) { 9822 ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to " 9823 "resume I/O from slot/link_reset\n", ha->host_no, 9824 __func__); 9825 } 9826 9827 pci_cleanup_aer_uncorrect_error_status(pdev); 9828 clear_bit(AF_EEH_BUSY, &ha->flags); 9829 } 9830 9831 static const struct pci_error_handlers qla4xxx_err_handler = { 9832 .error_detected = qla4xxx_pci_error_detected, 9833 .mmio_enabled = qla4xxx_pci_mmio_enabled, 9834 .slot_reset = qla4xxx_pci_slot_reset, 9835 .resume = qla4xxx_pci_resume, 9836 }; 9837 9838 static struct pci_device_id qla4xxx_pci_tbl[] = { 9839 { 9840 .vendor = PCI_VENDOR_ID_QLOGIC, 9841 .device = PCI_DEVICE_ID_QLOGIC_ISP4010, 9842 .subvendor = PCI_ANY_ID, 9843 .subdevice = PCI_ANY_ID, 9844 }, 9845 { 9846 .vendor = PCI_VENDOR_ID_QLOGIC, 9847 .device = PCI_DEVICE_ID_QLOGIC_ISP4022, 9848 .subvendor = PCI_ANY_ID, 9849 .subdevice = PCI_ANY_ID, 9850 }, 9851 { 9852 .vendor = PCI_VENDOR_ID_QLOGIC, 9853 .device = PCI_DEVICE_ID_QLOGIC_ISP4032, 9854 .subvendor = PCI_ANY_ID, 9855 .subdevice = PCI_ANY_ID, 9856 }, 9857 { 9858 .vendor = PCI_VENDOR_ID_QLOGIC, 9859 .device = PCI_DEVICE_ID_QLOGIC_ISP8022, 9860 .subvendor = PCI_ANY_ID, 9861 .subdevice = PCI_ANY_ID, 9862 }, 9863 { 9864 .vendor = PCI_VENDOR_ID_QLOGIC, 9865 .device = PCI_DEVICE_ID_QLOGIC_ISP8324, 9866 .subvendor = PCI_ANY_ID, 9867 .subdevice = PCI_ANY_ID, 9868 }, 9869 { 9870 .vendor = PCI_VENDOR_ID_QLOGIC, 9871 .device = PCI_DEVICE_ID_QLOGIC_ISP8042, 9872 .subvendor = PCI_ANY_ID, 9873 .subdevice = PCI_ANY_ID, 9874 }, 9875 {0, 0}, 9876 }; 9877 MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl); 9878 9879 static struct pci_driver qla4xxx_pci_driver = { 9880 .name = DRIVER_NAME, 9881 .id_table = qla4xxx_pci_tbl, 9882 .probe = qla4xxx_probe_adapter, 9883 .remove = qla4xxx_remove_adapter, 9884 .err_handler = &qla4xxx_err_handler, 9885 }; 9886 9887 static int __init qla4xxx_module_init(void) 9888 { 9889 int ret; 9890 9891 /* Allocate cache for SRBs. */ 9892 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0, 9893 SLAB_HWCACHE_ALIGN, NULL); 9894 if (srb_cachep == NULL) { 9895 printk(KERN_ERR 9896 "%s: Unable to allocate SRB cache..." 9897 "Failing load!\n", DRIVER_NAME); 9898 ret = -ENOMEM; 9899 goto no_srp_cache; 9900 } 9901 9902 /* Derive version string. 
*/ 9903 strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION); 9904 if (ql4xextended_error_logging) 9905 strcat(qla4xxx_version_str, "-debug"); 9906 9907 qla4xxx_scsi_transport = 9908 iscsi_register_transport(&qla4xxx_iscsi_transport); 9909 if (!qla4xxx_scsi_transport){ 9910 ret = -ENODEV; 9911 goto release_srb_cache; 9912 } 9913 9914 ret = pci_register_driver(&qla4xxx_pci_driver); 9915 if (ret) 9916 goto unregister_transport; 9917 9918 printk(KERN_INFO "QLogic iSCSI HBA Driver\n"); 9919 return 0; 9920 9921 unregister_transport: 9922 iscsi_unregister_transport(&qla4xxx_iscsi_transport); 9923 release_srb_cache: 9924 kmem_cache_destroy(srb_cachep); 9925 no_srp_cache: 9926 return ret; 9927 } 9928 9929 static void __exit qla4xxx_module_exit(void) 9930 { 9931 pci_unregister_driver(&qla4xxx_pci_driver); 9932 iscsi_unregister_transport(&qla4xxx_iscsi_transport); 9933 kmem_cache_destroy(srb_cachep); 9934 } 9935 9936 module_init(qla4xxx_module_init); 9937 module_exit(qla4xxx_module_exit); 9938 9939 MODULE_AUTHOR("QLogic Corporation"); 9940 MODULE_DESCRIPTION("QLogic iSCSI HBA Driver"); 9941 MODULE_LICENSE("GPL"); 9942 MODULE_VERSION(QLA4XXX_DRIVER_VERSION); 9943
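
/*
 * Example usage (illustrative only; the parameter names below are the ones
 * consulted by the init and error-handling paths above):
 *
 *      # load with extended error logging enabled and HBA reset disabled
 *      modprobe qla4xxx ql4xextended_error_logging=2 ql4xdontresethba=1
 *
 *      # cap the queue depth reported for target devices
 *      modprobe qla4xxx ql4xmaxqdepth=64
 *
 * Writable parameters can also be changed at runtime via
 * /sys/module/qla4xxx/parameters/.
 */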