/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/iscsi_boot_sysfs.h>
#include <linux/inet.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>

#include "ql4_def.h"
#include "ql4_version.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
#include "ql4_83xx.h"

/*
 * Driver version
 */
static char qla4xxx_version_str[40];

/*
 * SRB allocation cache
 */
static struct kmem_cache *srb_cachep;

/*
 * Module parameter information and variables
 */
static int ql4xdisablesysfsboot = 1;
module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdisablesysfsboot,
		 " Set to disable exporting boot targets to sysfs.\n"
		 "\t\t 0 - Export boot targets\n"
		 "\t\t 1 - Do not export boot targets (Default)");

int ql4xdontresethba;
module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdontresethba,
		 " Don't reset the HBA for driver recovery.\n"
		 "\t\t 0 - It will reset HBA (Default)\n"
		 "\t\t 1 - It will NOT reset HBA");

int ql4xextended_error_logging;
module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xextended_error_logging,
		 " Option to enable extended error logging.\n"
		 "\t\t 0 - no logging (Default)\n"
		 "\t\t 2 - debug logging");

int ql4xenablemsix = 1;
module_param(ql4xenablemsix, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xenablemsix,
		 " Set to enable MSI or MSI-X interrupt mechanism.\n"
		 "\t\t 0 = enable INTx interrupt mechanism.\n"
		 "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n"
		 "\t\t 2 = enable MSI interrupt mechanism.");

#define QL4_DEF_QDEPTH 32
static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xmaxqdepth,
		 " Maximum queue depth to report for target devices.\n"
		 "\t\t Default: 32.");

static int ql4xqfulltracking = 1;
module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xqfulltracking,
		 " Enable or disable dynamic tracking and adjustment of\n"
		 "\t\t scsi device queue depth.\n"
		 "\t\t 0 - Disable.\n"
		 "\t\t 1 - Enable. (Default)");

static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
MODULE_PARM_DESC(ql4xsess_recovery_tmo,
		 " Target Session Recovery Timeout.\n"
		 "\t\t Default: 120 sec.");

int ql4xmdcapmask = 0x1F;
module_param(ql4xmdcapmask, int, S_IRUGO);
MODULE_PARM_DESC(ql4xmdcapmask,
		 " Set the Minidump driver capture mask level.\n"
		 "\t\t Default is 0x1F.\n"
		 "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F");

int ql4xenablemd = 1;
module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xenablemd,
		 " Set to enable minidump.\n"
		 "\t\t 0 - disable minidump\n"
		 "\t\t 1 - enable minidump (Default)");
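/*
 * Note (editorial, assumed typical usage rather than taken from this file):
 * parameters declared writable above (S_IRUGO | S_IWUSR) can also be
 * changed at runtime through /sys/module/qla4xxx/parameters/<name>, e.g.
 *
 *	modprobe qla4xxx ql4xextended_error_logging=2
 *	echo 1 > /sys/module/qla4xxx/parameters/ql4xdontresethba
 */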
(Default)"); 79 80 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO; 81 module_param(ql4xsess_recovery_tmo, int, S_IRUGO); 82 MODULE_PARM_DESC(ql4xsess_recovery_tmo, 83 " Target Session Recovery Timeout.\n" 84 "\t\t Default: 120 sec."); 85 86 int ql4xmdcapmask = 0x1F; 87 module_param(ql4xmdcapmask, int, S_IRUGO); 88 MODULE_PARM_DESC(ql4xmdcapmask, 89 " Set the Minidump driver capture mask level.\n" 90 "\t\t Default is 0x1F.\n" 91 "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F"); 92 93 int ql4xenablemd = 1; 94 module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR); 95 MODULE_PARM_DESC(ql4xenablemd, 96 " Set to enable minidump.\n" 97 "\t\t 0 - disable minidump\n" 98 "\t\t 1 - enable minidump (Default)"); 99 100 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha); 101 /* 102 * SCSI host template entry points 103 */ 104 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha); 105 106 /* 107 * iSCSI template entry points 108 */ 109 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess, 110 enum iscsi_param param, char *buf); 111 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn, 112 enum iscsi_param param, char *buf); 113 static int qla4xxx_host_get_param(struct Scsi_Host *shost, 114 enum iscsi_host_param param, char *buf); 115 static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, 116 uint32_t len); 117 static int qla4xxx_get_iface_param(struct iscsi_iface *iface, 118 enum iscsi_param_type param_type, 119 int param, char *buf); 120 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc); 121 static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost, 122 struct sockaddr *dst_addr, 123 int non_blocking); 124 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms); 125 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep); 126 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep, 127 enum iscsi_param param, char *buf); 128 static int qla4xxx_conn_start(struct iscsi_cls_conn *conn); 129 static struct iscsi_cls_conn * 130 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx); 131 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session, 132 struct iscsi_cls_conn *cls_conn, 133 uint64_t transport_fd, int is_leading); 134 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn); 135 static struct iscsi_cls_session * 136 qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, 137 uint16_t qdepth, uint32_t initial_cmdsn); 138 static void qla4xxx_session_destroy(struct iscsi_cls_session *sess); 139 static void qla4xxx_task_work(struct work_struct *wdata); 140 static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t); 141 static int qla4xxx_task_xmit(struct iscsi_task *); 142 static void qla4xxx_task_cleanup(struct iscsi_task *); 143 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session); 144 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn, 145 struct iscsi_stats *stats); 146 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num, 147 uint32_t iface_type, uint32_t payload_size, 148 uint32_t pid, struct sockaddr *dst_addr); 149 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, 150 uint32_t *num_entries, char *buf); 151 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx); 152 static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, 153 int len); 154 static int qla4xxx_get_host_stats(struct 
Scsi_Host *shost, char *buf, int len); 155 156 /* 157 * SCSI host template entry points 158 */ 159 static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd); 160 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd); 161 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd); 162 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd); 163 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd); 164 static int qla4xxx_slave_alloc(struct scsi_device *device); 165 static int qla4xxx_slave_configure(struct scsi_device *device); 166 static void qla4xxx_slave_destroy(struct scsi_device *sdev); 167 static umode_t qla4_attr_is_visible(int param_type, int param); 168 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type); 169 static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth, 170 int reason); 171 172 /* 173 * iSCSI Flash DDB sysfs entry points 174 */ 175 static int 176 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, 177 struct iscsi_bus_flash_conn *fnode_conn, 178 void *data, int len); 179 static int 180 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, 181 int param, char *buf); 182 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf, 183 int len); 184 static int 185 qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess); 186 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess, 187 struct iscsi_bus_flash_conn *fnode_conn); 188 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess, 189 struct iscsi_bus_flash_conn *fnode_conn); 190 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess); 191 192 static struct qla4_8xxx_legacy_intr_set legacy_intr[] = 193 QLA82XX_LEGACY_INTR_CONFIG; 194 195 static struct scsi_host_template qla4xxx_driver_template = { 196 .module = THIS_MODULE, 197 .name = DRIVER_NAME, 198 .proc_name = DRIVER_NAME, 199 .queuecommand = qla4xxx_queuecommand, 200 201 .eh_abort_handler = qla4xxx_eh_abort, 202 .eh_device_reset_handler = qla4xxx_eh_device_reset, 203 .eh_target_reset_handler = qla4xxx_eh_target_reset, 204 .eh_host_reset_handler = qla4xxx_eh_host_reset, 205 .eh_timed_out = qla4xxx_eh_cmd_timed_out, 206 207 .slave_configure = qla4xxx_slave_configure, 208 .slave_alloc = qla4xxx_slave_alloc, 209 .slave_destroy = qla4xxx_slave_destroy, 210 .change_queue_depth = qla4xxx_change_queue_depth, 211 212 .this_id = -1, 213 .cmd_per_lun = 3, 214 .use_clustering = ENABLE_CLUSTERING, 215 .sg_tablesize = SG_ALL, 216 217 .max_sectors = 0xFFFF, 218 .shost_attrs = qla4xxx_host_attrs, 219 .host_reset = qla4xxx_host_reset, 220 .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC, 221 }; 222 223 static struct iscsi_transport qla4xxx_iscsi_transport = { 224 .owner = THIS_MODULE, 225 .name = DRIVER_NAME, 226 .caps = CAP_TEXT_NEGO | 227 CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST | 228 CAP_DATADGST | CAP_LOGIN_OFFLOAD | 229 CAP_MULTI_R2T, 230 .attr_is_visible = qla4_attr_is_visible, 231 .create_session = qla4xxx_session_create, 232 .destroy_session = qla4xxx_session_destroy, 233 .start_conn = qla4xxx_conn_start, 234 .create_conn = qla4xxx_conn_create, 235 .bind_conn = qla4xxx_conn_bind, 236 .stop_conn = iscsi_conn_stop, 237 .destroy_conn = qla4xxx_conn_destroy, 238 .set_param = iscsi_set_param, 239 .get_conn_param = qla4xxx_conn_get_param, 240 .get_session_param = qla4xxx_session_get_param, 241 .get_ep_param = qla4xxx_get_ep_param, 242 .ep_connect = qla4xxx_ep_connect, 243 .ep_poll = 
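/**
 * qla4xxx_send_ping - issue a firmware ping IOCB to a destination address
 * @shost: scsi host
 * @iface_num: iface instance (0 or 1)
 * @iface_type: ISCSI_IFACE_TYPE_IPV4 or ISCSI_IFACE_TYPE_IPV6
 * @payload_size: ping payload size
 * @pid: ping request identifier
 * @dst_addr: destination address
 *
 * (Editorial summary added during cleanup.)  For IPv6 the ping is first
 * attempted from the link-local address and, if that fails, retried from
 * the routable address configured on the requested iface.
 **/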
static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
			     uint32_t iface_type, uint32_t payload_size,
			     uint32_t pid, struct sockaddr *dst_addr)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct sockaddr_in *addr;
	struct sockaddr_in6 *addr6;
	uint32_t options = 0;
	uint8_t ipaddr[IPv6_ADDR_LEN];
	int rval;

	memset(ipaddr, 0, IPv6_ADDR_LEN);
	/* IPv4 to IPv4 */
	if ((iface_type == ISCSI_IFACE_TYPE_IPV4) &&
	    (dst_addr->sa_family == AF_INET)) {
		addr = (struct sockaddr_in *)dst_addr;
		memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN);
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: IPv4 Ping src: %pI4 dest: %pI4\n",
				  __func__, &ha->ip_config.ip_address,
				  ipaddr));
		rval = qla4xxx_ping_iocb(ha, options, payload_size, pid,
					 ipaddr);
		if (rval)
			rval = -EINVAL;
	} else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) &&
		   (dst_addr->sa_family == AF_INET6)) {
		/* IPv6 to IPv6 */
		addr6 = (struct sockaddr_in6 *)dst_addr;
		memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN);

		options |= PING_IPV6_PROTOCOL_ENABLE;

		/* Ping using LinkLocal address */
		if ((iface_num == 0) || (iface_num == 1)) {
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "%s: LinkLocal Ping src: %pI6 dest: %pI6\n",
					  __func__,
					  &ha->ip_config.ipv6_link_local_addr,
					  ipaddr));
			options |= PING_IPV6_LINKLOCAL_ADDR;
			rval = qla4xxx_ping_iocb(ha, options, payload_size,
						 pid, ipaddr);
		} else {
			ql4_printk(KERN_WARNING, ha,
				   "%s: iface num = %d not supported\n",
				   __func__, iface_num);
			rval = -ENOSYS;
			goto exit_send_ping;
		}

		/*
		 * If ping using LinkLocal address fails, try ping using
		 * IPv6 address
		 */
		if (rval != QLA_SUCCESS) {
			options &= ~PING_IPV6_LINKLOCAL_ADDR;
			if (iface_num == 0) {
				options |= PING_IPV6_ADDR0;
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "%s: IPv6 Ping src: %pI6 dest: %pI6\n",
						  __func__,
						  &ha->ip_config.ipv6_addr0,
						  ipaddr));
			} else if (iface_num == 1) {
				options |= PING_IPV6_ADDR1;
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "%s: IPv6 Ping src: %pI6 dest: %pI6\n",
						  __func__,
						  &ha->ip_config.ipv6_addr1,
						  ipaddr));
			}
			rval = qla4xxx_ping_iocb(ha, options, payload_size,
						 pid, ipaddr);
			if (rval)
				rval = -EINVAL;
		}
	} else
		rval = -ENOSYS;
exit_send_ping:
	return rval;
}
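/*
 * qla4_attr_is_visible() tells the iscsi transport class which sysfs
 * attributes this driver supports: parameters listed below are exposed
 * read-only (S_IRUGO), everything else is hidden.  (Editorial comment
 * added during cleanup.)
 */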
static umode_t qla4_attr_is_visible(int param_type, int param)
{
	switch (param_type) {
	case ISCSI_HOST_PARAM:
		switch (param) {
		case ISCSI_HOST_PARAM_HWADDRESS:
		case ISCSI_HOST_PARAM_IPADDRESS:
		case ISCSI_HOST_PARAM_INITIATOR_NAME:
		case ISCSI_HOST_PARAM_PORT_STATE:
		case ISCSI_HOST_PARAM_PORT_SPEED:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_PARAM:
		switch (param) {
		case ISCSI_PARAM_PERSISTENT_ADDRESS:
		case ISCSI_PARAM_PERSISTENT_PORT:
		case ISCSI_PARAM_CONN_ADDRESS:
		case ISCSI_PARAM_CONN_PORT:
		case ISCSI_PARAM_TARGET_NAME:
		case ISCSI_PARAM_TPGT:
		case ISCSI_PARAM_TARGET_ALIAS:
		case ISCSI_PARAM_MAX_BURST:
		case ISCSI_PARAM_MAX_R2T:
		case ISCSI_PARAM_FIRST_BURST:
		case ISCSI_PARAM_MAX_RECV_DLENGTH:
		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		case ISCSI_PARAM_IFACE_NAME:
		case ISCSI_PARAM_CHAP_OUT_IDX:
		case ISCSI_PARAM_CHAP_IN_IDX:
		case ISCSI_PARAM_USERNAME:
		case ISCSI_PARAM_PASSWORD:
		case ISCSI_PARAM_USERNAME_IN:
		case ISCSI_PARAM_PASSWORD_IN:
		case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
		case ISCSI_PARAM_DISCOVERY_SESS:
		case ISCSI_PARAM_PORTAL_TYPE:
		case ISCSI_PARAM_CHAP_AUTH_EN:
		case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
		case ISCSI_PARAM_BIDI_CHAP_EN:
		case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
		case ISCSI_PARAM_DEF_TIME2WAIT:
		case ISCSI_PARAM_DEF_TIME2RETAIN:
		case ISCSI_PARAM_HDRDGST_EN:
		case ISCSI_PARAM_DATADGST_EN:
		case ISCSI_PARAM_INITIAL_R2T_EN:
		case ISCSI_PARAM_IMM_DATA_EN:
		case ISCSI_PARAM_PDU_INORDER_EN:
		case ISCSI_PARAM_DATASEQ_INORDER_EN:
		case ISCSI_PARAM_MAX_SEGMENT_SIZE:
		case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
		case ISCSI_PARAM_TCP_WSF_DISABLE:
		case ISCSI_PARAM_TCP_NAGLE_DISABLE:
		case ISCSI_PARAM_TCP_TIMER_SCALE:
		case ISCSI_PARAM_TCP_TIMESTAMP_EN:
		case ISCSI_PARAM_TCP_XMIT_WSF:
		case ISCSI_PARAM_TCP_RECV_WSF:
		case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
		case ISCSI_PARAM_IPV4_TOS:
		case ISCSI_PARAM_IPV6_TC:
		case ISCSI_PARAM_IPV6_FLOW_LABEL:
		case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
		case ISCSI_PARAM_KEEPALIVE_TMO:
		case ISCSI_PARAM_LOCAL_PORT:
		case ISCSI_PARAM_ISID:
		case ISCSI_PARAM_TSID:
		case ISCSI_PARAM_DEF_TASKMGMT_TMO:
		case ISCSI_PARAM_ERL:
		case ISCSI_PARAM_STATSN:
		case ISCSI_PARAM_EXP_STATSN:
		case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
		case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
		case ISCSI_PARAM_LOCAL_IPADDR:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_NET_PARAM:
		switch (param) {
		case ISCSI_NET_PARAM_IPV4_ADDR:
		case ISCSI_NET_PARAM_IPV4_SUBNET:
		case ISCSI_NET_PARAM_IPV4_GW:
		case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
		case ISCSI_NET_PARAM_IFACE_ENABLE:
		case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
		case ISCSI_NET_PARAM_IPV6_ADDR:
		case ISCSI_NET_PARAM_IPV6_ROUTER:
		case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
		case ISCSI_NET_PARAM_VLAN_ID:
		case ISCSI_NET_PARAM_VLAN_PRIORITY:
		case ISCSI_NET_PARAM_VLAN_ENABLED:
		case ISCSI_NET_PARAM_MTU:
		case ISCSI_NET_PARAM_PORT:
		case ISCSI_NET_PARAM_IPADDR_STATE:
		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE:
		case ISCSI_NET_PARAM_IPV6_ROUTER_STATE:
		case ISCSI_NET_PARAM_DELAYED_ACK_EN:
		case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
		case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
		case ISCSI_NET_PARAM_TCP_WSF:
		case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
		case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
		case ISCSI_NET_PARAM_CACHE_ID:
		case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
		case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
		case ISCSI_NET_PARAM_IPV4_TOS_EN:
		case ISCSI_NET_PARAM_IPV4_TOS:
		case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
		case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
		case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
		case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
		case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
		case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
		case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
		case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
		case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
		case ISCSI_NET_PARAM_REDIRECT_EN:
		case ISCSI_NET_PARAM_IPV4_TTL:
		case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
		case ISCSI_NET_PARAM_IPV6_MLD_EN:
		case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
		case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
		case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
		case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
		case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
		case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
		case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
		case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_IFACE_PARAM:
		switch (param) {
		case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
		case ISCSI_IFACE_PARAM_HDRDGST_EN:
		case ISCSI_IFACE_PARAM_DATADGST_EN:
		case ISCSI_IFACE_PARAM_IMM_DATA_EN:
		case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
		case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
		case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
		case ISCSI_IFACE_PARAM_ERL:
		case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
		case ISCSI_IFACE_PARAM_FIRST_BURST:
		case ISCSI_IFACE_PARAM_MAX_R2T:
		case ISCSI_IFACE_PARAM_MAX_BURST:
		case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
		case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
		case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
		case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
		case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
		case ISCSI_IFACE_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_FLASHNODE_PARAM:
		switch (param) {
		case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
		case ISCSI_FLASHNODE_PORTAL_TYPE:
		case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
		case ISCSI_FLASHNODE_DISCOVERY_SESS:
		case ISCSI_FLASHNODE_ENTRY_EN:
		case ISCSI_FLASHNODE_HDR_DGST_EN:
		case ISCSI_FLASHNODE_DATA_DGST_EN:
		case ISCSI_FLASHNODE_IMM_DATA_EN:
		case ISCSI_FLASHNODE_INITIAL_R2T_EN:
		case ISCSI_FLASHNODE_DATASEQ_INORDER:
		case ISCSI_FLASHNODE_PDU_INORDER:
		case ISCSI_FLASHNODE_CHAP_AUTH_EN:
		case ISCSI_FLASHNODE_SNACK_REQ_EN:
		case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
		case ISCSI_FLASHNODE_BIDI_CHAP_EN:
		case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
		case ISCSI_FLASHNODE_ERL:
		case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
		case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
		case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
		case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
		case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
		case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
		case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
		case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
		case ISCSI_FLASHNODE_FIRST_BURST:
		case ISCSI_FLASHNODE_DEF_TIME2WAIT:
		case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
		case ISCSI_FLASHNODE_MAX_R2T:
		case ISCSI_FLASHNODE_KEEPALIVE_TMO:
		case ISCSI_FLASHNODE_ISID:
		case ISCSI_FLASHNODE_TSID:
		case ISCSI_FLASHNODE_PORT:
		case ISCSI_FLASHNODE_MAX_BURST:
		case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
		case ISCSI_FLASHNODE_IPADDR:
		case ISCSI_FLASHNODE_ALIAS:
		case ISCSI_FLASHNODE_REDIRECT_IPADDR:
		case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
		case ISCSI_FLASHNODE_LOCAL_PORT:
		case ISCSI_FLASHNODE_IPV4_TOS:
		case ISCSI_FLASHNODE_IPV6_TC:
		case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
		case ISCSI_FLASHNODE_NAME:
		case ISCSI_FLASHNODE_TPGT:
		case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
		case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
		case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
		case ISCSI_FLASHNODE_TCP_XMIT_WSF:
		case ISCSI_FLASHNODE_TCP_RECV_WSF:
		case ISCSI_FLASHNODE_CHAP_OUT_IDX:
		case ISCSI_FLASHNODE_USERNAME:
		case ISCSI_FLASHNODE_PASSWORD:
		case ISCSI_FLASHNODE_STATSN:
		case ISCSI_FLASHNODE_EXP_STATSN:
		case ISCSI_FLASHNODE_IS_BOOT_TGT:
			return S_IRUGO;
		default:
			return 0;
		}
	}

	return 0;
}

/**
 * qla4xxx_create_chap_list - Create CHAP list from FLASH
 * @ha: pointer to adapter structure
 *
 * Read flash and build a list of CHAP entries.  During login, when a CHAP
 * entry is received it is looked up in this list; if the entry exists, its
 * index is set in the DDB.  If it does not exist, a new entry is added to
 * the CHAP table in FLASH and the index obtained is used in the DDB.
 **/
static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
{
	int rval = 0;
	uint8_t *chap_flash_data = NULL;
	uint32_t offset;
	dma_addr_t chap_dma;
	uint32_t chap_size = 0;

	if (is_qla40XX(ha))
		chap_size = MAX_CHAP_ENTRIES_40XX *
			    sizeof(struct ql4_chap_table);
	else
		/*
		 * A single region contains CHAP info for both ports, which
		 * is divided in half for each port.
		 */
		chap_size = ha->hw.flt_chap_size / 2;

	chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
					     &chap_dma, GFP_KERNEL);
	if (!chap_flash_data) {
		ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
		return;
	}

	if (is_qla40XX(ha)) {
		offset = FLASH_CHAP_OFFSET;
	} else {
		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
		if (ha->port_num == 1)
			offset += chap_size;
	}

	rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
	if (rval != QLA_SUCCESS)
		goto exit_chap_list;

	if (ha->chap_list == NULL)
		ha->chap_list = vmalloc(chap_size);
	if (ha->chap_list == NULL) {
		ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
		goto exit_chap_list;
	}

	memset(ha->chap_list, 0, chap_size);
	memcpy(ha->chap_list, chap_flash_data, chap_size);

exit_chap_list:
	dma_free_coherent(&ha->pdev->dev, chap_size, chap_flash_data, chap_dma);
}
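/*
 * qla4xxx_get_chap_by_index() looks up @chap_index in the cached CHAP
 * table and returns the entry only if its cookie marks it valid.
 * (Editorial comment added during cleanup.)
 */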
static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha,
				     int16_t chap_index,
				     struct ql4_chap_table **chap_entry)
{
	int rval = QLA_ERROR;
	int max_chap_entries;

	if (!ha->chap_list) {
		ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
		rval = QLA_ERROR;
		goto exit_get_chap;
	}

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	/* Valid indices are 0 .. max_chap_entries - 1. */
	if (chap_index >= max_chap_entries) {
		ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
		rval = QLA_ERROR;
		goto exit_get_chap;
	}

	*chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index;
	if ((*chap_entry)->cookie !=
	    __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
		rval = QLA_ERROR;
		*chap_entry = NULL;
	} else {
		rval = QLA_SUCCESS;
	}

exit_get_chap:
	return rval;
}

/**
 * qla4xxx_find_free_chap_index - Find the first free chap index
 * @ha: pointer to adapter structure
 * @chap_index: CHAP index to be returned
 *
 * Find the first free chap index available in the chap table
 *
 * Note: Caller should acquire the chap lock before getting here.
 **/
static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha,
					uint16_t *chap_index)
{
	int i, rval;
	int free_index = -1;
	int max_chap_entries = 0;
	struct ql4_chap_table *chap_table;

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	if (!ha->chap_list) {
		ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
		rval = QLA_ERROR;
		goto exit_find_chap;
	}

	for (i = 0; i < max_chap_entries; i++) {
		chap_table = (struct ql4_chap_table *)ha->chap_list + i;

		if ((chap_table->cookie !=
		     __constant_cpu_to_le16(CHAP_VALID_COOKIE)) &&
		    (i > MAX_RESRV_CHAP_IDX)) {
			free_index = i;
			break;
		}
	}

	if (free_index != -1) {
		*chap_index = free_index;
		rval = QLA_SUCCESS;
	} else {
		rval = QLA_ERROR;
	}

exit_find_chap:
	return rval;
}

static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
				 uint32_t *num_entries, char *buf)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct ql4_chap_table *chap_table;
	struct iscsi_chap_rec *chap_rec;
	int max_chap_entries = 0;
	int valid_chap_entries = 0;
	int ret = 0, i;

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n",
		   __func__, *num_entries, chap_tbl_idx);

	if (!buf) {
		ret = -ENOMEM;
		goto exit_get_chap_list;
	}

	qla4xxx_create_chap_list(ha);

	chap_rec = (struct iscsi_chap_rec *)buf;
	mutex_lock(&ha->chap_sem);
	for (i = chap_tbl_idx; i < max_chap_entries; i++) {
		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
		if (chap_table->cookie !=
		    __constant_cpu_to_le16(CHAP_VALID_COOKIE))
			continue;

		chap_rec->chap_tbl_idx = i;
		strncpy(chap_rec->username, chap_table->name,
			ISCSI_CHAP_AUTH_NAME_MAX_LEN);
		strncpy(chap_rec->password, chap_table->secret,
			QL4_CHAP_MAX_SECRET_LEN);
		chap_rec->password_length = chap_table->secret_len;

		if (chap_table->flags & BIT_7) /* local */
			chap_rec->chap_type = CHAP_TYPE_OUT;

		if (chap_table->flags & BIT_6) /* peer */
			chap_rec->chap_type = CHAP_TYPE_IN;

		chap_rec++;

		valid_chap_entries++;
		if (valid_chap_entries == *num_entries)
			break;
	}
	mutex_unlock(&ha->chap_sem);

exit_get_chap_list:
	ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n",
		   __func__, valid_chap_entries);
	*num_entries = valid_chap_entries;
	return ret;
}
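/*
 * __qla4xxx_is_chap_active() is a device_for_each_child() callback used by
 * qla4xxx_is_chap_active() to check whether any ready session still
 * references the given CHAP table index.  (Editorial comment added during
 * cleanup.)
 */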
static int __qla4xxx_is_chap_active(struct device *dev, void *data)
{
	int ret = 0;
	uint16_t *chap_tbl_idx = (uint16_t *)data;
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;

	if (!iscsi_is_session_dev(dev))
		goto exit_is_chap_active;

	cls_session = iscsi_dev_to_session(dev);
	sess = cls_session->dd_data;
	ddb_entry = sess->dd_data;

	if (iscsi_session_chkready(cls_session))
		goto exit_is_chap_active;

	if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
		ret = 1;

exit_is_chap_active:
	return ret;
}

static int qla4xxx_is_chap_active(struct Scsi_Host *shost,
				  uint16_t chap_tbl_idx)
{
	int ret = 0;

	ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx,
				    __qla4xxx_is_chap_active);

	return ret;
}

static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct ql4_chap_table *chap_table;
	dma_addr_t chap_dma;
	int max_chap_entries = 0;
	uint32_t offset = 0;
	uint32_t chap_size;
	int ret = 0;

	chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
	if (chap_table == NULL)
		return -ENOMEM;

	memset(chap_table, 0, sizeof(struct ql4_chap_table));

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	if (chap_tbl_idx >= max_chap_entries) {
		ret = -EINVAL;
		goto exit_delete_chap;
	}

	/*
	 * Check if the CHAP index is in use; if it is, don't delete the
	 * CHAP entry.
	 */
	ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
	if (ret) {
		ql4_printk(KERN_INFO, ha,
			   "CHAP entry %d is in use, cannot delete from flash\n",
			   chap_tbl_idx);
		ret = -EBUSY;
		goto exit_delete_chap;
	}

	chap_size = sizeof(struct ql4_chap_table);
	if (is_qla40XX(ha)) {
		offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
	} else {
		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
		/*
		 * flt_chap_size is the CHAP table size for both ports, so
		 * divide it by 2 to calculate the offset for the second port.
		 */
		if (ha->port_num == 1)
			offset += (ha->hw.flt_chap_size / 2);
		offset += (chap_tbl_idx * chap_size);
	}

	ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
	if (ret != QLA_SUCCESS) {
		ret = -EINVAL;
		goto exit_delete_chap;
	}

	DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
			  __le16_to_cpu(chap_table->cookie)));

	if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
		ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
		goto exit_delete_chap;
	}

	chap_table->cookie = __constant_cpu_to_le16(0xFFFF);

	offset = FLASH_CHAP_OFFSET |
		 (chap_tbl_idx * sizeof(struct ql4_chap_table));
	ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
				FLASH_OPT_RMW_COMMIT);
	if (ret == QLA_SUCCESS && ha->chap_list) {
		mutex_lock(&ha->chap_sem);
		/* Update ha chap_list cache */
		memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
		       chap_table, sizeof(struct ql4_chap_table));
		mutex_unlock(&ha->chap_sem);
	}
	if (ret != QLA_SUCCESS)
		ret = -EINVAL;

exit_delete_chap:
	dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
	return ret;
}
/**
 * qla4xxx_set_chap_entry - Make chap entry with given information
 * @shost: pointer to host
 * @data: chap info - credentials, index and type to make chap entry
 * @len: length of data
 *
 * Add or update chap entry with the given information
 **/
static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct iscsi_chap_rec chap_rec;
	struct ql4_chap_table *chap_entry = NULL;
	struct iscsi_param_info *param_info;
	struct nlattr *attr;
	int max_chap_entries = 0;
	int type;
	int rem = len;
	int rc = 0;
	int size;

	memset(&chap_rec, 0, sizeof(chap_rec));

	nla_for_each_attr(attr, data, len, rem) {
		param_info = nla_data(attr);

		switch (param_info->param) {
		case ISCSI_CHAP_PARAM_INDEX:
			chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value;
			break;
		case ISCSI_CHAP_PARAM_CHAP_TYPE:
			chap_rec.chap_type = param_info->value[0];
			break;
		case ISCSI_CHAP_PARAM_USERNAME:
			size = min_t(size_t, sizeof(chap_rec.username),
				     param_info->len);
			memcpy(chap_rec.username, param_info->value, size);
			break;
		case ISCSI_CHAP_PARAM_PASSWORD:
			size = min_t(size_t, sizeof(chap_rec.password),
				     param_info->len);
			memcpy(chap_rec.password, param_info->value, size);
			break;
		case ISCSI_CHAP_PARAM_PASSWORD_LEN:
			chap_rec.password_length = param_info->value[0];
			break;
		default:
			ql4_printk(KERN_ERR, ha,
				   "%s: No such sysfs attribute\n", __func__);
			rc = -ENOSYS;
			goto exit_set_chap;
		}
	}

	if (chap_rec.chap_type == CHAP_TYPE_IN)
		type = BIDI_CHAP;
	else
		type = LOCAL_CHAP;

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	mutex_lock(&ha->chap_sem);
	if (chap_rec.chap_tbl_idx < max_chap_entries) {
		rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx,
					       &chap_entry);
		if (!rc) {
			if (!(type == qla4xxx_get_chap_type(chap_entry))) {
				ql4_printk(KERN_INFO, ha,
					   "Type mismatch for CHAP entry %d\n",
					   chap_rec.chap_tbl_idx);
				rc = -EINVAL;
				goto exit_unlock_chap;
			}

			/* If chap index is in use then don't modify it */
			rc = qla4xxx_is_chap_active(shost,
						    chap_rec.chap_tbl_idx);
			if (rc) {
				ql4_printk(KERN_INFO, ha,
					   "CHAP entry %d is in use\n",
					   chap_rec.chap_tbl_idx);
				rc = -EBUSY;
				goto exit_unlock_chap;
			}
		}
	} else {
		rc = qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx);
		if (rc) {
			ql4_printk(KERN_INFO, ha, "CHAP entry not available\n");
			rc = -EBUSY;
			goto exit_unlock_chap;
		}
	}

	rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password,
			      chap_rec.chap_tbl_idx, type);

exit_unlock_chap:
	mutex_unlock(&ha->chap_sem);

exit_set_chap:
	return rc;
}

static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct iscsi_offload_host_stats *host_stats = NULL;
	int host_stats_size;
	int ret = 0;
	int ddb_idx = 0;
	struct ql_iscsi_stats *ql_iscsi_stats = NULL;
	int stats_size;
	dma_addr_t iscsi_stats_dma;

	DEBUG2(ql4_printk(KERN_INFO, ha, "Func: %s\n", __func__));

	host_stats_size = sizeof(struct iscsi_offload_host_stats);

	if (host_stats_size != len) {
		ql4_printk(KERN_INFO, ha,
			   "%s: host_stats size mismatch expected = %d, is = %d\n",
			   __func__, len, host_stats_size);
		ret = -EINVAL;
		goto exit_host_stats;
	}

	/* Validate the buffer before using it as the stats structure. */
	if (!buf) {
		ret = -ENOMEM;
		goto exit_host_stats;
	}
	host_stats = (struct iscsi_offload_host_stats *)buf;

	stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));

	ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
					    &iscsi_stats_dma, GFP_KERNEL);
	if (!ql_iscsi_stats) {
		ql4_printk(KERN_ERR, ha,
			   "Unable to allocate memory for iscsi stats\n");
		ret = -ENOMEM;
		goto exit_host_stats;
	}

	ret = qla4xxx_get_mgmt_data(ha, ddb_idx, stats_size,
				    iscsi_stats_dma);
	if (ret != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha,
			   "Unable to retrieve iscsi stats\n");
		goto exit_host_stats;
	}
	host_stats->mactx_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_frames);
	host_stats->mactx_bytes = le64_to_cpu(ql_iscsi_stats->mac_tx_bytes);
	host_stats->mactx_multicast_frames =
			le64_to_cpu(ql_iscsi_stats->mac_tx_multicast_frames);
	host_stats->mactx_broadcast_frames =
			le64_to_cpu(ql_iscsi_stats->mac_tx_broadcast_frames);
	host_stats->mactx_pause_frames =
			le64_to_cpu(ql_iscsi_stats->mac_tx_pause_frames);
	host_stats->mactx_control_frames =
			le64_to_cpu(ql_iscsi_stats->mac_tx_control_frames);
	host_stats->mactx_deferral =
			le64_to_cpu(ql_iscsi_stats->mac_tx_deferral);
	host_stats->mactx_excess_deferral =
			le64_to_cpu(ql_iscsi_stats->mac_tx_excess_deferral);
	host_stats->mactx_late_collision =
			le64_to_cpu(ql_iscsi_stats->mac_tx_late_collision);
	host_stats->mactx_abort = le64_to_cpu(ql_iscsi_stats->mac_tx_abort);
	host_stats->mactx_single_collision =
			le64_to_cpu(ql_iscsi_stats->mac_tx_single_collision);
	host_stats->mactx_multiple_collision =
			le64_to_cpu(ql_iscsi_stats->mac_tx_multiple_collision);
	host_stats->mactx_collision =
			le64_to_cpu(ql_iscsi_stats->mac_tx_collision);
	host_stats->mactx_frames_dropped =
			le64_to_cpu(ql_iscsi_stats->mac_tx_frames_dropped);
	host_stats->mactx_jumbo_frames =
			le64_to_cpu(ql_iscsi_stats->mac_tx_jumbo_frames);
	host_stats->macrx_frames = le64_to_cpu(ql_iscsi_stats->mac_rx_frames);
	host_stats->macrx_bytes = le64_to_cpu(ql_iscsi_stats->mac_rx_bytes);
	host_stats->macrx_unknown_control_frames =
		le64_to_cpu(ql_iscsi_stats->mac_rx_unknown_control_frames);
	host_stats->macrx_pause_frames =
			le64_to_cpu(ql_iscsi_stats->mac_rx_pause_frames);
	host_stats->macrx_control_frames =
			le64_to_cpu(ql_iscsi_stats->mac_rx_control_frames);
	host_stats->macrx_dribble =
			le64_to_cpu(ql_iscsi_stats->mac_rx_dribble);
	host_stats->macrx_frame_length_error =
			le64_to_cpu(ql_iscsi_stats->mac_rx_frame_length_error);
	host_stats->macrx_jabber = le64_to_cpu(ql_iscsi_stats->mac_rx_jabber);
	host_stats->macrx_carrier_sense_error =
		le64_to_cpu(ql_iscsi_stats->mac_rx_carrier_sense_error);
	host_stats->macrx_frame_discarded =
			le64_to_cpu(ql_iscsi_stats->mac_rx_frame_discarded);
	host_stats->macrx_frames_dropped =
			le64_to_cpu(ql_iscsi_stats->mac_rx_frames_dropped);
	host_stats->mac_crc_error = le64_to_cpu(ql_iscsi_stats->mac_crc_error);
	host_stats->mac_encoding_error =
			le64_to_cpu(ql_iscsi_stats->mac_encoding_error);
	host_stats->macrx_length_error_large =
			le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_large);
	host_stats->macrx_length_error_small =
			le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_small);
	host_stats->macrx_multicast_frames =
			le64_to_cpu(ql_iscsi_stats->mac_rx_multicast_frames);
	host_stats->macrx_broadcast_frames =
			le64_to_cpu(ql_iscsi_stats->mac_rx_broadcast_frames);
	host_stats->iptx_packets = le64_to_cpu(ql_iscsi_stats->ip_tx_packets);
	host_stats->iptx_bytes = le64_to_cpu(ql_iscsi_stats->ip_tx_bytes);
	host_stats->iptx_fragments =
			le64_to_cpu(ql_iscsi_stats->ip_tx_fragments);
	host_stats->iprx_packets = le64_to_cpu(ql_iscsi_stats->ip_rx_packets);
	host_stats->iprx_bytes = le64_to_cpu(ql_iscsi_stats->ip_rx_bytes);
	host_stats->iprx_fragments =
			le64_to_cpu(ql_iscsi_stats->ip_rx_fragments);
	host_stats->ip_datagram_reassembly =
			le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly);
	host_stats->ip_invalid_address_error =
			le64_to_cpu(ql_iscsi_stats->ip_invalid_address_error);
	host_stats->ip_error_packets =
			le64_to_cpu(ql_iscsi_stats->ip_error_packets);
	host_stats->ip_fragrx_overlap =
			le64_to_cpu(ql_iscsi_stats->ip_fragrx_overlap);
	host_stats->ip_fragrx_outoforder =
			le64_to_cpu(ql_iscsi_stats->ip_fragrx_outoforder);
	host_stats->ip_datagram_reassembly_timeout =
		le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly_timeout);
	host_stats->ipv6tx_packets =
			le64_to_cpu(ql_iscsi_stats->ipv6_tx_packets);
	host_stats->ipv6tx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_tx_bytes);
	host_stats->ipv6tx_fragments =
			le64_to_cpu(ql_iscsi_stats->ipv6_tx_fragments);
	host_stats->ipv6rx_packets =
			le64_to_cpu(ql_iscsi_stats->ipv6_rx_packets);
	host_stats->ipv6rx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_rx_bytes);
	host_stats->ipv6rx_fragments =
			le64_to_cpu(ql_iscsi_stats->ipv6_rx_fragments);
	host_stats->ipv6_datagram_reassembly =
			le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly);
	host_stats->ipv6_invalid_address_error =
		le64_to_cpu(ql_iscsi_stats->ipv6_invalid_address_error);
	host_stats->ipv6_error_packets =
			le64_to_cpu(ql_iscsi_stats->ipv6_error_packets);
	host_stats->ipv6_fragrx_overlap =
			le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_overlap);
	host_stats->ipv6_fragrx_outoforder =
			le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_outoforder);
	host_stats->ipv6_datagram_reassembly_timeout =
		le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly_timeout);
	host_stats->tcptx_segments =
			le64_to_cpu(ql_iscsi_stats->tcp_tx_segments);
	host_stats->tcptx_bytes = le64_to_cpu(ql_iscsi_stats->tcp_tx_bytes);
	host_stats->tcprx_segments =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_segments);
	host_stats->tcprx_byte = le64_to_cpu(ql_iscsi_stats->tcp_rx_byte);
	host_stats->tcp_duplicate_ack_retx =
			le64_to_cpu(ql_iscsi_stats->tcp_duplicate_ack_retx);
	host_stats->tcp_retx_timer_expired =
			le64_to_cpu(ql_iscsi_stats->tcp_retx_timer_expired);
	host_stats->tcprx_duplicate_ack =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_duplicate_ack);
	host_stats->tcprx_pure_ackr =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_pure_ackr);
	host_stats->tcptx_delayed_ack =
			le64_to_cpu(ql_iscsi_stats->tcp_tx_delayed_ack);
	host_stats->tcptx_pure_ack =
			le64_to_cpu(ql_iscsi_stats->tcp_tx_pure_ack);
	host_stats->tcprx_segment_error =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_error);
	host_stats->tcprx_segment_outoforder =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_outoforder);
	host_stats->tcprx_window_probe =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_window_probe);
	host_stats->tcprx_window_update =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_window_update);
	host_stats->tcptx_window_probe_persist =
		le64_to_cpu(ql_iscsi_stats->tcp_tx_window_probe_persist);
	host_stats->ecc_error_correction =
			le64_to_cpu(ql_iscsi_stats->ecc_error_correction);
	host_stats->iscsi_pdu_tx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_tx);
	host_stats->iscsi_data_bytes_tx =
			le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_tx);
	host_stats->iscsi_pdu_rx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_rx);
	host_stats->iscsi_data_bytes_rx =
			le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_rx);
	host_stats->iscsi_io_completed =
			le64_to_cpu(ql_iscsi_stats->iscsi_io_completed);
	host_stats->iscsi_unexpected_io_rx =
			le64_to_cpu(ql_iscsi_stats->iscsi_unexpected_io_rx);
	host_stats->iscsi_format_error =
			le64_to_cpu(ql_iscsi_stats->iscsi_format_error);
	host_stats->iscsi_hdr_digest_error =
			le64_to_cpu(ql_iscsi_stats->iscsi_hdr_digest_error);
	host_stats->iscsi_data_digest_error =
			le64_to_cpu(ql_iscsi_stats->iscsi_data_digest_error);
	host_stats->iscsi_sequence_error =
			le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error);
exit_host_stats:
	if (ql_iscsi_stats)
		/* Free with the same size that was used for the allocation. */
		dma_free_coherent(&ha->pdev->dev, stats_size,
				  ql_iscsi_stats, iscsi_stats_dma);

	ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n", __func__);
	return ret;
}
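/*
 * qla4xxx_get_iface_param() reports per-iface network and iSCSI settings
 * to the iscsi transport class, formatting the value cached in
 * ha->ip_config into @buf.  (Editorial comment added during cleanup.)
 */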
1285 "auto" : "static"; 1286 1287 len = sprintf(buf, "%s\n", pval); 1288 break; 1289 case ISCSI_NET_PARAM_VLAN_ID: 1290 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1291 ival = ha->ip_config.ipv4_vlan_tag & 1292 ISCSI_MAX_VLAN_ID; 1293 else 1294 ival = ha->ip_config.ipv6_vlan_tag & 1295 ISCSI_MAX_VLAN_ID; 1296 1297 len = sprintf(buf, "%d\n", ival); 1298 break; 1299 case ISCSI_NET_PARAM_VLAN_PRIORITY: 1300 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1301 ival = (ha->ip_config.ipv4_vlan_tag >> 13) & 1302 ISCSI_MAX_VLAN_PRIORITY; 1303 else 1304 ival = (ha->ip_config.ipv6_vlan_tag >> 13) & 1305 ISCSI_MAX_VLAN_PRIORITY; 1306 1307 len = sprintf(buf, "%d\n", ival); 1308 break; 1309 case ISCSI_NET_PARAM_VLAN_ENABLED: 1310 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1311 OP_STATE(ha->ip_config.ipv4_options, 1312 IPOPT_VLAN_TAGGING_ENABLE, pval); 1313 } else { 1314 OP_STATE(ha->ip_config.ipv6_options, 1315 IPV6_OPT_VLAN_TAGGING_ENABLE, pval); 1316 } 1317 len = sprintf(buf, "%s\n", pval); 1318 break; 1319 case ISCSI_NET_PARAM_MTU: 1320 len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size); 1321 break; 1322 case ISCSI_NET_PARAM_PORT: 1323 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1324 len = sprintf(buf, "%d\n", 1325 ha->ip_config.ipv4_port); 1326 else 1327 len = sprintf(buf, "%d\n", 1328 ha->ip_config.ipv6_port); 1329 break; 1330 case ISCSI_NET_PARAM_IPADDR_STATE: 1331 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1332 pval = iscsi_get_ipaddress_state_name( 1333 ha->ip_config.ipv4_addr_state); 1334 } else { 1335 if (iface->iface_num == 0) 1336 pval = iscsi_get_ipaddress_state_name( 1337 ha->ip_config.ipv6_addr0_state); 1338 else if (iface->iface_num == 1) 1339 pval = iscsi_get_ipaddress_state_name( 1340 ha->ip_config.ipv6_addr1_state); 1341 } 1342 1343 len = sprintf(buf, "%s\n", pval); 1344 break; 1345 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE: 1346 pval = iscsi_get_ipaddress_state_name( 1347 ha->ip_config.ipv6_link_local_state); 1348 len = sprintf(buf, "%s\n", pval); 1349 break; 1350 case ISCSI_NET_PARAM_IPV6_ROUTER_STATE: 1351 pval = iscsi_get_router_state_name( 1352 ha->ip_config.ipv6_default_router_state); 1353 len = sprintf(buf, "%s\n", pval); 1354 break; 1355 case ISCSI_NET_PARAM_DELAYED_ACK_EN: 1356 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1357 OP_STATE(~ha->ip_config.tcp_options, 1358 TCPOPT_DELAYED_ACK_DISABLE, pval); 1359 } else { 1360 OP_STATE(~ha->ip_config.ipv6_tcp_options, 1361 IPV6_TCPOPT_DELAYED_ACK_DISABLE, pval); 1362 } 1363 len = sprintf(buf, "%s\n", pval); 1364 break; 1365 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: 1366 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1367 OP_STATE(~ha->ip_config.tcp_options, 1368 TCPOPT_NAGLE_ALGO_DISABLE, pval); 1369 } else { 1370 OP_STATE(~ha->ip_config.ipv6_tcp_options, 1371 IPV6_TCPOPT_NAGLE_ALGO_DISABLE, pval); 1372 } 1373 len = sprintf(buf, "%s\n", pval); 1374 break; 1375 case ISCSI_NET_PARAM_TCP_WSF_DISABLE: 1376 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1377 OP_STATE(~ha->ip_config.tcp_options, 1378 TCPOPT_WINDOW_SCALE_DISABLE, pval); 1379 } else { 1380 OP_STATE(~ha->ip_config.ipv6_tcp_options, 1381 IPV6_TCPOPT_WINDOW_SCALE_DISABLE, 1382 pval); 1383 } 1384 len = sprintf(buf, "%s\n", pval); 1385 break; 1386 case ISCSI_NET_PARAM_TCP_WSF: 1387 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1388 len = sprintf(buf, "%d\n", 1389 ha->ip_config.tcp_wsf); 1390 else 1391 len = sprintf(buf, "%d\n", 1392 ha->ip_config.ipv6_tcp_wsf); 1393 break; 1394 case ISCSI_NET_PARAM_TCP_TIMER_SCALE: 1395 if 
(iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1396 ival = (ha->ip_config.tcp_options & 1397 TCPOPT_TIMER_SCALE) >> 1; 1398 else 1399 ival = (ha->ip_config.ipv6_tcp_options & 1400 IPV6_TCPOPT_TIMER_SCALE) >> 1; 1401 1402 len = sprintf(buf, "%d\n", ival); 1403 break; 1404 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: 1405 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1406 OP_STATE(ha->ip_config.tcp_options, 1407 TCPOPT_TIMESTAMP_ENABLE, pval); 1408 } else { 1409 OP_STATE(ha->ip_config.ipv6_tcp_options, 1410 IPV6_TCPOPT_TIMESTAMP_EN, pval); 1411 } 1412 len = sprintf(buf, "%s\n", pval); 1413 break; 1414 case ISCSI_NET_PARAM_CACHE_ID: 1415 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1416 len = sprintf(buf, "%d\n", 1417 ha->ip_config.ipv4_cache_id); 1418 else 1419 len = sprintf(buf, "%d\n", 1420 ha->ip_config.ipv6_cache_id); 1421 break; 1422 case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: 1423 OP_STATE(ha->ip_config.tcp_options, 1424 TCPOPT_DNS_SERVER_IP_EN, pval); 1425 1426 len = sprintf(buf, "%s\n", pval); 1427 break; 1428 case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: 1429 OP_STATE(ha->ip_config.tcp_options, 1430 TCPOPT_SLP_DA_INFO_EN, pval); 1431 1432 len = sprintf(buf, "%s\n", pval); 1433 break; 1434 case ISCSI_NET_PARAM_IPV4_TOS_EN: 1435 OP_STATE(ha->ip_config.ipv4_options, 1436 IPOPT_IPV4_TOS_EN, pval); 1437 1438 len = sprintf(buf, "%s\n", pval); 1439 break; 1440 case ISCSI_NET_PARAM_IPV4_TOS: 1441 len = sprintf(buf, "%d\n", ha->ip_config.ipv4_tos); 1442 break; 1443 case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: 1444 OP_STATE(ha->ip_config.ipv4_options, 1445 IPOPT_GRAT_ARP_EN, pval); 1446 1447 len = sprintf(buf, "%s\n", pval); 1448 break; 1449 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: 1450 OP_STATE(ha->ip_config.ipv4_options, IPOPT_ALT_CID_EN, 1451 pval); 1452 1453 len = sprintf(buf, "%s\n", pval); 1454 break; 1455 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: 1456 pval = (ha->ip_config.ipv4_alt_cid_len) ? 1457 (char *)ha->ip_config.ipv4_alt_cid : ""; 1458 1459 len = sprintf(buf, "%s\n", pval); 1460 break; 1461 case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: 1462 OP_STATE(ha->ip_config.ipv4_options, 1463 IPOPT_REQ_VID_EN, pval); 1464 1465 len = sprintf(buf, "%s\n", pval); 1466 break; 1467 case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: 1468 OP_STATE(ha->ip_config.ipv4_options, 1469 IPOPT_USE_VID_EN, pval); 1470 1471 len = sprintf(buf, "%s\n", pval); 1472 break; 1473 case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: 1474 pval = (ha->ip_config.ipv4_vid_len) ? 
1475 (char *)ha->ip_config.ipv4_vid : ""; 1476 1477 len = sprintf(buf, "%s\n", pval); 1478 break; 1479 case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: 1480 OP_STATE(ha->ip_config.ipv4_options, 1481 IPOPT_LEARN_IQN_EN, pval); 1482 1483 len = sprintf(buf, "%s\n", pval); 1484 break; 1485 case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: 1486 OP_STATE(~ha->ip_config.ipv4_options, 1487 IPOPT_FRAGMENTATION_DISABLE, pval); 1488 1489 len = sprintf(buf, "%s\n", pval); 1490 break; 1491 case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: 1492 OP_STATE(ha->ip_config.ipv4_options, 1493 IPOPT_IN_FORWARD_EN, pval); 1494 1495 len = sprintf(buf, "%s\n", pval); 1496 break; 1497 case ISCSI_NET_PARAM_REDIRECT_EN: 1498 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1499 OP_STATE(ha->ip_config.ipv4_options, 1500 IPOPT_ARP_REDIRECT_EN, pval); 1501 } else { 1502 OP_STATE(ha->ip_config.ipv6_options, 1503 IPV6_OPT_REDIRECT_EN, pval); 1504 } 1505 len = sprintf(buf, "%s\n", pval); 1506 break; 1507 case ISCSI_NET_PARAM_IPV4_TTL: 1508 len = sprintf(buf, "%d\n", ha->ip_config.ipv4_ttl); 1509 break; 1510 case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: 1511 OP_STATE(ha->ip_config.ipv6_options, 1512 IPV6_OPT_GRAT_NEIGHBOR_ADV_EN, pval); 1513 1514 len = sprintf(buf, "%s\n", pval); 1515 break; 1516 case ISCSI_NET_PARAM_IPV6_MLD_EN: 1517 OP_STATE(ha->ip_config.ipv6_addl_options, 1518 IPV6_ADDOPT_MLD_EN, pval); 1519 1520 len = sprintf(buf, "%s\n", pval); 1521 break; 1522 case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: 1523 len = sprintf(buf, "%u\n", ha->ip_config.ipv6_flow_lbl); 1524 break; 1525 case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: 1526 len = sprintf(buf, "%d\n", 1527 ha->ip_config.ipv6_traffic_class); 1528 break; 1529 case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: 1530 len = sprintf(buf, "%d\n", 1531 ha->ip_config.ipv6_hop_limit); 1532 break; 1533 case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: 1534 len = sprintf(buf, "%d\n", 1535 ha->ip_config.ipv6_nd_reach_time); 1536 break; 1537 case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: 1538 len = sprintf(buf, "%d\n", 1539 ha->ip_config.ipv6_nd_rexmit_timer); 1540 break; 1541 case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: 1542 len = sprintf(buf, "%d\n", 1543 ha->ip_config.ipv6_nd_stale_timeout); 1544 break; 1545 case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: 1546 len = sprintf(buf, "%d\n", 1547 ha->ip_config.ipv6_dup_addr_detect_count); 1548 break; 1549 case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: 1550 len = sprintf(buf, "%d\n", 1551 ha->ip_config.ipv6_gw_advrt_mtu); 1552 break; 1553 default: 1554 len = -ENOSYS; 1555 } 1556 } else if (param_type == ISCSI_IFACE_PARAM) { 1557 switch (param) { 1558 case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: 1559 len = sprintf(buf, "%d\n", ha->ip_config.def_timeout); 1560 break; 1561 case ISCSI_IFACE_PARAM_HDRDGST_EN: 1562 OP_STATE(ha->ip_config.iscsi_options, 1563 ISCSIOPTS_HEADER_DIGEST_EN, pval); 1564 1565 len = sprintf(buf, "%s\n", pval); 1566 break; 1567 case ISCSI_IFACE_PARAM_DATADGST_EN: 1568 OP_STATE(ha->ip_config.iscsi_options, 1569 ISCSIOPTS_DATA_DIGEST_EN, pval); 1570 1571 len = sprintf(buf, "%s\n", pval); 1572 break; 1573 case ISCSI_IFACE_PARAM_IMM_DATA_EN: 1574 OP_STATE(ha->ip_config.iscsi_options, 1575 ISCSIOPTS_IMMEDIATE_DATA_EN, pval); 1576 1577 len = sprintf(buf, "%s\n", pval); 1578 break; 1579 case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: 1580 OP_STATE(ha->ip_config.iscsi_options, 1581 ISCSIOPTS_INITIAL_R2T_EN, pval); 1582 1583 len = sprintf(buf, "%s\n", pval); 1584 break; 1585 case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: 1586 OP_STATE(ha->ip_config.iscsi_options, 1587 
ISCSIOPTS_DATA_SEQ_INORDER_EN, pval); 1588 1589 len = sprintf(buf, "%s\n", pval); 1590 break; 1591 case ISCSI_IFACE_PARAM_PDU_INORDER_EN: 1592 OP_STATE(ha->ip_config.iscsi_options, 1593 ISCSIOPTS_DATA_PDU_INORDER_EN, pval); 1594 1595 len = sprintf(buf, "%s\n", pval); 1596 break; 1597 case ISCSI_IFACE_PARAM_ERL: 1598 len = sprintf(buf, "%d\n", 1599 (ha->ip_config.iscsi_options & 1600 ISCSIOPTS_ERL)); 1601 break; 1602 case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: 1603 len = sprintf(buf, "%u\n", 1604 ha->ip_config.iscsi_max_pdu_size * 1605 BYTE_UNITS); 1606 break; 1607 case ISCSI_IFACE_PARAM_FIRST_BURST: 1608 len = sprintf(buf, "%u\n", 1609 ha->ip_config.iscsi_first_burst_len * 1610 BYTE_UNITS); 1611 break; 1612 case ISCSI_IFACE_PARAM_MAX_R2T: 1613 len = sprintf(buf, "%d\n", 1614 ha->ip_config.iscsi_max_outstnd_r2t); 1615 break; 1616 case ISCSI_IFACE_PARAM_MAX_BURST: 1617 len = sprintf(buf, "%u\n", 1618 ha->ip_config.iscsi_max_burst_len * 1619 BYTE_UNITS); 1620 break; 1621 case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: 1622 OP_STATE(ha->ip_config.iscsi_options, 1623 ISCSIOPTS_CHAP_AUTH_EN, pval); 1624 1625 len = sprintf(buf, "%s\n", pval); 1626 break; 1627 case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: 1628 OP_STATE(ha->ip_config.iscsi_options, 1629 ISCSIOPTS_BIDI_CHAP_EN, pval); 1630 1631 len = sprintf(buf, "%s\n", pval); 1632 break; 1633 case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: 1634 OP_STATE(ha->ip_config.iscsi_options, 1635 ISCSIOPTS_DISCOVERY_AUTH_EN, pval); 1636 1637 len = sprintf(buf, "%s\n", pval); 1638 break; 1639 case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: 1640 OP_STATE(ha->ip_config.iscsi_options, 1641 ISCSIOPTS_DISCOVERY_LOGOUT_EN, pval); 1642 1643 len = sprintf(buf, "%s\n", pval); 1644 break; 1645 case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: 1646 OP_STATE(ha->ip_config.iscsi_options, 1647 ISCSIOPTS_STRICT_LOGIN_COMP_EN, pval); 1648 1649 len = sprintf(buf, "%s\n", pval); 1650 break; 1651 case ISCSI_IFACE_PARAM_INITIATOR_NAME: 1652 len = sprintf(buf, "%s\n", ha->ip_config.iscsi_name); 1653 break; 1654 default: 1655 len = -ENOSYS; 1656 } 1657 } 1658 1659 return len; 1660 } 1661 1662 static struct iscsi_endpoint * 1663 qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, 1664 int non_blocking) 1665 { 1666 int ret; 1667 struct iscsi_endpoint *ep; 1668 struct qla_endpoint *qla_ep; 1669 struct scsi_qla_host *ha; 1670 struct sockaddr_in *addr; 1671 struct sockaddr_in6 *addr6; 1672 1673 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); 1674 if (!shost) { 1675 ret = -ENXIO; 1676 printk(KERN_ERR "%s: shost is NULL\n", 1677 __func__); 1678 return ERR_PTR(ret); 1679 } 1680 1681 ha = iscsi_host_priv(shost); 1682 1683 ep = iscsi_create_endpoint(sizeof(struct qla_endpoint)); 1684 if (!ep) { 1685 ret = -ENOMEM; 1686 return ERR_PTR(ret); 1687 } 1688 1689 qla_ep = ep->dd_data; 1690 memset(qla_ep, 0, sizeof(struct qla_endpoint)); 1691 if (dst_addr->sa_family == AF_INET) { 1692 memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in)); 1693 addr = (struct sockaddr_in *)&qla_ep->dst_addr; 1694 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__, 1695 (char *)&addr->sin_addr)); 1696 } else if (dst_addr->sa_family == AF_INET6) { 1697 memcpy(&qla_ep->dst_addr, dst_addr, 1698 sizeof(struct sockaddr_in6)); 1699 addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr; 1700 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__, 1701 (char *)&addr6->sin6_addr)); 1702 } 1703 1704 qla_ep->host = shost; 1705 1706 return ep; 1707 } 1708 1709 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int 
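/*
 * qla4xxx_ep_poll() reports the endpoint as connected (returns 1) as soon
 * as the adapter is up and not busy rebuilding its DDB list; the actual
 * TCP connection is handled by the offload firmware.  (Editorial comment
 * added during cleanup.)
 */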
timeout_ms) 1710 { 1711 struct qla_endpoint *qla_ep; 1712 struct scsi_qla_host *ha; 1713 int ret = 0; 1714 1715 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); 1716 qla_ep = ep->dd_data; 1717 ha = to_qla_host(qla_ep->host); 1718 1719 if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags)) 1720 ret = 1; 1721 1722 return ret; 1723 } 1724 1725 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep) 1726 { 1727 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); 1728 iscsi_destroy_endpoint(ep); 1729 } 1730 1731 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep, 1732 enum iscsi_param param, 1733 char *buf) 1734 { 1735 struct qla_endpoint *qla_ep = ep->dd_data; 1736 struct sockaddr *dst_addr; 1737 1738 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); 1739 1740 switch (param) { 1741 case ISCSI_PARAM_CONN_PORT: 1742 case ISCSI_PARAM_CONN_ADDRESS: 1743 if (!qla_ep) 1744 return -ENOTCONN; 1745 1746 dst_addr = (struct sockaddr *)&qla_ep->dst_addr; 1747 if (!dst_addr) 1748 return -ENOTCONN; 1749 1750 return iscsi_conn_get_addr_param((struct sockaddr_storage *) 1751 &qla_ep->dst_addr, param, buf); 1752 default: 1753 return -ENOSYS; 1754 } 1755 } 1756 1757 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn, 1758 struct iscsi_stats *stats) 1759 { 1760 struct iscsi_session *sess; 1761 struct iscsi_cls_session *cls_sess; 1762 struct ddb_entry *ddb_entry; 1763 struct scsi_qla_host *ha; 1764 struct ql_iscsi_stats *ql_iscsi_stats; 1765 int stats_size; 1766 int ret; 1767 dma_addr_t iscsi_stats_dma; 1768 1769 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); 1770 1771 cls_sess = iscsi_conn_to_session(cls_conn); 1772 sess = cls_sess->dd_data; 1773 ddb_entry = sess->dd_data; 1774 ha = ddb_entry->ha; 1775 1776 stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats)); 1777 /* Allocate memory */ 1778 ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size, 1779 &iscsi_stats_dma, GFP_KERNEL); 1780 if (!ql_iscsi_stats) { 1781 ql4_printk(KERN_ERR, ha, 1782 "Unable to allocate memory for iscsi stats\n"); 1783 goto exit_get_stats; 1784 } 1785 1786 ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size, 1787 iscsi_stats_dma); 1788 if (ret != QLA_SUCCESS) { 1789 ql4_printk(KERN_ERR, ha, 1790 "Unable to retrieve iscsi stats\n"); 1791 goto free_stats; 1792 } 1793 1794 /* octets */ 1795 stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets); 1796 stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets); 1797 /* xmit pdus */ 1798 stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus); 1799 stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus); 1800 stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus); 1801 stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus); 1802 stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus); 1803 stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus); 1804 stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus); 1805 stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus); 1806 /* recv pdus */ 1807 stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus); 1808 stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus); 1809 stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus); 1810 stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus); 1811 stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus); 1812 stats->logoutrsp_pdus = 1813 
le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus); 1814 stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus); 1815 stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus); 1816 stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus); 1817 1818 free_stats: 1819 dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats, 1820 iscsi_stats_dma); 1821 exit_get_stats: 1822 return; 1823 } 1824 1825 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc) 1826 { 1827 struct iscsi_cls_session *session; 1828 struct iscsi_session *sess; 1829 unsigned long flags; 1830 enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED; 1831 1832 session = starget_to_session(scsi_target(sc->device)); 1833 sess = session->dd_data; 1834 1835 spin_lock_irqsave(&session->lock, flags); 1836 if (session->state == ISCSI_SESSION_FAILED) 1837 ret = BLK_EH_RESET_TIMER; 1838 spin_unlock_irqrestore(&session->lock, flags); 1839 1840 return ret; 1841 } 1842 1843 static void qla4xxx_set_port_speed(struct Scsi_Host *shost) 1844 { 1845 struct scsi_qla_host *ha = to_qla_host(shost); 1846 struct iscsi_cls_host *ihost = shost->shost_data; 1847 uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN; 1848 1849 qla4xxx_get_firmware_state(ha); 1850 1851 switch (ha->addl_fw_state & 0x0F00) { 1852 case FW_ADDSTATE_LINK_SPEED_10MBPS: 1853 speed = ISCSI_PORT_SPEED_10MBPS; 1854 break; 1855 case FW_ADDSTATE_LINK_SPEED_100MBPS: 1856 speed = ISCSI_PORT_SPEED_100MBPS; 1857 break; 1858 case FW_ADDSTATE_LINK_SPEED_1GBPS: 1859 speed = ISCSI_PORT_SPEED_1GBPS; 1860 break; 1861 case FW_ADDSTATE_LINK_SPEED_10GBPS: 1862 speed = ISCSI_PORT_SPEED_10GBPS; 1863 break; 1864 } 1865 ihost->port_speed = speed; 1866 } 1867 1868 static void qla4xxx_set_port_state(struct Scsi_Host *shost) 1869 { 1870 struct scsi_qla_host *ha = to_qla_host(shost); 1871 struct iscsi_cls_host *ihost = shost->shost_data; 1872 uint32_t state = ISCSI_PORT_STATE_DOWN; 1873 1874 if (test_bit(AF_LINK_UP, &ha->flags)) 1875 state = ISCSI_PORT_STATE_UP; 1876 1877 ihost->port_state = state; 1878 } 1879 1880 static int qla4xxx_host_get_param(struct Scsi_Host *shost, 1881 enum iscsi_host_param param, char *buf) 1882 { 1883 struct scsi_qla_host *ha = to_qla_host(shost); 1884 int len; 1885 1886 switch (param) { 1887 case ISCSI_HOST_PARAM_HWADDRESS: 1888 len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN); 1889 break; 1890 case ISCSI_HOST_PARAM_IPADDRESS: 1891 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address); 1892 break; 1893 case ISCSI_HOST_PARAM_INITIATOR_NAME: 1894 len = sprintf(buf, "%s\n", ha->name_string); 1895 break; 1896 case ISCSI_HOST_PARAM_PORT_STATE: 1897 qla4xxx_set_port_state(shost); 1898 len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost)); 1899 break; 1900 case ISCSI_HOST_PARAM_PORT_SPEED: 1901 qla4xxx_set_port_speed(shost); 1902 len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost)); 1903 break; 1904 default: 1905 return -ENOSYS; 1906 } 1907 1908 return len; 1909 } 1910 1911 static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha) 1912 { 1913 if (ha->iface_ipv4) 1914 return; 1915 1916 /* IPv4 */ 1917 ha->iface_ipv4 = iscsi_create_iface(ha->host, 1918 &qla4xxx_iscsi_transport, 1919 ISCSI_IFACE_TYPE_IPV4, 0, 0); 1920 if (!ha->iface_ipv4) 1921 ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI " 1922 "iface0.\n"); 1923 } 1924 1925 static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha) 1926 { 1927 if (!ha->iface_ipv6_0) 1928 /* IPv6 iface-0 */ 1929 ha->iface_ipv6_0 = iscsi_create_iface(ha->host, 1930 
&qla4xxx_iscsi_transport, 1931 ISCSI_IFACE_TYPE_IPV6, 0, 1932 0); 1933 if (!ha->iface_ipv6_0) 1934 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI " 1935 "iface0.\n"); 1936 1937 if (!ha->iface_ipv6_1) 1938 /* IPv6 iface-1 */ 1939 ha->iface_ipv6_1 = iscsi_create_iface(ha->host, 1940 &qla4xxx_iscsi_transport, 1941 ISCSI_IFACE_TYPE_IPV6, 1, 1942 0); 1943 if (!ha->iface_ipv6_1) 1944 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI " 1945 "iface1.\n"); 1946 } 1947 1948 static void qla4xxx_create_ifaces(struct scsi_qla_host *ha) 1949 { 1950 if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE) 1951 qla4xxx_create_ipv4_iface(ha); 1952 1953 if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE) 1954 qla4xxx_create_ipv6_iface(ha); 1955 } 1956 1957 static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha) 1958 { 1959 if (ha->iface_ipv4) { 1960 iscsi_destroy_iface(ha->iface_ipv4); 1961 ha->iface_ipv4 = NULL; 1962 } 1963 } 1964 1965 static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha) 1966 { 1967 if (ha->iface_ipv6_0) { 1968 iscsi_destroy_iface(ha->iface_ipv6_0); 1969 ha->iface_ipv6_0 = NULL; 1970 } 1971 if (ha->iface_ipv6_1) { 1972 iscsi_destroy_iface(ha->iface_ipv6_1); 1973 ha->iface_ipv6_1 = NULL; 1974 } 1975 } 1976 1977 static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha) 1978 { 1979 qla4xxx_destroy_ipv4_iface(ha); 1980 qla4xxx_destroy_ipv6_iface(ha); 1981 } 1982 1983 static void qla4xxx_set_ipv6(struct scsi_qla_host *ha, 1984 struct iscsi_iface_param_info *iface_param, 1985 struct addr_ctrl_blk *init_fw_cb) 1986 { 1987 /* 1988 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg. 1989 * iface_num 1 is valid only for IPv6 Addr. 1990 */ 1991 switch (iface_param->param) { 1992 case ISCSI_NET_PARAM_IPV6_ADDR: 1993 if (iface_param->iface_num & 0x1) 1994 /* IPv6 Addr 1 */ 1995 memcpy(init_fw_cb->ipv6_addr1, iface_param->value, 1996 sizeof(init_fw_cb->ipv6_addr1)); 1997 else 1998 /* IPv6 Addr 0 */ 1999 memcpy(init_fw_cb->ipv6_addr0, iface_param->value, 2000 sizeof(init_fw_cb->ipv6_addr0)); 2001 break; 2002 case ISCSI_NET_PARAM_IPV6_LINKLOCAL: 2003 if (iface_param->iface_num & 0x1) 2004 break; 2005 memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8], 2006 sizeof(init_fw_cb->ipv6_if_id)); 2007 break; 2008 case ISCSI_NET_PARAM_IPV6_ROUTER: 2009 if (iface_param->iface_num & 0x1) 2010 break; 2011 memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value, 2012 sizeof(init_fw_cb->ipv6_dflt_rtr_addr)); 2013 break; 2014 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: 2015 /* Autocfg applies to even interface */ 2016 if (iface_param->iface_num & 0x1) 2017 break; 2018 2019 if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE) 2020 init_fw_cb->ipv6_addtl_opts &= 2021 cpu_to_le16( 2022 ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE); 2023 else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE) 2024 init_fw_cb->ipv6_addtl_opts |= 2025 cpu_to_le16( 2026 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE); 2027 else 2028 ql4_printk(KERN_ERR, ha, 2029 "Invalid autocfg setting for IPv6 addr\n"); 2030 break; 2031 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: 2032 /* Autocfg applies to even interface */ 2033 if (iface_param->iface_num & 0x1) 2034 break; 2035 2036 if (iface_param->value[0] == 2037 ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE) 2038 init_fw_cb->ipv6_addtl_opts |= cpu_to_le16( 2039 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR); 2040 else if (iface_param->value[0] == 2041 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE) 2042 init_fw_cb->ipv6_addtl_opts &= cpu_to_le16( 2043 
~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR); 2044 else 2045 ql4_printk(KERN_ERR, ha, 2046 "Invalid autocfg setting for IPv6 linklocal addr\n"); 2047 break; 2048 case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG: 2049 /* Autocfg applies to even interface */ 2050 if (iface_param->iface_num & 0x1) 2051 break; 2052 2053 if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE) 2054 memset(init_fw_cb->ipv6_dflt_rtr_addr, 0, 2055 sizeof(init_fw_cb->ipv6_dflt_rtr_addr)); 2056 break; 2057 case ISCSI_NET_PARAM_IFACE_ENABLE: 2058 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) { 2059 init_fw_cb->ipv6_opts |= 2060 cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE); 2061 qla4xxx_create_ipv6_iface(ha); 2062 } else { 2063 init_fw_cb->ipv6_opts &= 2064 cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE & 2065 0xFFFF); 2066 qla4xxx_destroy_ipv6_iface(ha); 2067 } 2068 break; 2069 case ISCSI_NET_PARAM_VLAN_TAG: 2070 if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag)) 2071 break; 2072 init_fw_cb->ipv6_vlan_tag = 2073 cpu_to_be16(*(uint16_t *)iface_param->value); 2074 break; 2075 case ISCSI_NET_PARAM_VLAN_ENABLED: 2076 if (iface_param->value[0] == ISCSI_VLAN_ENABLE) 2077 init_fw_cb->ipv6_opts |= 2078 cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE); 2079 else 2080 init_fw_cb->ipv6_opts &= 2081 cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE); 2082 break; 2083 case ISCSI_NET_PARAM_MTU: 2084 init_fw_cb->eth_mtu_size = 2085 cpu_to_le16(*(uint16_t *)iface_param->value); 2086 break; 2087 case ISCSI_NET_PARAM_PORT: 2088 /* Autocfg applies to even interface */ 2089 if (iface_param->iface_num & 0x1) 2090 break; 2091 2092 init_fw_cb->ipv6_port = 2093 cpu_to_le16(*(uint16_t *)iface_param->value); 2094 break; 2095 case ISCSI_NET_PARAM_DELAYED_ACK_EN: 2096 if (iface_param->iface_num & 0x1) 2097 break; 2098 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2099 init_fw_cb->ipv6_tcp_opts |= 2100 cpu_to_le16(IPV6_TCPOPT_DELAYED_ACK_DISABLE); 2101 else 2102 init_fw_cb->ipv6_tcp_opts &= 2103 cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE); 2104 break; 2105 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: 2106 if (iface_param->iface_num & 0x1) 2107 break; 2108 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2109 init_fw_cb->ipv6_tcp_opts |= 2110 cpu_to_le16(IPV6_TCPOPT_NAGLE_ALGO_DISABLE); 2111 else 2112 init_fw_cb->ipv6_tcp_opts &= 2113 cpu_to_le16(~IPV6_TCPOPT_NAGLE_ALGO_DISABLE); 2114 break; 2115 case ISCSI_NET_PARAM_TCP_WSF_DISABLE: 2116 if (iface_param->iface_num & 0x1) 2117 break; 2118 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2119 init_fw_cb->ipv6_tcp_opts |= 2120 cpu_to_le16(IPV6_TCPOPT_WINDOW_SCALE_DISABLE); 2121 else 2122 init_fw_cb->ipv6_tcp_opts &= 2123 cpu_to_le16(~IPV6_TCPOPT_WINDOW_SCALE_DISABLE); 2124 break; 2125 case ISCSI_NET_PARAM_TCP_WSF: 2126 if (iface_param->iface_num & 0x1) 2127 break; 2128 init_fw_cb->ipv6_tcp_wsf = iface_param->value[0]; 2129 break; 2130 case ISCSI_NET_PARAM_TCP_TIMER_SCALE: 2131 if (iface_param->iface_num & 0x1) 2132 break; 2133 init_fw_cb->ipv6_tcp_opts &= 2134 cpu_to_le16(~IPV6_TCPOPT_TIMER_SCALE); 2135 init_fw_cb->ipv6_tcp_opts |= 2136 cpu_to_le16((iface_param->value[0] << 1) & 2137 IPV6_TCPOPT_TIMER_SCALE); 2138 break; 2139 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: 2140 if (iface_param->iface_num & 0x1) 2141 break; 2142 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2143 init_fw_cb->ipv6_tcp_opts |= 2144 cpu_to_le16(IPV6_TCPOPT_TIMESTAMP_EN); 2145 else 2146 init_fw_cb->ipv6_tcp_opts &= 2147 cpu_to_le16(~IPV6_TCPOPT_TIMESTAMP_EN); 2148 break; 2149 case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: 
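		/* The remaining IPv6 options below are common to the port and
		 * are programmed through iface 0 only; requests arriving on
		 * the odd iface number are silently ignored.
		 */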
2150 if (iface_param->iface_num & 0x1) 2151 break; 2152 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2153 init_fw_cb->ipv6_opts |= 2154 cpu_to_le16(IPV6_OPT_GRAT_NEIGHBOR_ADV_EN); 2155 else 2156 init_fw_cb->ipv6_opts &= 2157 cpu_to_le16(~IPV6_OPT_GRAT_NEIGHBOR_ADV_EN); 2158 break; 2159 case ISCSI_NET_PARAM_REDIRECT_EN: 2160 if (iface_param->iface_num & 0x1) 2161 break; 2162 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2163 init_fw_cb->ipv6_opts |= 2164 cpu_to_le16(IPV6_OPT_REDIRECT_EN); 2165 else 2166 init_fw_cb->ipv6_opts &= 2167 cpu_to_le16(~IPV6_OPT_REDIRECT_EN); 2168 break; 2169 case ISCSI_NET_PARAM_IPV6_MLD_EN: 2170 if (iface_param->iface_num & 0x1) 2171 break; 2172 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2173 init_fw_cb->ipv6_addtl_opts |= 2174 cpu_to_le16(IPV6_ADDOPT_MLD_EN); 2175 else 2176 init_fw_cb->ipv6_addtl_opts &= 2177 cpu_to_le16(~IPV6_ADDOPT_MLD_EN); 2178 break; 2179 case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: 2180 if (iface_param->iface_num & 0x1) 2181 break; 2182 init_fw_cb->ipv6_flow_lbl = 2183 cpu_to_le16(*(uint16_t *)iface_param->value); 2184 break; 2185 case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: 2186 if (iface_param->iface_num & 0x1) 2187 break; 2188 init_fw_cb->ipv6_traffic_class = iface_param->value[0]; 2189 break; 2190 case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: 2191 if (iface_param->iface_num & 0x1) 2192 break; 2193 init_fw_cb->ipv6_hop_limit = iface_param->value[0]; 2194 break; 2195 case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: 2196 if (iface_param->iface_num & 0x1) 2197 break; 2198 init_fw_cb->ipv6_nd_reach_time = 2199 cpu_to_le32(*(uint32_t *)iface_param->value); 2200 break; 2201 case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: 2202 if (iface_param->iface_num & 0x1) 2203 break; 2204 init_fw_cb->ipv6_nd_rexmit_timer = 2205 cpu_to_le32(*(uint32_t *)iface_param->value); 2206 break; 2207 case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: 2208 if (iface_param->iface_num & 0x1) 2209 break; 2210 init_fw_cb->ipv6_nd_stale_timeout = 2211 cpu_to_le32(*(uint32_t *)iface_param->value); 2212 break; 2213 case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: 2214 if (iface_param->iface_num & 0x1) 2215 break; 2216 init_fw_cb->ipv6_dup_addr_detect_count = iface_param->value[0]; 2217 break; 2218 case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: 2219 if (iface_param->iface_num & 0x1) 2220 break; 2221 init_fw_cb->ipv6_gw_advrt_mtu = 2222 cpu_to_le32(*(uint32_t *)iface_param->value); 2223 break; 2224 default: 2225 ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n", 2226 iface_param->param); 2227 break; 2228 } 2229 } 2230 2231 static void qla4xxx_set_ipv4(struct scsi_qla_host *ha, 2232 struct iscsi_iface_param_info *iface_param, 2233 struct addr_ctrl_blk *init_fw_cb) 2234 { 2235 switch (iface_param->param) { 2236 case ISCSI_NET_PARAM_IPV4_ADDR: 2237 memcpy(init_fw_cb->ipv4_addr, iface_param->value, 2238 sizeof(init_fw_cb->ipv4_addr)); 2239 break; 2240 case ISCSI_NET_PARAM_IPV4_SUBNET: 2241 memcpy(init_fw_cb->ipv4_subnet, iface_param->value, 2242 sizeof(init_fw_cb->ipv4_subnet)); 2243 break; 2244 case ISCSI_NET_PARAM_IPV4_GW: 2245 memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value, 2246 sizeof(init_fw_cb->ipv4_gw_addr)); 2247 break; 2248 case ISCSI_NET_PARAM_IPV4_BOOTPROTO: 2249 if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP) 2250 init_fw_cb->ipv4_tcp_opts |= 2251 cpu_to_le16(TCPOPT_DHCP_ENABLE); 2252 else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC) 2253 init_fw_cb->ipv4_tcp_opts &= 2254 cpu_to_le16(~TCPOPT_DHCP_ENABLE); 2255 else 2256 ql4_printk(KERN_ERR, ha, "Invalid IPv4 
bootproto\n"); 2257 break; 2258 case ISCSI_NET_PARAM_IFACE_ENABLE: 2259 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) { 2260 init_fw_cb->ipv4_ip_opts |= 2261 cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE); 2262 qla4xxx_create_ipv4_iface(ha); 2263 } else { 2264 init_fw_cb->ipv4_ip_opts &= 2265 cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE & 2266 0xFFFF); 2267 qla4xxx_destroy_ipv4_iface(ha); 2268 } 2269 break; 2270 case ISCSI_NET_PARAM_VLAN_TAG: 2271 if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag)) 2272 break; 2273 init_fw_cb->ipv4_vlan_tag = 2274 cpu_to_be16(*(uint16_t *)iface_param->value); 2275 break; 2276 case ISCSI_NET_PARAM_VLAN_ENABLED: 2277 if (iface_param->value[0] == ISCSI_VLAN_ENABLE) 2278 init_fw_cb->ipv4_ip_opts |= 2279 cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE); 2280 else 2281 init_fw_cb->ipv4_ip_opts &= 2282 cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE); 2283 break; 2284 case ISCSI_NET_PARAM_MTU: 2285 init_fw_cb->eth_mtu_size = 2286 cpu_to_le16(*(uint16_t *)iface_param->value); 2287 break; 2288 case ISCSI_NET_PARAM_PORT: 2289 init_fw_cb->ipv4_port = 2290 cpu_to_le16(*(uint16_t *)iface_param->value); 2291 break; 2292 case ISCSI_NET_PARAM_DELAYED_ACK_EN: 2293 if (iface_param->iface_num & 0x1) 2294 break; 2295 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2296 init_fw_cb->ipv4_tcp_opts |= 2297 cpu_to_le16(TCPOPT_DELAYED_ACK_DISABLE); 2298 else 2299 init_fw_cb->ipv4_tcp_opts &= 2300 cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE); 2301 break; 2302 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: 2303 if (iface_param->iface_num & 0x1) 2304 break; 2305 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2306 init_fw_cb->ipv4_tcp_opts |= 2307 cpu_to_le16(TCPOPT_NAGLE_ALGO_DISABLE); 2308 else 2309 init_fw_cb->ipv4_tcp_opts &= 2310 cpu_to_le16(~TCPOPT_NAGLE_ALGO_DISABLE); 2311 break; 2312 case ISCSI_NET_PARAM_TCP_WSF_DISABLE: 2313 if (iface_param->iface_num & 0x1) 2314 break; 2315 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2316 init_fw_cb->ipv4_tcp_opts |= 2317 cpu_to_le16(TCPOPT_WINDOW_SCALE_DISABLE); 2318 else 2319 init_fw_cb->ipv4_tcp_opts &= 2320 cpu_to_le16(~TCPOPT_WINDOW_SCALE_DISABLE); 2321 break; 2322 case ISCSI_NET_PARAM_TCP_WSF: 2323 if (iface_param->iface_num & 0x1) 2324 break; 2325 init_fw_cb->ipv4_tcp_wsf = iface_param->value[0]; 2326 break; 2327 case ISCSI_NET_PARAM_TCP_TIMER_SCALE: 2328 if (iface_param->iface_num & 0x1) 2329 break; 2330 init_fw_cb->ipv4_tcp_opts &= cpu_to_le16(~TCPOPT_TIMER_SCALE); 2331 init_fw_cb->ipv4_tcp_opts |= 2332 cpu_to_le16((iface_param->value[0] << 1) & 2333 TCPOPT_TIMER_SCALE); 2334 break; 2335 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: 2336 if (iface_param->iface_num & 0x1) 2337 break; 2338 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2339 init_fw_cb->ipv4_tcp_opts |= 2340 cpu_to_le16(TCPOPT_TIMESTAMP_ENABLE); 2341 else 2342 init_fw_cb->ipv4_tcp_opts &= 2343 cpu_to_le16(~TCPOPT_TIMESTAMP_ENABLE); 2344 break; 2345 case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: 2346 if (iface_param->iface_num & 0x1) 2347 break; 2348 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2349 init_fw_cb->ipv4_tcp_opts |= 2350 cpu_to_le16(TCPOPT_DNS_SERVER_IP_EN); 2351 else 2352 init_fw_cb->ipv4_tcp_opts &= 2353 cpu_to_le16(~TCPOPT_DNS_SERVER_IP_EN); 2354 break; 2355 case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: 2356 if (iface_param->iface_num & 0x1) 2357 break; 2358 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2359 init_fw_cb->ipv4_tcp_opts |= 2360 cpu_to_le16(TCPOPT_SLP_DA_INFO_EN); 2361 else 2362 init_fw_cb->ipv4_tcp_opts &= 2363 
cpu_to_le16(~TCPOPT_SLP_DA_INFO_EN); 2364 break; 2365 case ISCSI_NET_PARAM_IPV4_TOS_EN: 2366 if (iface_param->iface_num & 0x1) 2367 break; 2368 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2369 init_fw_cb->ipv4_ip_opts |= 2370 cpu_to_le16(IPOPT_IPV4_TOS_EN); 2371 else 2372 init_fw_cb->ipv4_ip_opts &= 2373 cpu_to_le16(~IPOPT_IPV4_TOS_EN); 2374 break; 2375 case ISCSI_NET_PARAM_IPV4_TOS: 2376 if (iface_param->iface_num & 0x1) 2377 break; 2378 init_fw_cb->ipv4_tos = iface_param->value[0]; 2379 break; 2380 case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: 2381 if (iface_param->iface_num & 0x1) 2382 break; 2383 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2384 init_fw_cb->ipv4_ip_opts |= 2385 cpu_to_le16(IPOPT_GRAT_ARP_EN); 2386 else 2387 init_fw_cb->ipv4_ip_opts &= 2388 cpu_to_le16(~IPOPT_GRAT_ARP_EN); 2389 break; 2390 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: 2391 if (iface_param->iface_num & 0x1) 2392 break; 2393 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2394 init_fw_cb->ipv4_ip_opts |= 2395 cpu_to_le16(IPOPT_ALT_CID_EN); 2396 else 2397 init_fw_cb->ipv4_ip_opts &= 2398 cpu_to_le16(~IPOPT_ALT_CID_EN); 2399 break; 2400 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: 2401 if (iface_param->iface_num & 0x1) 2402 break; 2403 memcpy(init_fw_cb->ipv4_dhcp_alt_cid, iface_param->value, 2404 (sizeof(init_fw_cb->ipv4_dhcp_alt_cid) - 1)); 2405 init_fw_cb->ipv4_dhcp_alt_cid_len = 2406 strlen(init_fw_cb->ipv4_dhcp_alt_cid); 2407 break; 2408 case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: 2409 if (iface_param->iface_num & 0x1) 2410 break; 2411 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2412 init_fw_cb->ipv4_ip_opts |= 2413 cpu_to_le16(IPOPT_REQ_VID_EN); 2414 else 2415 init_fw_cb->ipv4_ip_opts &= 2416 cpu_to_le16(~IPOPT_REQ_VID_EN); 2417 break; 2418 case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: 2419 if (iface_param->iface_num & 0x1) 2420 break; 2421 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2422 init_fw_cb->ipv4_ip_opts |= 2423 cpu_to_le16(IPOPT_USE_VID_EN); 2424 else 2425 init_fw_cb->ipv4_ip_opts &= 2426 cpu_to_le16(~IPOPT_USE_VID_EN); 2427 break; 2428 case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: 2429 if (iface_param->iface_num & 0x1) 2430 break; 2431 memcpy(init_fw_cb->ipv4_dhcp_vid, iface_param->value, 2432 (sizeof(init_fw_cb->ipv4_dhcp_vid) - 1)); 2433 init_fw_cb->ipv4_dhcp_vid_len = 2434 strlen(init_fw_cb->ipv4_dhcp_vid); 2435 break; 2436 case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: 2437 if (iface_param->iface_num & 0x1) 2438 break; 2439 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2440 init_fw_cb->ipv4_ip_opts |= 2441 cpu_to_le16(IPOPT_LEARN_IQN_EN); 2442 else 2443 init_fw_cb->ipv4_ip_opts &= 2444 cpu_to_le16(~IPOPT_LEARN_IQN_EN); 2445 break; 2446 case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: 2447 if (iface_param->iface_num & 0x1) 2448 break; 2449 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2450 init_fw_cb->ipv4_ip_opts |= 2451 cpu_to_le16(IPOPT_FRAGMENTATION_DISABLE); 2452 else 2453 init_fw_cb->ipv4_ip_opts &= 2454 cpu_to_le16(~IPOPT_FRAGMENTATION_DISABLE); 2455 break; 2456 case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: 2457 if (iface_param->iface_num & 0x1) 2458 break; 2459 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2460 init_fw_cb->ipv4_ip_opts |= 2461 cpu_to_le16(IPOPT_IN_FORWARD_EN); 2462 else 2463 init_fw_cb->ipv4_ip_opts &= 2464 cpu_to_le16(~IPOPT_IN_FORWARD_EN); 2465 break; 2466 case ISCSI_NET_PARAM_REDIRECT_EN: 2467 if (iface_param->iface_num & 0x1) 2468 break; 2469 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2470 
init_fw_cb->ipv4_ip_opts |= 2471 cpu_to_le16(IPOPT_ARP_REDIRECT_EN); 2472 else 2473 init_fw_cb->ipv4_ip_opts &= 2474 cpu_to_le16(~IPOPT_ARP_REDIRECT_EN); 2475 break; 2476 case ISCSI_NET_PARAM_IPV4_TTL: 2477 if (iface_param->iface_num & 0x1) 2478 break; 2479 init_fw_cb->ipv4_ttl = iface_param->value[0]; 2480 break; 2481 default: 2482 ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n", 2483 iface_param->param); 2484 break; 2485 } 2486 } 2487 2488 static void qla4xxx_set_iscsi_param(struct scsi_qla_host *ha, 2489 struct iscsi_iface_param_info *iface_param, 2490 struct addr_ctrl_blk *init_fw_cb) 2491 { 2492 switch (iface_param->param) { 2493 case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: 2494 if (iface_param->iface_num & 0x1) 2495 break; 2496 init_fw_cb->def_timeout = 2497 cpu_to_le16(*(uint16_t *)iface_param->value); 2498 break; 2499 case ISCSI_IFACE_PARAM_HDRDGST_EN: 2500 if (iface_param->iface_num & 0x1) 2501 break; 2502 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2503 init_fw_cb->iscsi_opts |= 2504 cpu_to_le16(ISCSIOPTS_HEADER_DIGEST_EN); 2505 else 2506 init_fw_cb->iscsi_opts &= 2507 cpu_to_le16(~ISCSIOPTS_HEADER_DIGEST_EN); 2508 break; 2509 case ISCSI_IFACE_PARAM_DATADGST_EN: 2510 if (iface_param->iface_num & 0x1) 2511 break; 2512 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2513 init_fw_cb->iscsi_opts |= 2514 cpu_to_le16(ISCSIOPTS_DATA_DIGEST_EN); 2515 else 2516 init_fw_cb->iscsi_opts &= 2517 cpu_to_le16(~ISCSIOPTS_DATA_DIGEST_EN); 2518 break; 2519 case ISCSI_IFACE_PARAM_IMM_DATA_EN: 2520 if (iface_param->iface_num & 0x1) 2521 break; 2522 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2523 init_fw_cb->iscsi_opts |= 2524 cpu_to_le16(ISCSIOPTS_IMMEDIATE_DATA_EN); 2525 else 2526 init_fw_cb->iscsi_opts &= 2527 cpu_to_le16(~ISCSIOPTS_IMMEDIATE_DATA_EN); 2528 break; 2529 case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: 2530 if (iface_param->iface_num & 0x1) 2531 break; 2532 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2533 init_fw_cb->iscsi_opts |= 2534 cpu_to_le16(ISCSIOPTS_INITIAL_R2T_EN); 2535 else 2536 init_fw_cb->iscsi_opts &= 2537 cpu_to_le16(~ISCSIOPTS_INITIAL_R2T_EN); 2538 break; 2539 case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: 2540 if (iface_param->iface_num & 0x1) 2541 break; 2542 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2543 init_fw_cb->iscsi_opts |= 2544 cpu_to_le16(ISCSIOPTS_DATA_SEQ_INORDER_EN); 2545 else 2546 init_fw_cb->iscsi_opts &= 2547 cpu_to_le16(~ISCSIOPTS_DATA_SEQ_INORDER_EN); 2548 break; 2549 case ISCSI_IFACE_PARAM_PDU_INORDER_EN: 2550 if (iface_param->iface_num & 0x1) 2551 break; 2552 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2553 init_fw_cb->iscsi_opts |= 2554 cpu_to_le16(ISCSIOPTS_DATA_PDU_INORDER_EN); 2555 else 2556 init_fw_cb->iscsi_opts &= 2557 cpu_to_le16(~ISCSIOPTS_DATA_PDU_INORDER_EN); 2558 break; 2559 case ISCSI_IFACE_PARAM_ERL: 2560 if (iface_param->iface_num & 0x1) 2561 break; 2562 init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_ERL); 2563 init_fw_cb->iscsi_opts |= cpu_to_le16(iface_param->value[0] & 2564 ISCSIOPTS_ERL); 2565 break; 2566 case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: 2567 if (iface_param->iface_num & 0x1) 2568 break; 2569 init_fw_cb->iscsi_max_pdu_size = 2570 cpu_to_le32(*(uint32_t *)iface_param->value) / 2571 BYTE_UNITS; 2572 break; 2573 case ISCSI_IFACE_PARAM_FIRST_BURST: 2574 if (iface_param->iface_num & 0x1) 2575 break; 2576 init_fw_cb->iscsi_fburst_len = 2577 cpu_to_le32(*(uint32_t *)iface_param->value) / 2578 BYTE_UNITS; 2579 break; 2580 case ISCSI_IFACE_PARAM_MAX_R2T: 2581 if (iface_param->iface_num & 
0x1) 2582 break; 2583 init_fw_cb->iscsi_max_outstnd_r2t = 2584 cpu_to_le16(*(uint16_t *)iface_param->value); 2585 break; 2586 case ISCSI_IFACE_PARAM_MAX_BURST: 2587 if (iface_param->iface_num & 0x1) 2588 break; 2589 init_fw_cb->iscsi_max_burst_len = 2590 cpu_to_le32(*(uint32_t *)iface_param->value) / 2591 BYTE_UNITS; 2592 break; 2593 case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: 2594 if (iface_param->iface_num & 0x1) 2595 break; 2596 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2597 init_fw_cb->iscsi_opts |= 2598 cpu_to_le16(ISCSIOPTS_CHAP_AUTH_EN); 2599 else 2600 init_fw_cb->iscsi_opts &= 2601 cpu_to_le16(~ISCSIOPTS_CHAP_AUTH_EN); 2602 break; 2603 case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: 2604 if (iface_param->iface_num & 0x1) 2605 break; 2606 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2607 init_fw_cb->iscsi_opts |= 2608 cpu_to_le16(ISCSIOPTS_BIDI_CHAP_EN); 2609 else 2610 init_fw_cb->iscsi_opts &= 2611 cpu_to_le16(~ISCSIOPTS_BIDI_CHAP_EN); 2612 break; 2613 case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: 2614 if (iface_param->iface_num & 0x1) 2615 break; 2616 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2617 init_fw_cb->iscsi_opts |= 2618 cpu_to_le16(ISCSIOPTS_DISCOVERY_AUTH_EN); 2619 else 2620 init_fw_cb->iscsi_opts &= 2621 cpu_to_le16(~ISCSIOPTS_DISCOVERY_AUTH_EN); 2622 break; 2623 case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: 2624 if (iface_param->iface_num & 0x1) 2625 break; 2626 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2627 init_fw_cb->iscsi_opts |= 2628 cpu_to_le16(ISCSIOPTS_DISCOVERY_LOGOUT_EN); 2629 else 2630 init_fw_cb->iscsi_opts &= 2631 cpu_to_le16(~ISCSIOPTS_DISCOVERY_LOGOUT_EN); 2632 break; 2633 case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: 2634 if (iface_param->iface_num & 0x1) 2635 break; 2636 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2637 init_fw_cb->iscsi_opts |= 2638 cpu_to_le16(ISCSIOPTS_STRICT_LOGIN_COMP_EN); 2639 else 2640 init_fw_cb->iscsi_opts &= 2641 cpu_to_le16(~ISCSIOPTS_STRICT_LOGIN_COMP_EN); 2642 break; 2643 default: 2644 ql4_printk(KERN_ERR, ha, "Unknown iscsi param = %d\n", 2645 iface_param->param); 2646 break; 2647 } 2648 } 2649 2650 static void 2651 qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb) 2652 { 2653 struct addr_ctrl_blk_def *acb; 2654 acb = (struct addr_ctrl_blk_def *)init_fw_cb; 2655 memset(acb->reserved1, 0, sizeof(acb->reserved1)); 2656 memset(acb->reserved2, 0, sizeof(acb->reserved2)); 2657 memset(acb->reserved3, 0, sizeof(acb->reserved3)); 2658 memset(acb->reserved4, 0, sizeof(acb->reserved4)); 2659 memset(acb->reserved5, 0, sizeof(acb->reserved5)); 2660 memset(acb->reserved6, 0, sizeof(acb->reserved6)); 2661 memset(acb->reserved7, 0, sizeof(acb->reserved7)); 2662 memset(acb->reserved8, 0, sizeof(acb->reserved8)); 2663 memset(acb->reserved9, 0, sizeof(acb->reserved9)); 2664 memset(acb->reserved10, 0, sizeof(acb->reserved10)); 2665 memset(acb->reserved11, 0, sizeof(acb->reserved11)); 2666 memset(acb->reserved12, 0, sizeof(acb->reserved12)); 2667 memset(acb->reserved13, 0, sizeof(acb->reserved13)); 2668 memset(acb->reserved14, 0, sizeof(acb->reserved14)); 2669 memset(acb->reserved15, 0, sizeof(acb->reserved15)); 2670 } 2671 2672 static int 2673 qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len) 2674 { 2675 struct scsi_qla_host *ha = to_qla_host(shost); 2676 int rval = 0; 2677 struct iscsi_iface_param_info *iface_param = NULL; 2678 struct addr_ctrl_blk *init_fw_cb = NULL; 2679 dma_addr_t init_fw_cb_dma; 2680 uint32_t mbox_cmd[MBOX_REG_COUNT]; 2681 uint32_t mbox_sts[MBOX_REG_COUNT]; 
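	/*
	 * Flow: read the current init firmware control block, fold each
	 * netlink attribute into it, commit the result to flash, then
	 * disable and re-set the ACB so the firmware applies the new
	 * interface settings.
	 */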
2682 uint32_t rem = len; 2683 struct nlattr *attr; 2684 2685 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, 2686 sizeof(struct addr_ctrl_blk), 2687 &init_fw_cb_dma, GFP_KERNEL); 2688 if (!init_fw_cb) { 2689 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n", 2690 __func__); 2691 return -ENOMEM; 2692 } 2693 2694 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk)); 2695 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 2696 memset(&mbox_sts, 0, sizeof(mbox_sts)); 2697 2698 if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) { 2699 ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__); 2700 rval = -EIO; 2701 goto exit_init_fw_cb; 2702 } 2703 2704 nla_for_each_attr(attr, data, len, rem) { 2705 iface_param = nla_data(attr); 2706 2707 if (iface_param->param_type == ISCSI_NET_PARAM) { 2708 switch (iface_param->iface_type) { 2709 case ISCSI_IFACE_TYPE_IPV4: 2710 switch (iface_param->iface_num) { 2711 case 0: 2712 qla4xxx_set_ipv4(ha, iface_param, 2713 init_fw_cb); 2714 break; 2715 default: 2716 /* Cannot have more than one IPv4 interface */ 2717 ql4_printk(KERN_ERR, ha, 2718 "Invalid IPv4 iface number = %d\n", 2719 iface_param->iface_num); 2720 break; 2721 } 2722 break; 2723 case ISCSI_IFACE_TYPE_IPV6: 2724 switch (iface_param->iface_num) { 2725 case 0: 2726 case 1: 2727 qla4xxx_set_ipv6(ha, iface_param, 2728 init_fw_cb); 2729 break; 2730 default: 2731 /* Cannot have more than two IPv6 interface */ 2732 ql4_printk(KERN_ERR, ha, 2733 "Invalid IPv6 iface number = %d\n", 2734 iface_param->iface_num); 2735 break; 2736 } 2737 break; 2738 default: 2739 ql4_printk(KERN_ERR, ha, 2740 "Invalid iface type\n"); 2741 break; 2742 } 2743 } else if (iface_param->param_type == ISCSI_IFACE_PARAM) { 2744 qla4xxx_set_iscsi_param(ha, iface_param, 2745 init_fw_cb); 2746 } else { 2747 continue; 2748 } 2749 } 2750 2751 init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A); 2752 2753 rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB, 2754 sizeof(struct addr_ctrl_blk), 2755 FLASH_OPT_RMW_COMMIT); 2756 if (rval != QLA_SUCCESS) { 2757 ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n", 2758 __func__); 2759 rval = -EIO; 2760 goto exit_init_fw_cb; 2761 } 2762 2763 rval = qla4xxx_disable_acb(ha); 2764 if (rval != QLA_SUCCESS) { 2765 ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n", 2766 __func__); 2767 rval = -EIO; 2768 goto exit_init_fw_cb; 2769 } 2770 2771 wait_for_completion_timeout(&ha->disable_acb_comp, 2772 DISABLE_ACB_TOV * HZ); 2773 2774 qla4xxx_initcb_to_acb(init_fw_cb); 2775 2776 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma); 2777 if (rval != QLA_SUCCESS) { 2778 ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n", 2779 __func__); 2780 rval = -EIO; 2781 goto exit_init_fw_cb; 2782 } 2783 2784 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk)); 2785 qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb, 2786 init_fw_cb_dma); 2787 2788 exit_init_fw_cb: 2789 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), 2790 init_fw_cb, init_fw_cb_dma); 2791 2792 return rval; 2793 } 2794 2795 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess, 2796 enum iscsi_param param, char *buf) 2797 { 2798 struct iscsi_session *sess = cls_sess->dd_data; 2799 struct ddb_entry *ddb_entry = sess->dd_data; 2800 struct scsi_qla_host *ha = ddb_entry->ha; 2801 struct iscsi_cls_conn *cls_conn = ddb_entry->conn; 2802 struct ql4_chap_table chap_tbl; 2803 int rval, len; 2804 uint16_t idx; 2805 2806 memset(&chap_tbl, 0, sizeof(chap_tbl)); 2807 
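	/* CHAP index queries are answered from the adapter's CHAP table;
	 * everything else is handed to the generic iSCSI session code.
	 */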
switch (param) { 2808 case ISCSI_PARAM_CHAP_IN_IDX: 2809 rval = qla4xxx_get_chap_index(ha, sess->username_in, 2810 sess->password_in, BIDI_CHAP, 2811 &idx); 2812 if (rval) 2813 len = sprintf(buf, "\n"); 2814 else 2815 len = sprintf(buf, "%hu\n", idx); 2816 break; 2817 case ISCSI_PARAM_CHAP_OUT_IDX: 2818 if (ddb_entry->ddb_type == FLASH_DDB) { 2819 if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) { 2820 idx = ddb_entry->chap_tbl_idx; 2821 rval = QLA_SUCCESS; 2822 } else { 2823 rval = QLA_ERROR; 2824 } 2825 } else { 2826 rval = qla4xxx_get_chap_index(ha, sess->username, 2827 sess->password, 2828 LOCAL_CHAP, &idx); 2829 } 2830 if (rval) 2831 len = sprintf(buf, "\n"); 2832 else 2833 len = sprintf(buf, "%hu\n", idx); 2834 break; 2835 case ISCSI_PARAM_USERNAME: 2836 case ISCSI_PARAM_PASSWORD: 2837 /* First, populate session username and password for FLASH DDB, 2838 * if not already done. This happens when session login fails 2839 * for a FLASH DDB. 2840 */ 2841 if (ddb_entry->ddb_type == FLASH_DDB && 2842 ddb_entry->chap_tbl_idx != INVALID_ENTRY && 2843 !sess->username && !sess->password) { 2844 idx = ddb_entry->chap_tbl_idx; 2845 rval = qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, 2846 chap_tbl.secret, 2847 idx); 2848 if (!rval) { 2849 iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, 2850 (char *)chap_tbl.name, 2851 strlen((char *)chap_tbl.name)); 2852 iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, 2853 (char *)chap_tbl.secret, 2854 chap_tbl.secret_len); 2855 } 2856 } 2857 /* allow fall-through */ 2858 default: 2859 return iscsi_session_get_param(cls_sess, param, buf); 2860 } 2861 2862 return len; 2863 } 2864 2865 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn, 2866 enum iscsi_param param, char *buf) 2867 { 2868 struct iscsi_conn *conn; 2869 struct qla_conn *qla_conn; 2870 struct sockaddr *dst_addr; 2871 int len = 0; 2872 2873 conn = cls_conn->dd_data; 2874 qla_conn = conn->dd_data; 2875 dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr; 2876 2877 switch (param) { 2878 case ISCSI_PARAM_CONN_PORT: 2879 case ISCSI_PARAM_CONN_ADDRESS: 2880 return iscsi_conn_get_addr_param((struct sockaddr_storage *) 2881 dst_addr, param, buf); 2882 default: 2883 return iscsi_conn_get_param(cls_conn, param, buf); 2884 } 2885 2886 return len; 2887 2888 } 2889 2890 int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index) 2891 { 2892 uint32_t mbx_sts = 0; 2893 uint16_t tmp_ddb_index; 2894 int ret; 2895 2896 get_ddb_index: 2897 tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES); 2898 2899 if (tmp_ddb_index >= MAX_DDB_ENTRIES) { 2900 DEBUG2(ql4_printk(KERN_INFO, ha, 2901 "Free DDB index not available\n")); 2902 ret = QLA_ERROR; 2903 goto exit_get_ddb_index; 2904 } 2905 2906 if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map)) 2907 goto get_ddb_index; 2908 2909 DEBUG2(ql4_printk(KERN_INFO, ha, 2910 "Found a free DDB index at %d\n", tmp_ddb_index)); 2911 ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts); 2912 if (ret == QLA_ERROR) { 2913 if (mbx_sts == MBOX_STS_COMMAND_ERROR) { 2914 ql4_printk(KERN_INFO, ha, 2915 "DDB index = %d not available trying next\n", 2916 tmp_ddb_index); 2917 goto get_ddb_index; 2918 } 2919 DEBUG2(ql4_printk(KERN_INFO, ha, 2920 "Free FW DDB not available\n")); 2921 } 2922 2923 *ddb_index = tmp_ddb_index; 2924 2925 exit_get_ddb_index: 2926 return ret; 2927 } 2928 2929 static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha, 2930 struct ddb_entry *ddb_entry, 2931 char *existing_ipaddr, 2932 char *user_ipaddr) 2933 { 2934 
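	/* Convert the user-supplied address into the same presentation
	 * format as the existing DDB entry so the two can be compared
	 * as strings.
	 */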
uint8_t dst_ipaddr[IPv6_ADDR_LEN]; 2935 char formatted_ipaddr[DDB_IPADDR_LEN]; 2936 int status = QLA_SUCCESS, ret = 0; 2937 2938 if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) { 2939 ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr, 2940 '\0', NULL); 2941 if (ret == 0) { 2942 status = QLA_ERROR; 2943 goto out_match; 2944 } 2945 ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr); 2946 } else { 2947 ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr, 2948 '\0', NULL); 2949 if (ret == 0) { 2950 status = QLA_ERROR; 2951 goto out_match; 2952 } 2953 ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr); 2954 } 2955 2956 if (strcmp(existing_ipaddr, formatted_ipaddr)) 2957 status = QLA_ERROR; 2958 2959 out_match: 2960 return status; 2961 } 2962 2963 static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha, 2964 struct iscsi_cls_conn *cls_conn) 2965 { 2966 int idx = 0, max_ddbs, rval; 2967 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn); 2968 struct iscsi_session *sess, *existing_sess; 2969 struct iscsi_conn *conn, *existing_conn; 2970 struct ddb_entry *ddb_entry; 2971 2972 sess = cls_sess->dd_data; 2973 conn = cls_conn->dd_data; 2974 2975 if (sess->targetname == NULL || 2976 conn->persistent_address == NULL || 2977 conn->persistent_port == 0) 2978 return QLA_ERROR; 2979 2980 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : 2981 MAX_DEV_DB_ENTRIES; 2982 2983 for (idx = 0; idx < max_ddbs; idx++) { 2984 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 2985 if (ddb_entry == NULL) 2986 continue; 2987 2988 if (ddb_entry->ddb_type != FLASH_DDB) 2989 continue; 2990 2991 existing_sess = ddb_entry->sess->dd_data; 2992 existing_conn = ddb_entry->conn->dd_data; 2993 2994 if (existing_sess->targetname == NULL || 2995 existing_conn->persistent_address == NULL || 2996 existing_conn->persistent_port == 0) 2997 continue; 2998 2999 DEBUG2(ql4_printk(KERN_INFO, ha, 3000 "IQN = %s User IQN = %s\n", 3001 existing_sess->targetname, 3002 sess->targetname)); 3003 3004 DEBUG2(ql4_printk(KERN_INFO, ha, 3005 "IP = %s User IP = %s\n", 3006 existing_conn->persistent_address, 3007 conn->persistent_address)); 3008 3009 DEBUG2(ql4_printk(KERN_INFO, ha, 3010 "Port = %d User Port = %d\n", 3011 existing_conn->persistent_port, 3012 conn->persistent_port)); 3013 3014 if (strcmp(existing_sess->targetname, sess->targetname)) 3015 continue; 3016 rval = qla4xxx_match_ipaddress(ha, ddb_entry, 3017 existing_conn->persistent_address, 3018 conn->persistent_address); 3019 if (rval == QLA_ERROR) 3020 continue; 3021 if (existing_conn->persistent_port != conn->persistent_port) 3022 continue; 3023 break; 3024 } 3025 3026 if (idx == max_ddbs) 3027 return QLA_ERROR; 3028 3029 DEBUG2(ql4_printk(KERN_INFO, ha, 3030 "Match found in fwdb sessions\n")); 3031 return QLA_SUCCESS; 3032 } 3033 3034 static struct iscsi_cls_session * 3035 qla4xxx_session_create(struct iscsi_endpoint *ep, 3036 uint16_t cmds_max, uint16_t qdepth, 3037 uint32_t initial_cmdsn) 3038 { 3039 struct iscsi_cls_session *cls_sess; 3040 struct scsi_qla_host *ha; 3041 struct qla_endpoint *qla_ep; 3042 struct ddb_entry *ddb_entry; 3043 uint16_t ddb_index; 3044 struct iscsi_session *sess; 3045 struct sockaddr *dst_addr; 3046 int ret; 3047 3048 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); 3049 if (!ep) { 3050 printk(KERN_ERR "qla4xxx: missing ep.\n"); 3051 return NULL; 3052 } 3053 3054 qla_ep = ep->dd_data; 3055 dst_addr = (struct sockaddr *)&qla_ep->dst_addr; 3056 ha = to_qla_host(qla_ep->host); 3057 3058 ret = 
qla4xxx_get_ddb_index(ha, &ddb_index); 3059 if (ret == QLA_ERROR) 3060 return NULL; 3061 3062 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host, 3063 cmds_max, sizeof(struct ddb_entry), 3064 sizeof(struct ql4_task_data), 3065 initial_cmdsn, ddb_index); 3066 if (!cls_sess) 3067 return NULL; 3068 3069 sess = cls_sess->dd_data; 3070 ddb_entry = sess->dd_data; 3071 ddb_entry->fw_ddb_index = ddb_index; 3072 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE; 3073 ddb_entry->ha = ha; 3074 ddb_entry->sess = cls_sess; 3075 ddb_entry->unblock_sess = qla4xxx_unblock_ddb; 3076 ddb_entry->ddb_change = qla4xxx_ddb_change; 3077 cls_sess->recovery_tmo = ql4xsess_recovery_tmo; 3078 ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry; 3079 ha->tot_ddbs++; 3080 3081 return cls_sess; 3082 } 3083 3084 static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess) 3085 { 3086 struct iscsi_session *sess; 3087 struct ddb_entry *ddb_entry; 3088 struct scsi_qla_host *ha; 3089 unsigned long flags, wtime; 3090 struct dev_db_entry *fw_ddb_entry = NULL; 3091 dma_addr_t fw_ddb_entry_dma; 3092 uint32_t ddb_state; 3093 int ret; 3094 3095 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); 3096 sess = cls_sess->dd_data; 3097 ddb_entry = sess->dd_data; 3098 ha = ddb_entry->ha; 3099 3100 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3101 &fw_ddb_entry_dma, GFP_KERNEL); 3102 if (!fw_ddb_entry) { 3103 ql4_printk(KERN_ERR, ha, 3104 "%s: Unable to allocate dma buffer\n", __func__); 3105 goto destroy_session; 3106 } 3107 3108 wtime = jiffies + (HZ * LOGOUT_TOV); 3109 do { 3110 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 3111 fw_ddb_entry, fw_ddb_entry_dma, 3112 NULL, NULL, &ddb_state, NULL, 3113 NULL, NULL); 3114 if (ret == QLA_ERROR) 3115 goto destroy_session; 3116 3117 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || 3118 (ddb_state == DDB_DS_SESSION_FAILED)) 3119 goto destroy_session; 3120 3121 schedule_timeout_uninterruptible(HZ); 3122 } while ((time_after(wtime, jiffies))); 3123 3124 destroy_session: 3125 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); 3126 3127 spin_lock_irqsave(&ha->hardware_lock, flags); 3128 qla4xxx_free_ddb(ha, ddb_entry); 3129 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3130 3131 iscsi_session_teardown(cls_sess); 3132 3133 if (fw_ddb_entry) 3134 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3135 fw_ddb_entry, fw_ddb_entry_dma); 3136 } 3137 3138 static struct iscsi_cls_conn * 3139 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx) 3140 { 3141 struct iscsi_cls_conn *cls_conn; 3142 struct iscsi_session *sess; 3143 struct ddb_entry *ddb_entry; 3144 3145 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); 3146 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), 3147 conn_idx); 3148 if (!cls_conn) 3149 return NULL; 3150 3151 sess = cls_sess->dd_data; 3152 ddb_entry = sess->dd_data; 3153 ddb_entry->conn = cls_conn; 3154 3155 return cls_conn; 3156 } 3157 3158 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session, 3159 struct iscsi_cls_conn *cls_conn, 3160 uint64_t transport_fd, int is_leading) 3161 { 3162 struct iscsi_conn *conn; 3163 struct qla_conn *qla_conn; 3164 struct iscsi_endpoint *ep; 3165 3166 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); 3167 3168 if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) 3169 return -EINVAL; 3170 ep = iscsi_lookup_endpoint(transport_fd); 3171 conn = cls_conn->dd_data; 3172 qla_conn = conn->dd_data; 3173 
	/* A stale or invalid handle makes iscsi_lookup_endpoint() return
	 * NULL; bail out before dereferencing it.
	 */
	if (!ep)
		return -EINVAL;
qla_conn->qla_ep = ep->dd_data; 3174 return 0; 3175 } 3176 3177 static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn) 3178 { 3179 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn); 3180 struct iscsi_session *sess; 3181 struct ddb_entry *ddb_entry; 3182 struct scsi_qla_host *ha; 3183 struct dev_db_entry *fw_ddb_entry = NULL; 3184 dma_addr_t fw_ddb_entry_dma; 3185 uint32_t mbx_sts = 0; 3186 int ret = 0; 3187 int status = QLA_SUCCESS; 3188 3189 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); 3190 sess = cls_sess->dd_data; 3191 ddb_entry = sess->dd_data; 3192 ha = ddb_entry->ha; 3193 3194 /* Check if we have matching FW DDB, if yes then do not 3195 * login to this target. This could cause target to logout previous 3196 * connection 3197 */ 3198 ret = qla4xxx_match_fwdb_session(ha, cls_conn); 3199 if (ret == QLA_SUCCESS) { 3200 ql4_printk(KERN_INFO, ha, 3201 "Session already exist in FW.\n"); 3202 ret = -EEXIST; 3203 goto exit_conn_start; 3204 } 3205 3206 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3207 &fw_ddb_entry_dma, GFP_KERNEL); 3208 if (!fw_ddb_entry) { 3209 ql4_printk(KERN_ERR, ha, 3210 "%s: Unable to allocate dma buffer\n", __func__); 3211 ret = -ENOMEM; 3212 goto exit_conn_start; 3213 } 3214 3215 ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts); 3216 if (ret) { 3217 /* If iscsid is stopped and started then no need to do 3218 * set param again since ddb state will be already 3219 * active and FW does not allow set ddb to an 3220 * active session. 3221 */ 3222 if (mbx_sts) 3223 if (ddb_entry->fw_ddb_device_state == 3224 DDB_DS_SESSION_ACTIVE) { 3225 ddb_entry->unblock_sess(ddb_entry->sess); 3226 goto exit_set_param; 3227 } 3228 3229 ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n", 3230 __func__, ddb_entry->fw_ddb_index); 3231 goto exit_conn_start; 3232 } 3233 3234 status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index); 3235 if (status == QLA_ERROR) { 3236 ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__, 3237 sess->targetname); 3238 ret = -EINVAL; 3239 goto exit_conn_start; 3240 } 3241 3242 if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE) 3243 ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS; 3244 3245 DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__, 3246 ddb_entry->fw_ddb_device_state)); 3247 3248 exit_set_param: 3249 ret = 0; 3250 3251 exit_conn_start: 3252 if (fw_ddb_entry) 3253 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3254 fw_ddb_entry, fw_ddb_entry_dma); 3255 return ret; 3256 } 3257 3258 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn) 3259 { 3260 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn); 3261 struct iscsi_session *sess; 3262 struct scsi_qla_host *ha; 3263 struct ddb_entry *ddb_entry; 3264 int options; 3265 3266 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); 3267 sess = cls_sess->dd_data; 3268 ddb_entry = sess->dd_data; 3269 ha = ddb_entry->ha; 3270 3271 options = LOGOUT_OPTION_CLOSE_SESSION; 3272 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) 3273 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__); 3274 } 3275 3276 static void qla4xxx_task_work(struct work_struct *wdata) 3277 { 3278 struct ql4_task_data *task_data; 3279 struct scsi_qla_host *ha; 3280 struct passthru_status *sts; 3281 struct iscsi_task *task; 3282 struct iscsi_hdr *hdr; 3283 uint8_t *data; 3284 uint32_t data_len; 3285 struct iscsi_conn *conn; 3286 int hdr_len; 3287 itt_t itt; 3288 3289 
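	/* Deferred completion of a passthrough IOCB: hand the response
	 * PDU back to the iSCSI transport once the firmware has returned
	 * its status.
	 */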
task_data = container_of(wdata, struct ql4_task_data, task_work); 3290 ha = task_data->ha; 3291 task = task_data->task; 3292 sts = &task_data->sts; 3293 hdr_len = sizeof(struct iscsi_hdr); 3294 3295 DEBUG3(printk(KERN_INFO "Status returned\n")); 3296 DEBUG3(qla4xxx_dump_buffer(sts, 64)); 3297 DEBUG3(printk(KERN_INFO "Response buffer")); 3298 DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64)); 3299 3300 conn = task->conn; 3301 3302 switch (sts->completionStatus) { 3303 case PASSTHRU_STATUS_COMPLETE: 3304 hdr = (struct iscsi_hdr *)task_data->resp_buffer; 3305 /* Assign back the itt in hdr, until we use the PREASSIGN_TAG */ 3306 itt = sts->handle; 3307 hdr->itt = itt; 3308 data = task_data->resp_buffer + hdr_len; 3309 data_len = task_data->resp_len - hdr_len; 3310 iscsi_complete_pdu(conn, hdr, data, data_len); 3311 break; 3312 default: 3313 ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n", 3314 sts->completionStatus); 3315 break; 3316 } 3317 return; 3318 } 3319 3320 static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode) 3321 { 3322 struct ql4_task_data *task_data; 3323 struct iscsi_session *sess; 3324 struct ddb_entry *ddb_entry; 3325 struct scsi_qla_host *ha; 3326 int hdr_len; 3327 3328 sess = task->conn->session; 3329 ddb_entry = sess->dd_data; 3330 ha = ddb_entry->ha; 3331 task_data = task->dd_data; 3332 memset(task_data, 0, sizeof(struct ql4_task_data)); 3333 3334 if (task->sc) { 3335 ql4_printk(KERN_INFO, ha, 3336 "%s: SCSI Commands not implemented\n", __func__); 3337 return -EINVAL; 3338 } 3339 3340 hdr_len = sizeof(struct iscsi_hdr); 3341 task_data->ha = ha; 3342 task_data->task = task; 3343 3344 if (task->data_count) { 3345 task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data, 3346 task->data_count, 3347 PCI_DMA_TODEVICE); 3348 } 3349 3350 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n", 3351 __func__, task->conn->max_recv_dlength, hdr_len)); 3352 3353 task_data->resp_len = task->conn->max_recv_dlength + hdr_len; 3354 task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev, 3355 task_data->resp_len, 3356 &task_data->resp_dma, 3357 GFP_ATOMIC); 3358 if (!task_data->resp_buffer) 3359 goto exit_alloc_pdu; 3360 3361 task_data->req_len = task->data_count + hdr_len; 3362 task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev, 3363 task_data->req_len, 3364 &task_data->req_dma, 3365 GFP_ATOMIC); 3366 if (!task_data->req_buffer) 3367 goto exit_alloc_pdu; 3368 3369 task->hdr = task_data->req_buffer; 3370 3371 INIT_WORK(&task_data->task_work, qla4xxx_task_work); 3372 3373 return 0; 3374 3375 exit_alloc_pdu: 3376 if (task_data->resp_buffer) 3377 dma_free_coherent(&ha->pdev->dev, task_data->resp_len, 3378 task_data->resp_buffer, task_data->resp_dma); 3379 3380 if (task_data->req_buffer) 3381 dma_free_coherent(&ha->pdev->dev, task_data->req_len, 3382 task_data->req_buffer, task_data->req_dma); 3383 return -ENOMEM; 3384 } 3385 3386 static void qla4xxx_task_cleanup(struct iscsi_task *task) 3387 { 3388 struct ql4_task_data *task_data; 3389 struct iscsi_session *sess; 3390 struct ddb_entry *ddb_entry; 3391 struct scsi_qla_host *ha; 3392 int hdr_len; 3393 3394 hdr_len = sizeof(struct iscsi_hdr); 3395 sess = task->conn->session; 3396 ddb_entry = sess->dd_data; 3397 ha = ddb_entry->ha; 3398 task_data = task->dd_data; 3399 3400 if (task->data_count) { 3401 dma_unmap_single(&ha->pdev->dev, task_data->data_dma, 3402 task->data_count, PCI_DMA_TODEVICE); 3403 } 3404 3405 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n", 
3406 __func__, task->conn->max_recv_dlength, hdr_len)); 3407 3408 dma_free_coherent(&ha->pdev->dev, task_data->resp_len, 3409 task_data->resp_buffer, task_data->resp_dma); 3410 dma_free_coherent(&ha->pdev->dev, task_data->req_len, 3411 task_data->req_buffer, task_data->req_dma); 3412 return; 3413 } 3414 3415 static int qla4xxx_task_xmit(struct iscsi_task *task) 3416 { 3417 struct scsi_cmnd *sc = task->sc; 3418 struct iscsi_session *sess = task->conn->session; 3419 struct ddb_entry *ddb_entry = sess->dd_data; 3420 struct scsi_qla_host *ha = ddb_entry->ha; 3421 3422 if (!sc) 3423 return qla4xxx_send_passthru0(task); 3424 3425 ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n", 3426 __func__); 3427 return -ENOSYS; 3428 } 3429 3430 static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess, 3431 struct iscsi_bus_flash_conn *conn, 3432 struct dev_db_entry *fw_ddb_entry) 3433 { 3434 unsigned long options = 0; 3435 int rc = 0; 3436 3437 options = le16_to_cpu(fw_ddb_entry->options); 3438 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options); 3439 if (test_bit(OPT_IPV6_DEVICE, &options)) { 3440 rc = iscsi_switch_str_param(&sess->portal_type, 3441 PORTAL_TYPE_IPV6); 3442 if (rc) 3443 goto exit_copy; 3444 } else { 3445 rc = iscsi_switch_str_param(&sess->portal_type, 3446 PORTAL_TYPE_IPV4); 3447 if (rc) 3448 goto exit_copy; 3449 } 3450 3451 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE, 3452 &options); 3453 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options); 3454 sess->entry_state = test_bit(OPT_ENTRY_STATE, &options); 3455 3456 options = le16_to_cpu(fw_ddb_entry->iscsi_options); 3457 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options); 3458 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options); 3459 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options); 3460 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options); 3461 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER, 3462 &options); 3463 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options); 3464 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options); 3465 conn->snack_req_en = test_bit(ISCSIOPT_SNACK_REQ_EN, &options); 3466 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN, 3467 &options); 3468 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options); 3469 sess->discovery_auth_optional = 3470 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options); 3471 if (test_bit(ISCSIOPT_ERL1, &options)) 3472 sess->erl |= BIT_1; 3473 if (test_bit(ISCSIOPT_ERL0, &options)) 3474 sess->erl |= BIT_0; 3475 3476 options = le16_to_cpu(fw_ddb_entry->tcp_options); 3477 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options); 3478 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options); 3479 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options); 3480 if (test_bit(TCPOPT_TIMER_SCALE3, &options)) 3481 conn->tcp_timer_scale |= BIT_3; 3482 if (test_bit(TCPOPT_TIMER_SCALE2, &options)) 3483 conn->tcp_timer_scale |= BIT_2; 3484 if (test_bit(TCPOPT_TIMER_SCALE1, &options)) 3485 conn->tcp_timer_scale |= BIT_1; 3486 3487 conn->tcp_timer_scale >>= 1; 3488 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options); 3489 3490 options = le16_to_cpu(fw_ddb_entry->ip_options); 3491 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options); 3492 3493 conn->max_recv_dlength = BYTE_UNITS * 3494 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); 3495 
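	/* Firmware keeps the data segment and burst lengths in multiples
	 * of BYTE_UNITS; scale them to bytes for the transport.
	 */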
conn->max_xmit_dlength = BYTE_UNITS * 3496 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); 3497 sess->first_burst = BYTE_UNITS * 3498 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); 3499 sess->max_burst = BYTE_UNITS * 3500 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); 3501 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); 3502 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); 3503 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); 3504 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); 3505 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss); 3506 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf; 3507 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf; 3508 conn->ipv6_flow_label = le16_to_cpu(fw_ddb_entry->ipv6_flow_lbl); 3509 conn->keepalive_timeout = le16_to_cpu(fw_ddb_entry->ka_timeout); 3510 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port); 3511 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn); 3512 conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn); 3513 sess->discovery_parent_idx = le16_to_cpu(fw_ddb_entry->ddb_link); 3514 sess->discovery_parent_type = le16_to_cpu(fw_ddb_entry->ddb_link); 3515 sess->chap_out_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 3516 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid); 3517 3518 sess->default_taskmgmt_timeout = 3519 le16_to_cpu(fw_ddb_entry->def_timeout); 3520 conn->port = le16_to_cpu(fw_ddb_entry->port); 3521 3522 options = le16_to_cpu(fw_ddb_entry->options); 3523 conn->ipaddress = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL); 3524 if (!conn->ipaddress) { 3525 rc = -ENOMEM; 3526 goto exit_copy; 3527 } 3528 3529 conn->redirect_ipaddr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL); 3530 if (!conn->redirect_ipaddr) { 3531 rc = -ENOMEM; 3532 goto exit_copy; 3533 } 3534 3535 memcpy(conn->ipaddress, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN); 3536 memcpy(conn->redirect_ipaddr, fw_ddb_entry->tgt_addr, IPv6_ADDR_LEN); 3537 3538 if (test_bit(OPT_IPV6_DEVICE, &options)) { 3539 conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos; 3540 3541 conn->link_local_ipv6_addr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL); 3542 if (!conn->link_local_ipv6_addr) { 3543 rc = -ENOMEM; 3544 goto exit_copy; 3545 } 3546 3547 memcpy(conn->link_local_ipv6_addr, 3548 fw_ddb_entry->link_local_ipv6_addr, IPv6_ADDR_LEN); 3549 } else { 3550 conn->ipv4_tos = fw_ddb_entry->ipv4_tos; 3551 } 3552 3553 if (fw_ddb_entry->iscsi_name[0]) { 3554 rc = iscsi_switch_str_param(&sess->targetname, 3555 (char *)fw_ddb_entry->iscsi_name); 3556 if (rc) 3557 goto exit_copy; 3558 } 3559 3560 if (fw_ddb_entry->iscsi_alias[0]) { 3561 rc = iscsi_switch_str_param(&sess->targetalias, 3562 (char *)fw_ddb_entry->iscsi_alias); 3563 if (rc) 3564 goto exit_copy; 3565 } 3566 3567 COPY_ISID(sess->isid, fw_ddb_entry->isid); 3568 3569 exit_copy: 3570 return rc; 3571 } 3572 3573 static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess, 3574 struct iscsi_bus_flash_conn *conn, 3575 struct dev_db_entry *fw_ddb_entry) 3576 { 3577 uint16_t options; 3578 int rc = 0; 3579 3580 options = le16_to_cpu(fw_ddb_entry->options); 3581 SET_BITVAL(conn->is_fw_assigned_ipv6, options, BIT_11); 3582 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4)) 3583 options |= BIT_8; 3584 else 3585 options &= ~BIT_8; 3586 3587 SET_BITVAL(sess->auto_snd_tgt_disable, options, BIT_6); 3588 SET_BITVAL(sess->discovery_sess, options, BIT_4); 3589 SET_BITVAL(sess->entry_state, options, BIT_3); 3590 fw_ddb_entry->options = cpu_to_le16(options); 3591 3592 options = 
le16_to_cpu(fw_ddb_entry->iscsi_options); 3593 SET_BITVAL(conn->hdrdgst_en, options, BIT_13); 3594 SET_BITVAL(conn->datadgst_en, options, BIT_12); 3595 SET_BITVAL(sess->imm_data_en, options, BIT_11); 3596 SET_BITVAL(sess->initial_r2t_en, options, BIT_10); 3597 SET_BITVAL(sess->dataseq_inorder_en, options, BIT_9); 3598 SET_BITVAL(sess->pdu_inorder_en, options, BIT_8); 3599 SET_BITVAL(sess->chap_auth_en, options, BIT_7); 3600 SET_BITVAL(conn->snack_req_en, options, BIT_6); 3601 SET_BITVAL(sess->discovery_logout_en, options, BIT_5); 3602 SET_BITVAL(sess->bidi_chap_en, options, BIT_4); 3603 SET_BITVAL(sess->discovery_auth_optional, options, BIT_3); 3604 SET_BITVAL(sess->erl & BIT_1, options, BIT_1); 3605 SET_BITVAL(sess->erl & BIT_0, options, BIT_0); 3606 fw_ddb_entry->iscsi_options = cpu_to_le16(options); 3607 3608 options = le16_to_cpu(fw_ddb_entry->tcp_options); 3609 SET_BITVAL(conn->tcp_timestamp_stat, options, BIT_6); 3610 SET_BITVAL(conn->tcp_nagle_disable, options, BIT_5); 3611 SET_BITVAL(conn->tcp_wsf_disable, options, BIT_4); 3612 SET_BITVAL(conn->tcp_timer_scale & BIT_2, options, BIT_3); 3613 SET_BITVAL(conn->tcp_timer_scale & BIT_1, options, BIT_2); 3614 SET_BITVAL(conn->tcp_timer_scale & BIT_0, options, BIT_1); 3615 SET_BITVAL(conn->tcp_timestamp_en, options, BIT_0); 3616 fw_ddb_entry->tcp_options = cpu_to_le16(options); 3617 3618 options = le16_to_cpu(fw_ddb_entry->ip_options); 3619 SET_BITVAL(conn->fragment_disable, options, BIT_4); 3620 fw_ddb_entry->ip_options = cpu_to_le16(options); 3621 3622 fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t); 3623 fw_ddb_entry->iscsi_max_rcv_data_seg_len = 3624 cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS); 3625 fw_ddb_entry->iscsi_max_snd_data_seg_len = 3626 cpu_to_le16(conn->max_xmit_dlength / BYTE_UNITS); 3627 fw_ddb_entry->iscsi_first_burst_len = 3628 cpu_to_le16(sess->first_burst / BYTE_UNITS); 3629 fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16(sess->max_burst / 3630 BYTE_UNITS); 3631 fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait); 3632 fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain); 3633 fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt); 3634 fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size); 3635 fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf); 3636 fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf); 3637 fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label); 3638 fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout); 3639 fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port); 3640 fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn); 3641 fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn); 3642 fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_idx); 3643 fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx); 3644 fw_ddb_entry->tsid = cpu_to_le16(sess->tsid); 3645 fw_ddb_entry->port = cpu_to_le16(conn->port); 3646 fw_ddb_entry->def_timeout = 3647 cpu_to_le16(sess->default_taskmgmt_timeout); 3648 3649 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4)) 3650 fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class; 3651 else 3652 fw_ddb_entry->ipv4_tos = conn->ipv4_tos; 3653 3654 if (conn->ipaddress) 3655 memcpy(fw_ddb_entry->ip_addr, conn->ipaddress, 3656 sizeof(fw_ddb_entry->ip_addr)); 3657 3658 if (conn->redirect_ipaddr) 3659 memcpy(fw_ddb_entry->tgt_addr, conn->redirect_ipaddr, 3660 sizeof(fw_ddb_entry->tgt_addr)); 3661 3662 if (conn->link_local_ipv6_addr) 3663 
memcpy(fw_ddb_entry->link_local_ipv6_addr, 3664 conn->link_local_ipv6_addr, 3665 sizeof(fw_ddb_entry->link_local_ipv6_addr)); 3666 3667 if (sess->targetname) 3668 memcpy(fw_ddb_entry->iscsi_name, sess->targetname, 3669 sizeof(fw_ddb_entry->iscsi_name)); 3670 3671 if (sess->targetalias) 3672 memcpy(fw_ddb_entry->iscsi_alias, sess->targetalias, 3673 sizeof(fw_ddb_entry->iscsi_alias)); 3674 3675 COPY_ISID(fw_ddb_entry->isid, sess->isid); 3676 3677 return rc; 3678 } 3679 3680 static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn, 3681 struct iscsi_session *sess, 3682 struct dev_db_entry *fw_ddb_entry) 3683 { 3684 unsigned long options = 0; 3685 uint16_t ddb_link; 3686 uint16_t disc_parent; 3687 char ip_addr[DDB_IPADDR_LEN]; 3688 3689 options = le16_to_cpu(fw_ddb_entry->options); 3690 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options); 3691 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE, 3692 &options); 3693 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options); 3694 3695 options = le16_to_cpu(fw_ddb_entry->iscsi_options); 3696 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options); 3697 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options); 3698 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options); 3699 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options); 3700 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER, 3701 &options); 3702 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options); 3703 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options); 3704 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN, 3705 &options); 3706 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options); 3707 sess->discovery_auth_optional = 3708 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options); 3709 if (test_bit(ISCSIOPT_ERL1, &options)) 3710 sess->erl |= BIT_1; 3711 if (test_bit(ISCSIOPT_ERL0, &options)) 3712 sess->erl |= BIT_0; 3713 3714 options = le16_to_cpu(fw_ddb_entry->tcp_options); 3715 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options); 3716 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options); 3717 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options); 3718 if (test_bit(TCPOPT_TIMER_SCALE3, &options)) 3719 conn->tcp_timer_scale |= BIT_3; 3720 if (test_bit(TCPOPT_TIMER_SCALE2, &options)) 3721 conn->tcp_timer_scale |= BIT_2; 3722 if (test_bit(TCPOPT_TIMER_SCALE1, &options)) 3723 conn->tcp_timer_scale |= BIT_1; 3724 3725 conn->tcp_timer_scale >>= 1; 3726 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options); 3727 3728 options = le16_to_cpu(fw_ddb_entry->ip_options); 3729 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options); 3730 3731 conn->max_recv_dlength = BYTE_UNITS * 3732 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); 3733 conn->max_xmit_dlength = BYTE_UNITS * 3734 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); 3735 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); 3736 sess->first_burst = BYTE_UNITS * 3737 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); 3738 sess->max_burst = BYTE_UNITS * 3739 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); 3740 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); 3741 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); 3742 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); 3743 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss); 3744 conn->tcp_xmit_wsf = 
fw_ddb_entry->tcp_xmt_wsf; 3745 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf; 3746 conn->ipv4_tos = fw_ddb_entry->ipv4_tos; 3747 conn->keepalive_tmo = le16_to_cpu(fw_ddb_entry->ka_timeout); 3748 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port); 3749 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn); 3750 conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn); 3751 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid); 3752 COPY_ISID(sess->isid, fw_ddb_entry->isid); 3753 3754 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 3755 if (ddb_link == DDB_ISNS) 3756 disc_parent = ISCSI_DISC_PARENT_ISNS; 3757 else if (ddb_link == DDB_NO_LINK) 3758 disc_parent = ISCSI_DISC_PARENT_UNKNOWN; 3759 else if (ddb_link < MAX_DDB_ENTRIES) 3760 disc_parent = ISCSI_DISC_PARENT_SENDTGT; 3761 else 3762 disc_parent = ISCSI_DISC_PARENT_UNKNOWN; 3763 3764 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 3765 iscsi_get_discovery_parent_name(disc_parent), 0); 3766 3767 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS, 3768 (char *)fw_ddb_entry->iscsi_alias, 0); 3769 3770 options = le16_to_cpu(fw_ddb_entry->options); 3771 if (options & DDB_OPT_IPV6_DEVICE) { 3772 memset(ip_addr, 0, sizeof(ip_addr)); 3773 sprintf(ip_addr, "%pI6", fw_ddb_entry->link_local_ipv6_addr); 3774 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_LOCAL_IPADDR, 3775 (char *)ip_addr, 0); 3776 } 3777 } 3778 3779 static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha, 3780 struct dev_db_entry *fw_ddb_entry, 3781 struct iscsi_cls_session *cls_sess, 3782 struct iscsi_cls_conn *cls_conn) 3783 { 3784 int buflen = 0; 3785 struct iscsi_session *sess; 3786 struct ddb_entry *ddb_entry; 3787 struct ql4_chap_table chap_tbl; 3788 struct iscsi_conn *conn; 3789 char ip_addr[DDB_IPADDR_LEN]; 3790 uint16_t options = 0; 3791 3792 sess = cls_sess->dd_data; 3793 ddb_entry = sess->dd_data; 3794 conn = cls_conn->dd_data; 3795 memset(&chap_tbl, 0, sizeof(chap_tbl)); 3796 3797 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 3798 3799 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry); 3800 3801 sess->def_taskmgmt_tmo = le16_to_cpu(fw_ddb_entry->def_timeout); 3802 conn->persistent_port = le16_to_cpu(fw_ddb_entry->port); 3803 3804 memset(ip_addr, 0, sizeof(ip_addr)); 3805 options = le16_to_cpu(fw_ddb_entry->options); 3806 if (options & DDB_OPT_IPV6_DEVICE) { 3807 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv6", 4); 3808 3809 memset(ip_addr, 0, sizeof(ip_addr)); 3810 sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr); 3811 } else { 3812 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv4", 4); 3813 sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr); 3814 } 3815 3816 iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS, 3817 (char *)ip_addr, buflen); 3818 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME, 3819 (char *)fw_ddb_entry->iscsi_name, buflen); 3820 iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME, 3821 (char *)ha->name_string, buflen); 3822 3823 if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) { 3824 if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, 3825 chap_tbl.secret, 3826 ddb_entry->chap_tbl_idx)) { 3827 iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, 3828 (char *)chap_tbl.name, 3829 strlen((char *)chap_tbl.name)); 3830 iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, 3831 (char *)chap_tbl.secret, 3832 chap_tbl.secret_len); 3833 } 3834 } 3835 } 3836 3837 void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha, 3838 struct ddb_entry *ddb_entry) 3839 { 3840 struct 
iscsi_cls_session *cls_sess; 3841 struct iscsi_cls_conn *cls_conn; 3842 uint32_t ddb_state; 3843 dma_addr_t fw_ddb_entry_dma; 3844 struct dev_db_entry *fw_ddb_entry; 3845 3846 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3847 &fw_ddb_entry_dma, GFP_KERNEL); 3848 if (!fw_ddb_entry) { 3849 ql4_printk(KERN_ERR, ha, 3850 "%s: Unable to allocate dma buffer\n", __func__); 3851 goto exit_session_conn_fwddb_param; 3852 } 3853 3854 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, 3855 fw_ddb_entry_dma, NULL, NULL, &ddb_state, 3856 NULL, NULL, NULL) == QLA_ERROR) { 3857 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " 3858 "get_ddb_entry for fw_ddb_index %d\n", 3859 ha->host_no, __func__, 3860 ddb_entry->fw_ddb_index)); 3861 goto exit_session_conn_fwddb_param; 3862 } 3863 3864 cls_sess = ddb_entry->sess; 3865 3866 cls_conn = ddb_entry->conn; 3867 3868 /* Update params */ 3869 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); 3870 3871 exit_session_conn_fwddb_param: 3872 if (fw_ddb_entry) 3873 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3874 fw_ddb_entry, fw_ddb_entry_dma); 3875 } 3876 3877 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha, 3878 struct ddb_entry *ddb_entry) 3879 { 3880 struct iscsi_cls_session *cls_sess; 3881 struct iscsi_cls_conn *cls_conn; 3882 struct iscsi_session *sess; 3883 struct iscsi_conn *conn; 3884 uint32_t ddb_state; 3885 dma_addr_t fw_ddb_entry_dma; 3886 struct dev_db_entry *fw_ddb_entry; 3887 3888 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3889 &fw_ddb_entry_dma, GFP_KERNEL); 3890 if (!fw_ddb_entry) { 3891 ql4_printk(KERN_ERR, ha, 3892 "%s: Unable to allocate dma buffer\n", __func__); 3893 goto exit_session_conn_param; 3894 } 3895 3896 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, 3897 fw_ddb_entry_dma, NULL, NULL, &ddb_state, 3898 NULL, NULL, NULL) == QLA_ERROR) { 3899 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " 3900 "get_ddb_entry for fw_ddb_index %d\n", 3901 ha->host_no, __func__, 3902 ddb_entry->fw_ddb_index)); 3903 goto exit_session_conn_param; 3904 } 3905 3906 cls_sess = ddb_entry->sess; 3907 sess = cls_sess->dd_data; 3908 3909 cls_conn = ddb_entry->conn; 3910 conn = cls_conn->dd_data; 3911 3912 /* Update timers after login */ 3913 ddb_entry->default_relogin_timeout = 3914 (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) && 3915 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ? 
3916 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV; 3917 ddb_entry->default_time2wait = 3918 le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); 3919 3920 /* Update params */ 3921 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 3922 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry); 3923 3924 memcpy(sess->initiatorname, ha->name_string, 3925 min(sizeof(ha->name_string), sizeof(sess->initiatorname))); 3926 3927 exit_session_conn_param: 3928 if (fw_ddb_entry) 3929 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3930 fw_ddb_entry, fw_ddb_entry_dma); 3931 } 3932 3933 /* 3934 * Timer routines 3935 */ 3936 3937 static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func, 3938 unsigned long interval) 3939 { 3940 DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n", 3941 __func__, ha->host->host_no)); 3942 init_timer(&ha->timer); 3943 ha->timer.expires = jiffies + interval * HZ; 3944 ha->timer.data = (unsigned long)ha; 3945 ha->timer.function = (void (*)(unsigned long))func; 3946 add_timer(&ha->timer); 3947 ha->timer_active = 1; 3948 } 3949 3950 static void qla4xxx_stop_timer(struct scsi_qla_host *ha) 3951 { 3952 del_timer_sync(&ha->timer); 3953 ha->timer_active = 0; 3954 } 3955 3956 /** 3957 * qla4xxx_mark_device_missing - blocks the session 3958 * @cls_session: Pointer to the session to be blocked 3960 * 3961 * This routine marks a device missing by blocking its iSCSI session. 3962 **/ 3963 void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session) 3964 { 3965 iscsi_block_session(cls_session); 3966 } 3967 3968 /** 3969 * qla4xxx_mark_all_devices_missing - mark all devices as missing. 3970 * @ha: Pointer to host adapter structure. 3971 * 3972 * This routine marks all devices on the adapter as missing by blocking their sessions. 3973 **/ 3974 void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha) 3975 { 3976 iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing); 3977 } 3978 3979 static struct srb *qla4xxx_get_new_srb(struct scsi_qla_host *ha, 3980 struct ddb_entry *ddb_entry, 3981 struct scsi_cmnd *cmd) 3982 { 3983 struct srb *srb; 3984 3985 srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC); 3986 if (!srb) 3987 return srb; 3988 3989 kref_init(&srb->srb_ref); 3990 srb->ha = ha; 3991 srb->ddb = ddb_entry; 3992 srb->cmd = cmd; 3993 srb->flags = 0; 3994 CMD_SP(cmd) = (void *)srb; 3995 3996 return srb; 3997 } 3998 3999 static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb) 4000 { 4001 struct scsi_cmnd *cmd = srb->cmd; 4002 4003 if (srb->flags & SRB_DMA_VALID) { 4004 scsi_dma_unmap(cmd); 4005 srb->flags &= ~SRB_DMA_VALID; 4006 } 4007 CMD_SP(cmd) = NULL; 4008 } 4009 4010 void qla4xxx_srb_compl(struct kref *ref) 4011 { 4012 struct srb *srb = container_of(ref, struct srb, srb_ref); 4013 struct scsi_cmnd *cmd = srb->cmd; 4014 struct scsi_qla_host *ha = srb->ha; 4015 4016 qla4xxx_srb_free_dma(ha, srb); 4017 4018 mempool_free(srb, ha->srb_mempool); 4019 4020 cmd->scsi_done(cmd); 4021 } 4022 4023 /** 4024 * qla4xxx_queuecommand - scsi layer issues scsi command to driver. 4025 * @host: scsi host 4026 * @cmd: Pointer to Linux's SCSI command structure 4027 * 4028 * Remarks: 4029 * This routine is invoked by Linux to send a SCSI command to the driver.
4030 * The mid-level driver tries to ensure that queuecommand never gets 4031 * invoked concurrently with itself or the interrupt handler (although 4032 * the interrupt handler may call this routine as part of request- 4033 * completion handling). Unfortunately, it sometimes calls the scheduler 4034 * in interrupt context, which is a big NO! NO! 4035 **/ 4036 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) 4037 { 4038 struct scsi_qla_host *ha = to_qla_host(host); 4039 struct ddb_entry *ddb_entry = cmd->device->hostdata; 4040 struct iscsi_cls_session *sess = ddb_entry->sess; 4041 struct srb *srb; 4042 int rval; 4043 4044 if (test_bit(AF_EEH_BUSY, &ha->flags)) { 4045 if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags)) 4046 cmd->result = DID_NO_CONNECT << 16; 4047 else 4048 cmd->result = DID_REQUEUE << 16; 4049 goto qc_fail_command; 4050 } 4051 4052 if (!sess) { 4053 cmd->result = DID_IMM_RETRY << 16; 4054 goto qc_fail_command; 4055 } 4056 4057 rval = iscsi_session_chkready(sess); 4058 if (rval) { 4059 cmd->result = rval; 4060 goto qc_fail_command; 4061 } 4062 4063 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 4064 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || 4065 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 4066 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) || 4067 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || 4068 !test_bit(AF_ONLINE, &ha->flags) || 4069 !test_bit(AF_LINK_UP, &ha->flags) || 4070 test_bit(AF_LOOPBACK, &ha->flags) || 4071 test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) || 4072 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags) || 4073 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) 4074 goto qc_host_busy; 4075 4076 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd); 4077 if (!srb) 4078 goto qc_host_busy; 4079 4080 rval = qla4xxx_send_command_to_isp(ha, srb); 4081 if (rval != QLA_SUCCESS) 4082 goto qc_host_busy_free_sp; 4083 4084 return 0; 4085 4086 qc_host_busy_free_sp: 4087 qla4xxx_srb_free_dma(ha, srb); 4088 mempool_free(srb, ha->srb_mempool); 4089 4090 qc_host_busy: 4091 return SCSI_MLQUEUE_HOST_BUSY; 4092 4093 qc_fail_command: 4094 cmd->scsi_done(cmd); 4095 4096 return 0; 4097 } 4098 4099 /** 4100 * qla4xxx_mem_free - frees memory allocated to adapter 4101 * @ha: Pointer to host adapter structure. 4102 * 4103 * Frees memory previously allocated by qla4xxx_mem_alloc 4104 **/ 4105 static void qla4xxx_mem_free(struct scsi_qla_host *ha) 4106 { 4107 if (ha->queues) 4108 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues, 4109 ha->queues_dma); 4110 4111 if (ha->fw_dump) 4112 vfree(ha->fw_dump); 4113 4114 ha->queues_len = 0; 4115 ha->queues = NULL; 4116 ha->queues_dma = 0; 4117 ha->request_ring = NULL; 4118 ha->request_dma = 0; 4119 ha->response_ring = NULL; 4120 ha->response_dma = 0; 4121 ha->shadow_regs = NULL; 4122 ha->shadow_regs_dma = 0; 4123 ha->fw_dump = NULL; 4124 ha->fw_dump_size = 0; 4125 4126 /* Free srb pool.
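 * The CHAP and flash DDB DMA pools created in qla4xxx_mem_alloc() are released below as well.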
*/ 4127 if (ha->srb_mempool) 4128 mempool_destroy(ha->srb_mempool); 4129 4130 ha->srb_mempool = NULL; 4131 4132 if (ha->chap_dma_pool) 4133 dma_pool_destroy(ha->chap_dma_pool); 4134 4135 if (ha->chap_list) 4136 vfree(ha->chap_list); 4137 ha->chap_list = NULL; 4138 4139 if (ha->fw_ddb_dma_pool) 4140 dma_pool_destroy(ha->fw_ddb_dma_pool); 4141 4142 /* release io space registers */ 4143 if (is_qla8022(ha)) { 4144 if (ha->nx_pcibase) 4145 iounmap( 4146 (struct device_reg_82xx __iomem *)ha->nx_pcibase); 4147 } else if (is_qla8032(ha) || is_qla8042(ha)) { 4148 if (ha->nx_pcibase) 4149 iounmap( 4150 (struct device_reg_83xx __iomem *)ha->nx_pcibase); 4151 } else if (ha->reg) { 4152 iounmap(ha->reg); 4153 } 4154 4155 if (ha->reset_tmplt.buff) 4156 vfree(ha->reset_tmplt.buff); 4157 4158 pci_release_regions(ha->pdev); 4159 } 4160 4161 /** 4162 * qla4xxx_mem_alloc - allocates memory for use by adapter. 4163 * @ha: Pointer to host adapter structure 4164 * 4165 * Allocates DMA memory for request and response queues. Also allocates memory 4166 * for srbs. 4167 **/ 4168 static int qla4xxx_mem_alloc(struct scsi_qla_host *ha) 4169 { 4170 unsigned long align; 4171 4172 /* Allocate contiguous block of DMA memory for queues. */ 4173 ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) + 4174 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) + 4175 sizeof(struct shadow_regs) + 4176 MEM_ALIGN_VALUE + 4177 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 4178 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len, 4179 &ha->queues_dma, GFP_KERNEL); 4180 if (ha->queues == NULL) { 4181 ql4_printk(KERN_WARNING, ha, 4182 "Memory Allocation failed - queues.\n"); 4183 4184 goto mem_alloc_error_exit; 4185 } 4186 memset(ha->queues, 0, ha->queues_len); 4187 4188 /* 4189 * As per RISC alignment requirements -- the bus-address must be a 4190 * multiple of the request-ring size (in bytes). 4191 */ 4192 align = 0; 4193 if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1)) 4194 align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma & 4195 (MEM_ALIGN_VALUE - 1)); 4196 4197 /* Update request and response queue pointers. */ 4198 ha->request_dma = ha->queues_dma + align; 4199 ha->request_ring = (struct queue_entry *) (ha->queues + align); 4200 ha->response_dma = ha->queues_dma + align + 4201 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE); 4202 ha->response_ring = (struct queue_entry *) (ha->queues + align + 4203 (REQUEST_QUEUE_DEPTH * 4204 QUEUE_SIZE)); 4205 ha->shadow_regs_dma = ha->queues_dma + align + 4206 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) + 4207 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE); 4208 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align + 4209 (REQUEST_QUEUE_DEPTH * 4210 QUEUE_SIZE) + 4211 (RESPONSE_QUEUE_DEPTH * 4212 QUEUE_SIZE)); 4213 4214 /* Allocate memory for srb pool. 
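 * The mempool keeps at least SRB_MIN_REQ srbs preallocated, so commands can still be built under memory pressure.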
*/ 4215 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab, 4216 mempool_free_slab, srb_cachep); 4217 if (ha->srb_mempool == NULL) { 4218 ql4_printk(KERN_WARNING, ha, 4219 "Memory Allocation failed - SRB Pool.\n"); 4220 4221 goto mem_alloc_error_exit; 4222 } 4223 4224 ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev, 4225 CHAP_DMA_BLOCK_SIZE, 8, 0); 4226 4227 if (ha->chap_dma_pool == NULL) { 4228 ql4_printk(KERN_WARNING, ha, 4229 "%s: chap_dma_pool allocation failed..\n", __func__); 4230 goto mem_alloc_error_exit; 4231 } 4232 4233 ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev, 4234 DDB_DMA_BLOCK_SIZE, 8, 0); 4235 4236 if (ha->fw_ddb_dma_pool == NULL) { 4237 ql4_printk(KERN_WARNING, ha, 4238 "%s: fw_ddb_dma_pool allocation failed..\n", 4239 __func__); 4240 goto mem_alloc_error_exit; 4241 } 4242 4243 return QLA_SUCCESS; 4244 4245 mem_alloc_error_exit: 4246 qla4xxx_mem_free(ha); 4247 return QLA_ERROR; 4248 } 4249 4250 /** 4251 * qla4_8xxx_check_temp - Check the ISP82XX temperature. 4252 * @ha: adapter block pointer. 4253 * 4254 * Note: The caller should not hold the idc lock. 4255 **/ 4256 static int qla4_8xxx_check_temp(struct scsi_qla_host *ha) 4257 { 4258 uint32_t temp, temp_state, temp_val; 4259 int status = QLA_SUCCESS; 4260 4261 temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE); 4262 4263 temp_state = qla82xx_get_temp_state(temp); 4264 temp_val = qla82xx_get_temp_val(temp); 4265 4266 if (temp_state == QLA82XX_TEMP_PANIC) { 4267 ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C" 4268 " exceeds maximum allowed. Hardware has been shut" 4269 " down.\n", temp_val); 4270 status = QLA_ERROR; 4271 } else if (temp_state == QLA82XX_TEMP_WARN) { 4272 if (ha->temperature == QLA82XX_TEMP_NORMAL) 4273 ql4_printk(KERN_WARNING, ha, "Device temperature %d" 4274 " degrees C exceeds operating range." 4275 " Immediate action needed.\n", temp_val); 4276 } else { 4277 if (ha->temperature == QLA82XX_TEMP_WARN) 4278 ql4_printk(KERN_INFO, ha, "Device temperature is" 4279 " now %d degrees C in normal range.\n", 4280 temp_val); 4281 } 4282 ha->temperature = temp_state; 4283 return status; 4284 } 4285 4286 /** 4287 * qla4_8xxx_check_fw_alive - Check firmware health 4288 * @ha: Pointer to host adapter structure. 
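 * The PEG_ALIVE_COUNTER is expected to advance between polls; if it stays unchanged for two consecutive polls, the peg registers are dumped and QLA_ERROR is returned so the caller can start recovery. A reading of 0xffffffff means AER/EEH is in progress and is ignored.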
4289 * 4290 * Context: Interrupt 4291 **/ 4292 static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha) 4293 { 4294 uint32_t fw_heartbeat_counter; 4295 int status = QLA_SUCCESS; 4296 4297 fw_heartbeat_counter = qla4_8xxx_rd_direct(ha, 4298 QLA8XXX_PEG_ALIVE_COUNTER); 4299 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */ 4300 if (fw_heartbeat_counter == 0xffffffff) { 4301 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen " 4302 "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n", 4303 ha->host_no, __func__)); 4304 return status; 4305 } 4306 4307 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) { 4308 ha->seconds_since_last_heartbeat++; 4309 /* FW not alive after 2 seconds */ 4310 if (ha->seconds_since_last_heartbeat == 2) { 4311 ha->seconds_since_last_heartbeat = 0; 4312 qla4_8xxx_dump_peg_reg(ha); 4313 status = QLA_ERROR; 4314 } 4315 } else 4316 ha->seconds_since_last_heartbeat = 0; 4317 4318 ha->fw_heartbeat_counter = fw_heartbeat_counter; 4319 return status; 4320 } 4321 4322 static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha) 4323 { 4324 uint32_t halt_status; 4325 int halt_status_unrecoverable = 0; 4326 4327 halt_status = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1); 4328 4329 if (is_qla8022(ha)) { 4330 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", 4331 __func__); 4332 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, 4333 CRB_NIU_XG_PAUSE_CTL_P0 | 4334 CRB_NIU_XG_PAUSE_CTL_P1); 4335 4336 if (QLA82XX_FWERROR_CODE(halt_status) == 0x67) 4337 ql4_printk(KERN_ERR, ha, "%s: Firmware aborted with error code 0x00006700. Device is being reset\n", 4338 __func__); 4339 if (halt_status & HALT_STATUS_UNRECOVERABLE) 4340 halt_status_unrecoverable = 1; 4341 } else if (is_qla8032(ha) || is_qla8042(ha)) { 4342 if (halt_status & QLA83XX_HALT_STATUS_FW_RESET) 4343 ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n", 4344 __func__); 4345 else if (halt_status & QLA83XX_HALT_STATUS_UNRECOVERABLE) 4346 halt_status_unrecoverable = 1; 4347 } 4348 4349 /* 4350 * Since we cannot change dev_state in interrupt context, 4351 * set appropriate DPC flag then wakeup DPC 4352 */ 4353 if (halt_status_unrecoverable) { 4354 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags); 4355 } else { 4356 ql4_printk(KERN_INFO, ha, "%s: detect abort needed!\n", 4357 __func__); 4358 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4359 } 4360 qla4xxx_mailbox_premature_completion(ha); 4361 qla4xxx_wake_dpc(ha); 4362 } 4363 4364 /** 4365 * qla4_8xxx_watchdog - Poll dev state 4366 * @ha: Pointer to host adapter structure. 
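 * Runs once per second from qla4xxx_timer() while no reset is in progress: it checks the temperature state, reacts to the NEED_RESET and NEED_QUIESCENT device states, and otherwise verifies the firmware heartbeat.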
4367 * 4368 * Context: Interrupt 4369 **/ 4370 void qla4_8xxx_watchdog(struct scsi_qla_host *ha) 4371 { 4372 uint32_t dev_state; 4373 uint32_t idc_ctrl; 4374 4375 /* don't poll if reset is going on */ 4376 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || 4377 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 4378 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) { 4379 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE); 4380 4381 if (qla4_8xxx_check_temp(ha)) { 4382 if (is_qla8022(ha)) { 4383 ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n"); 4384 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, 4385 CRB_NIU_XG_PAUSE_CTL_P0 | 4386 CRB_NIU_XG_PAUSE_CTL_P1); 4387 } 4388 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags); 4389 qla4xxx_wake_dpc(ha); 4390 } else if (dev_state == QLA8XXX_DEV_NEED_RESET && 4391 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 4392 4393 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n", 4394 __func__); 4395 4396 if (is_qla8032(ha) || is_qla8042(ha)) { 4397 idc_ctrl = qla4_83xx_rd_reg(ha, 4398 QLA83XX_IDC_DRV_CTRL); 4399 if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) { 4400 ql4_printk(KERN_INFO, ha, "%s: Graceful reset bit is not set\n", 4401 __func__); 4402 qla4xxx_mailbox_premature_completion( 4403 ha); 4404 } 4405 } 4406 4407 if ((is_qla8032(ha) || is_qla8042(ha)) || 4408 (is_qla8022(ha) && !ql4xdontresethba)) { 4409 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4410 qla4xxx_wake_dpc(ha); 4411 } 4412 } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT && 4413 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { 4414 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n", 4415 __func__); 4416 set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags); 4417 qla4xxx_wake_dpc(ha); 4418 } else { 4419 /* Check firmware health */ 4420 if (qla4_8xxx_check_fw_alive(ha)) 4421 qla4_8xxx_process_fw_error(ha); 4422 } 4423 } 4424 } 4425 4426 static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) 4427 { 4428 struct iscsi_session *sess; 4429 struct ddb_entry *ddb_entry; 4430 struct scsi_qla_host *ha; 4431 4432 sess = cls_sess->dd_data; 4433 ddb_entry = sess->dd_data; 4434 ha = ddb_entry->ha; 4435 4436 if (!(ddb_entry->ddb_type == FLASH_DDB)) 4437 return; 4438 4439 if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) && 4440 !iscsi_is_session_online(cls_sess)) { 4441 if (atomic_read(&ddb_entry->retry_relogin_timer) != 4442 INVALID_ENTRY) { 4443 if (atomic_read(&ddb_entry->retry_relogin_timer) == 4444 0) { 4445 atomic_set(&ddb_entry->retry_relogin_timer, 4446 INVALID_ENTRY); 4447 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); 4448 set_bit(DF_RELOGIN, &ddb_entry->flags); 4449 DEBUG2(ql4_printk(KERN_INFO, ha, 4450 "%s: index [%d] login device\n", 4451 __func__, ddb_entry->fw_ddb_index)); 4452 } else 4453 atomic_dec(&ddb_entry->retry_relogin_timer); 4454 } 4455 } 4456 4457 /* Wait for relogin to timeout */ 4458 if (atomic_read(&ddb_entry->relogin_timer) && 4459 (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) { 4460 /* 4461 * If the relogin times out and the device is 4462 * still NOT ONLINE then try and relogin again. 
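 * The relogin retry count is incremented and retry_relogin_timer is re-armed with default_time2wait plus a small pad.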
4463 */ 4464 if (!iscsi_is_session_online(cls_sess)) { 4465 /* Reset retry relogin timer */ 4466 atomic_inc(&ddb_entry->relogin_retry_count); 4467 DEBUG2(ql4_printk(KERN_INFO, ha, 4468 "%s: index[%d] relogin timed out-retrying" 4469 " relogin (%d), retry (%d)\n", __func__, 4470 ddb_entry->fw_ddb_index, 4471 atomic_read(&ddb_entry->relogin_retry_count), 4472 ddb_entry->default_time2wait + 4)); 4473 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); 4474 atomic_set(&ddb_entry->retry_relogin_timer, 4475 ddb_entry->default_time2wait + 4); 4476 } 4477 } 4478 } 4479 4480 /** 4481 * qla4xxx_timer - checks every second for work to do. 4482 * @ha: Pointer to host adapter structure. 4483 **/ 4484 static void qla4xxx_timer(struct scsi_qla_host *ha) 4485 { 4486 int start_dpc = 0; 4487 uint16_t w; 4488 4489 iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb); 4490 4491 /* If we are in the middle of AER/EEH processing 4492 * skip any processing and reschedule the timer 4493 */ 4494 if (test_bit(AF_EEH_BUSY, &ha->flags)) { 4495 mod_timer(&ha->timer, jiffies + HZ); 4496 return; 4497 } 4498 4499 /* Hardware read to trigger an EEH error during mailbox waits. */ 4500 if (!pci_channel_offline(ha->pdev)) 4501 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 4502 4503 if (is_qla80XX(ha)) 4504 qla4_8xxx_watchdog(ha); 4505 4506 if (is_qla40XX(ha)) { 4507 /* Check for heartbeat interval. */ 4508 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE && 4509 ha->heartbeat_interval != 0) { 4510 ha->seconds_since_last_heartbeat++; 4511 if (ha->seconds_since_last_heartbeat > 4512 ha->heartbeat_interval + 2) 4513 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4514 } 4515 } 4516 4517 /* Process any deferred work. */ 4518 if (!list_empty(&ha->work_list)) 4519 start_dpc++; 4520 4521 /* Wakeup the dpc routine for this adapter, if needed. */ 4522 if (start_dpc || 4523 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 4524 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) || 4525 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) || 4526 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) || 4527 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 4528 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) || 4529 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) || 4530 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) || 4531 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || 4532 test_bit(DPC_AEN, &ha->dpc_flags)) { 4533 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine" 4534 " - dpc flags = 0x%lx\n", 4535 ha->host_no, __func__, ha->dpc_flags)); 4536 qla4xxx_wake_dpc(ha); 4537 } 4538 4539 /* Reschedule timer thread to call us back in one second */ 4540 mod_timer(&ha->timer, jiffies + HZ); 4541 4542 DEBUG2(ha->seconds_since_last_intr++); 4543 } 4544 4545 /** 4546 * qla4xxx_cmd_wait - waits for all outstanding commands to complete 4547 * @ha: Pointer to host adapter structure. 4548 * 4549 * This routine stalls the driver until all outstanding commands are returned. 4550 * Caller must release the Hardware Lock prior to calling this routine. 4551 **/ 4552 static int qla4xxx_cmd_wait(struct scsi_qla_host *ha) 4553 { 4554 uint32_t index = 0; 4555 unsigned long flags; 4556 struct scsi_cmnd *cmd; 4557 4558 unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ); 4559 4560 DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to " 4561 "complete\n", WAIT_CMD_TOV)); 4562 4563 while (!time_after_eq(jiffies, wtime)) { 4564 spin_lock_irqsave(&ha->hardware_lock, flags); 4565 /* Find a command that hasn't completed. 
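 * An outstanding command still has its srb attached via CMD_SP(cmd).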
*/ 4566 for (index = 0; index < ha->host->can_queue; index++) { 4567 cmd = scsi_host_find_tag(ha->host, index); 4568 /* 4569 * We cannot just check if the index is valid, 4570 * becase if we are run from the scsi eh, then 4571 * the scsi/block layer is going to prevent 4572 * the tag from being released. 4573 */ 4574 if (cmd != NULL && CMD_SP(cmd)) 4575 break; 4576 } 4577 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4578 4579 /* If No Commands are pending, wait is complete */ 4580 if (index == ha->host->can_queue) 4581 return QLA_SUCCESS; 4582 4583 msleep(1000); 4584 } 4585 /* If we timed out on waiting for commands to come back 4586 * return ERROR. */ 4587 return QLA_ERROR; 4588 } 4589 4590 int qla4xxx_hw_reset(struct scsi_qla_host *ha) 4591 { 4592 uint32_t ctrl_status; 4593 unsigned long flags = 0; 4594 4595 DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__)); 4596 4597 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS) 4598 return QLA_ERROR; 4599 4600 spin_lock_irqsave(&ha->hardware_lock, flags); 4601 4602 /* 4603 * If the SCSI Reset Interrupt bit is set, clear it. 4604 * Otherwise, the Soft Reset won't work. 4605 */ 4606 ctrl_status = readw(&ha->reg->ctrl_status); 4607 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) 4608 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status); 4609 4610 /* Issue Soft Reset */ 4611 writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status); 4612 readl(&ha->reg->ctrl_status); 4613 4614 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4615 return QLA_SUCCESS; 4616 } 4617 4618 /** 4619 * qla4xxx_soft_reset - performs soft reset. 4620 * @ha: Pointer to host adapter structure. 4621 **/ 4622 int qla4xxx_soft_reset(struct scsi_qla_host *ha) 4623 { 4624 uint32_t max_wait_time; 4625 unsigned long flags = 0; 4626 int status; 4627 uint32_t ctrl_status; 4628 4629 status = qla4xxx_hw_reset(ha); 4630 if (status != QLA_SUCCESS) 4631 return status; 4632 4633 status = QLA_ERROR; 4634 /* Wait until the Network Reset Intr bit is cleared */ 4635 max_wait_time = RESET_INTR_TOV; 4636 do { 4637 spin_lock_irqsave(&ha->hardware_lock, flags); 4638 ctrl_status = readw(&ha->reg->ctrl_status); 4639 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4640 4641 if ((ctrl_status & CSR_NET_RESET_INTR) == 0) 4642 break; 4643 4644 msleep(1000); 4645 } while ((--max_wait_time)); 4646 4647 if ((ctrl_status & CSR_NET_RESET_INTR) != 0) { 4648 DEBUG2(printk(KERN_WARNING 4649 "scsi%ld: Network Reset Intr not cleared by " 4650 "Network function, clearing it now!\n", 4651 ha->host_no)); 4652 spin_lock_irqsave(&ha->hardware_lock, flags); 4653 writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status); 4654 readl(&ha->reg->ctrl_status); 4655 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4656 } 4657 4658 /* Wait until the firmware tells us the Soft Reset is done */ 4659 max_wait_time = SOFT_RESET_TOV; 4660 do { 4661 spin_lock_irqsave(&ha->hardware_lock, flags); 4662 ctrl_status = readw(&ha->reg->ctrl_status); 4663 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4664 4665 if ((ctrl_status & CSR_SOFT_RESET) == 0) { 4666 status = QLA_SUCCESS; 4667 break; 4668 } 4669 4670 msleep(1000); 4671 } while ((--max_wait_time)); 4672 4673 /* 4674 * Also, make sure that the SCSI Reset Interrupt bit has been cleared 4675 * after the soft reset has taken place. 
4676 */ 4677 spin_lock_irqsave(&ha->hardware_lock, flags); 4678 ctrl_status = readw(&ha->reg->ctrl_status); 4679 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) { 4680 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status); 4681 readl(&ha->reg->ctrl_status); 4682 } 4683 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4684 4685 /* If the soft reset fails, then most probably the BIOS on the other 4686 * function is also enabled. 4687 * Since the initialization is sequential, the other function 4688 * won't be able to acknowledge the soft reset. 4689 * Issue a force soft reset to work around this scenario. 4690 */ 4691 if (max_wait_time == 0) { 4692 /* Issue Force Soft Reset */ 4693 spin_lock_irqsave(&ha->hardware_lock, flags); 4694 writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status); 4695 readl(&ha->reg->ctrl_status); 4696 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4697 /* Wait until the firmware tells us the Soft Reset is done */ 4698 max_wait_time = SOFT_RESET_TOV; 4699 do { 4700 spin_lock_irqsave(&ha->hardware_lock, flags); 4701 ctrl_status = readw(&ha->reg->ctrl_status); 4702 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4703 4704 if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) { 4705 status = QLA_SUCCESS; 4706 break; 4707 } 4708 4709 msleep(1000); 4710 } while ((--max_wait_time)); 4711 } 4712 4713 return status; 4714 } 4715 4716 /** 4717 * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S. 4718 * @ha: Pointer to host adapter structure. 4719 * @res: returned scsi status 4720 * 4721 * This routine is called just prior to a HARD RESET to return all 4722 * outstanding commands back to the Operating System. 4723 * Caller should make sure that the following locks are released 4724 * before calling this routine: hardware lock and io_request_lock. 4725 **/ 4726 static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res) 4727 { 4728 struct srb *srb; 4729 int i; 4730 unsigned long flags; 4731 4732 spin_lock_irqsave(&ha->hardware_lock, flags); 4733 for (i = 0; i < ha->host->can_queue; i++) { 4734 srb = qla4xxx_del_from_active_array(ha, i); 4735 if (srb != NULL) { 4736 srb->cmd->result = res; 4737 kref_put(&srb->srb_ref, qla4xxx_srb_compl); 4738 } 4739 } 4740 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4741 } 4742 4743 void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha) 4744 { 4745 clear_bit(AF_ONLINE, &ha->flags); 4746 4747 /* Disable the board */ 4748 ql4_printk(KERN_INFO, ha, "Disabling the board\n"); 4749 4750 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); 4751 qla4xxx_mark_all_devices_missing(ha); 4752 clear_bit(AF_INIT_DONE, &ha->flags); 4753 } 4754 4755 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session) 4756 { 4757 struct iscsi_session *sess; 4758 struct ddb_entry *ddb_entry; 4759 4760 sess = cls_session->dd_data; 4761 ddb_entry = sess->dd_data; 4762 ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED; 4763 4764 if (ddb_entry->ddb_type == FLASH_DDB) 4765 iscsi_block_session(ddb_entry->sess); 4766 else 4767 iscsi_session_failure(cls_session->dd_data, 4768 ISCSI_ERR_CONN_FAILED); 4769 } 4770 4771 /** 4772 * qla4xxx_recover_adapter - recovers adapter after a fatal error 4773 * @ha: Pointer to host adapter structure.
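 * Blocks new requests, fails over the iSCSI sessions, and then either stops the firmware (ISP8xxx firmware-context reset) or performs a full chip reset before re-initializing the adapter. Repeated failures eventually disable the board via qla4xxx_dead_adapter_cleanup().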
4774 **/ 4775 static int qla4xxx_recover_adapter(struct scsi_qla_host *ha) 4776 { 4777 int status = QLA_ERROR; 4778 uint8_t reset_chip = 0; 4779 uint32_t dev_state; 4780 unsigned long wait; 4781 4782 /* Stall incoming I/O until we are done */ 4783 scsi_block_requests(ha->host); 4784 clear_bit(AF_ONLINE, &ha->flags); 4785 clear_bit(AF_LINK_UP, &ha->flags); 4786 4787 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__)); 4788 4789 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 4790 4791 if ((is_qla8032(ha) || is_qla8042(ha)) && 4792 !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) { 4793 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", 4794 __func__); 4795 /* disable pause frame for ISP83xx */ 4796 qla4_83xx_disable_pause(ha); 4797 } 4798 4799 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session); 4800 4801 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) 4802 reset_chip = 1; 4803 4804 /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific) 4805 * do not reset adapter, jump to initialize_adapter */ 4806 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 4807 status = QLA_SUCCESS; 4808 goto recover_ha_init_adapter; 4809 } 4810 4811 /* For the ISP-8xxx adapter, issue a stop_firmware if invoked 4812 * from eh_host_reset or ioctl module */ 4813 if (is_qla80XX(ha) && !reset_chip && 4814 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) { 4815 4816 DEBUG2(ql4_printk(KERN_INFO, ha, 4817 "scsi%ld: %s - Performing stop_firmware...\n", 4818 ha->host_no, __func__)); 4819 status = ha->isp_ops->reset_firmware(ha); 4820 if (status == QLA_SUCCESS) { 4821 if (!test_bit(AF_FW_RECOVERY, &ha->flags)) 4822 qla4xxx_cmd_wait(ha); 4823 4824 ha->isp_ops->disable_intrs(ha); 4825 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 4826 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 4827 } else { 4828 /* If the stop_firmware fails then 4829 * reset the entire chip */ 4830 reset_chip = 1; 4831 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 4832 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4833 } 4834 } 4835 4836 /* Issue full chip reset if recovering from a catastrophic error, 4837 * or if stop_firmware fails for ISP-8xxx. 
4838 * This is the default case for ISP-4xxx */ 4839 if (is_qla40XX(ha) || reset_chip) { 4840 if (is_qla40XX(ha)) 4841 goto chip_reset; 4842 4843 /* Check if 8XXX firmware is alive or not 4844 * We may have arrived here from NEED_RESET 4845 * detection only */ 4846 if (test_bit(AF_FW_RECOVERY, &ha->flags)) 4847 goto chip_reset; 4848 4849 wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ); 4850 while (time_before(jiffies, wait)) { 4851 if (qla4_8xxx_check_fw_alive(ha)) { 4852 qla4xxx_mailbox_premature_completion(ha); 4853 break; 4854 } 4855 4856 set_current_state(TASK_UNINTERRUPTIBLE); 4857 schedule_timeout(HZ); 4858 } 4859 chip_reset: 4860 if (!test_bit(AF_FW_RECOVERY, &ha->flags)) 4861 qla4xxx_cmd_wait(ha); 4862 4863 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 4864 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 4865 DEBUG2(ql4_printk(KERN_INFO, ha, 4866 "scsi%ld: %s - Performing chip reset..\n", 4867 ha->host_no, __func__)); 4868 status = ha->isp_ops->reset_chip(ha); 4869 } 4870 4871 /* Flush any pending ddb changed AENs */ 4872 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 4873 4874 recover_ha_init_adapter: 4875 /* Upon successful firmware/chip reset, re-initialize the adapter */ 4876 if (status == QLA_SUCCESS) { 4877 /* For ISP-4xxx, force function 1 to always initialize 4878 * before function 3 to prevent both functions from 4879 * stepping on top of each other */ 4880 if (is_qla40XX(ha) && (ha->mac_index == 3)) 4881 ssleep(6); 4882 4883 /* NOTE: AF_ONLINE flag set upon successful completion of 4884 * qla4xxx_initialize_adapter */ 4885 status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); 4886 } 4887 4888 /* Retry failed adapter initialization, if necessary 4889 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific) 4890 * case to prevent ping-pong resets between functions */ 4891 if (!test_bit(AF_ONLINE, &ha->flags) && 4892 !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 4893 /* Adapter initialization failed, see if we can retry 4894 * resetting the ha. 4895 * Since we don't want to block the DPC for too long 4896 * with multiple resets in the same thread, 4897 * utilize DPC to retry */ 4898 if (is_qla80XX(ha)) { 4899 ha->isp_ops->idc_lock(ha); 4900 dev_state = qla4_8xxx_rd_direct(ha, 4901 QLA8XXX_CRB_DEV_STATE); 4902 ha->isp_ops->idc_unlock(ha); 4903 if (dev_state == QLA8XXX_DEV_FAILED) { 4904 ql4_printk(KERN_INFO, ha, "%s: don't retry " 4905 "recover adapter. H/W is in Failed " 4906 "state\n", __func__); 4907 qla4xxx_dead_adapter_cleanup(ha); 4908 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 4909 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 4910 clear_bit(DPC_RESET_HA_FW_CONTEXT, 4911 &ha->dpc_flags); 4912 status = QLA_ERROR; 4913 4914 goto exit_recover; 4915 } 4916 } 4917 4918 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) { 4919 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES; 4920 DEBUG2(printk("scsi%ld: recover adapter - retrying " 4921 "(%d) more times\n", ha->host_no, 4922 ha->retry_reset_ha_cnt)); 4923 set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 4924 status = QLA_ERROR; 4925 } else { 4926 if (ha->retry_reset_ha_cnt > 0) { 4927 /* Schedule another Reset HA--DPC will retry */ 4928 ha->retry_reset_ha_cnt--; 4929 DEBUG2(printk("scsi%ld: recover adapter - " 4930 "retry remaining %d\n", 4931 ha->host_no, 4932 ha->retry_reset_ha_cnt)); 4933 status = QLA_ERROR; 4934 } 4935 4936 if (ha->retry_reset_ha_cnt == 0) { 4937 /* Recover adapter retries have been exhausted.
4938 * Adapter DEAD */ 4939 DEBUG2(printk("scsi%ld: recover adapter " 4940 "failed - board disabled\n", 4941 ha->host_no)); 4942 qla4xxx_dead_adapter_cleanup(ha); 4943 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 4944 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 4945 clear_bit(DPC_RESET_HA_FW_CONTEXT, 4946 &ha->dpc_flags); 4947 status = QLA_ERROR; 4948 } 4949 } 4950 } else { 4951 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 4952 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 4953 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 4954 } 4955 4956 exit_recover: 4957 ha->adapter_error_count++; 4958 4959 if (test_bit(AF_ONLINE, &ha->flags)) 4960 ha->isp_ops->enable_intrs(ha); 4961 4962 scsi_unblock_requests(ha->host); 4963 4964 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 4965 DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no, 4966 status == QLA_ERROR ? "FAILED" : "SUCCEEDED")); 4967 4968 return status; 4969 } 4970 4971 static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session) 4972 { 4973 struct iscsi_session *sess; 4974 struct ddb_entry *ddb_entry; 4975 struct scsi_qla_host *ha; 4976 4977 sess = cls_session->dd_data; 4978 ddb_entry = sess->dd_data; 4979 ha = ddb_entry->ha; 4980 if (!iscsi_is_session_online(cls_session)) { 4981 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { 4982 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 4983 " unblock session\n", ha->host_no, __func__, 4984 ddb_entry->fw_ddb_index); 4985 iscsi_unblock_session(ddb_entry->sess); 4986 } else { 4987 /* Trigger relogin */ 4988 if (ddb_entry->ddb_type == FLASH_DDB) { 4989 if (!(test_bit(DF_RELOGIN, &ddb_entry->flags) || 4990 test_bit(DF_DISABLE_RELOGIN, 4991 &ddb_entry->flags))) 4992 qla4xxx_arm_relogin_timer(ddb_entry); 4993 } else 4994 iscsi_session_failure(cls_session->dd_data, 4995 ISCSI_ERR_CONN_FAILED); 4996 } 4997 } 4998 } 4999 5000 int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session) 5001 { 5002 struct iscsi_session *sess; 5003 struct ddb_entry *ddb_entry; 5004 struct scsi_qla_host *ha; 5005 5006 sess = cls_session->dd_data; 5007 ddb_entry = sess->dd_data; 5008 ha = ddb_entry->ha; 5009 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5010 " unblock session\n", ha->host_no, __func__, 5011 ddb_entry->fw_ddb_index); 5012 5013 iscsi_unblock_session(ddb_entry->sess); 5014 5015 /* Start scan target */ 5016 if (test_bit(AF_ONLINE, &ha->flags)) { 5017 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5018 " start scan\n", ha->host_no, __func__, 5019 ddb_entry->fw_ddb_index); 5020 scsi_queue_work(ha->host, &ddb_entry->sess->scan_work); 5021 } 5022 return QLA_SUCCESS; 5023 } 5024 5025 int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session) 5026 { 5027 struct iscsi_session *sess; 5028 struct ddb_entry *ddb_entry; 5029 struct scsi_qla_host *ha; 5030 int status = QLA_SUCCESS; 5031 5032 sess = cls_session->dd_data; 5033 ddb_entry = sess->dd_data; 5034 ha = ddb_entry->ha; 5035 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5036 " unblock user space session\n", ha->host_no, __func__, 5037 ddb_entry->fw_ddb_index); 5038 5039 if (!iscsi_is_session_online(cls_session)) { 5040 iscsi_conn_start(ddb_entry->conn); 5041 iscsi_conn_login_event(ddb_entry->conn, 5042 ISCSI_CONN_STATE_LOGGED_IN); 5043 } else { 5044 ql4_printk(KERN_INFO, ha, 5045 "scsi%ld: %s: ddb[%d] session [%d] already logged in\n", 5046 ha->host_no, __func__, ddb_entry->fw_ddb_index, 5047 cls_session->sid); 5048 status = QLA_ERROR; 5049 } 5050 5051 return status; 5052 } 5053 5054 static void 
qla4xxx_relogin_all_devices(struct scsi_qla_host *ha) 5055 { 5056 iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices); 5057 } 5058 5059 static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) 5060 { 5061 uint16_t relogin_timer; 5062 struct iscsi_session *sess; 5063 struct ddb_entry *ddb_entry; 5064 struct scsi_qla_host *ha; 5065 5066 sess = cls_sess->dd_data; 5067 ddb_entry = sess->dd_data; 5068 ha = ddb_entry->ha; 5069 5070 relogin_timer = max(ddb_entry->default_relogin_timeout, 5071 (uint16_t)RELOGIN_TOV); 5072 atomic_set(&ddb_entry->relogin_timer, relogin_timer); 5073 5074 DEBUG2(ql4_printk(KERN_INFO, ha, 5075 "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no, 5076 ddb_entry->fw_ddb_index, relogin_timer)); 5077 5078 qla4xxx_login_flash_ddb(cls_sess); 5079 } 5080 5081 static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess) 5082 { 5083 struct iscsi_session *sess; 5084 struct ddb_entry *ddb_entry; 5085 struct scsi_qla_host *ha; 5086 5087 sess = cls_sess->dd_data; 5088 ddb_entry = sess->dd_data; 5089 ha = ddb_entry->ha; 5090 5091 if (!(ddb_entry->ddb_type == FLASH_DDB)) 5092 return; 5093 5094 if (test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags)) 5095 return; 5096 5097 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) && 5098 !iscsi_is_session_online(cls_sess)) { 5099 DEBUG2(ql4_printk(KERN_INFO, ha, 5100 "relogin issued\n")); 5101 qla4xxx_relogin_flash_ddb(cls_sess); 5102 } 5103 } 5104 5105 void qla4xxx_wake_dpc(struct scsi_qla_host *ha) 5106 { 5107 if (ha->dpc_thread) 5108 queue_work(ha->dpc_thread, &ha->dpc_work); 5109 } 5110 5111 static struct qla4_work_evt * 5112 qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size, 5113 enum qla4_work_type type) 5114 { 5115 struct qla4_work_evt *e; 5116 uint32_t size = sizeof(struct qla4_work_evt) + data_size; 5117 5118 e = kzalloc(size, GFP_ATOMIC); 5119 if (!e) 5120 return NULL; 5121 5122 INIT_LIST_HEAD(&e->list); 5123 e->type = type; 5124 return e; 5125 } 5126 5127 static void qla4xxx_post_work(struct scsi_qla_host *ha, 5128 struct qla4_work_evt *e) 5129 { 5130 unsigned long flags; 5131 5132 spin_lock_irqsave(&ha->work_lock, flags); 5133 list_add_tail(&e->list, &ha->work_list); 5134 spin_unlock_irqrestore(&ha->work_lock, flags); 5135 qla4xxx_wake_dpc(ha); 5136 } 5137 5138 int qla4xxx_post_aen_work(struct scsi_qla_host *ha, 5139 enum iscsi_host_event_code aen_code, 5140 uint32_t data_size, uint8_t *data) 5141 { 5142 struct qla4_work_evt *e; 5143 5144 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN); 5145 if (!e) 5146 return QLA_ERROR; 5147 5148 e->u.aen.code = aen_code; 5149 e->u.aen.data_size = data_size; 5150 memcpy(e->u.aen.data, data, data_size); 5151 5152 qla4xxx_post_work(ha, e); 5153 5154 return QLA_SUCCESS; 5155 } 5156 5157 int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha, 5158 uint32_t status, uint32_t pid, 5159 uint32_t data_size, uint8_t *data) 5160 { 5161 struct qla4_work_evt *e; 5162 5163 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS); 5164 if (!e) 5165 return QLA_ERROR; 5166 5167 e->u.ping.status = status; 5168 e->u.ping.pid = pid; 5169 e->u.ping.data_size = data_size; 5170 memcpy(e->u.ping.data, data, data_size); 5171 5172 qla4xxx_post_work(ha, e); 5173 5174 return QLA_SUCCESS; 5175 } 5176 5177 static void qla4xxx_do_work(struct scsi_qla_host *ha) 5178 { 5179 struct qla4_work_evt *e, *tmp; 5180 unsigned long flags; 5181 LIST_HEAD(work); 5182 5183 spin_lock_irqsave(&ha->work_lock, flags); 5184 list_splice_init(&ha->work_list, &work); 5185 
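/* The pending events were spliced onto a private list above so that work_lock is not held while they are posted to the iSCSI transport. */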
spin_unlock_irqrestore(&ha->work_lock, flags); 5186 5187 list_for_each_entry_safe(e, tmp, &work, list) { 5188 list_del_init(&e->list); 5189 5190 switch (e->type) { 5191 case QLA4_EVENT_AEN: 5192 iscsi_post_host_event(ha->host_no, 5193 &qla4xxx_iscsi_transport, 5194 e->u.aen.code, 5195 e->u.aen.data_size, 5196 e->u.aen.data); 5197 break; 5198 case QLA4_EVENT_PING_STATUS: 5199 iscsi_ping_comp_event(ha->host_no, 5200 &qla4xxx_iscsi_transport, 5201 e->u.ping.status, 5202 e->u.ping.pid, 5203 e->u.ping.data_size, 5204 e->u.ping.data); 5205 break; 5206 default: 5207 ql4_printk(KERN_WARNING, ha, "event type: 0x%x not " 5208 "supported", e->type); 5209 } 5210 kfree(e); 5211 } 5212 } 5213 5214 /** 5215 * qla4xxx_do_dpc - dpc routine 5216 * @data: in our case pointer to adapter structure 5217 * 5218 * This routine is a task that is schedule by the interrupt handler 5219 * to perform the background processing for interrupts. We put it 5220 * on a task queue that is consumed whenever the scheduler runs; that's 5221 * so you can do anything (i.e. put the process to sleep etc). In fact, 5222 * the mid-level tries to sleep when it reaches the driver threshold 5223 * "host->can_queue". This can cause a panic if we were in our interrupt code. 5224 **/ 5225 static void qla4xxx_do_dpc(struct work_struct *work) 5226 { 5227 struct scsi_qla_host *ha = 5228 container_of(work, struct scsi_qla_host, dpc_work); 5229 int status = QLA_ERROR; 5230 5231 DEBUG2(printk("scsi%ld: %s: DPC handler waking up." 5232 "flags = 0x%08lx, dpc_flags = 0x%08lx\n", 5233 ha->host_no, __func__, ha->flags, ha->dpc_flags)) 5234 5235 /* Initialization not yet finished. Don't do anything yet. */ 5236 if (!test_bit(AF_INIT_DONE, &ha->flags)) 5237 return; 5238 5239 if (test_bit(AF_EEH_BUSY, &ha->flags)) { 5240 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n", 5241 ha->host_no, __func__, ha->flags)); 5242 return; 5243 } 5244 5245 /* post events to application */ 5246 qla4xxx_do_work(ha); 5247 5248 if (is_qla80XX(ha)) { 5249 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) { 5250 if (is_qla8032(ha) || is_qla8042(ha)) { 5251 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", 5252 __func__); 5253 /* disable pause frame for ISP83xx */ 5254 qla4_83xx_disable_pause(ha); 5255 } 5256 5257 ha->isp_ops->idc_lock(ha); 5258 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 5259 QLA8XXX_DEV_FAILED); 5260 ha->isp_ops->idc_unlock(ha); 5261 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n"); 5262 qla4_8xxx_device_state_handler(ha); 5263 } 5264 5265 if (test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) { 5266 if (is_qla8042(ha)) { 5267 if (ha->idc_info.info2 & 5268 ENABLE_INTERNAL_LOOPBACK) { 5269 ql4_printk(KERN_INFO, ha, "%s: Disabling ACB\n", 5270 __func__); 5271 status = qla4_84xx_config_acb(ha, 5272 ACB_CONFIG_DISABLE); 5273 if (status != QLA_SUCCESS) { 5274 ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n", 5275 __func__); 5276 } 5277 } 5278 } 5279 qla4_83xx_post_idc_ack(ha); 5280 clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags); 5281 } 5282 5283 if (is_qla8042(ha) && 5284 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) { 5285 ql4_printk(KERN_INFO, ha, "%s: Restoring ACB\n", 5286 __func__); 5287 if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) != 5288 QLA_SUCCESS) { 5289 ql4_printk(KERN_INFO, ha, "%s: ACB config failed ", 5290 __func__); 5291 } 5292 clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags); 5293 } 5294 5295 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { 5296 qla4_8xxx_need_qsnt_handler(ha); 5297 } 5298 } 5299 5300 if 
(!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) && 5301 (test_bit(DPC_RESET_HA, &ha->dpc_flags) || 5302 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 5303 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) { 5304 if ((is_qla8022(ha) && ql4xdontresethba) || 5305 ((is_qla8032(ha) || is_qla8042(ha)) && 5306 qla4_83xx_idc_dontreset(ha))) { 5307 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", 5308 ha->host_no, __func__)); 5309 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 5310 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); 5311 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 5312 goto dpc_post_reset_ha; 5313 } 5314 if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) || 5315 test_bit(DPC_RESET_HA, &ha->dpc_flags)) 5316 qla4xxx_recover_adapter(ha); 5317 5318 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 5319 uint8_t wait_time = RESET_INTR_TOV; 5320 5321 while ((readw(&ha->reg->ctrl_status) & 5322 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) { 5323 if (--wait_time == 0) 5324 break; 5325 msleep(1000); 5326 } 5327 if (wait_time == 0) 5328 DEBUG2(printk("scsi%ld: %s: SR|FSR " 5329 "bit not cleared-- resetting\n", 5330 ha->host_no, __func__)); 5331 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 5332 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) { 5333 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 5334 status = qla4xxx_recover_adapter(ha); 5335 } 5336 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); 5337 if (status == QLA_SUCCESS) 5338 ha->isp_ops->enable_intrs(ha); 5339 } 5340 } 5341 5342 dpc_post_reset_ha: 5343 /* ---- process AEN? --- */ 5344 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) 5345 qla4xxx_process_aen(ha, PROCESS_ALL_AENS); 5346 5347 /* ---- Get DHCP IP Address? --- */ 5348 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags)) 5349 qla4xxx_get_dhcp_ip_address(ha); 5350 5351 /* ---- relogin device? --- */ 5352 if (adapter_up(ha) && 5353 test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) { 5354 iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin); 5355 } 5356 5357 /* ---- link change? --- */ 5358 if (!test_bit(AF_LOOPBACK, &ha->flags) && 5359 test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) { 5360 if (!test_bit(AF_LINK_UP, &ha->flags)) { 5361 /* ---- link down? --- */ 5362 qla4xxx_mark_all_devices_missing(ha); 5363 } else { 5364 /* ---- link up? --- * 5365 * F/W will auto login to all devices ONLY ONCE after 5366 * link up during driver initialization and runtime 5367 * fatal error recovery. Therefore, the driver must 5368 * manually relogin to devices when recovering from 5369 * connection failures, logouts, expired KATO, etc. */ 5370 if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) { 5371 qla4xxx_build_ddb_list(ha, ha->is_reset); 5372 iscsi_host_for_each_session(ha->host, 5373 qla4xxx_login_flash_ddb); 5374 } else 5375 qla4xxx_relogin_all_devices(ha); 5376 } 5377 } 5378 } 5379 5380 /** 5381 * qla4xxx_free_adapter - release the adapter 5382 * @ha: pointer to adapter structure 5383 **/ 5384 static void qla4xxx_free_adapter(struct scsi_qla_host *ha) 5385 { 5386 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); 5387 5388 /* Turn-off interrupts on the card. 
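 * The chip-specific interrupt latch (ctrl_status on ISP4xxx, host_int on ISP82xx, risc_intr on ISP83xx/ISP8042) is cleared right below, before the timer and work queues are torn down.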
*/ 5389 ha->isp_ops->disable_intrs(ha); 5390 5391 if (is_qla40XX(ha)) { 5392 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR), 5393 &ha->reg->ctrl_status); 5394 readl(&ha->reg->ctrl_status); 5395 } else if (is_qla8022(ha)) { 5396 writel(0, &ha->qla4_82xx_reg->host_int); 5397 readl(&ha->qla4_82xx_reg->host_int); 5398 } else if (is_qla8032(ha) || is_qla8042(ha)) { 5399 writel(0, &ha->qla4_83xx_reg->risc_intr); 5400 readl(&ha->qla4_83xx_reg->risc_intr); 5401 } 5402 5403 /* Remove timer thread, if present */ 5404 if (ha->timer_active) 5405 qla4xxx_stop_timer(ha); 5406 5407 /* Kill the kernel thread for this host */ 5408 if (ha->dpc_thread) 5409 destroy_workqueue(ha->dpc_thread); 5410 5411 /* Kill the kernel thread for this host */ 5412 if (ha->task_wq) 5413 destroy_workqueue(ha->task_wq); 5414 5415 /* Put firmware in known state */ 5416 ha->isp_ops->reset_firmware(ha); 5417 5418 if (is_qla80XX(ha)) { 5419 ha->isp_ops->idc_lock(ha); 5420 qla4_8xxx_clear_drv_active(ha); 5421 ha->isp_ops->idc_unlock(ha); 5422 } 5423 5424 /* Detach interrupts */ 5425 qla4xxx_free_irqs(ha); 5426 5427 /* free extra memory */ 5428 qla4xxx_mem_free(ha); 5429 } 5430 5431 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha) 5432 { 5433 int status = 0; 5434 unsigned long mem_base, mem_len, db_base, db_len; 5435 struct pci_dev *pdev = ha->pdev; 5436 5437 status = pci_request_regions(pdev, DRIVER_NAME); 5438 if (status) { 5439 printk(KERN_WARNING 5440 "scsi(%ld) Failed to reserve PIO regions (%s) " 5441 "status=%d\n", ha->host_no, pci_name(pdev), status); 5442 goto iospace_error_exit; 5443 } 5444 5445 DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n", 5446 __func__, pdev->revision)); 5447 ha->revision_id = pdev->revision; 5448 5449 /* remap phys address */ 5450 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ 5451 mem_len = pci_resource_len(pdev, 0); 5452 DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n", 5453 __func__, mem_base, mem_len)); 5454 5455 /* mapping of pcibase pointer */ 5456 ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len); 5457 if (!ha->nx_pcibase) { 5458 printk(KERN_ERR 5459 "cannot remap MMIO (%s), aborting\n", pci_name(pdev)); 5460 pci_release_regions(ha->pdev); 5461 goto iospace_error_exit; 5462 } 5463 5464 /* Mapping of IO base pointer, door bell read and write pointer */ 5465 5466 /* mapping of IO base pointer */ 5467 if (is_qla8022(ha)) { 5468 ha->qla4_82xx_reg = (struct device_reg_82xx __iomem *) 5469 ((uint8_t *)ha->nx_pcibase + 0xbc000 + 5470 (ha->pdev->devfn << 11)); 5471 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 : 5472 QLA82XX_CAM_RAM_DB2); 5473 } else if (is_qla8032(ha) || is_qla8042(ha)) { 5474 ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *) 5475 ((uint8_t *)ha->nx_pcibase); 5476 } 5477 5478 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */ 5479 db_len = pci_resource_len(pdev, 4); 5480 5481 return 0; 5482 iospace_error_exit: 5483 return -ENOMEM; 5484 } 5485 5486 /*** 5487 * qla4xxx_iospace_config - maps registers 5488 * @ha: pointer to adapter structure 5489 * 5490 * This routines maps HBA's registers from the pci address space 5491 * into the kernel virtual address space for memory mapped i/o. 
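 *
 * Returns 0 on success, or -ENOMEM if the PCI regions cannot be reserved
 * or the MMIO window cannot be remapped.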
5492 **/ 5493 int qla4xxx_iospace_config(struct scsi_qla_host *ha) 5494 { 5495 unsigned long pio, pio_len, pio_flags; 5496 unsigned long mmio, mmio_len, mmio_flags; 5497 5498 pio = pci_resource_start(ha->pdev, 0); 5499 pio_len = pci_resource_len(ha->pdev, 0); 5500 pio_flags = pci_resource_flags(ha->pdev, 0); 5501 if (pio_flags & IORESOURCE_IO) { 5502 if (pio_len < MIN_IOBASE_LEN) { 5503 ql4_printk(KERN_WARNING, ha, 5504 "Invalid PCI I/O region size\n"); 5505 pio = 0; 5506 } 5507 } else { 5508 ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n"); 5509 pio = 0; 5510 } 5511 5512 /* Use MMIO operations for all accesses. */ 5513 mmio = pci_resource_start(ha->pdev, 1); 5514 mmio_len = pci_resource_len(ha->pdev, 1); 5515 mmio_flags = pci_resource_flags(ha->pdev, 1); 5516 5517 if (!(mmio_flags & IORESOURCE_MEM)) { 5518 ql4_printk(KERN_ERR, ha, 5519 "region #0 not an MMIO resource, aborting\n"); 5520 5521 goto iospace_error_exit; 5522 } 5523 5524 if (mmio_len < MIN_IOBASE_LEN) { 5525 ql4_printk(KERN_ERR, ha, 5526 "Invalid PCI mem region size, aborting\n"); 5527 goto iospace_error_exit; 5528 } 5529 5530 if (pci_request_regions(ha->pdev, DRIVER_NAME)) { 5531 ql4_printk(KERN_WARNING, ha, 5532 "Failed to reserve PIO/MMIO regions\n"); 5533 5534 goto iospace_error_exit; 5535 } 5536 5537 ha->pio_address = pio; 5538 ha->pio_length = pio_len; 5539 ha->reg = ioremap(mmio, MIN_IOBASE_LEN); 5540 if (!ha->reg) { 5541 ql4_printk(KERN_ERR, ha, 5542 "cannot remap MMIO, aborting\n"); 5543 5544 goto iospace_error_exit; 5545 } 5546 5547 return 0; 5548 5549 iospace_error_exit: 5550 return -ENOMEM; 5551 } 5552 5553 static struct isp_operations qla4xxx_isp_ops = { 5554 .iospace_config = qla4xxx_iospace_config, 5555 .pci_config = qla4xxx_pci_config, 5556 .disable_intrs = qla4xxx_disable_intrs, 5557 .enable_intrs = qla4xxx_enable_intrs, 5558 .start_firmware = qla4xxx_start_firmware, 5559 .intr_handler = qla4xxx_intr_handler, 5560 .interrupt_service_routine = qla4xxx_interrupt_service_routine, 5561 .reset_chip = qla4xxx_soft_reset, 5562 .reset_firmware = qla4xxx_hw_reset, 5563 .queue_iocb = qla4xxx_queue_iocb, 5564 .complete_iocb = qla4xxx_complete_iocb, 5565 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out, 5566 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in, 5567 .get_sys_info = qla4xxx_get_sys_info, 5568 .queue_mailbox_command = qla4xxx_queue_mbox_cmd, 5569 .process_mailbox_interrupt = qla4xxx_process_mbox_intr, 5570 }; 5571 5572 static struct isp_operations qla4_82xx_isp_ops = { 5573 .iospace_config = qla4_8xxx_iospace_config, 5574 .pci_config = qla4_8xxx_pci_config, 5575 .disable_intrs = qla4_82xx_disable_intrs, 5576 .enable_intrs = qla4_82xx_enable_intrs, 5577 .start_firmware = qla4_8xxx_load_risc, 5578 .restart_firmware = qla4_82xx_try_start_fw, 5579 .intr_handler = qla4_82xx_intr_handler, 5580 .interrupt_service_routine = qla4_82xx_interrupt_service_routine, 5581 .need_reset = qla4_8xxx_need_reset, 5582 .reset_chip = qla4_82xx_isp_reset, 5583 .reset_firmware = qla4_8xxx_stop_firmware, 5584 .queue_iocb = qla4_82xx_queue_iocb, 5585 .complete_iocb = qla4_82xx_complete_iocb, 5586 .rd_shdw_req_q_out = qla4_82xx_rd_shdw_req_q_out, 5587 .rd_shdw_rsp_q_in = qla4_82xx_rd_shdw_rsp_q_in, 5588 .get_sys_info = qla4_8xxx_get_sys_info, 5589 .rd_reg_direct = qla4_82xx_rd_32, 5590 .wr_reg_direct = qla4_82xx_wr_32, 5591 .rd_reg_indirect = qla4_82xx_md_rd_32, 5592 .wr_reg_indirect = qla4_82xx_md_wr_32, 5593 .idc_lock = qla4_82xx_idc_lock, 5594 .idc_unlock = qla4_82xx_idc_unlock, 5595 .rom_lock_recovery = 
qla4_82xx_rom_lock_recovery, 5596 .queue_mailbox_command = qla4_82xx_queue_mbox_cmd, 5597 .process_mailbox_interrupt = qla4_82xx_process_mbox_intr, 5598 }; 5599 5600 static struct isp_operations qla4_83xx_isp_ops = { 5601 .iospace_config = qla4_8xxx_iospace_config, 5602 .pci_config = qla4_8xxx_pci_config, 5603 .disable_intrs = qla4_83xx_disable_intrs, 5604 .enable_intrs = qla4_83xx_enable_intrs, 5605 .start_firmware = qla4_8xxx_load_risc, 5606 .restart_firmware = qla4_83xx_start_firmware, 5607 .intr_handler = qla4_83xx_intr_handler, 5608 .interrupt_service_routine = qla4_83xx_interrupt_service_routine, 5609 .need_reset = qla4_8xxx_need_reset, 5610 .reset_chip = qla4_83xx_isp_reset, 5611 .reset_firmware = qla4_8xxx_stop_firmware, 5612 .queue_iocb = qla4_83xx_queue_iocb, 5613 .complete_iocb = qla4_83xx_complete_iocb, 5614 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out, 5615 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in, 5616 .get_sys_info = qla4_8xxx_get_sys_info, 5617 .rd_reg_direct = qla4_83xx_rd_reg, 5618 .wr_reg_direct = qla4_83xx_wr_reg, 5619 .rd_reg_indirect = qla4_83xx_rd_reg_indirect, 5620 .wr_reg_indirect = qla4_83xx_wr_reg_indirect, 5621 .idc_lock = qla4_83xx_drv_lock, 5622 .idc_unlock = qla4_83xx_drv_unlock, 5623 .rom_lock_recovery = qla4_83xx_rom_lock_recovery, 5624 .queue_mailbox_command = qla4_83xx_queue_mbox_cmd, 5625 .process_mailbox_interrupt = qla4_83xx_process_mbox_intr, 5626 }; 5627 5628 uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha) 5629 { 5630 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out); 5631 } 5632 5633 uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha) 5634 { 5635 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out)); 5636 } 5637 5638 uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) 5639 { 5640 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in); 5641 } 5642 5643 uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) 5644 { 5645 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in)); 5646 } 5647 5648 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf) 5649 { 5650 struct scsi_qla_host *ha = data; 5651 char *str = buf; 5652 int rc; 5653 5654 switch (type) { 5655 case ISCSI_BOOT_ETH_FLAGS: 5656 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT); 5657 break; 5658 case ISCSI_BOOT_ETH_INDEX: 5659 rc = sprintf(str, "0\n"); 5660 break; 5661 case ISCSI_BOOT_ETH_MAC: 5662 rc = sysfs_format_mac(str, ha->my_mac, 5663 MAC_ADDR_LEN); 5664 break; 5665 default: 5666 rc = -ENOSYS; 5667 break; 5668 } 5669 return rc; 5670 } 5671 5672 static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type) 5673 { 5674 int rc; 5675 5676 switch (type) { 5677 case ISCSI_BOOT_ETH_FLAGS: 5678 case ISCSI_BOOT_ETH_MAC: 5679 case ISCSI_BOOT_ETH_INDEX: 5680 rc = S_IRUGO; 5681 break; 5682 default: 5683 rc = 0; 5684 break; 5685 } 5686 return rc; 5687 } 5688 5689 static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf) 5690 { 5691 struct scsi_qla_host *ha = data; 5692 char *str = buf; 5693 int rc; 5694 5695 switch (type) { 5696 case ISCSI_BOOT_INI_INITIATOR_NAME: 5697 rc = sprintf(str, "%s\n", ha->name_string); 5698 break; 5699 default: 5700 rc = -ENOSYS; 5701 break; 5702 } 5703 return rc; 5704 } 5705 5706 static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type) 5707 { 5708 int rc; 5709 5710 switch (type) { 5711 case ISCSI_BOOT_INI_INITIATOR_NAME: 5712 rc = S_IRUGO; 5713 break; 5714 default: 5715 rc = 0; 5716 break; 5717 } 5718 return rc; 5719 } 5720 5721 static ssize_t 
5722 qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type, 5723 char *buf) 5724 { 5725 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0]; 5726 char *str = buf; 5727 int rc; 5728 5729 switch (type) { 5730 case ISCSI_BOOT_TGT_NAME: 5731 rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name); 5732 break; 5733 case ISCSI_BOOT_TGT_IP_ADDR: 5734 if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1) 5735 rc = sprintf(buf, "%pI4\n", 5736 &boot_conn->dest_ipaddr.ip_address); 5737 else 5738 rc = sprintf(str, "%pI6\n", 5739 &boot_conn->dest_ipaddr.ip_address); 5740 break; 5741 case ISCSI_BOOT_TGT_PORT: 5742 rc = sprintf(str, "%d\n", boot_conn->dest_port); 5743 break; 5744 case ISCSI_BOOT_TGT_CHAP_NAME: 5745 rc = sprintf(str, "%.*s\n", 5746 boot_conn->chap.target_chap_name_length, 5747 (char *)&boot_conn->chap.target_chap_name); 5748 break; 5749 case ISCSI_BOOT_TGT_CHAP_SECRET: 5750 rc = sprintf(str, "%.*s\n", 5751 boot_conn->chap.target_secret_length, 5752 (char *)&boot_conn->chap.target_secret); 5753 break; 5754 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 5755 rc = sprintf(str, "%.*s\n", 5756 boot_conn->chap.intr_chap_name_length, 5757 (char *)&boot_conn->chap.intr_chap_name); 5758 break; 5759 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 5760 rc = sprintf(str, "%.*s\n", 5761 boot_conn->chap.intr_secret_length, 5762 (char *)&boot_conn->chap.intr_secret); 5763 break; 5764 case ISCSI_BOOT_TGT_FLAGS: 5765 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT); 5766 break; 5767 case ISCSI_BOOT_TGT_NIC_ASSOC: 5768 rc = sprintf(str, "0\n"); 5769 break; 5770 default: 5771 rc = -ENOSYS; 5772 break; 5773 } 5774 return rc; 5775 } 5776 5777 static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf) 5778 { 5779 struct scsi_qla_host *ha = data; 5780 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess); 5781 5782 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf); 5783 } 5784 5785 static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf) 5786 { 5787 struct scsi_qla_host *ha = data; 5788 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess); 5789 5790 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf); 5791 } 5792 5793 static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type) 5794 { 5795 int rc; 5796 5797 switch (type) { 5798 case ISCSI_BOOT_TGT_NAME: 5799 case ISCSI_BOOT_TGT_IP_ADDR: 5800 case ISCSI_BOOT_TGT_PORT: 5801 case ISCSI_BOOT_TGT_CHAP_NAME: 5802 case ISCSI_BOOT_TGT_CHAP_SECRET: 5803 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 5804 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 5805 case ISCSI_BOOT_TGT_NIC_ASSOC: 5806 case ISCSI_BOOT_TGT_FLAGS: 5807 rc = S_IRUGO; 5808 break; 5809 default: 5810 rc = 0; 5811 break; 5812 } 5813 return rc; 5814 } 5815 5816 static void qla4xxx_boot_release(void *data) 5817 { 5818 struct scsi_qla_host *ha = data; 5819 5820 scsi_host_put(ha->host); 5821 } 5822 5823 static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[]) 5824 { 5825 dma_addr_t buf_dma; 5826 uint32_t addr, pri_addr, sec_addr; 5827 uint32_t offset; 5828 uint16_t func_num; 5829 uint8_t val; 5830 uint8_t *buf = NULL; 5831 size_t size = 13 * sizeof(uint8_t); 5832 int ret = QLA_SUCCESS; 5833 5834 func_num = PCI_FUNC(ha->pdev->devfn); 5835 5836 ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n", 5837 __func__, ha->pdev->device, func_num); 5838 5839 if (is_qla40XX(ha)) { 5840 if (func_num == 1) { 5841 addr = NVRAM_PORT0_BOOT_MODE; 5842 pri_addr = NVRAM_PORT0_BOOT_PRI_TGT; 5843 sec_addr = 
NVRAM_PORT0_BOOT_SEC_TGT; 5844 } else if (func_num == 3) { 5845 addr = NVRAM_PORT1_BOOT_MODE; 5846 pri_addr = NVRAM_PORT1_BOOT_PRI_TGT; 5847 sec_addr = NVRAM_PORT1_BOOT_SEC_TGT; 5848 } else { 5849 ret = QLA_ERROR; 5850 goto exit_boot_info; 5851 } 5852 5853 /* Check Boot Mode */ 5854 val = rd_nvram_byte(ha, addr); 5855 if (!(val & 0x07)) { 5856 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot " 5857 "options : 0x%x\n", __func__, val)); 5858 ret = QLA_ERROR; 5859 goto exit_boot_info; 5860 } 5861 5862 /* get primary valid target index */ 5863 val = rd_nvram_byte(ha, pri_addr); 5864 if (val & BIT_7) 5865 ddb_index[0] = (val & 0x7f); 5866 5867 /* get secondary valid target index */ 5868 val = rd_nvram_byte(ha, sec_addr); 5869 if (val & BIT_7) 5870 ddb_index[1] = (val & 0x7f); 5871 5872 } else if (is_qla80XX(ha)) { 5873 buf = dma_alloc_coherent(&ha->pdev->dev, size, 5874 &buf_dma, GFP_KERNEL); 5875 if (!buf) { 5876 DEBUG2(ql4_printk(KERN_ERR, ha, 5877 "%s: Unable to allocate dma buffer\n", 5878 __func__)); 5879 ret = QLA_ERROR; 5880 goto exit_boot_info; 5881 } 5882 5883 if (ha->port_num == 0) 5884 offset = BOOT_PARAM_OFFSET_PORT0; 5885 else if (ha->port_num == 1) 5886 offset = BOOT_PARAM_OFFSET_PORT1; 5887 else { 5888 ret = QLA_ERROR; 5889 goto exit_boot_info_free; 5890 } 5891 addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) + 5892 offset; 5893 if (qla4xxx_get_flash(ha, buf_dma, addr, 5894 13 * sizeof(uint8_t)) != QLA_SUCCESS) { 5895 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash" 5896 " failed\n", ha->host_no, __func__)); 5897 ret = QLA_ERROR; 5898 goto exit_boot_info_free; 5899 } 5900 /* Check Boot Mode */ 5901 if (!(buf[1] & 0x07)) { 5902 DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options" 5903 " : 0x%x\n", buf[1])); 5904 ret = QLA_ERROR; 5905 goto exit_boot_info_free; 5906 } 5907 5908 /* get primary valid target index */ 5909 if (buf[2] & BIT_7) 5910 ddb_index[0] = buf[2] & 0x7f; 5911 5912 /* get secondary valid target index */ 5913 if (buf[11] & BIT_7) 5914 ddb_index[1] = buf[11] & 0x7f; 5915 } else { 5916 ret = QLA_ERROR; 5917 goto exit_boot_info; 5918 } 5919 5920 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary" 5921 " target ID %d\n", __func__, ddb_index[0], 5922 ddb_index[1])); 5923 5924 exit_boot_info_free: 5925 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma); 5926 exit_boot_info: 5927 ha->pri_ddb_idx = ddb_index[0]; 5928 ha->sec_ddb_idx = ddb_index[1]; 5929 return ret; 5930 } 5931 5932 /** 5933 * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password 5934 * @ha: pointer to adapter structure 5935 * @username: CHAP username to be returned 5936 * @password: CHAP password to be returned 5937 * 5938 * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP 5939 * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/. 5940 * So from the CHAP cache find the first BIDI CHAP entry and set it 5941 * to the boot record in sysfs. 
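 *
 * Returns 0 if a BIDI CHAP entry is found and copied into @username and
 * @password, otherwise -EINVAL.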
5942 **/ 5943 static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username, 5944 char *password) 5945 { 5946 int i, ret = -EINVAL; 5947 int max_chap_entries = 0; 5948 struct ql4_chap_table *chap_table; 5949 5950 if (is_qla80XX(ha)) 5951 max_chap_entries = (ha->hw.flt_chap_size / 2) / 5952 sizeof(struct ql4_chap_table); 5953 else 5954 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 5955 5956 if (!ha->chap_list) { 5957 ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n"); 5958 return ret; 5959 } 5960 5961 mutex_lock(&ha->chap_sem); 5962 for (i = 0; i < max_chap_entries; i++) { 5963 chap_table = (struct ql4_chap_table *)ha->chap_list + i; 5964 if (chap_table->cookie != 5965 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) { 5966 continue; 5967 } 5968 5969 if (chap_table->flags & BIT_7) /* local */ 5970 continue; 5971 5972 if (!(chap_table->flags & BIT_6)) /* Not BIDI */ 5973 continue; 5974 5975 strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); 5976 strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); 5977 ret = 0; 5978 break; 5979 } 5980 mutex_unlock(&ha->chap_sem); 5981 5982 return ret; 5983 } 5984 5985 5986 static int qla4xxx_get_boot_target(struct scsi_qla_host *ha, 5987 struct ql4_boot_session_info *boot_sess, 5988 uint16_t ddb_index) 5989 { 5990 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0]; 5991 struct dev_db_entry *fw_ddb_entry; 5992 dma_addr_t fw_ddb_entry_dma; 5993 uint16_t idx; 5994 uint16_t options; 5995 int ret = QLA_SUCCESS; 5996 5997 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 5998 &fw_ddb_entry_dma, GFP_KERNEL); 5999 if (!fw_ddb_entry) { 6000 DEBUG2(ql4_printk(KERN_ERR, ha, 6001 "%s: Unable to allocate dma buffer.\n", 6002 __func__)); 6003 ret = QLA_ERROR; 6004 return ret; 6005 } 6006 6007 if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry, 6008 fw_ddb_entry_dma, ddb_index)) { 6009 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at " 6010 "index [%d]\n", __func__, ddb_index)); 6011 ret = QLA_ERROR; 6012 goto exit_boot_target; 6013 } 6014 6015 /* Update target name and IP from DDB */ 6016 memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name, 6017 min(sizeof(boot_sess->target_name), 6018 sizeof(fw_ddb_entry->iscsi_name))); 6019 6020 options = le16_to_cpu(fw_ddb_entry->options); 6021 if (options & DDB_OPT_IPV6_DEVICE) { 6022 memcpy(&boot_conn->dest_ipaddr.ip_address, 6023 &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN); 6024 } else { 6025 boot_conn->dest_ipaddr.ip_type = 0x1; 6026 memcpy(&boot_conn->dest_ipaddr.ip_address, 6027 &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN); 6028 } 6029 6030 boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port); 6031 6032 /* update chap information */ 6033 idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 6034 6035 if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) { 6036 6037 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n")); 6038 6039 ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap. 
6040 target_chap_name, 6041 (char *)&boot_conn->chap.target_secret, 6042 idx); 6043 if (ret) { 6044 ql4_printk(KERN_ERR, ha, "Failed to set chap\n"); 6045 ret = QLA_ERROR; 6046 goto exit_boot_target; 6047 } 6048 6049 boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN; 6050 boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN; 6051 } 6052 6053 if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) { 6054 6055 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n")); 6056 6057 ret = qla4xxx_get_bidi_chap(ha, 6058 (char *)&boot_conn->chap.intr_chap_name, 6059 (char *)&boot_conn->chap.intr_secret); 6060 6061 if (ret) { 6062 ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n"); 6063 ret = QLA_ERROR; 6064 goto exit_boot_target; 6065 } 6066 6067 boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN; 6068 boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN; 6069 } 6070 6071 exit_boot_target: 6072 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 6073 fw_ddb_entry, fw_ddb_entry_dma); 6074 return ret; 6075 } 6076 6077 static int qla4xxx_get_boot_info(struct scsi_qla_host *ha) 6078 { 6079 uint16_t ddb_index[2]; 6080 int ret = QLA_ERROR; 6081 int rval; 6082 6083 memset(ddb_index, 0, sizeof(ddb_index)); 6084 ddb_index[0] = 0xffff; 6085 ddb_index[1] = 0xffff; 6086 ret = get_fw_boot_info(ha, ddb_index); 6087 if (ret != QLA_SUCCESS) { 6088 DEBUG2(ql4_printk(KERN_INFO, ha, 6089 "%s: No boot target configured.\n", __func__)); 6090 return ret; 6091 } 6092 6093 if (ql4xdisablesysfsboot) 6094 return QLA_SUCCESS; 6095 6096 if (ddb_index[0] == 0xffff) 6097 goto sec_target; 6098 6099 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess), 6100 ddb_index[0]); 6101 if (rval != QLA_SUCCESS) { 6102 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not " 6103 "configured\n", __func__)); 6104 } else 6105 ret = QLA_SUCCESS; 6106 6107 sec_target: 6108 if (ddb_index[1] == 0xffff) 6109 goto exit_get_boot_info; 6110 6111 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess), 6112 ddb_index[1]); 6113 if (rval != QLA_SUCCESS) { 6114 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not" 6115 " configured\n", __func__)); 6116 } else 6117 ret = QLA_SUCCESS; 6118 6119 exit_get_boot_info: 6120 return ret; 6121 } 6122 6123 static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha) 6124 { 6125 struct iscsi_boot_kobj *boot_kobj; 6126 6127 if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS) 6128 return QLA_ERROR; 6129 6130 if (ql4xdisablesysfsboot) { 6131 ql4_printk(KERN_INFO, ha, 6132 "%s: syfsboot disabled - driver will trigger login " 6133 "and publish session for discovery .\n", __func__); 6134 return QLA_SUCCESS; 6135 } 6136 6137 6138 ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no); 6139 if (!ha->boot_kset) 6140 goto kset_free; 6141 6142 if (!scsi_host_get(ha->host)) 6143 goto kset_free; 6144 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha, 6145 qla4xxx_show_boot_tgt_pri_info, 6146 qla4xxx_tgt_get_attr_visibility, 6147 qla4xxx_boot_release); 6148 if (!boot_kobj) 6149 goto put_host; 6150 6151 if (!scsi_host_get(ha->host)) 6152 goto kset_free; 6153 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha, 6154 qla4xxx_show_boot_tgt_sec_info, 6155 qla4xxx_tgt_get_attr_visibility, 6156 qla4xxx_boot_release); 6157 if (!boot_kobj) 6158 goto put_host; 6159 6160 if (!scsi_host_get(ha->host)) 6161 goto kset_free; 6162 boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha, 6163 qla4xxx_show_boot_ini_info, 6164 
qla4xxx_ini_get_attr_visibility, 6165 qla4xxx_boot_release); 6166 if (!boot_kobj) 6167 goto put_host; 6168 6169 if (!scsi_host_get(ha->host)) 6170 goto kset_free; 6171 boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha, 6172 qla4xxx_show_boot_eth_info, 6173 qla4xxx_eth_get_attr_visibility, 6174 qla4xxx_boot_release); 6175 if (!boot_kobj) 6176 goto put_host; 6177 6178 return QLA_SUCCESS; 6179 6180 put_host: 6181 scsi_host_put(ha->host); 6182 kset_free: 6183 iscsi_boot_destroy_kset(ha->boot_kset); 6184 return -ENOMEM; 6185 } 6186 6187 6188 static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry, 6189 struct ql4_tuple_ddb *tddb) 6190 { 6191 struct scsi_qla_host *ha; 6192 struct iscsi_cls_session *cls_sess; 6193 struct iscsi_cls_conn *cls_conn; 6194 struct iscsi_session *sess; 6195 struct iscsi_conn *conn; 6196 6197 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); 6198 ha = ddb_entry->ha; 6199 cls_sess = ddb_entry->sess; 6200 sess = cls_sess->dd_data; 6201 cls_conn = ddb_entry->conn; 6202 conn = cls_conn->dd_data; 6203 6204 tddb->tpgt = sess->tpgt; 6205 tddb->port = conn->persistent_port; 6206 strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE); 6207 strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN); 6208 } 6209 6210 static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry, 6211 struct ql4_tuple_ddb *tddb, 6212 uint8_t *flash_isid) 6213 { 6214 uint16_t options = 0; 6215 6216 tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); 6217 memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0], 6218 min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name))); 6219 6220 options = le16_to_cpu(fw_ddb_entry->options); 6221 if (options & DDB_OPT_IPV6_DEVICE) 6222 sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr); 6223 else 6224 sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr); 6225 6226 tddb->port = le16_to_cpu(fw_ddb_entry->port); 6227 6228 if (flash_isid == NULL) 6229 memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0], 6230 sizeof(tddb->isid)); 6231 else 6232 memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid)); 6233 } 6234 6235 static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha, 6236 struct ql4_tuple_ddb *old_tddb, 6237 struct ql4_tuple_ddb *new_tddb, 6238 uint8_t is_isid_compare) 6239 { 6240 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name)) 6241 return QLA_ERROR; 6242 6243 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr)) 6244 return QLA_ERROR; 6245 6246 if (old_tddb->port != new_tddb->port) 6247 return QLA_ERROR; 6248 6249 /* For multi sessions, driver generates the ISID, so do not compare 6250 * ISID in reset path since it would be a comparison between the 6251 * driver generated ISID and firmware generated ISID. This could 6252 * lead to adding duplicated DDBs in the list as driver generated 6253 * ISID would not match firmware generated ISID. 
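 * Hence the reset path (qla4xxx_is_session_exists) passes is_isid_compare
 * as false, while the flash DDB build path (qla4xxx_is_flash_ddb_exists)
 * compares against the ISID saved from flash.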
6254 */ 6255 if (is_isid_compare) { 6256 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x" 6257 "%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n", 6258 __func__, old_tddb->isid[5], old_tddb->isid[4], 6259 old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1], 6260 old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4], 6261 new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1], 6262 new_tddb->isid[0])); 6263 6264 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0], 6265 sizeof(old_tddb->isid))) 6266 return QLA_ERROR; 6267 } 6268 6269 DEBUG2(ql4_printk(KERN_INFO, ha, 6270 "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]", 6271 old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr, 6272 old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt, 6273 new_tddb->ip_addr, new_tddb->iscsi_name)); 6274 6275 return QLA_SUCCESS; 6276 } 6277 6278 static int qla4xxx_is_session_exists(struct scsi_qla_host *ha, 6279 struct dev_db_entry *fw_ddb_entry, 6280 uint32_t *index) 6281 { 6282 struct ddb_entry *ddb_entry; 6283 struct ql4_tuple_ddb *fw_tddb = NULL; 6284 struct ql4_tuple_ddb *tmp_tddb = NULL; 6285 int idx; 6286 int ret = QLA_ERROR; 6287 6288 fw_tddb = vzalloc(sizeof(*fw_tddb)); 6289 if (!fw_tddb) { 6290 DEBUG2(ql4_printk(KERN_WARNING, ha, 6291 "Memory Allocation failed.\n")); 6292 ret = QLA_SUCCESS; 6293 goto exit_check; 6294 } 6295 6296 tmp_tddb = vzalloc(sizeof(*tmp_tddb)); 6297 if (!tmp_tddb) { 6298 DEBUG2(ql4_printk(KERN_WARNING, ha, 6299 "Memory Allocation failed.\n")); 6300 ret = QLA_SUCCESS; 6301 goto exit_check; 6302 } 6303 6304 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL); 6305 6306 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { 6307 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 6308 if (ddb_entry == NULL) 6309 continue; 6310 6311 qla4xxx_get_param_ddb(ddb_entry, tmp_tddb); 6312 if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) { 6313 ret = QLA_SUCCESS; /* found */ 6314 if (index != NULL) 6315 *index = idx; 6316 goto exit_check; 6317 } 6318 } 6319 6320 exit_check: 6321 if (fw_tddb) 6322 vfree(fw_tddb); 6323 if (tmp_tddb) 6324 vfree(tmp_tddb); 6325 return ret; 6326 } 6327 6328 /** 6329 * qla4xxx_check_existing_isid - check if target with same isid exist 6330 * in target list 6331 * @list_nt: list of target 6332 * @isid: isid to check 6333 * 6334 * This routine return QLA_SUCCESS if target with same isid exist 6335 **/ 6336 static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid) 6337 { 6338 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; 6339 struct dev_db_entry *fw_ddb_entry; 6340 6341 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 6342 fw_ddb_entry = &nt_ddb_idx->fw_ddb; 6343 6344 if (memcmp(&fw_ddb_entry->isid[0], &isid[0], 6345 sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) { 6346 return QLA_SUCCESS; 6347 } 6348 } 6349 return QLA_ERROR; 6350 } 6351 6352 /** 6353 * qla4xxx_update_isid - compare ddbs and updated isid 6354 * @ha: Pointer to host adapter structure. 6355 * @list_nt: list of nt target 6356 * @fw_ddb_entry: firmware ddb entry 6357 * 6358 * This routine update isid if ddbs have same iqn, same isid and 6359 * different IP addr. 6360 * Return QLA_SUCCESS if isid is updated. 
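 *
 * Only the top three bits of isid[1] are rewritten (base_value | (i << 5));
 * up to eight candidate values are tried against the entries in @list_nt.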
6361 **/ 6362 static int qla4xxx_update_isid(struct scsi_qla_host *ha, 6363 struct list_head *list_nt, 6364 struct dev_db_entry *fw_ddb_entry) 6365 { 6366 uint8_t base_value, i; 6367 6368 base_value = fw_ddb_entry->isid[1] & 0x1f; 6369 for (i = 0; i < 8; i++) { 6370 fw_ddb_entry->isid[1] = (base_value | (i << 5)); 6371 if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid)) 6372 break; 6373 } 6374 6375 if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid)) 6376 return QLA_ERROR; 6377 6378 return QLA_SUCCESS; 6379 } 6380 6381 /** 6382 * qla4xxx_should_update_isid - check if isid need to update 6383 * @ha: Pointer to host adapter structure. 6384 * @old_tddb: ddb tuple 6385 * @new_tddb: ddb tuple 6386 * 6387 * Return QLA_SUCCESS if different IP, different PORT, same iqn, 6388 * same isid 6389 **/ 6390 static int qla4xxx_should_update_isid(struct scsi_qla_host *ha, 6391 struct ql4_tuple_ddb *old_tddb, 6392 struct ql4_tuple_ddb *new_tddb) 6393 { 6394 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) { 6395 /* Same ip */ 6396 if (old_tddb->port == new_tddb->port) 6397 return QLA_ERROR; 6398 } 6399 6400 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name)) 6401 /* different iqn */ 6402 return QLA_ERROR; 6403 6404 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0], 6405 sizeof(old_tddb->isid))) 6406 /* different isid */ 6407 return QLA_ERROR; 6408 6409 return QLA_SUCCESS; 6410 } 6411 6412 /** 6413 * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt 6414 * @ha: Pointer to host adapter structure. 6415 * @list_nt: list of nt target. 6416 * @fw_ddb_entry: firmware ddb entry. 6417 * 6418 * This routine check if fw_ddb_entry already exists in list_nt to avoid 6419 * duplicate ddb in list_nt. 6420 * Return QLA_SUCCESS if duplicate ddb exit in list_nl. 6421 * Note: This function also update isid of DDB if required. 
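 *
 * Returns QLA_SUCCESS when a duplicate entry is found, so that the caller
 * skips adding it (allocation failures are reported the same way), and
 * QLA_ERROR when the entry is new.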
6422 **/ 6423 6424 static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha, 6425 struct list_head *list_nt, 6426 struct dev_db_entry *fw_ddb_entry) 6427 { 6428 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; 6429 struct ql4_tuple_ddb *fw_tddb = NULL; 6430 struct ql4_tuple_ddb *tmp_tddb = NULL; 6431 int rval, ret = QLA_ERROR; 6432 6433 fw_tddb = vzalloc(sizeof(*fw_tddb)); 6434 if (!fw_tddb) { 6435 DEBUG2(ql4_printk(KERN_WARNING, ha, 6436 "Memory Allocation failed.\n")); 6437 ret = QLA_SUCCESS; 6438 goto exit_check; 6439 } 6440 6441 tmp_tddb = vzalloc(sizeof(*tmp_tddb)); 6442 if (!tmp_tddb) { 6443 DEBUG2(ql4_printk(KERN_WARNING, ha, 6444 "Memory Allocation failed.\n")); 6445 ret = QLA_SUCCESS; 6446 goto exit_check; 6447 } 6448 6449 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL); 6450 6451 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 6452 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, 6453 nt_ddb_idx->flash_isid); 6454 ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true); 6455 /* found duplicate ddb */ 6456 if (ret == QLA_SUCCESS) 6457 goto exit_check; 6458 } 6459 6460 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 6461 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL); 6462 6463 ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb); 6464 if (ret == QLA_SUCCESS) { 6465 rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry); 6466 if (rval == QLA_SUCCESS) 6467 ret = QLA_ERROR; 6468 else 6469 ret = QLA_SUCCESS; 6470 6471 goto exit_check; 6472 } 6473 } 6474 6475 exit_check: 6476 if (fw_tddb) 6477 vfree(fw_tddb); 6478 if (tmp_tddb) 6479 vfree(tmp_tddb); 6480 return ret; 6481 } 6482 6483 static void qla4xxx_free_ddb_list(struct list_head *list_ddb) 6484 { 6485 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 6486 6487 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { 6488 list_del_init(&ddb_idx->list); 6489 vfree(ddb_idx); 6490 } 6491 } 6492 6493 static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha, 6494 struct dev_db_entry *fw_ddb_entry) 6495 { 6496 struct iscsi_endpoint *ep; 6497 struct sockaddr_in *addr; 6498 struct sockaddr_in6 *addr6; 6499 struct sockaddr *t_addr; 6500 struct sockaddr_storage *dst_addr; 6501 char *ip; 6502 6503 /* TODO: need to destroy on unload iscsi_endpoint*/ 6504 dst_addr = vmalloc(sizeof(*dst_addr)); 6505 if (!dst_addr) 6506 return NULL; 6507 6508 if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) { 6509 t_addr = (struct sockaddr *)dst_addr; 6510 t_addr->sa_family = AF_INET6; 6511 addr6 = (struct sockaddr_in6 *)dst_addr; 6512 ip = (char *)&addr6->sin6_addr; 6513 memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN); 6514 addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port)); 6515 6516 } else { 6517 t_addr = (struct sockaddr *)dst_addr; 6518 t_addr->sa_family = AF_INET; 6519 addr = (struct sockaddr_in *)dst_addr; 6520 ip = (char *)&addr->sin_addr; 6521 memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN); 6522 addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port)); 6523 } 6524 6525 ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0); 6526 vfree(dst_addr); 6527 return ep; 6528 } 6529 6530 static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx) 6531 { 6532 if (ql4xdisablesysfsboot) 6533 return QLA_SUCCESS; 6534 if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx) 6535 return QLA_ERROR; 6536 return QLA_SUCCESS; 6537 } 6538 6539 static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha, 6540 struct ddb_entry 
*ddb_entry, 6541 uint16_t idx) 6542 { 6543 uint16_t def_timeout; 6544 6545 ddb_entry->ddb_type = FLASH_DDB; 6546 ddb_entry->fw_ddb_index = INVALID_ENTRY; 6547 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE; 6548 ddb_entry->ha = ha; 6549 ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb; 6550 ddb_entry->ddb_change = qla4xxx_flash_ddb_change; 6551 ddb_entry->chap_tbl_idx = INVALID_ENTRY; 6552 6553 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); 6554 atomic_set(&ddb_entry->relogin_timer, 0); 6555 atomic_set(&ddb_entry->relogin_retry_count, 0); 6556 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout); 6557 ddb_entry->default_relogin_timeout = 6558 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ? 6559 def_timeout : LOGIN_TOV; 6560 ddb_entry->default_time2wait = 6561 le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait); 6562 6563 if (ql4xdisablesysfsboot && 6564 (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)) 6565 set_bit(DF_BOOT_TGT, &ddb_entry->flags); 6566 } 6567 6568 static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha) 6569 { 6570 uint32_t idx = 0; 6571 uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */ 6572 uint32_t sts[MBOX_REG_COUNT]; 6573 uint32_t ip_state; 6574 unsigned long wtime; 6575 int ret; 6576 6577 wtime = jiffies + (HZ * IP_CONFIG_TOV); 6578 do { 6579 for (idx = 0; idx < IP_ADDR_COUNT; idx++) { 6580 if (ip_idx[idx] == -1) 6581 continue; 6582 6583 ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts); 6584 6585 if (ret == QLA_ERROR) { 6586 ip_idx[idx] = -1; 6587 continue; 6588 } 6589 6590 ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT; 6591 6592 DEBUG2(ql4_printk(KERN_INFO, ha, 6593 "Waiting for IP state for idx = %d, state = 0x%x\n", 6594 ip_idx[idx], ip_state)); 6595 if (ip_state == IP_ADDRSTATE_UNCONFIGURED || 6596 ip_state == IP_ADDRSTATE_INVALID || 6597 ip_state == IP_ADDRSTATE_PREFERRED || 6598 ip_state == IP_ADDRSTATE_DEPRICATED || 6599 ip_state == IP_ADDRSTATE_DISABLING) 6600 ip_idx[idx] = -1; 6601 } 6602 6603 /* Break if all IP states checked */ 6604 if ((ip_idx[0] == -1) && 6605 (ip_idx[1] == -1) && 6606 (ip_idx[2] == -1) && 6607 (ip_idx[3] == -1)) 6608 break; 6609 schedule_timeout_uninterruptible(HZ); 6610 } while (time_after(wtime, jiffies)); 6611 } 6612 6613 static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry, 6614 struct dev_db_entry *flash_ddb_entry) 6615 { 6616 uint16_t options = 0; 6617 size_t ip_len = IP_ADDR_LEN; 6618 6619 options = le16_to_cpu(fw_ddb_entry->options); 6620 if (options & DDB_OPT_IPV6_DEVICE) 6621 ip_len = IPv6_ADDR_LEN; 6622 6623 if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len)) 6624 return QLA_ERROR; 6625 6626 if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0], 6627 sizeof(fw_ddb_entry->isid))) 6628 return QLA_ERROR; 6629 6630 if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port, 6631 sizeof(fw_ddb_entry->port))) 6632 return QLA_ERROR; 6633 6634 return QLA_SUCCESS; 6635 } 6636 6637 static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha, 6638 struct dev_db_entry *fw_ddb_entry, 6639 uint32_t fw_idx, uint32_t *flash_index) 6640 { 6641 struct dev_db_entry *flash_ddb_entry; 6642 dma_addr_t flash_ddb_entry_dma; 6643 uint32_t idx = 0; 6644 int max_ddbs; 6645 int ret = QLA_ERROR, status; 6646 6647 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 6648 MAX_DEV_DB_ENTRIES; 6649 6650 flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 6651 &flash_ddb_entry_dma); 6652 if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) { 6653 ql4_printk(KERN_ERR, ha, "Out of memory\n"); 6654 goto exit_find_st_idx; 6655 } 6656 6657 status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, 6658 flash_ddb_entry_dma, fw_idx); 6659 if (status == QLA_SUCCESS) { 6660 status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); 6661 if (status == QLA_SUCCESS) { 6662 *flash_index = fw_idx; 6663 ret = QLA_SUCCESS; 6664 goto exit_find_st_idx; 6665 } 6666 } 6667 6668 for (idx = 0; idx < max_ddbs; idx++) { 6669 status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, 6670 flash_ddb_entry_dma, idx); 6671 if (status == QLA_ERROR) 6672 continue; 6673 6674 status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); 6675 if (status == QLA_SUCCESS) { 6676 *flash_index = idx; 6677 ret = QLA_SUCCESS; 6678 goto exit_find_st_idx; 6679 } 6680 } 6681 6682 if (idx == max_ddbs) 6683 ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n", 6684 fw_idx); 6685 6686 exit_find_st_idx: 6687 if (flash_ddb_entry) 6688 dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry, 6689 flash_ddb_entry_dma); 6690 6691 return ret; 6692 } 6693 6694 static void qla4xxx_build_st_list(struct scsi_qla_host *ha, 6695 struct list_head *list_st) 6696 { 6697 struct qla_ddb_index *st_ddb_idx; 6698 int max_ddbs; 6699 int fw_idx_size; 6700 struct dev_db_entry *fw_ddb_entry; 6701 dma_addr_t fw_ddb_dma; 6702 int ret; 6703 uint32_t idx = 0, next_idx = 0; 6704 uint32_t state = 0, conn_err = 0; 6705 uint32_t flash_index = -1; 6706 uint16_t conn_id = 0; 6707 6708 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 6709 &fw_ddb_dma); 6710 if (fw_ddb_entry == NULL) { 6711 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 6712 goto exit_st_list; 6713 } 6714 6715 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 6716 MAX_DEV_DB_ENTRIES; 6717 fw_idx_size = sizeof(struct qla_ddb_index); 6718 6719 for (idx = 0; idx < max_ddbs; idx = next_idx) { 6720 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, 6721 NULL, &next_idx, &state, 6722 &conn_err, NULL, &conn_id); 6723 if (ret == QLA_ERROR) 6724 break; 6725 6726 /* Ignore DDB if invalid state (unassigned) */ 6727 if (state == DDB_DS_UNASSIGNED) 6728 goto continue_next_st; 6729 6730 /* Check if ST, add to the list_st */ 6731 if (strlen((char *) fw_ddb_entry->iscsi_name) != 0) 6732 goto continue_next_st; 6733 6734 st_ddb_idx = vzalloc(fw_idx_size); 6735 if (!st_ddb_idx) 6736 break; 6737 6738 ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx, 6739 &flash_index); 6740 if (ret == QLA_ERROR) { 6741 ql4_printk(KERN_ERR, ha, 6742 "No flash entry for ST at idx [%d]\n", idx); 6743 st_ddb_idx->flash_ddb_idx = idx; 6744 } else { 6745 ql4_printk(KERN_INFO, ha, 6746 "ST at idx [%d] is stored at flash [%d]\n", 6747 idx, flash_index); 6748 st_ddb_idx->flash_ddb_idx = flash_index; 6749 } 6750 6751 st_ddb_idx->fw_ddb_idx = idx; 6752 6753 list_add_tail(&st_ddb_idx->list, list_st); 6754 continue_next_st: 6755 if (next_idx == 0) 6756 break; 6757 } 6758 6759 exit_st_list: 6760 if (fw_ddb_entry) 6761 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 6762 } 6763 6764 /** 6765 * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list 6766 * @ha: pointer to adapter structure 6767 * @list_ddb: List from which failed ddb to be removed 6768 * 6769 * Iterate over the list of DDBs and find and remove DDBs that are either in 6770 * no connection active state or failed state 6771 **/ 6772 static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha, 6773 struct list_head *list_ddb) 6774 { 6775 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 6776 uint32_t next_idx = 0; 6777 uint32_t state = 0, conn_err = 0; 6778 int ret; 6779 6780 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { 6781 ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx, 6782 NULL, 0, NULL, &next_idx, &state, 6783 &conn_err, NULL, NULL); 6784 if (ret == QLA_ERROR) 6785 continue; 6786 6787 if (state == DDB_DS_NO_CONNECTION_ACTIVE || 6788 state == DDB_DS_SESSION_FAILED) { 6789 list_del_init(&ddb_idx->list); 6790 vfree(ddb_idx); 6791 } 6792 } 6793 } 6794 6795 static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha, 6796 struct ddb_entry *ddb_entry, 6797 struct dev_db_entry *fw_ddb_entry) 6798 { 6799 struct iscsi_cls_session *cls_sess; 6800 struct iscsi_session *sess; 6801 uint32_t max_ddbs = 0; 6802 uint16_t ddb_link = -1; 6803 6804 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 6805 MAX_DEV_DB_ENTRIES; 6806 6807 cls_sess = ddb_entry->sess; 6808 sess = cls_sess->dd_data; 6809 6810 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 6811 if (ddb_link < max_ddbs) 6812 sess->discovery_parent_idx = ddb_link; 6813 else 6814 sess->discovery_parent_idx = DDB_NO_LINK; 6815 } 6816 6817 static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha, 6818 struct dev_db_entry *fw_ddb_entry, 6819 int is_reset, uint16_t idx) 6820 { 6821 struct iscsi_cls_session *cls_sess; 6822 struct iscsi_session *sess; 6823 struct iscsi_cls_conn *cls_conn; 6824 struct iscsi_endpoint *ep; 6825 uint16_t cmds_max = 32; 6826 uint16_t conn_id = 0; 6827 uint32_t initial_cmdsn = 0; 6828 int ret = QLA_SUCCESS; 6829 6830 struct ddb_entry *ddb_entry = NULL; 6831 6832 /* Create session object, with INVALID_ENTRY, 6833 * the targer_id would get set when we issue the login 6834 */ 6835 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host, 6836 cmds_max, sizeof(struct ddb_entry), 6837 sizeof(struct ql4_task_data), 6838 initial_cmdsn, INVALID_ENTRY); 6839 if (!cls_sess) { 6840 ret = QLA_ERROR; 6841 goto exit_setup; 6842 } 6843 6844 /* 6845 * so calling module_put function to decrement the 6846 * reference count. 6847 **/ 6848 module_put(qla4xxx_iscsi_transport.owner); 6849 sess = cls_sess->dd_data; 6850 ddb_entry = sess->dd_data; 6851 ddb_entry->sess = cls_sess; 6852 6853 cls_sess->recovery_tmo = ql4xsess_recovery_tmo; 6854 memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry, 6855 sizeof(struct dev_db_entry)); 6856 6857 qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx); 6858 6859 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id); 6860 6861 if (!cls_conn) { 6862 ret = QLA_ERROR; 6863 goto exit_setup; 6864 } 6865 6866 ddb_entry->conn = cls_conn; 6867 6868 /* Setup ep, for displaying attributes in sysfs */ 6869 ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry); 6870 if (ep) { 6871 ep->conn = cls_conn; 6872 cls_conn->ep = ep; 6873 } else { 6874 DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n")); 6875 ret = QLA_ERROR; 6876 goto exit_setup; 6877 } 6878 6879 /* Update sess/conn params */ 6880 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); 6881 qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry); 6882 6883 if (is_reset == RESET_ADAPTER) { 6884 iscsi_block_session(cls_sess); 6885 /* Use the relogin path to discover new devices 6886 * by short-circuting the logic of setting 6887 * timer to relogin - instead set the flags 6888 * to initiate login right away. 
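 * DPC_RELOGIN_DEVICE is set on the adapter and DF_RELOGIN on the DDB;
 * qla4xxx_do_dpc() then walks the sessions and performs the relogin.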
6889 */ 6890 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); 6891 set_bit(DF_RELOGIN, &ddb_entry->flags); 6892 } 6893 6894 exit_setup: 6895 return ret; 6896 } 6897 6898 static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha, 6899 struct list_head *list_ddb, 6900 struct dev_db_entry *fw_ddb_entry) 6901 { 6902 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 6903 uint16_t ddb_link; 6904 6905 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 6906 6907 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { 6908 if (ddb_idx->fw_ddb_idx == ddb_link) { 6909 DEBUG2(ql4_printk(KERN_INFO, ha, 6910 "Updating NT parent idx from [%d] to [%d]\n", 6911 ddb_link, ddb_idx->flash_ddb_idx)); 6912 fw_ddb_entry->ddb_link = 6913 cpu_to_le16(ddb_idx->flash_ddb_idx); 6914 return; 6915 } 6916 } 6917 } 6918 6919 static void qla4xxx_build_nt_list(struct scsi_qla_host *ha, 6920 struct list_head *list_nt, 6921 struct list_head *list_st, 6922 int is_reset) 6923 { 6924 struct dev_db_entry *fw_ddb_entry; 6925 struct ddb_entry *ddb_entry = NULL; 6926 dma_addr_t fw_ddb_dma; 6927 int max_ddbs; 6928 int fw_idx_size; 6929 int ret; 6930 uint32_t idx = 0, next_idx = 0; 6931 uint32_t state = 0, conn_err = 0; 6932 uint32_t ddb_idx = -1; 6933 uint16_t conn_id = 0; 6934 uint16_t ddb_link = -1; 6935 struct qla_ddb_index *nt_ddb_idx; 6936 6937 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 6938 &fw_ddb_dma); 6939 if (fw_ddb_entry == NULL) { 6940 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 6941 goto exit_nt_list; 6942 } 6943 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : 6944 MAX_DEV_DB_ENTRIES; 6945 fw_idx_size = sizeof(struct qla_ddb_index); 6946 6947 for (idx = 0; idx < max_ddbs; idx = next_idx) { 6948 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, 6949 NULL, &next_idx, &state, 6950 &conn_err, NULL, &conn_id); 6951 if (ret == QLA_ERROR) 6952 break; 6953 6954 if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS) 6955 goto continue_next_nt; 6956 6957 /* Check if NT, then add to list it */ 6958 if (strlen((char *) fw_ddb_entry->iscsi_name) == 0) 6959 goto continue_next_nt; 6960 6961 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 6962 if (ddb_link < max_ddbs) 6963 qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry); 6964 6965 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE || 6966 state == DDB_DS_SESSION_FAILED) && 6967 (is_reset == INIT_ADAPTER)) 6968 goto continue_next_nt; 6969 6970 DEBUG2(ql4_printk(KERN_INFO, ha, 6971 "Adding DDB to session = 0x%x\n", idx)); 6972 6973 if (is_reset == INIT_ADAPTER) { 6974 nt_ddb_idx = vmalloc(fw_idx_size); 6975 if (!nt_ddb_idx) 6976 break; 6977 6978 nt_ddb_idx->fw_ddb_idx = idx; 6979 6980 /* Copy original isid as it may get updated in function 6981 * qla4xxx_update_isid(). 
We need original isid in 6982 * function qla4xxx_compare_tuple_ddb to find duplicate 6983 * target */ 6984 memcpy(&nt_ddb_idx->flash_isid[0], 6985 &fw_ddb_entry->isid[0], 6986 sizeof(nt_ddb_idx->flash_isid)); 6987 6988 ret = qla4xxx_is_flash_ddb_exists(ha, list_nt, 6989 fw_ddb_entry); 6990 if (ret == QLA_SUCCESS) { 6991 /* free nt_ddb_idx and do not add to list_nt */ 6992 vfree(nt_ddb_idx); 6993 goto continue_next_nt; 6994 } 6995 6996 /* Copy updated isid */ 6997 memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry, 6998 sizeof(struct dev_db_entry)); 6999 7000 list_add_tail(&nt_ddb_idx->list, list_nt); 7001 } else if (is_reset == RESET_ADAPTER) { 7002 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, 7003 &ddb_idx); 7004 if (ret == QLA_SUCCESS) { 7005 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, 7006 ddb_idx); 7007 if (ddb_entry != NULL) 7008 qla4xxx_update_sess_disc_idx(ha, 7009 ddb_entry, 7010 fw_ddb_entry); 7011 goto continue_next_nt; 7012 } 7013 } 7014 7015 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx); 7016 if (ret == QLA_ERROR) 7017 goto exit_nt_list; 7018 7019 continue_next_nt: 7020 if (next_idx == 0) 7021 break; 7022 } 7023 7024 exit_nt_list: 7025 if (fw_ddb_entry) 7026 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 7027 } 7028 7029 static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha, 7030 struct list_head *list_nt, 7031 uint16_t target_id) 7032 { 7033 struct dev_db_entry *fw_ddb_entry; 7034 dma_addr_t fw_ddb_dma; 7035 int max_ddbs; 7036 int fw_idx_size; 7037 int ret; 7038 uint32_t idx = 0, next_idx = 0; 7039 uint32_t state = 0, conn_err = 0; 7040 uint16_t conn_id = 0; 7041 struct qla_ddb_index *nt_ddb_idx; 7042 7043 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 7044 &fw_ddb_dma); 7045 if (fw_ddb_entry == NULL) { 7046 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 7047 goto exit_new_nt_list; 7048 } 7049 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 7050 MAX_DEV_DB_ENTRIES; 7051 fw_idx_size = sizeof(struct qla_ddb_index); 7052 7053 for (idx = 0; idx < max_ddbs; idx = next_idx) { 7054 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, 7055 NULL, &next_idx, &state, 7056 &conn_err, NULL, &conn_id); 7057 if (ret == QLA_ERROR) 7058 break; 7059 7060 /* Check if NT, then add it to list */ 7061 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0) 7062 goto continue_next_new_nt; 7063 7064 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE)) 7065 goto continue_next_new_nt; 7066 7067 DEBUG2(ql4_printk(KERN_INFO, ha, 7068 "Adding DDB to session = 0x%x\n", idx)); 7069 7070 nt_ddb_idx = vmalloc(fw_idx_size); 7071 if (!nt_ddb_idx) 7072 break; 7073 7074 nt_ddb_idx->fw_ddb_idx = idx; 7075 7076 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL); 7077 if (ret == QLA_SUCCESS) { 7078 /* free nt_ddb_idx and do not add to list_nt */ 7079 vfree(nt_ddb_idx); 7080 goto continue_next_new_nt; 7081 } 7082 7083 if (target_id < max_ddbs) 7084 fw_ddb_entry->ddb_link = cpu_to_le16(target_id); 7085 7086 list_add_tail(&nt_ddb_idx->list, list_nt); 7087 7088 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, 7089 idx); 7090 if (ret == QLA_ERROR) 7091 goto exit_new_nt_list; 7092 7093 continue_next_new_nt: 7094 if (next_idx == 0) 7095 break; 7096 } 7097 7098 exit_new_nt_list: 7099 if (fw_ddb_entry) 7100 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 7101 } 7102 7103 /** 7104 * qla4xxx_sysfs_ddb_is_non_persistent - check for non-persistence of ddb entry 7105 * @dev: dev associated with the sysfs entry 7106 * @data: pointer to flashnode session object 7107 * 7108 * Returns: 7109 * 1: if flashnode entry is non-persistent 7110 * 0: if flashnode entry is persistent 7111 **/ 7112 static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data) 7113 { 7114 struct iscsi_bus_flash_session *fnode_sess; 7115 7116 if (!iscsi_flashnode_bus_match(dev, NULL)) 7117 return 0; 7118 7119 fnode_sess = iscsi_dev_to_flash_session(dev); 7120 7121 return (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT); 7122 } 7123 7124 /** 7125 * qla4xxx_sysfs_ddb_tgt_create - Create sysfs entry for target 7126 * @ha: pointer to host 7127 * @fw_ddb_entry: flash ddb data 7128 * @idx: target index 7129 * @user: if set then this call is made from userland else from kernel 7130 * 7131 * Returns: 7132 * On sucess: QLA_SUCCESS 7133 * On failure: QLA_ERROR 7134 * 7135 * This create separate sysfs entries for session and connection attributes of 7136 * the given fw ddb entry. 7137 * If this is invoked as a result of a userspace call then the entry is marked 7138 * as nonpersistent using flash_state field. 
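 *
 * If the connection entry cannot be created, the session entry created just
 * before it is destroyed again before returning QLA_ERROR.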
7139 **/ 7140 static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, 7141 struct dev_db_entry *fw_ddb_entry, 7142 uint16_t *idx, int user) 7143 { 7144 struct iscsi_bus_flash_session *fnode_sess = NULL; 7145 struct iscsi_bus_flash_conn *fnode_conn = NULL; 7146 int rc = QLA_ERROR; 7147 7148 fnode_sess = iscsi_create_flashnode_sess(ha->host, *idx, 7149 &qla4xxx_iscsi_transport, 0); 7150 if (!fnode_sess) { 7151 ql4_printk(KERN_ERR, ha, 7152 "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n", 7153 __func__, *idx, ha->host_no); 7154 goto exit_tgt_create; 7155 } 7156 7157 fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess, 7158 &qla4xxx_iscsi_transport, 0); 7159 if (!fnode_conn) { 7160 ql4_printk(KERN_ERR, ha, 7161 "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n", 7162 __func__, *idx, ha->host_no); 7163 goto free_sess; 7164 } 7165 7166 if (user) { 7167 fnode_sess->flash_state = DEV_DB_NON_PERSISTENT; 7168 } else { 7169 fnode_sess->flash_state = DEV_DB_PERSISTENT; 7170 7171 if (*idx == ha->pri_ddb_idx || *idx == ha->sec_ddb_idx) 7172 fnode_sess->is_boot_target = 1; 7173 else 7174 fnode_sess->is_boot_target = 0; 7175 } 7176 7177 rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn, 7178 fw_ddb_entry); 7179 7180 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", 7181 __func__, fnode_sess->dev.kobj.name); 7182 7183 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", 7184 __func__, fnode_conn->dev.kobj.name); 7185 7186 return QLA_SUCCESS; 7187 7188 free_sess: 7189 iscsi_destroy_flashnode_sess(fnode_sess); 7190 7191 exit_tgt_create: 7192 return QLA_ERROR; 7193 } 7194 7195 /** 7196 * qla4xxx_sysfs_ddb_add - Add new ddb entry in flash 7197 * @shost: pointer to host 7198 * @buf: type of ddb entry (ipv4/ipv6) 7199 * @len: length of buf 7200 * 7201 * This creates new ddb entry in the flash by finding first free index and 7202 * storing default ddb there. And then create sysfs entry for the new ddb entry. 7203 **/ 7204 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf, 7205 int len) 7206 { 7207 struct scsi_qla_host *ha = to_qla_host(shost); 7208 struct dev_db_entry *fw_ddb_entry = NULL; 7209 dma_addr_t fw_ddb_entry_dma; 7210 struct device *dev; 7211 uint16_t idx = 0; 7212 uint16_t max_ddbs = 0; 7213 uint32_t options = 0; 7214 uint32_t rval = QLA_ERROR; 7215 7216 if (strncasecmp(PORTAL_TYPE_IPV4, buf, 4) && 7217 strncasecmp(PORTAL_TYPE_IPV6, buf, 4)) { 7218 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid portal type\n", 7219 __func__)); 7220 goto exit_ddb_add; 7221 } 7222 7223 max_ddbs = is_qla40XX(ha) ? 
MAX_PRST_DEV_DB_ENTRIES : 7224 MAX_DEV_DB_ENTRIES; 7225 7226 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7227 &fw_ddb_entry_dma, GFP_KERNEL); 7228 if (!fw_ddb_entry) { 7229 DEBUG2(ql4_printk(KERN_ERR, ha, 7230 "%s: Unable to allocate dma buffer\n", 7231 __func__)); 7232 goto exit_ddb_add; 7233 } 7234 7235 dev = iscsi_find_flashnode_sess(ha->host, NULL, 7236 qla4xxx_sysfs_ddb_is_non_persistent); 7237 if (dev) { 7238 ql4_printk(KERN_ERR, ha, 7239 "%s: A non-persistent entry %s found\n", 7240 __func__, dev->kobj.name); 7241 put_device(dev); 7242 goto exit_ddb_add; 7243 } 7244 7245 /* Index 0 and 1 are reserved for boot target entries */ 7246 for (idx = 2; idx < max_ddbs; idx++) { 7247 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, 7248 fw_ddb_entry_dma, idx)) 7249 break; 7250 } 7251 7252 if (idx == max_ddbs) 7253 goto exit_ddb_add; 7254 7255 if (!strncasecmp("ipv6", buf, 4)) 7256 options |= IPV6_DEFAULT_DDB_ENTRY; 7257 7258 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); 7259 if (rval == QLA_ERROR) 7260 goto exit_ddb_add; 7261 7262 rval = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 1); 7263 7264 exit_ddb_add: 7265 if (fw_ddb_entry) 7266 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7267 fw_ddb_entry, fw_ddb_entry_dma); 7268 if (rval == QLA_SUCCESS) 7269 return idx; 7270 else 7271 return -EIO; 7272 } 7273 7274 /** 7275 * qla4xxx_sysfs_ddb_apply - write the target ddb contents to Flash 7276 * @fnode_sess: pointer to session attrs of flash ddb entry 7277 * @fnode_conn: pointer to connection attrs of flash ddb entry 7278 * 7279 * This writes the contents of target ddb buffer to Flash with a valid cookie 7280 * value in order to make the ddb entry persistent. 7281 **/ 7282 static int qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess, 7283 struct iscsi_bus_flash_conn *fnode_conn) 7284 { 7285 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7286 struct scsi_qla_host *ha = to_qla_host(shost); 7287 uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO; 7288 struct dev_db_entry *fw_ddb_entry = NULL; 7289 dma_addr_t fw_ddb_entry_dma; 7290 uint32_t options = 0; 7291 int rval = 0; 7292 7293 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7294 &fw_ddb_entry_dma, GFP_KERNEL); 7295 if (!fw_ddb_entry) { 7296 DEBUG2(ql4_printk(KERN_ERR, ha, 7297 "%s: Unable to allocate dma buffer\n", 7298 __func__)); 7299 rval = -ENOMEM; 7300 goto exit_ddb_apply; 7301 } 7302 7303 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7304 options |= IPV6_DEFAULT_DDB_ENTRY; 7305 7306 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); 7307 if (rval == QLA_ERROR) 7308 goto exit_ddb_apply; 7309 7310 dev_db_start_offset += (fnode_sess->target_id * 7311 sizeof(*fw_ddb_entry)); 7312 7313 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); 7314 fw_ddb_entry->cookie = DDB_VALID_COOKIE; 7315 7316 rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, 7317 sizeof(*fw_ddb_entry), FLASH_OPT_RMW_COMMIT); 7318 7319 if (rval == QLA_SUCCESS) { 7320 fnode_sess->flash_state = DEV_DB_PERSISTENT; 7321 ql4_printk(KERN_INFO, ha, 7322 "%s: flash node %u of host %lu written to flash\n", 7323 __func__, fnode_sess->target_id, ha->host_no); 7324 } else { 7325 rval = -EIO; 7326 ql4_printk(KERN_ERR, ha, 7327 "%s: Error while writing flash node %u of host %lu to flash\n", 7328 __func__, fnode_sess->target_id, ha->host_no); 7329 } 7330 7331 exit_ddb_apply: 7332 if (fw_ddb_entry) 7333 
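		/* release the DMA buffer used to stage the DDB contents */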
dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7334 fw_ddb_entry, fw_ddb_entry_dma); 7335 return rval; 7336 } 7337 7338 static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha, 7339 struct dev_db_entry *fw_ddb_entry, 7340 uint16_t idx) 7341 { 7342 struct dev_db_entry *ddb_entry = NULL; 7343 dma_addr_t ddb_entry_dma; 7344 unsigned long wtime; 7345 uint32_t mbx_sts = 0; 7346 uint32_t state = 0, conn_err = 0; 7347 uint16_t tmo = 0; 7348 int ret = 0; 7349 7350 ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ddb_entry), 7351 &ddb_entry_dma, GFP_KERNEL); 7352 if (!ddb_entry) { 7353 DEBUG2(ql4_printk(KERN_ERR, ha, 7354 "%s: Unable to allocate dma buffer\n", 7355 __func__)); 7356 return QLA_ERROR; 7357 } 7358 7359 memcpy(ddb_entry, fw_ddb_entry, sizeof(*ddb_entry)); 7360 7361 ret = qla4xxx_set_ddb_entry(ha, idx, ddb_entry_dma, &mbx_sts); 7362 if (ret != QLA_SUCCESS) { 7363 DEBUG2(ql4_printk(KERN_ERR, ha, 7364 "%s: Unable to set ddb entry for index %d\n", 7365 __func__, idx)); 7366 goto exit_ddb_conn_open; 7367 } 7368 7369 qla4xxx_conn_open(ha, idx); 7370 7371 /* To ensure that sendtargets is done, wait for at least 12 secs */ 7372 tmo = ((ha->def_timeout > LOGIN_TOV) && 7373 (ha->def_timeout < LOGIN_TOV * 10) ? 7374 ha->def_timeout : LOGIN_TOV); 7375 7376 DEBUG2(ql4_printk(KERN_INFO, ha, 7377 "Default time to wait for login to ddb %d\n", tmo)); 7378 7379 wtime = jiffies + (HZ * tmo); 7380 do { 7381 ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL, 7382 NULL, &state, &conn_err, NULL, 7383 NULL); 7384 if (ret == QLA_ERROR) 7385 continue; 7386 7387 if (state == DDB_DS_NO_CONNECTION_ACTIVE || 7388 state == DDB_DS_SESSION_FAILED) 7389 break; 7390 7391 schedule_timeout_uninterruptible(HZ / 10); 7392 } while (time_after(wtime, jiffies)); 7393 7394 exit_ddb_conn_open: 7395 if (ddb_entry) 7396 dma_free_coherent(&ha->pdev->dev, sizeof(*ddb_entry), 7397 ddb_entry, ddb_entry_dma); 7398 return ret; 7399 } 7400 7401 static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha, 7402 struct dev_db_entry *fw_ddb_entry, 7403 uint16_t target_id) 7404 { 7405 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 7406 struct list_head list_nt; 7407 uint16_t ddb_index; 7408 int ret = 0; 7409 7410 if (test_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags)) { 7411 ql4_printk(KERN_WARNING, ha, 7412 "%s: A discovery already in progress!\n", __func__); 7413 return QLA_ERROR; 7414 } 7415 7416 INIT_LIST_HEAD(&list_nt); 7417 7418 set_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags); 7419 7420 ret = qla4xxx_get_ddb_index(ha, &ddb_index); 7421 if (ret == QLA_ERROR) 7422 goto exit_login_st_clr_bit; 7423 7424 ret = qla4xxx_sysfs_ddb_conn_open(ha, fw_ddb_entry, ddb_index); 7425 if (ret == QLA_ERROR) 7426 goto exit_login_st; 7427 7428 qla4xxx_build_new_nt_list(ha, &list_nt, target_id); 7429 7430 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) { 7431 list_del_init(&ddb_idx->list); 7432 qla4xxx_clear_ddb_entry(ha, ddb_idx->fw_ddb_idx); 7433 vfree(ddb_idx); 7434 } 7435 7436 exit_login_st: 7437 if (qla4xxx_clear_ddb_entry(ha, ddb_index) == QLA_ERROR) { 7438 ql4_printk(KERN_ERR, ha, 7439 "Unable to clear DDB index = 0x%x\n", ddb_index); 7440 } 7441 7442 clear_bit(ddb_index, ha->ddb_idx_map); 7443 7444 exit_login_st_clr_bit: 7445 clear_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags); 7446 return ret; 7447 } 7448 7449 static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha, 7450 struct dev_db_entry *fw_ddb_entry, 7451 uint16_t idx) 7452 { 7453 int ret = QLA_ERROR; 7454 7455 ret = qla4xxx_is_session_exists(ha, 
fw_ddb_entry, NULL); 7456 if (ret != QLA_SUCCESS) 7457 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, 7458 idx); 7459 else 7460 ret = -EPERM; 7461 7462 return ret; 7463 } 7464 7465 /** 7466 * qla4xxx_sysfs_ddb_login - Login to the specified target 7467 * @fnode_sess: pointer to session attrs of flash ddb entry 7468 * @fnode_conn: pointer to connection attrs of flash ddb entry 7469 * 7470 * This logs in to the specified target 7471 **/ 7472 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess, 7473 struct iscsi_bus_flash_conn *fnode_conn) 7474 { 7475 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7476 struct scsi_qla_host *ha = to_qla_host(shost); 7477 struct dev_db_entry *fw_ddb_entry = NULL; 7478 dma_addr_t fw_ddb_entry_dma; 7479 uint32_t options = 0; 7480 int ret = 0; 7481 7482 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) { 7483 ql4_printk(KERN_ERR, ha, 7484 "%s: Target info is not persistent\n", __func__); 7485 ret = -EIO; 7486 goto exit_ddb_login; 7487 } 7488 7489 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7490 &fw_ddb_entry_dma, GFP_KERNEL); 7491 if (!fw_ddb_entry) { 7492 DEBUG2(ql4_printk(KERN_ERR, ha, 7493 "%s: Unable to allocate dma buffer\n", 7494 __func__)); 7495 ret = -ENOMEM; 7496 goto exit_ddb_login; 7497 } 7498 7499 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7500 options |= IPV6_DEFAULT_DDB_ENTRY; 7501 7502 ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); 7503 if (ret == QLA_ERROR) 7504 goto exit_ddb_login; 7505 7506 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); 7507 fw_ddb_entry->cookie = DDB_VALID_COOKIE; 7508 7509 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0) 7510 ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry, 7511 fnode_sess->target_id); 7512 else 7513 ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry, 7514 fnode_sess->target_id); 7515 7516 if (ret > 0) 7517 ret = -EIO; 7518 7519 exit_ddb_login: 7520 if (fw_ddb_entry) 7521 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7522 fw_ddb_entry, fw_ddb_entry_dma); 7523 return ret; 7524 } 7525 7526 /** 7527 * qla4xxx_sysfs_ddb_logout_sid - Logout session for the specified target 7528 * @cls_sess: pointer to session to be logged out 7529 * 7530 * This performs session log out from the specified target 7531 **/ 7532 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess) 7533 { 7534 struct iscsi_session *sess; 7535 struct ddb_entry *ddb_entry = NULL; 7536 struct scsi_qla_host *ha; 7537 struct dev_db_entry *fw_ddb_entry = NULL; 7538 dma_addr_t fw_ddb_entry_dma; 7539 unsigned long flags; 7540 unsigned long wtime; 7541 uint32_t ddb_state; 7542 int options; 7543 int ret = 0; 7544 7545 sess = cls_sess->dd_data; 7546 ddb_entry = sess->dd_data; 7547 ha = ddb_entry->ha; 7548 7549 if (ddb_entry->ddb_type != FLASH_DDB) { 7550 ql4_printk(KERN_ERR, ha, "%s: Not a flash node session\n", 7551 __func__); 7552 ret = -ENXIO; 7553 goto exit_ddb_logout; 7554 } 7555 7556 if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) { 7557 ql4_printk(KERN_ERR, ha, 7558 "%s: Logout from boot target entry is not permitted.\n", 7559 __func__); 7560 ret = -EPERM; 7561 goto exit_ddb_logout; 7562 } 7563 7564 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7565 &fw_ddb_entry_dma, GFP_KERNEL); 7566 if (!fw_ddb_entry) { 7567 ql4_printk(KERN_ERR, ha, 7568 "%s: Unable to allocate dma buffer\n", __func__); 7569 ret = -ENOMEM; 7570 goto exit_ddb_logout; 7571 } 7572 7573 if 
(test_and_set_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags)) 7574 goto ddb_logout_init; 7575 7576 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 7577 fw_ddb_entry, fw_ddb_entry_dma, 7578 NULL, NULL, &ddb_state, NULL, 7579 NULL, NULL); 7580 if (ret == QLA_ERROR) 7581 goto ddb_logout_init; 7582 7583 if (ddb_state == DDB_DS_SESSION_ACTIVE) 7584 goto ddb_logout_init; 7585 7586 /* wait until next relogin is triggered using DF_RELOGIN and 7587 * clear DF_RELOGIN to avoid invocation of further relogin 7588 */ 7589 wtime = jiffies + (HZ * RELOGIN_TOV); 7590 do { 7591 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags)) 7592 goto ddb_logout_init; 7593 7594 schedule_timeout_uninterruptible(HZ); 7595 } while ((time_after(wtime, jiffies))); 7596 7597 ddb_logout_init: 7598 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); 7599 atomic_set(&ddb_entry->relogin_timer, 0); 7600 7601 options = LOGOUT_OPTION_CLOSE_SESSION; 7602 qla4xxx_session_logout_ddb(ha, ddb_entry, options); 7603 7604 memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry)); 7605 wtime = jiffies + (HZ * LOGOUT_TOV); 7606 do { 7607 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 7608 fw_ddb_entry, fw_ddb_entry_dma, 7609 NULL, NULL, &ddb_state, NULL, 7610 NULL, NULL); 7611 if (ret == QLA_ERROR) 7612 goto ddb_logout_clr_sess; 7613 7614 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || 7615 (ddb_state == DDB_DS_SESSION_FAILED)) 7616 goto ddb_logout_clr_sess; 7617 7618 schedule_timeout_uninterruptible(HZ); 7619 } while ((time_after(wtime, jiffies))); 7620 7621 ddb_logout_clr_sess: 7622 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); 7623 /* 7624 * we have decremented the reference count of the driver 7625 * when we setup the session to have the driver unload 7626 * to be seamless without actually destroying the 7627 * session 7628 **/ 7629 try_module_get(qla4xxx_iscsi_transport.owner); 7630 iscsi_destroy_endpoint(ddb_entry->conn->ep); 7631 7632 spin_lock_irqsave(&ha->hardware_lock, flags); 7633 qla4xxx_free_ddb(ha, ddb_entry); 7634 clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map); 7635 spin_unlock_irqrestore(&ha->hardware_lock, flags); 7636 7637 iscsi_session_teardown(ddb_entry->sess); 7638 7639 clear_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags); 7640 ret = QLA_SUCCESS; 7641 7642 exit_ddb_logout: 7643 if (fw_ddb_entry) 7644 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7645 fw_ddb_entry, fw_ddb_entry_dma); 7646 return ret; 7647 } 7648 7649 /** 7650 * qla4xxx_sysfs_ddb_logout - Logout from the specified target 7651 * @fnode_sess: pointer to session attrs of flash ddb entry 7652 * @fnode_conn: pointer to connection attrs of flash ddb entry 7653 * 7654 * This performs log out from the specified target 7655 **/ 7656 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess, 7657 struct iscsi_bus_flash_conn *fnode_conn) 7658 { 7659 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7660 struct scsi_qla_host *ha = to_qla_host(shost); 7661 struct ql4_tuple_ddb *flash_tddb = NULL; 7662 struct ql4_tuple_ddb *tmp_tddb = NULL; 7663 struct dev_db_entry *fw_ddb_entry = NULL; 7664 struct ddb_entry *ddb_entry = NULL; 7665 dma_addr_t fw_ddb_dma; 7666 uint32_t next_idx = 0; 7667 uint32_t state = 0, conn_err = 0; 7668 uint16_t conn_id = 0; 7669 int idx, index; 7670 int status, ret = 0; 7671 7672 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 7673 &fw_ddb_dma); 7674 if (fw_ddb_entry == NULL) { 7675 ql4_printk(KERN_ERR, ha, "%s:Out of memory\n", __func__); 7676 ret = 
-ENOMEM; 7677 goto exit_ddb_logout; 7678 } 7679 7680 flash_tddb = vzalloc(sizeof(*flash_tddb)); 7681 if (!flash_tddb) { 7682 ql4_printk(KERN_WARNING, ha, 7683 "%s:Memory Allocation failed.\n", __func__); 7684 ret = -ENOMEM; 7685 goto exit_ddb_logout; 7686 } 7687 7688 tmp_tddb = vzalloc(sizeof(*tmp_tddb)); 7689 if (!tmp_tddb) { 7690 ql4_printk(KERN_WARNING, ha, 7691 "%s:Memory Allocation failed.\n", __func__); 7692 ret = -ENOMEM; 7693 goto exit_ddb_logout; 7694 } 7695 7696 if (!fnode_sess->targetname) { 7697 ql4_printk(KERN_ERR, ha, 7698 "%s:Cannot logout from SendTarget entry\n", 7699 __func__); 7700 ret = -EPERM; 7701 goto exit_ddb_logout; 7702 } 7703 7704 if (fnode_sess->is_boot_target) { 7705 ql4_printk(KERN_ERR, ha, 7706 "%s: Logout from boot target entry is not permitted.\n", 7707 __func__); 7708 ret = -EPERM; 7709 goto exit_ddb_logout; 7710 } 7711 7712 strncpy(flash_tddb->iscsi_name, fnode_sess->targetname, 7713 ISCSI_NAME_SIZE); 7714 7715 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7716 sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress); 7717 else 7718 sprintf(flash_tddb->ip_addr, "%pI4", fnode_conn->ipaddress); 7719 7720 flash_tddb->tpgt = fnode_sess->tpgt; 7721 flash_tddb->port = fnode_conn->port; 7722 7723 COPY_ISID(flash_tddb->isid, fnode_sess->isid); 7724 7725 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { 7726 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 7727 if (ddb_entry == NULL) 7728 continue; 7729 7730 if (ddb_entry->ddb_type != FLASH_DDB) 7731 continue; 7732 7733 index = ddb_entry->sess->target_id; 7734 status = qla4xxx_get_fwddb_entry(ha, index, fw_ddb_entry, 7735 fw_ddb_dma, NULL, &next_idx, 7736 &state, &conn_err, NULL, 7737 &conn_id); 7738 if (status == QLA_ERROR) { 7739 ret = -ENOMEM; 7740 break; 7741 } 7742 7743 qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, NULL); 7744 7745 status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb, 7746 true); 7747 if (status == QLA_SUCCESS) { 7748 ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess); 7749 break; 7750 } 7751 } 7752 7753 if (idx == MAX_DDB_ENTRIES) 7754 ret = -ESRCH; 7755 7756 exit_ddb_logout: 7757 if (flash_tddb) 7758 vfree(flash_tddb); 7759 if (tmp_tddb) 7760 vfree(tmp_tddb); 7761 if (fw_ddb_entry) 7762 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 7763 7764 return ret; 7765 } 7766 7767 static int 7768 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, 7769 int param, char *buf) 7770 { 7771 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7772 struct scsi_qla_host *ha = to_qla_host(shost); 7773 struct iscsi_bus_flash_conn *fnode_conn; 7774 struct ql4_chap_table chap_tbl; 7775 struct device *dev; 7776 int parent_type; 7777 int rc = 0; 7778 7779 dev = iscsi_find_flashnode_conn(fnode_sess); 7780 if (!dev) 7781 return -EIO; 7782 7783 fnode_conn = iscsi_dev_to_flash_conn(dev); 7784 7785 switch (param) { 7786 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: 7787 rc = sprintf(buf, "%u\n", fnode_conn->is_fw_assigned_ipv6); 7788 break; 7789 case ISCSI_FLASHNODE_PORTAL_TYPE: 7790 rc = sprintf(buf, "%s\n", fnode_sess->portal_type); 7791 break; 7792 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: 7793 rc = sprintf(buf, "%u\n", fnode_sess->auto_snd_tgt_disable); 7794 break; 7795 case ISCSI_FLASHNODE_DISCOVERY_SESS: 7796 rc = sprintf(buf, "%u\n", fnode_sess->discovery_sess); 7797 break; 7798 case ISCSI_FLASHNODE_ENTRY_EN: 7799 rc = sprintf(buf, "%u\n", fnode_sess->entry_state); 7800 break; 7801 case ISCSI_FLASHNODE_HDR_DGST_EN: 7802 rc = 
sprintf(buf, "%u\n", fnode_conn->hdrdgst_en); 7803 break; 7804 case ISCSI_FLASHNODE_DATA_DGST_EN: 7805 rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en); 7806 break; 7807 case ISCSI_FLASHNODE_IMM_DATA_EN: 7808 rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en); 7809 break; 7810 case ISCSI_FLASHNODE_INITIAL_R2T_EN: 7811 rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en); 7812 break; 7813 case ISCSI_FLASHNODE_DATASEQ_INORDER: 7814 rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en); 7815 break; 7816 case ISCSI_FLASHNODE_PDU_INORDER: 7817 rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en); 7818 break; 7819 case ISCSI_FLASHNODE_CHAP_AUTH_EN: 7820 rc = sprintf(buf, "%u\n", fnode_sess->chap_auth_en); 7821 break; 7822 case ISCSI_FLASHNODE_SNACK_REQ_EN: 7823 rc = sprintf(buf, "%u\n", fnode_conn->snack_req_en); 7824 break; 7825 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: 7826 rc = sprintf(buf, "%u\n", fnode_sess->discovery_logout_en); 7827 break; 7828 case ISCSI_FLASHNODE_BIDI_CHAP_EN: 7829 rc = sprintf(buf, "%u\n", fnode_sess->bidi_chap_en); 7830 break; 7831 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: 7832 rc = sprintf(buf, "%u\n", fnode_sess->discovery_auth_optional); 7833 break; 7834 case ISCSI_FLASHNODE_ERL: 7835 rc = sprintf(buf, "%u\n", fnode_sess->erl); 7836 break; 7837 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: 7838 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_stat); 7839 break; 7840 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: 7841 rc = sprintf(buf, "%u\n", fnode_conn->tcp_nagle_disable); 7842 break; 7843 case ISCSI_FLASHNODE_TCP_WSF_DISABLE: 7844 rc = sprintf(buf, "%u\n", fnode_conn->tcp_wsf_disable); 7845 break; 7846 case ISCSI_FLASHNODE_TCP_TIMER_SCALE: 7847 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timer_scale); 7848 break; 7849 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: 7850 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_en); 7851 break; 7852 case ISCSI_FLASHNODE_IP_FRAG_DISABLE: 7853 rc = sprintf(buf, "%u\n", fnode_conn->fragment_disable); 7854 break; 7855 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: 7856 rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength); 7857 break; 7858 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: 7859 rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength); 7860 break; 7861 case ISCSI_FLASHNODE_FIRST_BURST: 7862 rc = sprintf(buf, "%u\n", fnode_sess->first_burst); 7863 break; 7864 case ISCSI_FLASHNODE_DEF_TIME2WAIT: 7865 rc = sprintf(buf, "%u\n", fnode_sess->time2wait); 7866 break; 7867 case ISCSI_FLASHNODE_DEF_TIME2RETAIN: 7868 rc = sprintf(buf, "%u\n", fnode_sess->time2retain); 7869 break; 7870 case ISCSI_FLASHNODE_MAX_R2T: 7871 rc = sprintf(buf, "%u\n", fnode_sess->max_r2t); 7872 break; 7873 case ISCSI_FLASHNODE_KEEPALIVE_TMO: 7874 rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout); 7875 break; 7876 case ISCSI_FLASHNODE_ISID: 7877 rc = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n", 7878 fnode_sess->isid[0], fnode_sess->isid[1], 7879 fnode_sess->isid[2], fnode_sess->isid[3], 7880 fnode_sess->isid[4], fnode_sess->isid[5]); 7881 break; 7882 case ISCSI_FLASHNODE_TSID: 7883 rc = sprintf(buf, "%u\n", fnode_sess->tsid); 7884 break; 7885 case ISCSI_FLASHNODE_PORT: 7886 rc = sprintf(buf, "%d\n", fnode_conn->port); 7887 break; 7888 case ISCSI_FLASHNODE_MAX_BURST: 7889 rc = sprintf(buf, "%u\n", fnode_sess->max_burst); 7890 break; 7891 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: 7892 rc = sprintf(buf, "%u\n", 7893 fnode_sess->default_taskmgmt_timeout); 7894 break; 7895 case ISCSI_FLASHNODE_IPADDR: 7896 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7897 rc 
= sprintf(buf, "%pI6\n", fnode_conn->ipaddress); 7898 else 7899 rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress); 7900 break; 7901 case ISCSI_FLASHNODE_ALIAS: 7902 if (fnode_sess->targetalias) 7903 rc = sprintf(buf, "%s\n", fnode_sess->targetalias); 7904 else 7905 rc = sprintf(buf, "\n"); 7906 break; 7907 case ISCSI_FLASHNODE_REDIRECT_IPADDR: 7908 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7909 rc = sprintf(buf, "%pI6\n", 7910 fnode_conn->redirect_ipaddr); 7911 else 7912 rc = sprintf(buf, "%pI4\n", 7913 fnode_conn->redirect_ipaddr); 7914 break; 7915 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: 7916 rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size); 7917 break; 7918 case ISCSI_FLASHNODE_LOCAL_PORT: 7919 rc = sprintf(buf, "%u\n", fnode_conn->local_port); 7920 break; 7921 case ISCSI_FLASHNODE_IPV4_TOS: 7922 rc = sprintf(buf, "%u\n", fnode_conn->ipv4_tos); 7923 break; 7924 case ISCSI_FLASHNODE_IPV6_TC: 7925 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7926 rc = sprintf(buf, "%u\n", 7927 fnode_conn->ipv6_traffic_class); 7928 else 7929 rc = sprintf(buf, "\n"); 7930 break; 7931 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: 7932 rc = sprintf(buf, "%u\n", fnode_conn->ipv6_flow_label); 7933 break; 7934 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: 7935 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7936 rc = sprintf(buf, "%pI6\n", 7937 fnode_conn->link_local_ipv6_addr); 7938 else 7939 rc = sprintf(buf, "\n"); 7940 break; 7941 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: 7942 rc = sprintf(buf, "%u\n", fnode_sess->discovery_parent_idx); 7943 break; 7944 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE: 7945 if (fnode_sess->discovery_parent_type == DDB_ISNS) 7946 parent_type = ISCSI_DISC_PARENT_ISNS; 7947 else if (fnode_sess->discovery_parent_type == DDB_NO_LINK) 7948 parent_type = ISCSI_DISC_PARENT_UNKNOWN; 7949 else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES) 7950 parent_type = ISCSI_DISC_PARENT_SENDTGT; 7951 else 7952 parent_type = ISCSI_DISC_PARENT_UNKNOWN; 7953 7954 rc = sprintf(buf, "%s\n", 7955 iscsi_get_discovery_parent_name(parent_type)); 7956 break; 7957 case ISCSI_FLASHNODE_NAME: 7958 if (fnode_sess->targetname) 7959 rc = sprintf(buf, "%s\n", fnode_sess->targetname); 7960 else 7961 rc = sprintf(buf, "\n"); 7962 break; 7963 case ISCSI_FLASHNODE_TPGT: 7964 rc = sprintf(buf, "%u\n", fnode_sess->tpgt); 7965 break; 7966 case ISCSI_FLASHNODE_TCP_XMIT_WSF: 7967 rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf); 7968 break; 7969 case ISCSI_FLASHNODE_TCP_RECV_WSF: 7970 rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf); 7971 break; 7972 case ISCSI_FLASHNODE_CHAP_OUT_IDX: 7973 rc = sprintf(buf, "%u\n", fnode_sess->chap_out_idx); 7974 break; 7975 case ISCSI_FLASHNODE_USERNAME: 7976 if (fnode_sess->chap_auth_en) { 7977 /* only report the name if the CHAP entry can be read */ if (!qla4xxx_get_uni_chap_at_index(ha, 7978 chap_tbl.name, 7979 chap_tbl.secret, 7980 fnode_sess->chap_out_idx)) 7981 rc = sprintf(buf, "%s\n", chap_tbl.name); else rc = sprintf(buf, "\n"); 7982 } else { 7983 rc = sprintf(buf, "\n"); 7984 } 7985 break; 7986 case ISCSI_FLASHNODE_PASSWORD: 7987 if (fnode_sess->chap_auth_en) { 7988 /* only report the secret if the CHAP entry can be read */ if (!qla4xxx_get_uni_chap_at_index(ha, 7989 chap_tbl.name, 7990 chap_tbl.secret, 7991 fnode_sess->chap_out_idx)) 7992 rc = sprintf(buf, "%s\n", chap_tbl.secret); else rc = sprintf(buf, "\n"); 7993 } else { 7994 rc = sprintf(buf, "\n"); 7995 } 7996 break; 7997 case ISCSI_FLASHNODE_STATSN: 7998 rc = sprintf(buf, "%u\n", fnode_conn->statsn); 7999 break; 8000 case ISCSI_FLASHNODE_EXP_STATSN: 8001 rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn); 8002 break; 8003 case ISCSI_FLASHNODE_IS_BOOT_TGT: 
8004 rc = sprintf(buf, "%u\n", fnode_sess->is_boot_target); 8005 break; 8006 default: 8007 rc = -ENOSYS; 8008 break; 8009 } 8010 8011 put_device(dev); 8012 return rc; 8013 } 8014 8015 /** 8016 * qla4xxx_sysfs_ddb_set_param - Set parameter for firmware DDB entry 8017 * @fnode_sess: pointer to session attrs of flash ddb entry 8018 * @fnode_conn: pointer to connection attrs of flash ddb entry 8019 * @data: Parameters and their values to update 8020 * @len: len of data 8021 * 8022 * This sets the parameter of flash ddb entry and writes them to flash 8023 **/ 8024 static int 8025 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, 8026 struct iscsi_bus_flash_conn *fnode_conn, 8027 void *data, int len) 8028 { 8029 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 8030 struct scsi_qla_host *ha = to_qla_host(shost); 8031 struct iscsi_flashnode_param_info *fnode_param; 8032 struct ql4_chap_table chap_tbl; 8033 struct nlattr *attr; 8034 uint16_t chap_out_idx = INVALID_ENTRY; 8035 int rc = QLA_ERROR; 8036 uint32_t rem = len; 8037 8038 memset((void *)&chap_tbl, 0, sizeof(chap_tbl)); 8039 nla_for_each_attr(attr, data, len, rem) { 8040 fnode_param = nla_data(attr); 8041 8042 switch (fnode_param->param) { 8043 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: 8044 fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0]; 8045 break; 8046 case ISCSI_FLASHNODE_PORTAL_TYPE: 8047 memcpy(fnode_sess->portal_type, fnode_param->value, 8048 strlen(fnode_sess->portal_type)); 8049 break; 8050 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: 8051 fnode_sess->auto_snd_tgt_disable = 8052 fnode_param->value[0]; 8053 break; 8054 case ISCSI_FLASHNODE_DISCOVERY_SESS: 8055 fnode_sess->discovery_sess = fnode_param->value[0]; 8056 break; 8057 case ISCSI_FLASHNODE_ENTRY_EN: 8058 fnode_sess->entry_state = fnode_param->value[0]; 8059 break; 8060 case ISCSI_FLASHNODE_HDR_DGST_EN: 8061 fnode_conn->hdrdgst_en = fnode_param->value[0]; 8062 break; 8063 case ISCSI_FLASHNODE_DATA_DGST_EN: 8064 fnode_conn->datadgst_en = fnode_param->value[0]; 8065 break; 8066 case ISCSI_FLASHNODE_IMM_DATA_EN: 8067 fnode_sess->imm_data_en = fnode_param->value[0]; 8068 break; 8069 case ISCSI_FLASHNODE_INITIAL_R2T_EN: 8070 fnode_sess->initial_r2t_en = fnode_param->value[0]; 8071 break; 8072 case ISCSI_FLASHNODE_DATASEQ_INORDER: 8073 fnode_sess->dataseq_inorder_en = fnode_param->value[0]; 8074 break; 8075 case ISCSI_FLASHNODE_PDU_INORDER: 8076 fnode_sess->pdu_inorder_en = fnode_param->value[0]; 8077 break; 8078 case ISCSI_FLASHNODE_CHAP_AUTH_EN: 8079 fnode_sess->chap_auth_en = fnode_param->value[0]; 8080 /* Invalidate chap index if chap auth is disabled */ 8081 if (!fnode_sess->chap_auth_en) 8082 fnode_sess->chap_out_idx = INVALID_ENTRY; 8083 8084 break; 8085 case ISCSI_FLASHNODE_SNACK_REQ_EN: 8086 fnode_conn->snack_req_en = fnode_param->value[0]; 8087 break; 8088 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: 8089 fnode_sess->discovery_logout_en = fnode_param->value[0]; 8090 break; 8091 case ISCSI_FLASHNODE_BIDI_CHAP_EN: 8092 fnode_sess->bidi_chap_en = fnode_param->value[0]; 8093 break; 8094 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: 8095 fnode_sess->discovery_auth_optional = 8096 fnode_param->value[0]; 8097 break; 8098 case ISCSI_FLASHNODE_ERL: 8099 fnode_sess->erl = fnode_param->value[0]; 8100 break; 8101 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: 8102 fnode_conn->tcp_timestamp_stat = fnode_param->value[0]; 8103 break; 8104 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: 8105 fnode_conn->tcp_nagle_disable = fnode_param->value[0]; 8106 break; 
8107 case ISCSI_FLASHNODE_TCP_WSF_DISABLE: 8108 fnode_conn->tcp_wsf_disable = fnode_param->value[0]; 8109 break; 8110 case ISCSI_FLASHNODE_TCP_TIMER_SCALE: 8111 fnode_conn->tcp_timer_scale = fnode_param->value[0]; 8112 break; 8113 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: 8114 fnode_conn->tcp_timestamp_en = fnode_param->value[0]; 8115 break; 8116 case ISCSI_FLASHNODE_IP_FRAG_DISABLE: 8117 fnode_conn->fragment_disable = fnode_param->value[0]; 8118 break; 8119 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: 8120 fnode_conn->max_recv_dlength = 8121 *(unsigned *)fnode_param->value; 8122 break; 8123 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: 8124 fnode_conn->max_xmit_dlength = 8125 *(unsigned *)fnode_param->value; 8126 break; 8127 case ISCSI_FLASHNODE_FIRST_BURST: 8128 fnode_sess->first_burst = 8129 *(unsigned *)fnode_param->value; 8130 break; 8131 case ISCSI_FLASHNODE_DEF_TIME2WAIT: 8132 fnode_sess->time2wait = *(uint16_t *)fnode_param->value; 8133 break; 8134 case ISCSI_FLASHNODE_DEF_TIME2RETAIN: 8135 fnode_sess->time2retain = 8136 *(uint16_t *)fnode_param->value; 8137 break; 8138 case ISCSI_FLASHNODE_MAX_R2T: 8139 fnode_sess->max_r2t = 8140 *(uint16_t *)fnode_param->value; 8141 break; 8142 case ISCSI_FLASHNODE_KEEPALIVE_TMO: 8143 fnode_conn->keepalive_timeout = 8144 *(uint16_t *)fnode_param->value; 8145 break; 8146 case ISCSI_FLASHNODE_ISID: 8147 memcpy(fnode_sess->isid, fnode_param->value, 8148 sizeof(fnode_sess->isid)); 8149 break; 8150 case ISCSI_FLASHNODE_TSID: 8151 fnode_sess->tsid = *(uint16_t *)fnode_param->value; 8152 break; 8153 case ISCSI_FLASHNODE_PORT: 8154 fnode_conn->port = *(uint16_t *)fnode_param->value; 8155 break; 8156 case ISCSI_FLASHNODE_MAX_BURST: 8157 fnode_sess->max_burst = *(unsigned *)fnode_param->value; 8158 break; 8159 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: 8160 fnode_sess->default_taskmgmt_timeout = 8161 *(uint16_t *)fnode_param->value; 8162 break; 8163 case ISCSI_FLASHNODE_IPADDR: 8164 memcpy(fnode_conn->ipaddress, fnode_param->value, 8165 IPv6_ADDR_LEN); 8166 break; 8167 case ISCSI_FLASHNODE_ALIAS: 8168 rc = iscsi_switch_str_param(&fnode_sess->targetalias, 8169 (char *)fnode_param->value); 8170 break; 8171 case ISCSI_FLASHNODE_REDIRECT_IPADDR: 8172 memcpy(fnode_conn->redirect_ipaddr, fnode_param->value, 8173 IPv6_ADDR_LEN); 8174 break; 8175 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: 8176 fnode_conn->max_segment_size = 8177 *(unsigned *)fnode_param->value; 8178 break; 8179 case ISCSI_FLASHNODE_LOCAL_PORT: 8180 fnode_conn->local_port = 8181 *(uint16_t *)fnode_param->value; 8182 break; 8183 case ISCSI_FLASHNODE_IPV4_TOS: 8184 fnode_conn->ipv4_tos = fnode_param->value[0]; 8185 break; 8186 case ISCSI_FLASHNODE_IPV6_TC: 8187 fnode_conn->ipv6_traffic_class = fnode_param->value[0]; 8188 break; 8189 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: 8190 fnode_conn->ipv6_flow_label = fnode_param->value[0]; 8191 break; 8192 case ISCSI_FLASHNODE_NAME: 8193 rc = iscsi_switch_str_param(&fnode_sess->targetname, 8194 (char *)fnode_param->value); 8195 break; 8196 case ISCSI_FLASHNODE_TPGT: 8197 fnode_sess->tpgt = *(uint16_t *)fnode_param->value; 8198 break; 8199 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: 8200 memcpy(fnode_conn->link_local_ipv6_addr, 8201 fnode_param->value, IPv6_ADDR_LEN); 8202 break; 8203 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: 8204 fnode_sess->discovery_parent_idx = 8205 *(uint16_t *)fnode_param->value; 8206 break; 8207 case ISCSI_FLASHNODE_TCP_XMIT_WSF: 8208 fnode_conn->tcp_xmit_wsf = 8209 *(uint8_t *)fnode_param->value; 8210 break; 8211 case ISCSI_FLASHNODE_TCP_RECV_WSF: 8212 
fnode_conn->tcp_recv_wsf = 8213 *(uint8_t *)fnode_param->value; 8214 break; 8215 case ISCSI_FLASHNODE_STATSN: 8216 fnode_conn->statsn = *(uint32_t *)fnode_param->value; 8217 break; 8218 case ISCSI_FLASHNODE_EXP_STATSN: 8219 fnode_conn->exp_statsn = 8220 *(uint32_t *)fnode_param->value; 8221 break; 8222 case ISCSI_FLASHNODE_CHAP_OUT_IDX: 8223 chap_out_idx = *(uint16_t *)fnode_param->value; 8224 if (!qla4xxx_get_uni_chap_at_index(ha, 8225 chap_tbl.name, 8226 chap_tbl.secret, 8227 chap_out_idx)) { 8228 fnode_sess->chap_out_idx = chap_out_idx; 8229 /* Enable chap auth if chap index is valid */ 8230 fnode_sess->chap_auth_en = QL4_PARAM_ENABLE; 8231 } 8232 break; 8233 default: 8234 ql4_printk(KERN_ERR, ha, 8235 "%s: No such sysfs attribute\n", __func__); 8236 rc = -ENOSYS; 8237 goto exit_set_param; 8238 } 8239 } 8240 8241 rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn); 8242 8243 exit_set_param: 8244 return rc; 8245 } 8246 8247 /** 8248 * qla4xxx_sysfs_ddb_delete - Delete firmware DDB entry 8249 * @fnode_sess: pointer to session attrs of flash ddb entry 8250 * 8251 * This invalidates the flash ddb entry at the given index 8252 **/ 8253 static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess) 8254 { 8255 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 8256 struct scsi_qla_host *ha = to_qla_host(shost); 8257 uint32_t dev_db_start_offset; 8258 uint32_t dev_db_end_offset; 8259 struct dev_db_entry *fw_ddb_entry = NULL; 8260 dma_addr_t fw_ddb_entry_dma; 8261 uint16_t *ddb_cookie = NULL; 8262 size_t ddb_size = 0; 8263 void *pddb = NULL; 8264 int target_id; 8265 int rc = 0; 8266 8267 if (fnode_sess->is_boot_target) { 8268 rc = -EPERM; 8269 DEBUG2(ql4_printk(KERN_ERR, ha, 8270 "%s: Deletion of boot target entry is not permitted.\n", 8271 __func__)); 8272 goto exit_ddb_del; 8273 } 8274 8275 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) 8276 goto sysfs_ddb_del; 8277 8278 if (is_qla40XX(ha)) { 8279 dev_db_start_offset = FLASH_OFFSET_DB_INFO; 8280 dev_db_end_offset = FLASH_OFFSET_DB_END; 8281 dev_db_start_offset += (fnode_sess->target_id * 8282 sizeof(*fw_ddb_entry)); 8283 ddb_size = sizeof(*fw_ddb_entry); 8284 } else { 8285 dev_db_start_offset = FLASH_RAW_ACCESS_ADDR + 8286 (ha->hw.flt_region_ddb << 2); 8287 /* flt_ddb_size is DDB table size for both ports 8288 * so divide it by 2 to calculate the offset for second port 8289 */ 8290 if (ha->port_num == 1) 8291 dev_db_start_offset += (ha->hw.flt_ddb_size / 2); 8292 8293 dev_db_end_offset = dev_db_start_offset + 8294 (ha->hw.flt_ddb_size / 2); 8295 8296 dev_db_start_offset += (fnode_sess->target_id * 8297 sizeof(*fw_ddb_entry)); 8298 dev_db_start_offset += offsetof(struct dev_db_entry, cookie); 8299 8300 ddb_size = sizeof(*ddb_cookie); 8301 } 8302 8303 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: start offset=%u, end offset=%u\n", 8304 __func__, dev_db_start_offset, dev_db_end_offset)); 8305 8306 if (dev_db_start_offset > dev_db_end_offset) { 8307 rc = -EIO; 8308 DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %u\n", 8309 __func__, fnode_sess->target_id)); 8310 goto exit_ddb_del; 8311 } 8312 8313 pddb = dma_alloc_coherent(&ha->pdev->dev, ddb_size, 8314 &fw_ddb_entry_dma, GFP_KERNEL); 8315 if (!pddb) { 8316 rc = -ENOMEM; 8317 DEBUG2(ql4_printk(KERN_ERR, ha, 8318 "%s: Unable to allocate dma buffer\n", 8319 __func__)); 8320 goto exit_ddb_del; 8321 } 8322 8323 if (is_qla40XX(ha)) { 8324 fw_ddb_entry = pddb; 8325 memset(fw_ddb_entry, 0, ddb_size); 8326 ddb_cookie = &fw_ddb_entry->cookie; 8327 } else { 8328 
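/* For ISP8xxx the flash offset computed above already points at the
 * cookie field, so only the two-byte cookie (not the whole dev_db_entry)
 * is read-modify-written below to invalidate the entry.
 */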
ddb_cookie = pddb; 8329 } 8330 8331 /* invalidate the cookie */ 8332 *ddb_cookie = 0xFFEE; 8333 qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, 8334 ddb_size, FLASH_OPT_RMW_COMMIT); 8335 8336 sysfs_ddb_del: 8337 target_id = fnode_sess->target_id; 8338 iscsi_destroy_flashnode_sess(fnode_sess); 8339 ql4_printk(KERN_INFO, ha, 8340 "%s: session and conn entries for flashnode %u of host %lu deleted\n", 8341 __func__, target_id, ha->host_no); 8342 exit_ddb_del: 8343 if (pddb) 8344 dma_free_coherent(&ha->pdev->dev, ddb_size, pddb, 8345 fw_ddb_entry_dma); 8346 return rc; 8347 } 8348 8349 /** 8350 * qla4xxx_sysfs_ddb_export - Create sysfs entries for firmware DDBs 8351 * @ha: pointer to adapter structure 8352 * 8353 * Export the firmware DDB for all send targets and normal targets to sysfs. 8354 **/ 8355 static int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha) 8356 { 8357 struct dev_db_entry *fw_ddb_entry = NULL; 8358 dma_addr_t fw_ddb_entry_dma; 8359 uint16_t max_ddbs; 8360 uint16_t idx = 0; 8361 int ret = QLA_SUCCESS; 8362 8363 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, 8364 sizeof(*fw_ddb_entry), 8365 &fw_ddb_entry_dma, GFP_KERNEL); 8366 if (!fw_ddb_entry) { 8367 DEBUG2(ql4_printk(KERN_ERR, ha, 8368 "%s: Unable to allocate dma buffer\n", 8369 __func__)); 8370 return -ENOMEM; 8371 } 8372 8373 max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES : 8374 MAX_DEV_DB_ENTRIES; 8375 8376 for (idx = 0; idx < max_ddbs; idx++) { 8377 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma, 8378 idx)) 8379 continue; 8380 8381 ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 0); 8382 if (ret) { 8383 ret = -EIO; 8384 break; 8385 } 8386 } 8387 8388 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, 8389 fw_ddb_entry_dma); 8390 8391 return ret; 8392 } 8393 8394 static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha) 8395 { 8396 iscsi_destroy_all_flashnode(ha->host); 8397 } 8398 8399 /** 8400 * qla4xxx_build_ddb_list - Build ddb list and setup sessions 8401 * @ha: pointer to adapter structure 8402 * @is_reset: Is this init path or reset path 8403 * 8404 * Create a list of sendtargets (st) from firmware DDBs, issue send targets 8405 * using connection open, then create the list of normal targets (nt) 8406 * from firmware DDBs. Based on the list of nt setup session and connection 8407 * objects. 8408 **/ 8409 void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset) 8410 { 8411 uint16_t tmo = 0; 8412 struct list_head list_st, list_nt; 8413 struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp; 8414 unsigned long wtime; 8415 8416 if (!test_bit(AF_LINK_UP, &ha->flags)) { 8417 set_bit(AF_BUILD_DDB_LIST, &ha->flags); 8418 ha->is_reset = is_reset; 8419 return; 8420 } 8421 8422 INIT_LIST_HEAD(&list_st); 8423 INIT_LIST_HEAD(&list_nt); 8424 8425 qla4xxx_build_st_list(ha, &list_st); 8426 8427 /* Before issuing conn open mbox, ensure all IPs states are configured 8428 * Note, conn open fails if IPs are not configured 8429 */ 8430 qla4xxx_wait_for_ip_configuration(ha); 8431 8432 /* Go thru the STs and fire the sendtargets by issuing conn open mbx */ 8433 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) { 8434 qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx); 8435 } 8436 8437 /* Wait to ensure all sendtargets are done for min 12 sec wait */ 8438 tmo = ((ha->def_timeout > LOGIN_TOV) && 8439 (ha->def_timeout < LOGIN_TOV * 10) ? 
8440 ha->def_timeout : LOGIN_TOV); 8441 8442 DEBUG2(ql4_printk(KERN_INFO, ha, 8443 "Default time to wait for build ddb %d\n", tmo)); 8444 8445 wtime = jiffies + (HZ * tmo); 8446 do { 8447 if (list_empty(&list_st)) 8448 break; 8449 8450 qla4xxx_remove_failed_ddb(ha, &list_st); 8451 schedule_timeout_uninterruptible(HZ / 10); 8452 } while (time_after(wtime, jiffies)); 8453 8454 8455 qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset); 8456 8457 qla4xxx_free_ddb_list(&list_st); 8458 qla4xxx_free_ddb_list(&list_nt); 8459 8460 qla4xxx_free_ddb_index(ha); 8461 } 8462 8463 /** 8464 * qla4xxx_wait_login_resp_boot_tgt - Wait for iSCSI boot target login 8465 * response. 8466 * @ha: pointer to adapter structure 8467 * 8468 * When the boot entry is normal iSCSI target then DF_BOOT_TGT flag will be 8469 * set in DDB and we will wait for login response of boot targets during 8470 * probe. 8471 **/ 8472 static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha) 8473 { 8474 struct ddb_entry *ddb_entry; 8475 struct dev_db_entry *fw_ddb_entry = NULL; 8476 dma_addr_t fw_ddb_entry_dma; 8477 unsigned long wtime; 8478 uint32_t ddb_state; 8479 int max_ddbs, idx, ret; 8480 8481 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : 8482 MAX_DEV_DB_ENTRIES; 8483 8484 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 8485 &fw_ddb_entry_dma, GFP_KERNEL); 8486 if (!fw_ddb_entry) { 8487 ql4_printk(KERN_ERR, ha, 8488 "%s: Unable to allocate dma buffer\n", __func__); 8489 goto exit_login_resp; 8490 } 8491 8492 wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV); 8493 8494 for (idx = 0; idx < max_ddbs; idx++) { 8495 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 8496 if (ddb_entry == NULL) 8497 continue; 8498 8499 if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) { 8500 DEBUG2(ql4_printk(KERN_INFO, ha, 8501 "%s: DDB index [%d]\n", __func__, 8502 ddb_entry->fw_ddb_index)); 8503 do { 8504 ret = qla4xxx_get_fwddb_entry(ha, 8505 ddb_entry->fw_ddb_index, 8506 fw_ddb_entry, fw_ddb_entry_dma, 8507 NULL, NULL, &ddb_state, NULL, 8508 NULL, NULL); 8509 if (ret == QLA_ERROR) 8510 goto exit_login_resp; 8511 8512 if ((ddb_state == DDB_DS_SESSION_ACTIVE) || 8513 (ddb_state == DDB_DS_SESSION_FAILED)) 8514 break; 8515 8516 schedule_timeout_uninterruptible(HZ); 8517 8518 } while ((time_after(wtime, jiffies))); 8519 8520 if (!time_after(wtime, jiffies)) { 8521 DEBUG2(ql4_printk(KERN_INFO, ha, 8522 "%s: Login response wait timer expired\n", 8523 __func__)); 8524 goto exit_login_resp; 8525 } 8526 } 8527 } 8528 8529 exit_login_resp: 8530 if (fw_ddb_entry) 8531 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 8532 fw_ddb_entry, fw_ddb_entry_dma); 8533 } 8534 8535 /** 8536 * qla4xxx_probe_adapter - callback function to probe HBA 8537 * @pdev: pointer to pci_dev structure 8538 * @pci_device_id: pointer to pci_device entry 8539 * 8540 * This routine will probe for Qlogic 4xxx iSCSI host adapters. 8541 * It returns zero if successful. It also initializes all data necessary for 8542 * the driver. 
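 *
 * Probe order, as implemented below: enable the PCI device, allocate the
 * iSCSI host, configure PCI I/O space and DMA addressing, allocate DMA
 * buffers, register the SCSI host, initialize the adapter (retrying up to
 * MAX_INIT_RETRIES), start the DPC and task workqueues, export boot targets
 * and flash DDBs to sysfs, and finally build the DDB list and log in to the
 * flash sessions.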
8543 **/ 8544 static int qla4xxx_probe_adapter(struct pci_dev *pdev, 8545 const struct pci_device_id *ent) 8546 { 8547 int ret = -ENODEV, status; 8548 struct Scsi_Host *host; 8549 struct scsi_qla_host *ha; 8550 uint8_t init_retry_count = 0; 8551 char buf[34]; 8552 struct qla4_8xxx_legacy_intr_set *nx_legacy_intr; 8553 uint32_t dev_state; 8554 8555 if (pci_enable_device(pdev)) 8556 return -1; 8557 8558 host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0); 8559 if (host == NULL) { 8560 printk(KERN_WARNING 8561 "qla4xxx: Couldn't allocate host from scsi layer!\n"); 8562 goto probe_disable_device; 8563 } 8564 8565 /* Clear our data area */ 8566 ha = to_qla_host(host); 8567 memset(ha, 0, sizeof(*ha)); 8568 8569 /* Save the information from PCI BIOS. */ 8570 ha->pdev = pdev; 8571 ha->host = host; 8572 ha->host_no = host->host_no; 8573 ha->func_num = PCI_FUNC(ha->pdev->devfn); 8574 8575 pci_enable_pcie_error_reporting(pdev); 8576 8577 /* Setup Runtime configurable options */ 8578 if (is_qla8022(ha)) { 8579 ha->isp_ops = &qla4_82xx_isp_ops; 8580 ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl; 8581 ha->qdr_sn_window = -1; 8582 ha->ddr_mn_window = -1; 8583 ha->curr_window = 255; 8584 nx_legacy_intr = &legacy_intr[ha->func_num]; 8585 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit; 8586 ha->nx_legacy_intr.tgt_status_reg = 8587 nx_legacy_intr->tgt_status_reg; 8588 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg; 8589 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; 8590 } else if (is_qla8032(ha) || is_qla8042(ha)) { 8591 ha->isp_ops = &qla4_83xx_isp_ops; 8592 ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl; 8593 } else { 8594 ha->isp_ops = &qla4xxx_isp_ops; 8595 } 8596 8597 if (is_qla80XX(ha)) { 8598 rwlock_init(&ha->hw_lock); 8599 ha->pf_bit = ha->func_num << 16; 8600 /* Set EEH reset type to fundamental if required by hba */ 8601 pdev->needs_freset = 1; 8602 } 8603 8604 /* Configure PCI I/O space. */ 8605 ret = ha->isp_ops->iospace_config(ha); 8606 if (ret) 8607 goto probe_failed_ioconfig; 8608 8609 ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n", 8610 pdev->device, pdev->irq, ha->reg); 8611 8612 qla4xxx_config_dma_addressing(ha); 8613 8614 /* Initialize lists and spinlocks. 
*/ 8615 INIT_LIST_HEAD(&ha->free_srb_q); 8616 8617 mutex_init(&ha->mbox_sem); 8618 mutex_init(&ha->chap_sem); 8619 init_completion(&ha->mbx_intr_comp); 8620 init_completion(&ha->disable_acb_comp); 8621 init_completion(&ha->idc_comp); 8622 init_completion(&ha->link_up_comp); 8623 8624 8625 spin_lock_init(&ha->hardware_lock); 8626 spin_lock_init(&ha->work_lock); 8627 8628 /* Initialize work list */ 8629 INIT_LIST_HEAD(&ha->work_list); 8630 8631 /* Allocate dma buffers */ 8632 if (qla4xxx_mem_alloc(ha)) { 8633 ql4_printk(KERN_WARNING, ha, 8634 "[ERROR] Failed to allocate memory for adapter\n"); 8635 8636 ret = -ENOMEM; 8637 goto probe_failed; 8638 } 8639 8640 host->cmd_per_lun = 3; 8641 host->max_channel = 0; 8642 host->max_lun = MAX_LUNS - 1; 8643 host->max_id = MAX_TARGETS; 8644 host->max_cmd_len = IOCB_MAX_CDB_LEN; 8645 host->can_queue = MAX_SRBS; 8646 host->transportt = qla4xxx_scsi_transport; 8647 8648 ret = scsi_init_shared_tag_map(host, MAX_SRBS); 8649 if (ret) { 8650 ql4_printk(KERN_WARNING, ha, 8651 "%s: scsi_init_shared_tag_map failed\n", __func__); 8652 goto probe_failed; 8653 } 8654 8655 pci_set_drvdata(pdev, ha); 8656 8657 ret = scsi_add_host(host, &pdev->dev); 8658 if (ret) 8659 goto probe_failed; 8660 8661 if (is_qla80XX(ha)) 8662 qla4_8xxx_get_flash_info(ha); 8663 8664 if (is_qla8032(ha) || is_qla8042(ha)) { 8665 qla4_83xx_read_reset_template(ha); 8666 /* 8667 * NOTE: If ql4xdontresethba==1, set IDC_CTRL DONTRESET_BIT0. 8668 * If DONTRESET_BIT0 is set, drivers should not set dev_state 8669 * to NEED_RESET. But if NEED_RESET is set, drivers should 8670 * honor the reset. 8671 */ 8672 if (ql4xdontresethba == 1) 8673 qla4_83xx_set_idc_dontreset(ha); 8674 } 8675 8676 /* 8677 * Initialize the Host adapter request/response queues and 8678 * firmware 8679 * NOTE: interrupts enabled upon successful completion 8680 */ 8681 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER); 8682 8683 /* Don't retry adapter initialization if IRQ allocation failed */ 8684 if (is_qla80XX(ha) && !test_bit(AF_IRQ_ATTACHED, &ha->flags)) { 8685 ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization\n", 8686 __func__); 8687 goto skip_retry_init; 8688 } 8689 8690 while ((!test_bit(AF_ONLINE, &ha->flags)) && 8691 init_retry_count++ < MAX_INIT_RETRIES) { 8692 8693 if (is_qla80XX(ha)) { 8694 ha->isp_ops->idc_lock(ha); 8695 dev_state = qla4_8xxx_rd_direct(ha, 8696 QLA8XXX_CRB_DEV_STATE); 8697 ha->isp_ops->idc_unlock(ha); 8698 if (dev_state == QLA8XXX_DEV_FAILED) { 8699 ql4_printk(KERN_WARNING, ha, "%s: don't retry " 8700 "initialize adapter. H/W is in failed state\n", 8701 __func__); 8702 break; 8703 } 8704 } 8705 DEBUG2(printk("scsi: %s: retrying adapter initialization " 8706 "(%d)\n", __func__, init_retry_count)); 8707 8708 if (ha->isp_ops->reset_chip(ha) == QLA_ERROR) 8709 continue; 8710 8711 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER); 8712 } 8713 8714 skip_retry_init: 8715 if (!test_bit(AF_ONLINE, &ha->flags)) { 8716 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n"); 8717 8718 if ((is_qla8022(ha) && ql4xdontresethba) || 8719 ((is_qla8032(ha) || is_qla8042(ha)) && 8720 qla4_83xx_idc_dontreset(ha))) { 8721 /* Put the device in failed state. */ 
*/ 8722 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n")); 8723 ha->isp_ops->idc_lock(ha); 8724 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 8725 QLA8XXX_DEV_FAILED); 8726 ha->isp_ops->idc_unlock(ha); 8727 } 8728 ret = -ENODEV; 8729 goto remove_host; 8730 } 8731 8732 /* Startup the kernel thread for this host adapter. */ 8733 DEBUG2(printk("scsi: %s: Starting kernel thread for " 8734 "qla4xxx_dpc\n", __func__)); 8735 sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no); 8736 ha->dpc_thread = create_singlethread_workqueue(buf); 8737 if (!ha->dpc_thread) { 8738 ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n"); 8739 ret = -ENODEV; 8740 goto remove_host; 8741 } 8742 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc); 8743 8744 ha->task_wq = alloc_workqueue("qla4xxx_%lu_task", WQ_MEM_RECLAIM, 1, 8745 ha->host_no); 8746 if (!ha->task_wq) { 8747 ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n"); 8748 ret = -ENODEV; 8749 goto remove_host; 8750 } 8751 8752 /* 8753 * For ISP-8XXX, request_irqs is called in qla4_8xxx_load_risc 8754 * (which is called indirectly by qla4xxx_initialize_adapter), 8755 * so that irqs will be registered after crbinit but before 8756 * mbx_intr_enable. 8757 */ 8758 if (is_qla40XX(ha)) { 8759 ret = qla4xxx_request_irqs(ha); 8760 if (ret) { 8761 ql4_printk(KERN_WARNING, ha, "Failed to reserve " 8762 "interrupt %d already in use.\n", pdev->irq); 8763 goto remove_host; 8764 } 8765 } 8766 8767 pci_save_state(ha->pdev); 8768 ha->isp_ops->enable_intrs(ha); 8769 8770 /* Start timer thread. */ 8771 qla4xxx_start_timer(ha, qla4xxx_timer, 1); 8772 8773 set_bit(AF_INIT_DONE, &ha->flags); 8774 8775 qla4_8xxx_alloc_sysfs_attr(ha); 8776 8777 printk(KERN_INFO 8778 " QLogic iSCSI HBA Driver version: %s\n" 8779 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", 8780 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev), 8781 ha->host_no, ha->fw_info.fw_major, ha->fw_info.fw_minor, 8782 ha->fw_info.fw_patch, ha->fw_info.fw_build); 8783 8784 /* Set the driver version */ 8785 if (is_qla80XX(ha)) 8786 qla4_8xxx_set_param(ha, SET_DRVR_VERSION); 8787 8788 if (qla4xxx_setup_boot_info(ha)) 8789 ql4_printk(KERN_ERR, ha, 8790 "%s: No iSCSI boot target configured\n", __func__); 8791 8792 if (qla4xxx_sysfs_ddb_export(ha)) 8793 ql4_printk(KERN_ERR, ha, 8794 "%s: Error exporting ddb to sysfs\n", __func__); 8795 8796 /* Perform the build ddb list and login to each */ 8797 qla4xxx_build_ddb_list(ha, INIT_ADAPTER); 8798 iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb); 8799 qla4xxx_wait_login_resp_boot_tgt(ha); 8800 8801 qla4xxx_create_chap_list(ha); 8802 8803 qla4xxx_create_ifaces(ha); 8804 return 0; 8805 8806 remove_host: 8807 scsi_remove_host(ha->host); 8808 8809 probe_failed: 8810 qla4xxx_free_adapter(ha); 8811 8812 probe_failed_ioconfig: 8813 pci_disable_pcie_error_reporting(pdev); 8814 scsi_host_put(ha->host); 8815 8816 probe_disable_device: 8817 pci_disable_device(pdev); 8818 8819 return ret; 8820 } 8821 8822 /** 8823 * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize 8824 * @ha: pointer to adapter structure 8825 * 8826 * Mark the other ISP-4xxx port to indicate that the driver is being removed, 8827 * so that the other port will not re-initialize while in the process of 8828 * removing the ha due to driver unload or hba hotplug. 
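 *
 * This is done by looking up the sibling iSCSI function's pci_dev (the iSCSI
 * functions on ISP4xxx are 1 and 3) and setting AF_HA_REMOVAL in that
 * function's ha flags.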
8829 **/ 8830 static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha) 8831 { 8832 struct scsi_qla_host *other_ha = NULL; 8833 struct pci_dev *other_pdev = NULL; 8834 int fn = ISP4XXX_PCI_FN_2; 8835 8836 /* iSCSI function numbers for ISP4xxx are 1 and 3 */ 8837 if (PCI_FUNC(ha->pdev->devfn) & BIT_1) 8838 fn = ISP4XXX_PCI_FN_1; 8839 8840 other_pdev = 8841 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus), 8842 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), 8843 fn)); 8844 8845 /* Get other_ha if other_pdev is valid and its state is enabled */ 8846 if (other_pdev) { 8847 if (atomic_read(&other_pdev->enable_cnt)) { 8848 other_ha = pci_get_drvdata(other_pdev); 8849 if (other_ha) { 8850 set_bit(AF_HA_REMOVAL, &other_ha->flags); 8851 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: " 8852 "Prevent %s reinit\n", __func__, 8853 dev_name(&other_ha->pdev->dev))); 8854 } 8855 } 8856 pci_dev_put(other_pdev); 8857 } 8858 } 8859 8860 static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha) 8861 { 8862 struct ddb_entry *ddb_entry; 8863 int options; 8864 int idx; 8865 8866 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { 8867 8868 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 8869 if ((ddb_entry != NULL) && 8870 (ddb_entry->ddb_type == FLASH_DDB)) { 8871 8872 options = LOGOUT_OPTION_CLOSE_SESSION; 8873 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) 8874 == QLA_ERROR) 8875 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", 8876 __func__); 8877 8878 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); 8879 /* 8880 * we have decremented the reference count of the driver 8881 * when we setup the session to have the driver unload 8882 * to be seamless without actually destroying the 8883 * session 8884 **/ 8885 try_module_get(qla4xxx_iscsi_transport.owner); 8886 iscsi_destroy_endpoint(ddb_entry->conn->ep); 8887 qla4xxx_free_ddb(ha, ddb_entry); 8888 iscsi_session_teardown(ddb_entry->sess); 8889 } 8890 } 8891 } 8892 /** 8893 * qla4xxx_remove_adapter - callback function to remove adapter. 8894 * @pdev: PCI device pointer 8895 **/ 8896 static void qla4xxx_remove_adapter(struct pci_dev *pdev) 8897 { 8898 struct scsi_qla_host *ha; 8899 8900 /* 8901 * If the PCI device is disabled then it means probe_adapter had 8902 * failed and resources already cleaned up on probe_adapter exit. 8903 */ 8904 if (!pci_is_enabled(pdev)) 8905 return; 8906 8907 ha = pci_get_drvdata(pdev); 8908 8909 if (is_qla40XX(ha)) 8910 qla4xxx_prevent_other_port_reinit(ha); 8911 8912 /* destroy iface from sysfs */ 8913 qla4xxx_destroy_ifaces(ha); 8914 8915 if ((!ql4xdisablesysfsboot) && ha->boot_kset) 8916 iscsi_boot_destroy_kset(ha->boot_kset); 8917 8918 qla4xxx_destroy_fw_ddb_session(ha); 8919 qla4_8xxx_free_sysfs_attr(ha); 8920 8921 qla4xxx_sysfs_ddb_remove(ha); 8922 scsi_remove_host(ha->host); 8923 8924 qla4xxx_free_adapter(ha); 8925 8926 scsi_host_put(ha->host); 8927 8928 pci_disable_pcie_error_reporting(pdev); 8929 pci_disable_device(pdev); 8930 } 8931 8932 /** 8933 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method. 8934 * @ha: HA context 8935 * 8936 * At exit, @ha's flags.enable_64bit_addressing is set to indicate the 8937 * supported addressing method. 
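 *
 * A 64-bit DMA mask is tried first; the driver falls back to 32-bit masks
 * when either the streaming or the consistent 64-bit mask cannot be set.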
8938 */ 8939 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha) 8940 { 8941 int retval; 8942 8943 /* Update our PCI device dma_mask for full 64 bit mask */ 8944 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) { 8945 if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) { 8946 dev_dbg(&ha->pdev->dev, 8947 "Failed to set 64 bit PCI consistent mask; " 8948 "using 32 bit.\n"); 8949 retval = pci_set_consistent_dma_mask(ha->pdev, 8950 DMA_BIT_MASK(32)); 8951 } 8952 } else 8953 retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32)); 8954 } 8955 8956 static int qla4xxx_slave_alloc(struct scsi_device *sdev) 8957 { 8958 struct iscsi_cls_session *cls_sess; 8959 struct iscsi_session *sess; 8960 struct ddb_entry *ddb; 8961 int queue_depth = QL4_DEF_QDEPTH; 8962 8963 cls_sess = starget_to_session(sdev->sdev_target); 8964 sess = cls_sess->dd_data; 8965 ddb = sess->dd_data; 8966 8967 sdev->hostdata = ddb; 8968 sdev->tagged_supported = 1; 8969 8970 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU) 8971 queue_depth = ql4xmaxqdepth; 8972 8973 scsi_activate_tcq(sdev, queue_depth); 8974 return 0; 8975 } 8976 8977 static int qla4xxx_slave_configure(struct scsi_device *sdev) 8978 { 8979 sdev->tagged_supported = 1; 8980 return 0; 8981 } 8982 8983 static void qla4xxx_slave_destroy(struct scsi_device *sdev) 8984 { 8985 scsi_deactivate_tcq(sdev, 1); 8986 } 8987 8988 static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth, 8989 int reason) 8990 { 8991 if (!ql4xqfulltracking) 8992 return -EOPNOTSUPP; 8993 8994 return iscsi_change_queue_depth(sdev, qdepth, reason); 8995 } 8996 8997 /** 8998 * qla4xxx_del_from_active_array - returns an active srb 8999 * @ha: Pointer to host adapter structure. 9000 * @index: index into the active_array 9001 * 9002 * This routine removes and returns the srb at the specified index 9003 **/ 9004 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha, 9005 uint32_t index) 9006 { 9007 struct srb *srb = NULL; 9008 struct scsi_cmnd *cmd = NULL; 9009 9010 cmd = scsi_host_find_tag(ha->host, index); 9011 if (!cmd) 9012 return srb; 9013 9014 srb = (struct srb *)CMD_SP(cmd); 9015 if (!srb) 9016 return srb; 9017 9018 /* update counters */ 9019 if (srb->flags & SRB_DMA_VALID) { 9020 ha->iocb_cnt -= srb->iocb_cnt; 9021 if (srb->cmd) 9022 srb->cmd->host_scribble = 9023 (unsigned char *)(unsigned long) MAX_SRBS; 9024 } 9025 return srb; 9026 } 9027 9028 /** 9029 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware 9030 * @ha: Pointer to host adapter structure. 9031 * @cmd: Scsi Command to wait on. 9032 * 9033 * This routine waits for the command to be returned by the Firmware 9034 * for some max time. 
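 *
 * It polls CMD_SP(cmd) roughly every two seconds, for at most EH_WAIT_CMD_TOV
 * iterations, and returns non-zero once the firmware has returned the command
 * to the OS.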
9035 **/ 9036 static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha, 9037 struct scsi_cmnd *cmd) 9038 { 9039 int done = 0; 9040 struct srb *rp; 9041 uint32_t max_wait_time = EH_WAIT_CMD_TOV; 9042 int ret = SUCCESS; 9043 9044 /* Don't wait on the command if a PCI error is being handled 9045 * by the PCI AER driver 9046 */ 9047 if (unlikely(pci_channel_offline(ha->pdev)) || 9048 (test_bit(AF_EEH_BUSY, &ha->flags))) { 9049 ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n", 9050 ha->host_no, __func__); 9051 return ret; 9052 } 9053 9054 do { 9055 /* Check to see if it has been returned to the OS */ 9056 rp = (struct srb *) CMD_SP(cmd); 9057 if (rp == NULL) { 9058 done++; 9059 break; 9060 } 9061 9062 msleep(2000); 9063 } while (max_wait_time--); 9064 9065 return done; 9066 } 9067 9068 /** 9069 * qla4xxx_wait_for_hba_online - waits for HBA to come online 9070 * @ha: Pointer to host adapter structure 9071 **/ 9072 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha) 9073 { 9074 unsigned long wait_online; 9075 9076 wait_online = jiffies + (HBA_ONLINE_TOV * HZ); 9077 while (time_before(jiffies, wait_online)) { 9078 9079 if (adapter_up(ha)) 9080 return QLA_SUCCESS; 9081 9082 msleep(2000); 9083 } 9084 9085 return QLA_ERROR; 9086 } 9087 9088 /** 9089 * qla4xxx_eh_wait_for_commands - wait for active cmds to finish. 9090 * @ha: pointer to HBA 9091 * @stgt: pointer to SCSI target 9092 * @sdev: pointer to SCSI device 9093 * 9094 * This function waits for all outstanding commands to a lun to complete. It 9095 * returns 0 if all pending commands are returned and 1 otherwise. 9096 **/ 9097 static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha, 9098 struct scsi_target *stgt, 9099 struct scsi_device *sdev) 9100 { 9101 int cnt; 9102 int status = 0; 9103 struct scsi_cmnd *cmd; 9104 9105 /* 9106 * Waiting for all commands for the designated target or dev 9107 * in the active array 9108 */ 9109 for (cnt = 0; cnt < ha->host->can_queue; cnt++) { 9110 cmd = scsi_host_find_tag(ha->host, cnt); 9111 if (cmd && stgt == scsi_target(cmd->device) && 9112 (!sdev || sdev == cmd->device)) { 9113 if (!qla4xxx_eh_wait_on_command(ha, cmd)) { 9114 status++; 9115 break; 9116 } 9117 } 9118 } 9119 return status; 9120 } 9121 9122 /** 9123 * qla4xxx_eh_abort - callback for abort task. 9124 * @cmd: Pointer to Linux's SCSI command structure 9125 * 9126 * This routine is called by the Linux OS to abort the specified 9127 * command. 
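 *
 * Returns SUCCESS if the command had already completed or if the abort task
 * mailbox command succeeds and the command is subsequently returned by the
 * firmware; otherwise FAILED is returned.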
9128 **/ 9129 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd) 9130 { 9131 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 9132 unsigned int id = cmd->device->id; 9133 unsigned int lun = cmd->device->lun; 9134 unsigned long flags; 9135 struct srb *srb = NULL; 9136 int ret = SUCCESS; 9137 int wait = 0; 9138 9139 ql4_printk(KERN_INFO, ha, 9140 "scsi%ld:%d:%d: Abort command issued cmd=%p\n", 9141 ha->host_no, id, lun, cmd); 9142 9143 spin_lock_irqsave(&ha->hardware_lock, flags); 9144 srb = (struct srb *) CMD_SP(cmd); 9145 if (!srb) { 9146 spin_unlock_irqrestore(&ha->hardware_lock, flags); 9147 return SUCCESS; 9148 } 9149 kref_get(&srb->srb_ref); 9150 spin_unlock_irqrestore(&ha->hardware_lock, flags); 9151 9152 if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) { 9153 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n", 9154 ha->host_no, id, lun)); 9155 ret = FAILED; 9156 } else { 9157 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n", 9158 ha->host_no, id, lun)); 9159 wait = 1; 9160 } 9161 9162 kref_put(&srb->srb_ref, qla4xxx_srb_compl); 9163 9164 /* Wait for command to complete */ 9165 if (wait) { 9166 if (!qla4xxx_eh_wait_on_command(ha, cmd)) { 9167 DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n", 9168 ha->host_no, id, lun)); 9169 ret = FAILED; 9170 } 9171 } 9172 9173 ql4_printk(KERN_INFO, ha, 9174 "scsi%ld:%d:%d: Abort command - %s\n", 9175 ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed"); 9176 9177 return ret; 9178 } 9179 9180 /** 9181 * qla4xxx_eh_device_reset - callback for target reset. 9182 * @cmd: Pointer to Linux's SCSI command structure 9183 * 9184 * This routine is called by the Linux OS to reset all luns on the 9185 * specified target. 9186 **/ 9187 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd) 9188 { 9189 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 9190 struct ddb_entry *ddb_entry = cmd->device->hostdata; 9191 int ret = FAILED, stat; 9192 9193 if (!ddb_entry) 9194 return ret; 9195 9196 ret = iscsi_block_scsi_eh(cmd); 9197 if (ret) 9198 return ret; 9199 ret = FAILED; 9200 9201 ql4_printk(KERN_INFO, ha, 9202 "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no, 9203 cmd->device->channel, cmd->device->id, cmd->device->lun); 9204 9205 DEBUG2(printk(KERN_INFO 9206 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x," 9207 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no, 9208 cmd, jiffies, cmd->request->timeout / HZ, 9209 ha->dpc_flags, cmd->result, cmd->allowed)); 9210 9211 /* FIXME: wait for hba to go online */ 9212 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun); 9213 if (stat != QLA_SUCCESS) { 9214 ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat); 9215 goto eh_dev_reset_done; 9216 } 9217 9218 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device), 9219 cmd->device)) { 9220 ql4_printk(KERN_INFO, ha, 9221 "DEVICE RESET FAILED - waiting for " 9222 "commands.\n"); 9223 goto eh_dev_reset_done; 9224 } 9225 9226 /* Send marker. */ 9227 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun, 9228 MM_LUN_RESET) != QLA_SUCCESS) 9229 goto eh_dev_reset_done; 9230 9231 ql4_printk(KERN_INFO, ha, 9232 "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n", 9233 ha->host_no, cmd->device->channel, cmd->device->id, 9234 cmd->device->lun); 9235 9236 ret = SUCCESS; 9237 9238 eh_dev_reset_done: 9239 9240 return ret; 9241 } 9242 9243 /** 9244 * qla4xxx_eh_target_reset - callback for target reset. 
/**
 * qla4xxx_eh_device_reset - callback for device (LUN) reset.
 * @cmd: Pointer to Linux's SCSI command structure
 *
 * This routine is called by the Linux OS to reset the LUN addressed by
 * the specified command.
 **/
static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
        struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
        struct ddb_entry *ddb_entry = cmd->device->hostdata;
        int ret = FAILED, stat;

        if (!ddb_entry)
                return ret;

        ret = iscsi_block_scsi_eh(cmd);
        if (ret)
                return ret;
        ret = FAILED;

        ql4_printk(KERN_INFO, ha,
                   "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
                   cmd->device->channel, cmd->device->id, cmd->device->lun);

        DEBUG2(printk(KERN_INFO
                      "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
                      "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
                      cmd, jiffies, cmd->request->timeout / HZ,
                      ha->dpc_flags, cmd->result, cmd->allowed));

        /* FIXME: wait for hba to go online */
        stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
        if (stat != QLA_SUCCESS) {
                ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
                goto eh_dev_reset_done;
        }

        if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
                                         cmd->device)) {
                ql4_printk(KERN_INFO, ha,
                           "DEVICE RESET FAILED - waiting for "
                           "commands.\n");
                goto eh_dev_reset_done;
        }

        /* Send marker. */
        if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
                                     MM_LUN_RESET) != QLA_SUCCESS)
                goto eh_dev_reset_done;

        ql4_printk(KERN_INFO, ha,
                   "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
                   ha->host_no, cmd->device->channel, cmd->device->id,
                   cmd->device->lun);

        ret = SUCCESS;

eh_dev_reset_done:

        return ret;
}

/**
 * qla4xxx_eh_target_reset - callback for target reset.
 * @cmd: Pointer to Linux's SCSI command structure
 *
 * This routine is called by the Linux OS to reset the target.
 **/
static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
{
        struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
        struct ddb_entry *ddb_entry = cmd->device->hostdata;
        int stat, ret;

        if (!ddb_entry)
                return FAILED;

        ret = iscsi_block_scsi_eh(cmd);
        if (ret)
                return ret;

        starget_printk(KERN_INFO, scsi_target(cmd->device),
                       "WARM TARGET RESET ISSUED.\n");

        DEBUG2(printk(KERN_INFO
                      "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
                      "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
                      ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
                      ha->dpc_flags, cmd->result, cmd->allowed));

        stat = qla4xxx_reset_target(ha, ddb_entry);
        if (stat != QLA_SUCCESS) {
                starget_printk(KERN_INFO, scsi_target(cmd->device),
                               "WARM TARGET RESET FAILED.\n");
                return FAILED;
        }

        if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
                                         NULL)) {
                starget_printk(KERN_INFO, scsi_target(cmd->device),
                               "WARM TARGET DEVICE RESET FAILED - "
                               "waiting for commands.\n");
                return FAILED;
        }

        /* Send marker. */
        if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
                                     MM_TGT_WARM_RESET) != QLA_SUCCESS) {
                starget_printk(KERN_INFO, scsi_target(cmd->device),
                               "WARM TARGET DEVICE RESET FAILED - "
                               "marker iocb failed.\n");
                return FAILED;
        }

        starget_printk(KERN_INFO, scsi_target(cmd->device),
                       "WARM TARGET RESET SUCCEEDED.\n");
        return SUCCESS;
}
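/*
 * Both reset handlers above follow the same three-step pattern: issue the
 * reset mailbox command (qla4xxx_reset_lun() or qla4xxx_reset_target()),
 * drain commands still outstanding to the affected target or LUN via
 * qla4xxx_eh_wait_for_commands(), and finally queue a marker IOCB
 * (MM_LUN_RESET or MM_TGT_WARM_RESET) to tell the firmware that the reset
 * has been handled before normal command processing resumes.
 */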
/**
 * qla4xxx_is_eh_active - check if error handler is running
 * @shost: Pointer to SCSI Host struct
 *
 * This routine determines whether the host reset was invoked from the
 * SCSI error-handler thread or from an application such as sg_reset.
 **/
static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
{
        if (shost->shost_state == SHOST_RECOVERY)
                return 1;
        return 0;
}

/**
 * qla4xxx_eh_host_reset - kernel callback
 * @cmd: Pointer to Linux's SCSI command structure
 *
 * This routine is invoked by the Linux kernel to perform fatal error
 * recovery on the specified adapter.
 **/
static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
        int return_status = FAILED;
        struct scsi_qla_host *ha;

        ha = to_qla_host(cmd->device->host);

        if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)
                qla4_83xx_set_idc_dontreset(ha);

        /*
         * For ISP8324 and ISP8042, if IDC_CTRL DONTRESET_BIT0 is set by other
         * protocol drivers, we should not set device_state to NEED_RESET
         */
        if (ql4xdontresethba ||
            ((is_qla8032(ha) || is_qla8042(ha)) &&
             qla4_83xx_idc_dontreset(ha))) {
                DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
                              ha->host_no, __func__));

                /* Clear outstanding srbs in queues */
                if (qla4xxx_is_eh_active(cmd->device->host))
                        qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);

                return FAILED;
        }

        ql4_printk(KERN_INFO, ha,
                   "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
                   cmd->device->channel, cmd->device->id, cmd->device->lun);

        if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
                DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. "
                              "Adapter DEAD.\n", ha->host_no,
                              cmd->device->channel, __func__));

                return FAILED;
        }

        if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
                if (is_qla80XX(ha))
                        set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
                else
                        set_bit(DPC_RESET_HA, &ha->dpc_flags);
        }

        if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
                return_status = SUCCESS;

        ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
                   return_status == FAILED ? "FAILED" : "SUCCEEDED");

        return return_status;
}

static int qla4xxx_context_reset(struct scsi_qla_host *ha)
{
        uint32_t mbox_cmd[MBOX_REG_COUNT];
        uint32_t mbox_sts[MBOX_REG_COUNT];
        struct addr_ctrl_blk_def *acb = NULL;
        uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
        int rval = QLA_SUCCESS;
        dma_addr_t acb_dma;

        acb = dma_alloc_coherent(&ha->pdev->dev,
                                 sizeof(struct addr_ctrl_blk_def),
                                 &acb_dma, GFP_KERNEL);
        if (!acb) {
                ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
                           __func__);
                rval = -ENOMEM;
                goto exit_port_reset;
        }

        memset(acb, 0, acb_len);

        rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
        if (rval != QLA_SUCCESS) {
                rval = -EIO;
                goto exit_free_acb;
        }

        rval = qla4xxx_disable_acb(ha);
        if (rval != QLA_SUCCESS) {
                rval = -EIO;
                goto exit_free_acb;
        }

        wait_for_completion_timeout(&ha->disable_acb_comp,
                                    DISABLE_ACB_TOV * HZ);

        rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
        if (rval != QLA_SUCCESS) {
                rval = -EIO;
                goto exit_free_acb;
        }

exit_free_acb:
        dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
                          acb, acb_dma);
exit_port_reset:
        DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
                          rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
        return rval;
}

static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
{
        struct scsi_qla_host *ha = to_qla_host(shost);
        int rval = QLA_SUCCESS;
        uint32_t idc_ctrl;

        if (ql4xdontresethba) {
                DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
                                  __func__));
                rval = -EPERM;
                goto exit_host_reset;
        }

        if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
                goto recover_adapter;

        switch (reset_type) {
        case SCSI_ADAPTER_RESET:
                set_bit(DPC_RESET_HA, &ha->dpc_flags);
                break;
        case SCSI_FIRMWARE_RESET:
                if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
                        if (is_qla80XX(ha))
                                /* set firmware context reset */
                                set_bit(DPC_RESET_HA_FW_CONTEXT,
                                        &ha->dpc_flags);
                        else {
                                rval = qla4xxx_context_reset(ha);
                                goto exit_host_reset;
                        }
                }
                break;
        }

recover_adapter:
        /* For ISP8324 and ISP8042 set graceful reset bit in IDC_DRV_CTRL if
         * reset is issued by application */
        if ((is_qla8032(ha) || is_qla8042(ha)) &&
            test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
                idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
                qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
                                 (idc_ctrl | GRACEFUL_RESET_BIT1));
        }

        rval = qla4xxx_recover_adapter(ha);
        if (rval != QLA_SUCCESS) {
                DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
                                  __func__));
                rval = -EIO;
        }

exit_host_reset:
        return rval;
}

/* The PCI AER driver recovers from all correctable errors without
 * driver intervention.  For uncorrectable errors it invokes the
 * following callbacks in the device driver:
 *
 *  - Fatal errors     - link_reset
 *  - Non-fatal errors - pci_error_detected(), which returns
 *                       CAN_RECOVER, NEED_RESET or DISCONNECT
 *
 * Depending on the result, the PCI AER driver then calls:
 *  CAN_RECOVER - pci_mmio_enabled(), which returns RECOVERED, or
 *                NEED_RESET if the firmware is hung
 *  NEED_RESET  - slot_reset()
 *  DISCONNECT  - the device is dead and cannot recover
 *  RECOVERED   - pci_resume()
 */
static pci_ers_result_t
qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
        struct scsi_qla_host *ha = pci_get_drvdata(pdev);

        ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
                   ha->host_no, __func__, state);

        if (!is_aer_supported(ha))
                return PCI_ERS_RESULT_NONE;

        switch (state) {
        case pci_channel_io_normal:
                clear_bit(AF_EEH_BUSY, &ha->flags);
                return PCI_ERS_RESULT_CAN_RECOVER;
        case pci_channel_io_frozen:
                set_bit(AF_EEH_BUSY, &ha->flags);
                qla4xxx_mailbox_premature_completion(ha);
                qla4xxx_free_irqs(ha);
                pci_disable_device(pdev);
                /* Return back all IOs */
                qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
                return PCI_ERS_RESULT_NEED_RESET;
        case pci_channel_io_perm_failure:
                set_bit(AF_EEH_BUSY, &ha->flags);
                set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
                qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
                return PCI_ERS_RESULT_DISCONNECT;
        }
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * qla4xxx_pci_mmio_enabled() gets called if
 * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
 * and read/write access to the device still works.
 **/
static pci_ers_result_t
qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
{
        struct scsi_qla_host *ha = pci_get_drvdata(pdev);

        if (!is_aer_supported(ha))
                return PCI_ERS_RESULT_NONE;

        return PCI_ERS_RESULT_RECOVERED;
}
static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
{
        uint32_t rval = QLA_ERROR;
        int fn;
        struct pci_dev *other_pdev = NULL;

        ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);

        set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);

        if (test_bit(AF_ONLINE, &ha->flags)) {
                clear_bit(AF_ONLINE, &ha->flags);
                clear_bit(AF_LINK_UP, &ha->flags);
                iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
                qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
        }

        fn = PCI_FUNC(ha->pdev->devfn);
        while (fn > 0) {
                fn--;
                ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
                           "func %x\n", ha->host_no, __func__, fn);
                /* Get the PCI device for the given domain, bus and
                 * slot/function number */
                other_pdev = pci_get_domain_bus_and_slot(
                                pci_domain_nr(ha->pdev->bus),
                                ha->pdev->bus->number,
                                PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), fn));

                if (!other_pdev)
                        continue;

                if (atomic_read(&other_pdev->enable_cnt)) {
                        ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
                                   "func in enabled state%x\n", ha->host_no,
                                   __func__, fn);
                        pci_dev_put(other_pdev);
                        break;
                }
                pci_dev_put(other_pdev);
        }

        /* The first function on the card (the reset owner) starts and
         * initializes the firmware.  The other functions on the card
         * only reset their firmware context.
         */
        if (!fn) {
                ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
                           "0x%x is the owner\n", ha->host_no, __func__,
                           ha->pdev->devfn);

                ha->isp_ops->idc_lock(ha);
                qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
                                    QLA8XXX_DEV_COLD);
                ha->isp_ops->idc_unlock(ha);

                rval = qla4_8xxx_update_idc_reg(ha);
                if (rval == QLA_ERROR) {
                        ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n",
                                   ha->host_no, __func__);
                        ha->isp_ops->idc_lock(ha);
                        qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
                                            QLA8XXX_DEV_FAILED);
                        ha->isp_ops->idc_unlock(ha);
                        goto exit_error_recovery;
                }

                clear_bit(AF_FW_RECOVERY, &ha->flags);
                rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);

                if (rval != QLA_SUCCESS) {
                        ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
                                   "FAILED\n", ha->host_no, __func__);
                        ha->isp_ops->idc_lock(ha);
                        qla4_8xxx_clear_drv_active(ha);
                        qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
                                            QLA8XXX_DEV_FAILED);
                        ha->isp_ops->idc_unlock(ha);
                } else {
                        ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
                                   "READY\n", ha->host_no, __func__);
                        ha->isp_ops->idc_lock(ha);
                        qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
                                            QLA8XXX_DEV_READY);
                        /* Clear driver state register */
                        qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0);
                        qla4_8xxx_set_drv_active(ha);
                        ha->isp_ops->idc_unlock(ha);
                        ha->isp_ops->enable_intrs(ha);
                }
        } else {
                ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
                           "the reset owner\n", ha->host_no, __func__,
                           ha->pdev->devfn);
                if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) ==
                     QLA8XXX_DEV_READY)) {
                        clear_bit(AF_FW_RECOVERY, &ha->flags);
                        rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
                        if (rval == QLA_SUCCESS)
                                ha->isp_ops->enable_intrs(ha);

                        ha->isp_ops->idc_lock(ha);
                        qla4_8xxx_set_drv_active(ha);
                        ha->isp_ops->idc_unlock(ha);
                }
        }
exit_error_recovery:
        clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
        return rval;
}
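/*
 * Reset-owner selection in qla4_8xxx_error_recovery(): the function scans
 * the lower-numbered PCI functions of the same slot for one that is
 * already enabled.  If the scan ends at function 0, this instance acts as
 * the reset owner and drives QLA8XXX_CRB_DEV_STATE from COLD back to
 * READY before re-enabling interrupts; a non-owner function only
 * re-initializes its own firmware context once it sees the device state
 * at READY.
 */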
static pci_ers_result_t
qla4xxx_pci_slot_reset(struct pci_dev *pdev)
{
        pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
        struct scsi_qla_host *ha = pci_get_drvdata(pdev);
        int rc;

        ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
                   ha->host_no, __func__);

        if (!is_aer_supported(ha))
                return PCI_ERS_RESULT_NONE;

        /* Restore the saved state of the PCIe device -
         * BAR registers, PCI config space, PCI-X, MSI,
         * IOV states
         */
        pci_restore_state(pdev);

        /* pci_restore_state() clears the device's saved_state flag, so
         * save the state again to allow a later restore.
         */
        pci_save_state(pdev);

        /* Initialize device or resume if in suspended state */
        rc = pci_enable_device(pdev);
        if (rc) {
                ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
                           "device after reset\n", ha->host_no, __func__);
                goto exit_slot_reset;
        }

        ha->isp_ops->disable_intrs(ha);

        if (is_qla80XX(ha)) {
                if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
                        ret = PCI_ERS_RESULT_RECOVERED;
                        goto exit_slot_reset;
                } else
                        goto exit_slot_reset;
        }

exit_slot_reset:
        ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n",
                   ha->host_no, __func__, ret);
        return ret;
}

static void
qla4xxx_pci_resume(struct pci_dev *pdev)
{
        struct scsi_qla_host *ha = pci_get_drvdata(pdev);
        int ret;

        ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
                   ha->host_no, __func__);

        ret = qla4xxx_wait_for_hba_online(ha);
        if (ret != QLA_SUCCESS) {
                ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
                           "resume I/O from slot/link_reset\n", ha->host_no,
                           __func__);
        }

        pci_cleanup_aer_uncorrect_error_status(pdev);
        clear_bit(AF_EEH_BUSY, &ha->flags);
}

static const struct pci_error_handlers qla4xxx_err_handler = {
        .error_detected = qla4xxx_pci_error_detected,
        .mmio_enabled = qla4xxx_pci_mmio_enabled,
        .slot_reset = qla4xxx_pci_slot_reset,
        .resume = qla4xxx_pci_resume,
};
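/*
 * qla4xxx_err_handler is hooked into qla4xxx_pci_driver below, so the AER
 * core drives recovery through the callbacks above: error_detected(),
 * then mmio_enabled() and/or slot_reset() depending on the returned
 * result, and finally resume() once the link is usable again.  The
 * AF_EEH_BUSY flag set in qla4xxx_pci_error_detected() and cleared in
 * qla4xxx_pci_resume() is what makes qla4xxx_eh_wait_on_command() earlier
 * in this file skip waiting on commands while the PCI channel is down.
 */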
static struct pci_device_id qla4xxx_pci_tbl[] = {
        {
                .vendor = PCI_VENDOR_ID_QLOGIC,
                .device = PCI_DEVICE_ID_QLOGIC_ISP4010,
                .subvendor = PCI_ANY_ID,
                .subdevice = PCI_ANY_ID,
        },
        {
                .vendor = PCI_VENDOR_ID_QLOGIC,
                .device = PCI_DEVICE_ID_QLOGIC_ISP4022,
                .subvendor = PCI_ANY_ID,
                .subdevice = PCI_ANY_ID,
        },
        {
                .vendor = PCI_VENDOR_ID_QLOGIC,
                .device = PCI_DEVICE_ID_QLOGIC_ISP4032,
                .subvendor = PCI_ANY_ID,
                .subdevice = PCI_ANY_ID,
        },
        {
                .vendor = PCI_VENDOR_ID_QLOGIC,
                .device = PCI_DEVICE_ID_QLOGIC_ISP8022,
                .subvendor = PCI_ANY_ID,
                .subdevice = PCI_ANY_ID,
        },
        {
                .vendor = PCI_VENDOR_ID_QLOGIC,
                .device = PCI_DEVICE_ID_QLOGIC_ISP8324,
                .subvendor = PCI_ANY_ID,
                .subdevice = PCI_ANY_ID,
        },
        {
                .vendor = PCI_VENDOR_ID_QLOGIC,
                .device = PCI_DEVICE_ID_QLOGIC_ISP8042,
                .subvendor = PCI_ANY_ID,
                .subdevice = PCI_ANY_ID,
        },
        {0, 0},
};
MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);

static struct pci_driver qla4xxx_pci_driver = {
        .name           = DRIVER_NAME,
        .id_table       = qla4xxx_pci_tbl,
        .probe          = qla4xxx_probe_adapter,
        .remove         = qla4xxx_remove_adapter,
        .err_handler    = &qla4xxx_err_handler,
};

static int __init qla4xxx_module_init(void)
{
        int ret;

        /* Allocate cache for SRBs. */
        srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
                                       SLAB_HWCACHE_ALIGN, NULL);
        if (srb_cachep == NULL) {
                printk(KERN_ERR
                       "%s: Unable to allocate SRB cache..."
                       "Failing load!\n", DRIVER_NAME);
                ret = -ENOMEM;
                goto no_srp_cache;
        }

        /* Derive version string. */
        strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
        if (ql4xextended_error_logging)
                strcat(qla4xxx_version_str, "-debug");

        qla4xxx_scsi_transport =
                iscsi_register_transport(&qla4xxx_iscsi_transport);
        if (!qla4xxx_scsi_transport) {
                ret = -ENODEV;
                goto release_srb_cache;
        }

        ret = pci_register_driver(&qla4xxx_pci_driver);
        if (ret)
                goto unregister_transport;

        printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
        return 0;

unregister_transport:
        iscsi_unregister_transport(&qla4xxx_iscsi_transport);
release_srb_cache:
        kmem_cache_destroy(srb_cachep);
no_srp_cache:
        return ret;
}

static void __exit qla4xxx_module_exit(void)
{
        pci_unregister_driver(&qla4xxx_pci_driver);
        iscsi_unregister_transport(&qla4xxx_iscsi_transport);
        kmem_cache_destroy(srb_cachep);
}

module_init(qla4xxx_module_init);
module_exit(qla4xxx_module_exit);

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLA4XXX_DRIVER_VERSION);
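/*
 * qla4xxx_module_exit() tears down in the reverse order of
 * qla4xxx_module_init(): the PCI driver is unregistered first (removing
 * all adapter instances), then the iSCSI transport, and finally the SRB
 * kmem cache is destroyed once nothing can allocate from it anymore.
 *
 * Illustrative usage only: parameters consulted in this file, such as
 * ql4xdontresethba, are ordinary module parameters and can be set at load
 * time, e.g. "modprobe qla4xxx ql4xdontresethba=1", or, where declared
 * writable, changed later under /sys/module/qla4xxx/parameters/.
 */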