/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/iscsi_boot_sysfs.h>
#include <linux/inet.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>

#include "ql4_def.h"
#include "ql4_version.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
#include "ql4_83xx.h"

/*
 * Driver version
 */
static char qla4xxx_version_str[40];

/*
 * SRB allocation cache
 */
static struct kmem_cache *srb_cachep;

/*
 * Module parameter information and variables
 */
static int ql4xdisablesysfsboot = 1;
module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdisablesysfsboot,
		 " Set to disable exporting boot targets to sysfs.\n"
		 "\t\t 0 - Export boot targets\n"
		 "\t\t 1 - Do not export boot targets (Default)");

int ql4xdontresethba;
module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdontresethba,
		 " Don't reset the HBA for driver recovery.\n"
		 "\t\t 0 - It will reset HBA (Default)\n"
		 "\t\t 1 - It will NOT reset HBA");

int ql4xextended_error_logging;
module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xextended_error_logging,
		 " Option to enable extended error logging.\n"
		 "\t\t 0 - no logging (Default)\n"
		 "\t\t 2 - debug logging");

int ql4xenablemsix = 1;
module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql4xenablemsix,
		 " Set to enable MSI or MSI-X interrupt mechanism.\n"
		 "\t\t 0 = enable INTx interrupt mechanism.\n"
		 "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n"
		 "\t\t 2 = enable MSI interrupt mechanism.");

#define QL4_DEF_QDEPTH 32
static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xmaxqdepth,
		 " Maximum queue depth to report for target devices.\n"
		 "\t\t Default: 32.");

static int ql4xqfulltracking = 1;
module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xqfulltracking,
		 " Enable or disable dynamic tracking and adjustment of\n"
		 "\t\t scsi device queue depth.\n"
		 "\t\t 0 - Disable.\n"
		 "\t\t 1 - Enable. (Default)");

static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
MODULE_PARM_DESC(ql4xsess_recovery_tmo,
		 " Target Session Recovery Timeout.\n"
		 "\t\t Default: 120 sec.");

int ql4xmdcapmask = 0;
module_param(ql4xmdcapmask, int, S_IRUGO);
MODULE_PARM_DESC(ql4xmdcapmask,
		 " Set the Minidump driver capture mask level.\n"
		 "\t\t Default is 0 (firmware default capture mask)\n"
		 "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF");

int ql4xenablemd = 1;
module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xenablemd,
		 " Set to enable minidump.\n"
		 "\t\t 0 - disable minidump\n"
		 "\t\t 1 - enable minidump (Default)");
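/*
 * Example usage (illustrative only): the parameters above can be set at
 * module load time, e.g.
 *
 *	modprobe qla4xxx ql4xdontresethba=1 ql4xextended_error_logging=2
 *
 * and the ones declared with S_IWUSR can also be changed at runtime via
 * /sys/module/qla4xxx/parameters/<name>.
 */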
static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
/*
 * SCSI host template entry points
 */
static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);

/*
 * iSCSI template entry points
 */
static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
				     enum iscsi_param param, char *buf);
static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
				  enum iscsi_param param, char *buf);
static int qla4xxx_host_get_param(struct Scsi_Host *shost,
				  enum iscsi_host_param param, char *buf);
static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data,
				   uint32_t len);
static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
				   enum iscsi_param_type param_type,
				   int param, char *buf);
static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
						 struct sockaddr *dst_addr,
						 int non_blocking);
static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
				enum iscsi_param param, char *buf);
static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
static struct iscsi_cls_conn *
qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
			     struct iscsi_cls_conn *cls_conn,
			     uint64_t transport_fd, int is_leading);
static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
static struct iscsi_cls_session *
qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
		       uint16_t qdepth, uint32_t initial_cmdsn);
static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
static void qla4xxx_task_work(struct work_struct *wdata);
static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
static int qla4xxx_task_xmit(struct iscsi_task *);
static void qla4xxx_task_cleanup(struct iscsi_task *);
static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
				   struct iscsi_stats *stats);
static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
			     uint32_t iface_type, uint32_t payload_size,
			     uint32_t pid, struct sockaddr *dst_addr);
static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
				 uint32_t *num_entries, char *buf);
static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data,
				  int len);
static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len);

/*
 * SCSI host template entry points
 */
static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
static int qla4xxx_slave_alloc(struct scsi_device *device);
static umode_t qla4_attr_is_visible(int param_type, int param);
static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);

/*
 * iSCSI Flash DDB sysfs entry points
 */
static int
qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
			    struct iscsi_bus_flash_conn *fnode_conn,
			    void *data, int len);
static int
qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
			    int param, char *buf);
static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
				 int len);
static int
qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess);
static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
				   struct iscsi_bus_flash_conn *fnode_conn);
static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
				    struct iscsi_bus_flash_conn *fnode_conn);
static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess);

static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
	QLA82XX_LEGACY_INTR_CONFIG;

static struct scsi_host_template qla4xxx_driver_template = {
	.module = THIS_MODULE,
	.name = DRIVER_NAME,
	.proc_name = DRIVER_NAME,
	.queuecommand = qla4xxx_queuecommand,

	.eh_abort_handler = qla4xxx_eh_abort,
	.eh_device_reset_handler = qla4xxx_eh_device_reset,
	.eh_target_reset_handler = qla4xxx_eh_target_reset,
	.eh_host_reset_handler = qla4xxx_eh_host_reset,
	.eh_timed_out = qla4xxx_eh_cmd_timed_out,

	.slave_alloc = qla4xxx_slave_alloc,
	.change_queue_depth = scsi_change_queue_depth,

	.this_id = -1,
	.cmd_per_lun = 3,
	.use_clustering = ENABLE_CLUSTERING,
	.sg_tablesize = SG_ALL,

	.max_sectors = 0xFFFF,
	.shost_attrs = qla4xxx_host_attrs,
	.host_reset = qla4xxx_host_reset,
	.vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
};

static struct iscsi_transport qla4xxx_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRIVER_NAME,
	.caps = CAP_TEXT_NEGO |
		CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
		CAP_DATADGST | CAP_LOGIN_OFFLOAD |
		CAP_MULTI_R2T,
	.attr_is_visible = qla4_attr_is_visible,
	.create_session = qla4xxx_session_create,
	.destroy_session = qla4xxx_session_destroy,
	.start_conn = qla4xxx_conn_start,
	.create_conn = qla4xxx_conn_create,
	.bind_conn = qla4xxx_conn_bind,
	.stop_conn = iscsi_conn_stop,
	.destroy_conn = qla4xxx_conn_destroy,
	.set_param = iscsi_set_param,
	.get_conn_param = qla4xxx_conn_get_param,
	.get_session_param = qla4xxx_session_get_param,
	.get_ep_param = qla4xxx_get_ep_param,
	.ep_connect = qla4xxx_ep_connect,
	.ep_poll = qla4xxx_ep_poll,
	.ep_disconnect = qla4xxx_ep_disconnect,
	.get_stats = qla4xxx_conn_get_stats,
	.send_pdu = iscsi_conn_send_pdu,
	.xmit_task = qla4xxx_task_xmit,
	.cleanup_task = qla4xxx_task_cleanup,
	.alloc_pdu = qla4xxx_alloc_pdu,

	.get_host_param = qla4xxx_host_get_param,
	.set_iface_param = qla4xxx_iface_set_param,
	.get_iface_param = qla4xxx_get_iface_param,
	.bsg_request = qla4xxx_bsg_request,
	.send_ping = qla4xxx_send_ping,
	.get_chap = qla4xxx_get_chap_list,
	.delete_chap = qla4xxx_delete_chap,
	.set_chap = qla4xxx_set_chap_entry,
	.get_flashnode_param = qla4xxx_sysfs_ddb_get_param,
	.set_flashnode_param = qla4xxx_sysfs_ddb_set_param,
	.new_flashnode = qla4xxx_sysfs_ddb_add,
	.del_flashnode = qla4xxx_sysfs_ddb_delete,
	.login_flashnode = qla4xxx_sysfs_ddb_login,
	.logout_flashnode = qla4xxx_sysfs_ddb_logout,
	.logout_flashnode_sid = qla4xxx_sysfs_ddb_logout_sid,
	.get_host_stats = qla4xxx_get_host_stats,
};

static struct scsi_transport_template *qla4xxx_scsi_transport;

static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
			     uint32_t iface_type, uint32_t payload_size,
			     uint32_t pid, struct sockaddr *dst_addr)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct sockaddr_in *addr;
	struct sockaddr_in6 *addr6;
	uint32_t options = 0;
	uint8_t ipaddr[IPv6_ADDR_LEN];
	int rval;

	memset(ipaddr, 0, IPv6_ADDR_LEN);
	/* IPv4 to IPv4 */
	if ((iface_type == ISCSI_IFACE_TYPE_IPV4) &&
	    (dst_addr->sa_family == AF_INET)) {
		addr = (struct sockaddr_in *)dst_addr;
		memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN);
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 "
				  "dest: %pI4\n", __func__,
				  &ha->ip_config.ip_address, ipaddr));
		rval = qla4xxx_ping_iocb(ha, options, payload_size, pid,
					 ipaddr);
		if (rval)
			rval = -EINVAL;
	} else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) &&
		   (dst_addr->sa_family == AF_INET6)) {
		/* IPv6 to IPv6 */
		addr6 = (struct sockaddr_in6 *)dst_addr;
		memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN);

		options |= PING_IPV6_PROTOCOL_ENABLE;

		/* Ping using LinkLocal address */
		if ((iface_num == 0) || (iface_num == 1)) {
			DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping "
					  "src: %pI6 dest: %pI6\n", __func__,
					  &ha->ip_config.ipv6_link_local_addr,
					  ipaddr));
			options |= PING_IPV6_LINKLOCAL_ADDR;
			rval = qla4xxx_ping_iocb(ha, options, payload_size,
						 pid, ipaddr);
		} else {
			ql4_printk(KERN_WARNING, ha, "%s: iface num = %d "
				   "not supported\n", __func__, iface_num);
			rval = -ENOSYS;
			goto exit_send_ping;
		}

		/*
		 * If ping using LinkLocal address fails, try ping using
		 * IPv6 address
		 */
		if (rval != QLA_SUCCESS) {
			options &= ~PING_IPV6_LINKLOCAL_ADDR;
			if (iface_num == 0) {
				options |= PING_IPV6_ADDR0;
				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
						  "Ping src: %pI6 "
						  "dest: %pI6\n", __func__,
						  &ha->ip_config.ipv6_addr0,
						  ipaddr));
			} else if (iface_num == 1) {
				options |= PING_IPV6_ADDR1;
				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
						  "Ping src: %pI6 "
						  "dest: %pI6\n", __func__,
						  &ha->ip_config.ipv6_addr1,
						  ipaddr));
			}
			rval = qla4xxx_ping_iocb(ha, options, payload_size,
						 pid, ipaddr);
			if (rval)
				rval = -EINVAL;
		}
	} else
		rval = -ENOSYS;
exit_send_ping:
	return rval;
}

static umode_t qla4_attr_is_visible(int param_type, int param)
{
	switch (param_type) {
	case ISCSI_HOST_PARAM:
		switch (param) {
		case ISCSI_HOST_PARAM_HWADDRESS:
		case ISCSI_HOST_PARAM_IPADDRESS:
		case ISCSI_HOST_PARAM_INITIATOR_NAME:
		case ISCSI_HOST_PARAM_PORT_STATE:
		case ISCSI_HOST_PARAM_PORT_SPEED:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_PARAM:
		switch (param) {
		case ISCSI_PARAM_PERSISTENT_ADDRESS:
		case ISCSI_PARAM_PERSISTENT_PORT:
		case ISCSI_PARAM_CONN_ADDRESS:
		case ISCSI_PARAM_CONN_PORT:
		case ISCSI_PARAM_TARGET_NAME:
		case ISCSI_PARAM_TPGT:
		case ISCSI_PARAM_TARGET_ALIAS:
		case ISCSI_PARAM_MAX_BURST:
		case ISCSI_PARAM_MAX_R2T:
		case ISCSI_PARAM_FIRST_BURST:
		case ISCSI_PARAM_MAX_RECV_DLENGTH:
		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		case ISCSI_PARAM_IFACE_NAME:
		case ISCSI_PARAM_CHAP_OUT_IDX:
		case ISCSI_PARAM_CHAP_IN_IDX:
		case ISCSI_PARAM_USERNAME:
		case ISCSI_PARAM_PASSWORD:
		case ISCSI_PARAM_USERNAME_IN:
		case ISCSI_PARAM_PASSWORD_IN:
		case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
		case ISCSI_PARAM_DISCOVERY_SESS:
		case ISCSI_PARAM_PORTAL_TYPE:
		case ISCSI_PARAM_CHAP_AUTH_EN:
		case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
		case ISCSI_PARAM_BIDI_CHAP_EN:
		case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
		case ISCSI_PARAM_DEF_TIME2WAIT:
		case ISCSI_PARAM_DEF_TIME2RETAIN:
		case ISCSI_PARAM_HDRDGST_EN:
		case ISCSI_PARAM_DATADGST_EN:
		case ISCSI_PARAM_INITIAL_R2T_EN:
		case ISCSI_PARAM_IMM_DATA_EN:
		case ISCSI_PARAM_PDU_INORDER_EN:
		case ISCSI_PARAM_DATASEQ_INORDER_EN:
		case ISCSI_PARAM_MAX_SEGMENT_SIZE:
		case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
		case ISCSI_PARAM_TCP_WSF_DISABLE:
		case ISCSI_PARAM_TCP_NAGLE_DISABLE:
		case ISCSI_PARAM_TCP_TIMER_SCALE:
		case ISCSI_PARAM_TCP_TIMESTAMP_EN:
		case ISCSI_PARAM_TCP_XMIT_WSF:
		case ISCSI_PARAM_TCP_RECV_WSF:
		case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
		case ISCSI_PARAM_IPV4_TOS:
		case ISCSI_PARAM_IPV6_TC:
		case ISCSI_PARAM_IPV6_FLOW_LABEL:
		case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
		case ISCSI_PARAM_KEEPALIVE_TMO:
		case ISCSI_PARAM_LOCAL_PORT:
		case ISCSI_PARAM_ISID:
		case ISCSI_PARAM_TSID:
		case ISCSI_PARAM_DEF_TASKMGMT_TMO:
		case ISCSI_PARAM_ERL:
		case ISCSI_PARAM_STATSN:
		case ISCSI_PARAM_EXP_STATSN:
		case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
		case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
		case ISCSI_PARAM_LOCAL_IPADDR:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_NET_PARAM:
		switch (param) {
		case ISCSI_NET_PARAM_IPV4_ADDR:
		case ISCSI_NET_PARAM_IPV4_SUBNET:
		case ISCSI_NET_PARAM_IPV4_GW:
		case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
		case ISCSI_NET_PARAM_IFACE_ENABLE:
		case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
		case ISCSI_NET_PARAM_IPV6_ADDR:
		case ISCSI_NET_PARAM_IPV6_ROUTER:
		case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
		case ISCSI_NET_PARAM_VLAN_ID:
		case ISCSI_NET_PARAM_VLAN_PRIORITY:
		case ISCSI_NET_PARAM_VLAN_ENABLED:
		case ISCSI_NET_PARAM_MTU:
		case ISCSI_NET_PARAM_PORT:
		case ISCSI_NET_PARAM_IPADDR_STATE:
		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE:
		case ISCSI_NET_PARAM_IPV6_ROUTER_STATE:
		case ISCSI_NET_PARAM_DELAYED_ACK_EN:
		case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
		case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
		case ISCSI_NET_PARAM_TCP_WSF:
		case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
		case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
		case ISCSI_NET_PARAM_CACHE_ID:
		case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
		case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
		case ISCSI_NET_PARAM_IPV4_TOS_EN:
		case ISCSI_NET_PARAM_IPV4_TOS:
		case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
		case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
		case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
		case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
		case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
		case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
		case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
		case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
		case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
		case ISCSI_NET_PARAM_REDIRECT_EN:
		case ISCSI_NET_PARAM_IPV4_TTL:
		case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
		case ISCSI_NET_PARAM_IPV6_MLD_EN:
		case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
		case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
		case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
		case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
		case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
		case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
		case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
		case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_IFACE_PARAM:
		switch (param) {
		case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
		case ISCSI_IFACE_PARAM_HDRDGST_EN:
		case ISCSI_IFACE_PARAM_DATADGST_EN:
		case ISCSI_IFACE_PARAM_IMM_DATA_EN:
		case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
		case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
		case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
		case ISCSI_IFACE_PARAM_ERL:
		case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
		case ISCSI_IFACE_PARAM_FIRST_BURST:
		case ISCSI_IFACE_PARAM_MAX_R2T:
		case ISCSI_IFACE_PARAM_MAX_BURST:
		case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
		case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
		case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
		case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
		case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
		case ISCSI_IFACE_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_FLASHNODE_PARAM:
		switch (param) {
		case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
		case ISCSI_FLASHNODE_PORTAL_TYPE:
		case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
		case ISCSI_FLASHNODE_DISCOVERY_SESS:
		case ISCSI_FLASHNODE_ENTRY_EN:
		case ISCSI_FLASHNODE_HDR_DGST_EN:
		case ISCSI_FLASHNODE_DATA_DGST_EN:
		case ISCSI_FLASHNODE_IMM_DATA_EN:
		case ISCSI_FLASHNODE_INITIAL_R2T_EN:
		case ISCSI_FLASHNODE_DATASEQ_INORDER:
		case ISCSI_FLASHNODE_PDU_INORDER:
		case ISCSI_FLASHNODE_CHAP_AUTH_EN:
		case ISCSI_FLASHNODE_SNACK_REQ_EN:
		case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
		case ISCSI_FLASHNODE_BIDI_CHAP_EN:
		case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
		case ISCSI_FLASHNODE_ERL:
		case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
		case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
		case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
		case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
		case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
		case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
		case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
		case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
		case ISCSI_FLASHNODE_FIRST_BURST:
		case ISCSI_FLASHNODE_DEF_TIME2WAIT:
		case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
		case ISCSI_FLASHNODE_MAX_R2T:
		case ISCSI_FLASHNODE_KEEPALIVE_TMO:
		case ISCSI_FLASHNODE_ISID:
		case ISCSI_FLASHNODE_TSID:
		case ISCSI_FLASHNODE_PORT:
		case ISCSI_FLASHNODE_MAX_BURST:
		case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
		case ISCSI_FLASHNODE_IPADDR:
		case ISCSI_FLASHNODE_ALIAS:
		case ISCSI_FLASHNODE_REDIRECT_IPADDR:
		case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
		case ISCSI_FLASHNODE_LOCAL_PORT:
		case ISCSI_FLASHNODE_IPV4_TOS:
		case ISCSI_FLASHNODE_IPV6_TC:
		case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
		case ISCSI_FLASHNODE_NAME:
		case ISCSI_FLASHNODE_TPGT:
		case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
		case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
		case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
		case ISCSI_FLASHNODE_TCP_XMIT_WSF:
		case ISCSI_FLASHNODE_TCP_RECV_WSF:
		case ISCSI_FLASHNODE_CHAP_OUT_IDX:
		case ISCSI_FLASHNODE_USERNAME:
		case ISCSI_FLASHNODE_PASSWORD:
		case ISCSI_FLASHNODE_STATSN:
		case ISCSI_FLASHNODE_EXP_STATSN:
		case ISCSI_FLASHNODE_IS_BOOT_TGT:
			return S_IRUGO;
		default:
			return 0;
		}
	}

	return 0;
}

/**
 * qla4xxx_create_chap_list - Create CHAP list from FLASH
 * @ha: pointer to adapter structure
 *
 * Read flash and make a list of CHAP entries. During login, when a CHAP
 * entry is received it is looked up in this list. If the entry exists, its
 * CHAP entry index is set in the DDB. If the entry does not exist in this
 * list, a new entry is added to the CHAP table in FLASH and the index
 * obtained is used in the DDB.
 **/
static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
{
	int rval = 0;
	uint8_t *chap_flash_data = NULL;
	uint32_t offset;
	dma_addr_t chap_dma;
	uint32_t chap_size = 0;

	if (is_qla40XX(ha))
		chap_size = MAX_CHAP_ENTRIES_40XX *
			    sizeof(struct ql4_chap_table);
	else	/* Single region contains CHAP info for both
		 * ports which is divided in half for each port.
		 */
		chap_size = ha->hw.flt_chap_size / 2;

	chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
					     &chap_dma, GFP_KERNEL);
	if (!chap_flash_data) {
		ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
		return;
	}

	if (is_qla40XX(ha)) {
		offset = FLASH_CHAP_OFFSET;
	} else {
		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
		if (ha->port_num == 1)
			offset += chap_size;
	}

	rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
	if (rval != QLA_SUCCESS)
		goto exit_chap_list;

	if (ha->chap_list == NULL)
		ha->chap_list = vmalloc(chap_size);
	if (ha->chap_list == NULL) {
		ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
		goto exit_chap_list;
	}

	memset(ha->chap_list, 0, chap_size);
	memcpy(ha->chap_list, chap_flash_data, chap_size);

exit_chap_list:
	dma_free_coherent(&ha->pdev->dev, chap_size, chap_flash_data, chap_dma);
}

static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha,
				     int16_t chap_index,
				     struct ql4_chap_table **chap_entry)
{
	int rval = QLA_ERROR;
	int max_chap_entries;

	if (!ha->chap_list) {
		ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
		rval = QLA_ERROR;
		goto exit_get_chap;
	}

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	if (chap_index > max_chap_entries) {
		ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
		rval = QLA_ERROR;
		goto exit_get_chap;
	}

	*chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index;
	if ((*chap_entry)->cookie !=
	    __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
		rval = QLA_ERROR;
		*chap_entry = NULL;
	} else {
		rval = QLA_SUCCESS;
	}

exit_get_chap:
	return rval;
}

/**
 * qla4xxx_find_free_chap_index - Find the first free chap index
 * @ha: pointer to adapter structure
 * @chap_index: CHAP index to be returned
 *
 * Find the first free chap index available in the chap table
 *
 * Note: Caller should acquire the chap lock before getting here.
 **/
static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha,
					uint16_t *chap_index)
{
	int i, rval;
	int free_index = -1;
	int max_chap_entries = 0;
	struct ql4_chap_table *chap_table;

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	if (!ha->chap_list) {
		ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
		rval = QLA_ERROR;
		goto exit_find_chap;
	}

	for (i = 0; i < max_chap_entries; i++) {
		chap_table = (struct ql4_chap_table *)ha->chap_list + i;

		if ((chap_table->cookie !=
		     __constant_cpu_to_le16(CHAP_VALID_COOKIE)) &&
		    (i > MAX_RESRV_CHAP_IDX)) {
			free_index = i;
			break;
		}
	}

	if (free_index != -1) {
		*chap_index = free_index;
		rval = QLA_SUCCESS;
	} else {
		rval = QLA_ERROR;
	}

exit_find_chap:
	return rval;
}

static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
				 uint32_t *num_entries, char *buf)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct ql4_chap_table *chap_table;
	struct iscsi_chap_rec *chap_rec;
	int max_chap_entries = 0;
	int valid_chap_entries = 0;
	int ret = 0, i;

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n",
		   __func__, *num_entries, chap_tbl_idx);

	if (!buf) {
		ret = -ENOMEM;
		goto exit_get_chap_list;
	}

	qla4xxx_create_chap_list(ha);

	chap_rec = (struct iscsi_chap_rec *) buf;
	mutex_lock(&ha->chap_sem);
	for (i = chap_tbl_idx; i < max_chap_entries; i++) {
		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
		if (chap_table->cookie !=
		    __constant_cpu_to_le16(CHAP_VALID_COOKIE))
			continue;

		chap_rec->chap_tbl_idx = i;
		strlcpy(chap_rec->username, chap_table->name,
			ISCSI_CHAP_AUTH_NAME_MAX_LEN);
		strlcpy(chap_rec->password, chap_table->secret,
			QL4_CHAP_MAX_SECRET_LEN);
		chap_rec->password_length = chap_table->secret_len;

		if (chap_table->flags & BIT_7) /* local */
			chap_rec->chap_type = CHAP_TYPE_OUT;

		if (chap_table->flags & BIT_6) /* peer */
			chap_rec->chap_type = CHAP_TYPE_IN;

		chap_rec++;

		valid_chap_entries++;
		if (valid_chap_entries == *num_entries)
			break;
		else
			continue;
	}
	mutex_unlock(&ha->chap_sem);

exit_get_chap_list:
	ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n",
		   __func__, valid_chap_entries);
	*num_entries = valid_chap_entries;
	return ret;
}

static int __qla4xxx_is_chap_active(struct device *dev, void *data)
{
	int ret = 0;
	uint16_t *chap_tbl_idx = (uint16_t *) data;
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;

	if (!iscsi_is_session_dev(dev))
		goto exit_is_chap_active;

	cls_session = iscsi_dev_to_session(dev);
	sess = cls_session->dd_data;
	ddb_entry = sess->dd_data;

	if (iscsi_session_chkready(cls_session))
		goto exit_is_chap_active;

	if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
		ret = 1;

exit_is_chap_active:
	return ret;
}
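/*
 * device_for_each_child() stops and returns the first non-zero value
 * returned by __qla4xxx_is_chap_active(), so a non-zero result from the
 * helper below means at least one active session still references the
 * given CHAP table index.
 */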
static int qla4xxx_is_chap_active(struct Scsi_Host *shost,
				  uint16_t chap_tbl_idx)
{
	int ret = 0;

	ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx,
				    __qla4xxx_is_chap_active);

	return ret;
}

static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct ql4_chap_table *chap_table;
	dma_addr_t chap_dma;
	int max_chap_entries = 0;
	uint32_t offset = 0;
	uint32_t chap_size;
	int ret = 0;

	chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
	if (chap_table == NULL)
		return -ENOMEM;

	memset(chap_table, 0, sizeof(struct ql4_chap_table));

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	if (chap_tbl_idx > max_chap_entries) {
		ret = -EINVAL;
		goto exit_delete_chap;
	}

	/* Check if chap index is in use.
	 * If chap is in use don't delete the chap entry */
	ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
	if (ret) {
		ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot "
			   "delete from flash\n", chap_tbl_idx);
		ret = -EBUSY;
		goto exit_delete_chap;
	}

	chap_size = sizeof(struct ql4_chap_table);
	if (is_qla40XX(ha))
		offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
	else {
		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
		/* flt_chap_size is CHAP table size for both ports
		 * so divide it by 2 to calculate the offset for second port
		 */
		if (ha->port_num == 1)
			offset += (ha->hw.flt_chap_size / 2);
		offset += (chap_tbl_idx * chap_size);
	}

	ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
	if (ret != QLA_SUCCESS) {
		ret = -EINVAL;
		goto exit_delete_chap;
	}

	DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
			  __le16_to_cpu(chap_table->cookie)));

	if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
		ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
		goto exit_delete_chap;
	}

	chap_table->cookie = __constant_cpu_to_le16(0xFFFF);

	offset = FLASH_CHAP_OFFSET |
		 (chap_tbl_idx * sizeof(struct ql4_chap_table));
	ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
				FLASH_OPT_RMW_COMMIT);
	if (ret == QLA_SUCCESS && ha->chap_list) {
		mutex_lock(&ha->chap_sem);
		/* Update ha chap_list cache */
		memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
		       chap_table, sizeof(struct ql4_chap_table));
		mutex_unlock(&ha->chap_sem);
	}
	if (ret != QLA_SUCCESS)
		ret = -EINVAL;

exit_delete_chap:
	dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
	return ret;
}

/**
 * qla4xxx_set_chap_entry - Make chap entry with given information
 * @shost: pointer to host
 * @data: chap info - credentials, index and type to make chap entry
 * @len: length of data
 *
 * Add or update chap entry with the given information
 **/
static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct iscsi_chap_rec chap_rec;
	struct ql4_chap_table *chap_entry = NULL;
	struct iscsi_param_info *param_info;
	struct nlattr *attr;
	int max_chap_entries = 0;
	int type;
	int rem = len;
	int rc = 0;
	int size;

	memset(&chap_rec, 0, sizeof(chap_rec));

	nla_for_each_attr(attr, data, len, rem) {
		param_info = nla_data(attr);

		switch (param_info->param) {
		case ISCSI_CHAP_PARAM_INDEX:
			chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value;
			break;
		case ISCSI_CHAP_PARAM_CHAP_TYPE:
			chap_rec.chap_type = param_info->value[0];
			break;
		case ISCSI_CHAP_PARAM_USERNAME:
			size = min_t(size_t, sizeof(chap_rec.username),
				     param_info->len);
			memcpy(chap_rec.username, param_info->value, size);
			break;
		case ISCSI_CHAP_PARAM_PASSWORD:
			size = min_t(size_t, sizeof(chap_rec.password),
				     param_info->len);
			memcpy(chap_rec.password, param_info->value, size);
			break;
		case ISCSI_CHAP_PARAM_PASSWORD_LEN:
			chap_rec.password_length = param_info->value[0];
			break;
		default:
			ql4_printk(KERN_ERR, ha,
				   "%s: No such sysfs attribute\n", __func__);
			rc = -ENOSYS;
			goto exit_set_chap;
		}
	}

	if (chap_rec.chap_type == CHAP_TYPE_IN)
		type = BIDI_CHAP;
	else
		type = LOCAL_CHAP;

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	mutex_lock(&ha->chap_sem);
	if (chap_rec.chap_tbl_idx < max_chap_entries) {
		rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx,
					       &chap_entry);
		if (!rc) {
			if (!(type == qla4xxx_get_chap_type(chap_entry))) {
				ql4_printk(KERN_INFO, ha,
					   "Type mismatch for CHAP entry %d\n",
					   chap_rec.chap_tbl_idx);
				rc = -EINVAL;
				goto exit_unlock_chap;
			}

			/* If chap index is in use then don't modify it */
			rc = qla4xxx_is_chap_active(shost,
						    chap_rec.chap_tbl_idx);
			if (rc) {
				ql4_printk(KERN_INFO, ha,
					   "CHAP entry %d is in use\n",
					   chap_rec.chap_tbl_idx);
				rc = -EBUSY;
				goto exit_unlock_chap;
			}
		}
	} else {
		rc = qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx);
		if (rc) {
			ql4_printk(KERN_INFO, ha, "CHAP entry not available\n");
			rc = -EBUSY;
			goto exit_unlock_chap;
		}
	}

	rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password,
			      chap_rec.chap_tbl_idx, type);

exit_unlock_chap:
	mutex_unlock(&ha->chap_sem);

exit_set_chap:
	return rc;
}


static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct iscsi_offload_host_stats *host_stats = NULL;
	int host_stats_size;
	int ret = 0;
	int ddb_idx = 0;
	struct ql_iscsi_stats *ql_iscsi_stats = NULL;
	int stats_size;
	dma_addr_t iscsi_stats_dma;

	DEBUG2(ql4_printk(KERN_INFO, ha, "Func: %s\n", __func__));

	host_stats_size = sizeof(struct iscsi_offload_host_stats);

	if (host_stats_size != len) {
		ql4_printk(KERN_INFO, ha, "%s: host_stats size mismatch expected = %d, is = %d\n",
			   __func__, len, host_stats_size);
		ret = -EINVAL;
		goto exit_host_stats;
	}

	if (!buf) {
		ret = -ENOMEM;
		goto exit_host_stats;
	}
	host_stats = (struct iscsi_offload_host_stats *)buf;

	stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));

	ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
					    &iscsi_stats_dma, GFP_KERNEL);
	if (!ql_iscsi_stats) {
		ql4_printk(KERN_ERR, ha,
			   "Unable to allocate memory for iscsi stats\n");
		ret = -ENOMEM;
		goto exit_host_stats;
	}

	ret = qla4xxx_get_mgmt_data(ha, ddb_idx, stats_size,
				    iscsi_stats_dma);
	if (ret != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha,
			   "Unable to retrieve iscsi stats\n");
		ret = -EIO;
		goto exit_host_stats;
	}
	host_stats->mactx_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_frames);
	host_stats->mactx_bytes = le64_to_cpu(ql_iscsi_stats->mac_tx_bytes);
	host_stats->mactx_multicast_frames =
			le64_to_cpu(ql_iscsi_stats->mac_tx_multicast_frames);
	host_stats->mactx_broadcast_frames =
			le64_to_cpu(ql_iscsi_stats->mac_tx_broadcast_frames);
	host_stats->mactx_pause_frames =
			le64_to_cpu(ql_iscsi_stats->mac_tx_pause_frames);
	host_stats->mactx_control_frames =
			le64_to_cpu(ql_iscsi_stats->mac_tx_control_frames);
	host_stats->mactx_deferral =
			le64_to_cpu(ql_iscsi_stats->mac_tx_deferral);
	host_stats->mactx_excess_deferral =
			le64_to_cpu(ql_iscsi_stats->mac_tx_excess_deferral);
	host_stats->mactx_late_collision =
			le64_to_cpu(ql_iscsi_stats->mac_tx_late_collision);
	host_stats->mactx_abort = le64_to_cpu(ql_iscsi_stats->mac_tx_abort);
	host_stats->mactx_single_collision =
			le64_to_cpu(ql_iscsi_stats->mac_tx_single_collision);
	host_stats->mactx_multiple_collision =
			le64_to_cpu(ql_iscsi_stats->mac_tx_multiple_collision);
	host_stats->mactx_collision =
			le64_to_cpu(ql_iscsi_stats->mac_tx_collision);
	host_stats->mactx_frames_dropped =
			le64_to_cpu(ql_iscsi_stats->mac_tx_frames_dropped);
	host_stats->mactx_jumbo_frames =
			le64_to_cpu(ql_iscsi_stats->mac_tx_jumbo_frames);
	host_stats->macrx_frames = le64_to_cpu(ql_iscsi_stats->mac_rx_frames);
	host_stats->macrx_bytes = le64_to_cpu(ql_iscsi_stats->mac_rx_bytes);
	host_stats->macrx_unknown_control_frames =
		le64_to_cpu(ql_iscsi_stats->mac_rx_unknown_control_frames);
	host_stats->macrx_pause_frames =
			le64_to_cpu(ql_iscsi_stats->mac_rx_pause_frames);
	host_stats->macrx_control_frames =
			le64_to_cpu(ql_iscsi_stats->mac_rx_control_frames);
	host_stats->macrx_dribble =
			le64_to_cpu(ql_iscsi_stats->mac_rx_dribble);
	host_stats->macrx_frame_length_error =
			le64_to_cpu(ql_iscsi_stats->mac_rx_frame_length_error);
	host_stats->macrx_jabber = le64_to_cpu(ql_iscsi_stats->mac_rx_jabber);
	host_stats->macrx_carrier_sense_error =
		le64_to_cpu(ql_iscsi_stats->mac_rx_carrier_sense_error);
	host_stats->macrx_frame_discarded =
			le64_to_cpu(ql_iscsi_stats->mac_rx_frame_discarded);
	host_stats->macrx_frames_dropped =
			le64_to_cpu(ql_iscsi_stats->mac_rx_frames_dropped);
	host_stats->mac_crc_error = le64_to_cpu(ql_iscsi_stats->mac_crc_error);
	host_stats->mac_encoding_error =
			le64_to_cpu(ql_iscsi_stats->mac_encoding_error);
	host_stats->macrx_length_error_large =
			le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_large);
	host_stats->macrx_length_error_small =
			le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_small);
	host_stats->macrx_multicast_frames =
			le64_to_cpu(ql_iscsi_stats->mac_rx_multicast_frames);
	host_stats->macrx_broadcast_frames =
			le64_to_cpu(ql_iscsi_stats->mac_rx_broadcast_frames);
	host_stats->iptx_packets = le64_to_cpu(ql_iscsi_stats->ip_tx_packets);
	host_stats->iptx_bytes = le64_to_cpu(ql_iscsi_stats->ip_tx_bytes);
	host_stats->iptx_fragments =
			le64_to_cpu(ql_iscsi_stats->ip_tx_fragments);
	host_stats->iprx_packets = le64_to_cpu(ql_iscsi_stats->ip_rx_packets);
	host_stats->iprx_bytes = le64_to_cpu(ql_iscsi_stats->ip_rx_bytes);
	host_stats->iprx_fragments =
			le64_to_cpu(ql_iscsi_stats->ip_rx_fragments);
	host_stats->ip_datagram_reassembly =
			le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly);
	host_stats->ip_invalid_address_error =
			le64_to_cpu(ql_iscsi_stats->ip_invalid_address_error);
	host_stats->ip_error_packets =
			le64_to_cpu(ql_iscsi_stats->ip_error_packets);
	host_stats->ip_fragrx_overlap =
			le64_to_cpu(ql_iscsi_stats->ip_fragrx_overlap);
	host_stats->ip_fragrx_outoforder =
			le64_to_cpu(ql_iscsi_stats->ip_fragrx_outoforder);
	host_stats->ip_datagram_reassembly_timeout =
		le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly_timeout);
	host_stats->ipv6tx_packets =
			le64_to_cpu(ql_iscsi_stats->ipv6_tx_packets);
	host_stats->ipv6tx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_tx_bytes);
	host_stats->ipv6tx_fragments =
			le64_to_cpu(ql_iscsi_stats->ipv6_tx_fragments);
	host_stats->ipv6rx_packets =
			le64_to_cpu(ql_iscsi_stats->ipv6_rx_packets);
	host_stats->ipv6rx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_rx_bytes);
	host_stats->ipv6rx_fragments =
			le64_to_cpu(ql_iscsi_stats->ipv6_rx_fragments);
	host_stats->ipv6_datagram_reassembly =
			le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly);
	host_stats->ipv6_invalid_address_error =
		le64_to_cpu(ql_iscsi_stats->ipv6_invalid_address_error);
	host_stats->ipv6_error_packets =
			le64_to_cpu(ql_iscsi_stats->ipv6_error_packets);
	host_stats->ipv6_fragrx_overlap =
			le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_overlap);
	host_stats->ipv6_fragrx_outoforder =
			le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_outoforder);
	host_stats->ipv6_datagram_reassembly_timeout =
		le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly_timeout);
	host_stats->tcptx_segments =
			le64_to_cpu(ql_iscsi_stats->tcp_tx_segments);
	host_stats->tcptx_bytes = le64_to_cpu(ql_iscsi_stats->tcp_tx_bytes);
	host_stats->tcprx_segments =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_segments);
	host_stats->tcprx_byte = le64_to_cpu(ql_iscsi_stats->tcp_rx_byte);
	host_stats->tcp_duplicate_ack_retx =
			le64_to_cpu(ql_iscsi_stats->tcp_duplicate_ack_retx);
	host_stats->tcp_retx_timer_expired =
			le64_to_cpu(ql_iscsi_stats->tcp_retx_timer_expired);
	host_stats->tcprx_duplicate_ack =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_duplicate_ack);
	host_stats->tcprx_pure_ackr =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_pure_ackr);
	host_stats->tcptx_delayed_ack =
			le64_to_cpu(ql_iscsi_stats->tcp_tx_delayed_ack);
	host_stats->tcptx_pure_ack =
			le64_to_cpu(ql_iscsi_stats->tcp_tx_pure_ack);
	host_stats->tcprx_segment_error =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_error);
	host_stats->tcprx_segment_outoforder =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_outoforder);
	host_stats->tcprx_window_probe =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_window_probe);
	host_stats->tcprx_window_update =
			le64_to_cpu(ql_iscsi_stats->tcp_rx_window_update);
	host_stats->tcptx_window_probe_persist =
		le64_to_cpu(ql_iscsi_stats->tcp_tx_window_probe_persist);
	host_stats->ecc_error_correction =
			le64_to_cpu(ql_iscsi_stats->ecc_error_correction);
	host_stats->iscsi_pdu_tx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_tx);
	host_stats->iscsi_data_bytes_tx =
			le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_tx);
	host_stats->iscsi_pdu_rx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_rx);
	host_stats->iscsi_data_bytes_rx =
			le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_rx);
	host_stats->iscsi_io_completed =
			le64_to_cpu(ql_iscsi_stats->iscsi_io_completed);
	host_stats->iscsi_unexpected_io_rx =
			le64_to_cpu(ql_iscsi_stats->iscsi_unexpected_io_rx);
	host_stats->iscsi_format_error =
			le64_to_cpu(ql_iscsi_stats->iscsi_format_error);
	host_stats->iscsi_hdr_digest_error =
			le64_to_cpu(ql_iscsi_stats->iscsi_hdr_digest_error);
	host_stats->iscsi_data_digest_error =
			le64_to_cpu(ql_iscsi_stats->iscsi_data_digest_error);
	host_stats->iscsi_sequence_error =
			le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error);
exit_host_stats:
	if (ql_iscsi_stats)
		dma_free_coherent(&ha->pdev->dev, stats_size,
				  ql_iscsi_stats, iscsi_stats_dma);

	ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n",
		   __func__);
	return ret;
}

static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
				   enum iscsi_param_type param_type,
				   int param, char *buf)
{
	struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
	struct scsi_qla_host *ha = to_qla_host(shost);
	int ival;
	char *pval = NULL;
	int len = -ENOSYS;

	if (param_type == ISCSI_NET_PARAM) {
		switch (param) {
		case ISCSI_NET_PARAM_IPV4_ADDR:
			len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
			break;
		case ISCSI_NET_PARAM_IPV4_SUBNET:
			len = sprintf(buf, "%pI4\n",
				      &ha->ip_config.subnet_mask);
			break;
		case ISCSI_NET_PARAM_IPV4_GW:
			len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
			break;
		case ISCSI_NET_PARAM_IFACE_ENABLE:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
				OP_STATE(ha->ip_config.ipv4_options,
					 IPOPT_IPV4_PROTOCOL_ENABLE, pval);
			} else {
				OP_STATE(ha->ip_config.ipv6_options,
					 IPV6_OPT_IPV6_PROTOCOL_ENABLE, pval);
			}

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
			len = sprintf(buf, "%s\n",
				      (ha->ip_config.tcp_options &
				       TCPOPT_DHCP_ENABLE) ?
				      "dhcp" : "static");
			break;
		case ISCSI_NET_PARAM_IPV6_ADDR:
			if (iface->iface_num == 0)
				len = sprintf(buf, "%pI6\n",
					      &ha->ip_config.ipv6_addr0);
			if (iface->iface_num == 1)
				len = sprintf(buf, "%pI6\n",
					      &ha->ip_config.ipv6_addr1);
			break;
		case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
			len = sprintf(buf, "%pI6\n",
				      &ha->ip_config.ipv6_link_local_addr);
			break;
		case ISCSI_NET_PARAM_IPV6_ROUTER:
			len = sprintf(buf, "%pI6\n",
				      &ha->ip_config.ipv6_default_router_addr);
			break;
		case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
			pval = (ha->ip_config.ipv6_addl_options &
				IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
			       "nd" : "static";

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
			pval = (ha->ip_config.ipv6_addl_options &
				IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
			       "auto" : "static";

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_VLAN_ID:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
				ival = ha->ip_config.ipv4_vlan_tag &
				       ISCSI_MAX_VLAN_ID;
			else
				ival = ha->ip_config.ipv6_vlan_tag &
				       ISCSI_MAX_VLAN_ID;

			len = sprintf(buf, "%d\n", ival);
			break;
		case ISCSI_NET_PARAM_VLAN_PRIORITY:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
				ival = (ha->ip_config.ipv4_vlan_tag >> 13) &
				       ISCSI_MAX_VLAN_PRIORITY;
			else
				ival = (ha->ip_config.ipv6_vlan_tag >> 13) &
				       ISCSI_MAX_VLAN_PRIORITY;

			len = sprintf(buf, "%d\n", ival);
			break;
		case ISCSI_NET_PARAM_VLAN_ENABLED:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
				OP_STATE(ha->ip_config.ipv4_options,
					 IPOPT_VLAN_TAGGING_ENABLE, pval);
			} else {
				OP_STATE(ha->ip_config.ipv6_options,
					 IPV6_OPT_VLAN_TAGGING_ENABLE, pval);
			}
			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_MTU:
			len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
			break;
		case ISCSI_NET_PARAM_PORT:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
				len = sprintf(buf, "%d\n",
					      ha->ip_config.ipv4_port);
			else
				len = sprintf(buf, "%d\n",
					      ha->ip_config.ipv6_port);
			break;
		case ISCSI_NET_PARAM_IPADDR_STATE:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
				pval = iscsi_get_ipaddress_state_name(
						ha->ip_config.ipv4_addr_state);
			} else {
				if (iface->iface_num == 0)
					pval = iscsi_get_ipaddress_state_name(
						ha->ip_config.ipv6_addr0_state);
				else if (iface->iface_num == 1)
					pval = iscsi_get_ipaddress_state_name(
						ha->ip_config.ipv6_addr1_state);
			}

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE:
			pval = iscsi_get_ipaddress_state_name(
					ha->ip_config.ipv6_link_local_state);
			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV6_ROUTER_STATE:
			pval = iscsi_get_router_state_name(
				      ha->ip_config.ipv6_default_router_state);
			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_DELAYED_ACK_EN:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
				OP_STATE(~ha->ip_config.tcp_options,
					 TCPOPT_DELAYED_ACK_DISABLE, pval);
			} else {
				OP_STATE(~ha->ip_config.ipv6_tcp_options,
					 IPV6_TCPOPT_DELAYED_ACK_DISABLE, pval);
			}
			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
				OP_STATE(~ha->ip_config.tcp_options,
					 TCPOPT_NAGLE_ALGO_DISABLE, pval);
			} else {
				OP_STATE(~ha->ip_config.ipv6_tcp_options,
					 IPV6_TCPOPT_NAGLE_ALGO_DISABLE, pval);
			}
			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
				OP_STATE(~ha->ip_config.tcp_options,
					 TCPOPT_WINDOW_SCALE_DISABLE, pval);
			} else {
				OP_STATE(~ha->ip_config.ipv6_tcp_options,
					 IPV6_TCPOPT_WINDOW_SCALE_DISABLE,
					 pval);
			}
			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_TCP_WSF:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
				len = sprintf(buf, "%d\n",
					      ha->ip_config.tcp_wsf);
			else
				len = sprintf(buf, "%d\n",
					      ha->ip_config.ipv6_tcp_wsf);
			break;
		case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
				ival = (ha->ip_config.tcp_options &
					TCPOPT_TIMER_SCALE) >> 1;
			else
				ival = (ha->ip_config.ipv6_tcp_options &
					IPV6_TCPOPT_TIMER_SCALE) >> 1;

			len = sprintf(buf, "%d\n", ival);
			break;
		case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
				OP_STATE(ha->ip_config.tcp_options,
					 TCPOPT_TIMESTAMP_ENABLE, pval);
			} else {
				OP_STATE(ha->ip_config.ipv6_tcp_options,
					 IPV6_TCPOPT_TIMESTAMP_EN, pval);
			}
			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_CACHE_ID:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
				len = sprintf(buf, "%d\n",
					      ha->ip_config.ipv4_cache_id);
			else
				len = sprintf(buf, "%d\n",
					      ha->ip_config.ipv6_cache_id);
			break;
		case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
			OP_STATE(ha->ip_config.tcp_options,
				 TCPOPT_DNS_SERVER_IP_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
			OP_STATE(ha->ip_config.tcp_options,
				 TCPOPT_SLP_DA_INFO_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_TOS_EN:
			OP_STATE(ha->ip_config.ipv4_options,
				 IPOPT_IPV4_TOS_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_TOS:
			len = sprintf(buf, "%d\n", ha->ip_config.ipv4_tos);
			break;
		case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
			OP_STATE(ha->ip_config.ipv4_options,
				 IPOPT_GRAT_ARP_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
			OP_STATE(ha->ip_config.ipv4_options, IPOPT_ALT_CID_EN,
				 pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
			pval = (ha->ip_config.ipv4_alt_cid_len) ?
			       (char *)ha->ip_config.ipv4_alt_cid : "";

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
			OP_STATE(ha->ip_config.ipv4_options,
				 IPOPT_REQ_VID_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
			OP_STATE(ha->ip_config.ipv4_options,
				 IPOPT_USE_VID_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
			pval = (ha->ip_config.ipv4_vid_len) ?
			       (char *)ha->ip_config.ipv4_vid : "";

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
			OP_STATE(ha->ip_config.ipv4_options,
				 IPOPT_LEARN_IQN_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
			OP_STATE(~ha->ip_config.ipv4_options,
				 IPOPT_FRAGMENTATION_DISABLE, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
			OP_STATE(ha->ip_config.ipv4_options,
				 IPOPT_IN_FORWARD_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_REDIRECT_EN:
			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
				OP_STATE(ha->ip_config.ipv4_options,
					 IPOPT_ARP_REDIRECT_EN, pval);
			} else {
				OP_STATE(ha->ip_config.ipv6_options,
					 IPV6_OPT_REDIRECT_EN, pval);
			}
			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV4_TTL:
			len = sprintf(buf, "%d\n", ha->ip_config.ipv4_ttl);
			break;
		case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
			OP_STATE(ha->ip_config.ipv6_options,
				 IPV6_OPT_GRAT_NEIGHBOR_ADV_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV6_MLD_EN:
			OP_STATE(ha->ip_config.ipv6_addl_options,
				 IPV6_ADDOPT_MLD_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
			len = sprintf(buf, "%u\n", ha->ip_config.ipv6_flow_lbl);
			break;
		case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
			len = sprintf(buf, "%d\n",
				      ha->ip_config.ipv6_traffic_class);
			break;
		case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
			len = sprintf(buf, "%d\n",
				      ha->ip_config.ipv6_hop_limit);
			break;
		case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
			len = sprintf(buf, "%d\n",
				      ha->ip_config.ipv6_nd_reach_time);
			break;
		case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
			len = sprintf(buf, "%d\n",
				      ha->ip_config.ipv6_nd_rexmit_timer);
			break;
		case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
			len = sprintf(buf, "%d\n",
				      ha->ip_config.ipv6_nd_stale_timeout);
			break;
		case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
			len = sprintf(buf, "%d\n",
				      ha->ip_config.ipv6_dup_addr_detect_count);
			break;
		case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
			len = sprintf(buf, "%d\n",
				      ha->ip_config.ipv6_gw_advrt_mtu);
			break;
		default:
			len = -ENOSYS;
		}
	} else if (param_type == ISCSI_IFACE_PARAM) {
		switch (param) {
		case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
			len = sprintf(buf, "%d\n", ha->ip_config.def_timeout);
			break;
		case ISCSI_IFACE_PARAM_HDRDGST_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_HEADER_DIGEST_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_DATADGST_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_DATA_DIGEST_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_IMM_DATA_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_IMMEDIATE_DATA_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_INITIAL_R2T_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_DATA_SEQ_INORDER_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_DATA_PDU_INORDER_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_ERL:
			len = sprintf(buf, "%d\n",
				      (ha->ip_config.iscsi_options &
				       ISCSIOPTS_ERL));
			break;
		case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
			len = sprintf(buf, "%u\n",
				      ha->ip_config.iscsi_max_pdu_size *
				      BYTE_UNITS);
			break;
		case ISCSI_IFACE_PARAM_FIRST_BURST:
			len = sprintf(buf, "%u\n",
				      ha->ip_config.iscsi_first_burst_len *
				      BYTE_UNITS);
			break;
		case ISCSI_IFACE_PARAM_MAX_R2T:
			len = sprintf(buf, "%d\n",
				      ha->ip_config.iscsi_max_outstnd_r2t);
			break;
		case ISCSI_IFACE_PARAM_MAX_BURST:
			len = sprintf(buf, "%u\n",
				      ha->ip_config.iscsi_max_burst_len *
				      BYTE_UNITS);
			break;
		case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_CHAP_AUTH_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_BIDI_CHAP_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_DISCOVERY_AUTH_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_DISCOVERY_LOGOUT_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
			OP_STATE(ha->ip_config.iscsi_options,
				 ISCSIOPTS_STRICT_LOGIN_COMP_EN, pval);

			len = sprintf(buf, "%s\n", pval);
			break;
		case ISCSI_IFACE_PARAM_INITIATOR_NAME:
			len = sprintf(buf, "%s\n", ha->ip_config.iscsi_name);
			break;
		default:
			len = -ENOSYS;
		}
	}

	return len;
}

static struct iscsi_endpoint *
qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
		   int non_blocking)
{
	int ret;
	struct iscsi_endpoint *ep;
	struct qla_endpoint *qla_ep;
	struct scsi_qla_host *ha;
	struct sockaddr_in *addr;
	struct sockaddr_in6 *addr6;

	if (!shost) {
		ret = -ENXIO;
		pr_err("%s: shost is NULL\n", __func__);
		return ERR_PTR(ret);
	}

	ha = iscsi_host_priv(shost);
	ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
	if (!ep) {
		ret = -ENOMEM;
		return ERR_PTR(ret);
	}

	qla_ep = ep->dd_data;
	memset(qla_ep, 0, sizeof(struct qla_endpoint));
	if (dst_addr->sa_family == AF_INET) {
		memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
		addr = (struct sockaddr_in *)&qla_ep->dst_addr;
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
				  (char *)&addr->sin_addr));
	} else if (dst_addr->sa_family == AF_INET6) {
		memcpy(&qla_ep->dst_addr, dst_addr,
		       sizeof(struct sockaddr_in6));
		addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
				  (char *)&addr6->sin6_addr));
	} else {
		ql4_printk(KERN_WARNING, ha, "%s: Invalid endpoint\n",
			   __func__);
	}

	qla_ep->host = shost;

	return ep;
}
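/*
 * Note: qla4xxx_ep_connect() above only records the destination address and
 * the owning Scsi_Host in the qla_endpoint; an unsupported address family is
 * reported with a warning but the endpoint is still returned to the caller.
 */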
*ep, int timeout_ms) 1706 { 1707 struct qla_endpoint *qla_ep; 1708 struct scsi_qla_host *ha; 1709 int ret = 0; 1710 1711 qla_ep = ep->dd_data; 1712 ha = to_qla_host(qla_ep->host); 1713 DEBUG2(pr_info_ratelimited("%s: host: %ld\n", __func__, ha->host_no)); 1714 1715 if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags)) 1716 ret = 1; 1717 1718 return ret; 1719 } 1720 1721 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep) 1722 { 1723 struct qla_endpoint *qla_ep; 1724 struct scsi_qla_host *ha; 1725 1726 qla_ep = ep->dd_data; 1727 ha = to_qla_host(qla_ep->host); 1728 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, 1729 ha->host_no)); 1730 iscsi_destroy_endpoint(ep); 1731 } 1732 1733 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep, 1734 enum iscsi_param param, 1735 char *buf) 1736 { 1737 struct qla_endpoint *qla_ep = ep->dd_data; 1738 struct sockaddr *dst_addr; 1739 struct scsi_qla_host *ha; 1740 1741 if (!qla_ep) 1742 return -ENOTCONN; 1743 1744 ha = to_qla_host(qla_ep->host); 1745 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, 1746 ha->host_no)); 1747 1748 switch (param) { 1749 case ISCSI_PARAM_CONN_PORT: 1750 case ISCSI_PARAM_CONN_ADDRESS: 1751 dst_addr = (struct sockaddr *)&qla_ep->dst_addr; 1752 if (!dst_addr) 1753 return -ENOTCONN; 1754 1755 return iscsi_conn_get_addr_param((struct sockaddr_storage *) 1756 &qla_ep->dst_addr, param, buf); 1757 default: 1758 return -ENOSYS; 1759 } 1760 } 1761 1762 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn, 1763 struct iscsi_stats *stats) 1764 { 1765 struct iscsi_session *sess; 1766 struct iscsi_cls_session *cls_sess; 1767 struct ddb_entry *ddb_entry; 1768 struct scsi_qla_host *ha; 1769 struct ql_iscsi_stats *ql_iscsi_stats; 1770 int stats_size; 1771 int ret; 1772 dma_addr_t iscsi_stats_dma; 1773 1774 cls_sess = iscsi_conn_to_session(cls_conn); 1775 sess = cls_sess->dd_data; 1776 ddb_entry = sess->dd_data; 1777 ha = ddb_entry->ha; 1778 1779 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, 1780 ha->host_no)); 1781 stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats)); 1782 /* Allocate memory */ 1783 ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size, 1784 &iscsi_stats_dma, GFP_KERNEL); 1785 if (!ql_iscsi_stats) { 1786 ql4_printk(KERN_ERR, ha, 1787 "Unable to allocate memory for iscsi stats\n"); 1788 goto exit_get_stats; 1789 } 1790 1791 ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size, 1792 iscsi_stats_dma); 1793 if (ret != QLA_SUCCESS) { 1794 ql4_printk(KERN_ERR, ha, 1795 "Unable to retrieve iscsi stats\n"); 1796 goto free_stats; 1797 } 1798 1799 /* octets */ 1800 stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets); 1801 stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets); 1802 /* xmit pdus */ 1803 stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus); 1804 stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus); 1805 stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus); 1806 stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus); 1807 stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus); 1808 stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus); 1809 stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus); 1810 stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus); 1811 /* recv pdus */ 1812 stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus); 1813 stats->scsirsp_pdus = 
le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus); 1814 stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus); 1815 stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus); 1816 stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus); 1817 stats->logoutrsp_pdus = 1818 le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus); 1819 stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus); 1820 stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus); 1821 stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus); 1822 1823 free_stats: 1824 dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats, 1825 iscsi_stats_dma); 1826 exit_get_stats: 1827 return; 1828 } 1829 1830 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc) 1831 { 1832 struct iscsi_cls_session *session; 1833 struct iscsi_session *sess; 1834 unsigned long flags; 1835 enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED; 1836 1837 session = starget_to_session(scsi_target(sc->device)); 1838 sess = session->dd_data; 1839 1840 spin_lock_irqsave(&session->lock, flags); 1841 if (session->state == ISCSI_SESSION_FAILED) 1842 ret = BLK_EH_RESET_TIMER; 1843 spin_unlock_irqrestore(&session->lock, flags); 1844 1845 return ret; 1846 } 1847 1848 static void qla4xxx_set_port_speed(struct Scsi_Host *shost) 1849 { 1850 struct scsi_qla_host *ha = to_qla_host(shost); 1851 struct iscsi_cls_host *ihost = shost->shost_data; 1852 uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN; 1853 1854 qla4xxx_get_firmware_state(ha); 1855 1856 switch (ha->addl_fw_state & 0x0F00) { 1857 case FW_ADDSTATE_LINK_SPEED_10MBPS: 1858 speed = ISCSI_PORT_SPEED_10MBPS; 1859 break; 1860 case FW_ADDSTATE_LINK_SPEED_100MBPS: 1861 speed = ISCSI_PORT_SPEED_100MBPS; 1862 break; 1863 case FW_ADDSTATE_LINK_SPEED_1GBPS: 1864 speed = ISCSI_PORT_SPEED_1GBPS; 1865 break; 1866 case FW_ADDSTATE_LINK_SPEED_10GBPS: 1867 speed = ISCSI_PORT_SPEED_10GBPS; 1868 break; 1869 } 1870 ihost->port_speed = speed; 1871 } 1872 1873 static void qla4xxx_set_port_state(struct Scsi_Host *shost) 1874 { 1875 struct scsi_qla_host *ha = to_qla_host(shost); 1876 struct iscsi_cls_host *ihost = shost->shost_data; 1877 uint32_t state = ISCSI_PORT_STATE_DOWN; 1878 1879 if (test_bit(AF_LINK_UP, &ha->flags)) 1880 state = ISCSI_PORT_STATE_UP; 1881 1882 ihost->port_state = state; 1883 } 1884 1885 static int qla4xxx_host_get_param(struct Scsi_Host *shost, 1886 enum iscsi_host_param param, char *buf) 1887 { 1888 struct scsi_qla_host *ha = to_qla_host(shost); 1889 int len; 1890 1891 switch (param) { 1892 case ISCSI_HOST_PARAM_HWADDRESS: 1893 len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN); 1894 break; 1895 case ISCSI_HOST_PARAM_IPADDRESS: 1896 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address); 1897 break; 1898 case ISCSI_HOST_PARAM_INITIATOR_NAME: 1899 len = sprintf(buf, "%s\n", ha->name_string); 1900 break; 1901 case ISCSI_HOST_PARAM_PORT_STATE: 1902 qla4xxx_set_port_state(shost); 1903 len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost)); 1904 break; 1905 case ISCSI_HOST_PARAM_PORT_SPEED: 1906 qla4xxx_set_port_speed(shost); 1907 len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost)); 1908 break; 1909 default: 1910 return -ENOSYS; 1911 } 1912 1913 return len; 1914 } 1915 1916 static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha) 1917 { 1918 if (ha->iface_ipv4) 1919 return; 1920 1921 /* IPv4 */ 1922 ha->iface_ipv4 = iscsi_create_iface(ha->host, 1923 &qla4xxx_iscsi_transport, 1924 ISCSI_IFACE_TYPE_IPV4, 0, 0); 1925 if 
(!ha->iface_ipv4) 1926 ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI " 1927 "iface0.\n"); 1928 } 1929 1930 static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha) 1931 { 1932 if (!ha->iface_ipv6_0) 1933 /* IPv6 iface-0 */ 1934 ha->iface_ipv6_0 = iscsi_create_iface(ha->host, 1935 &qla4xxx_iscsi_transport, 1936 ISCSI_IFACE_TYPE_IPV6, 0, 1937 0); 1938 if (!ha->iface_ipv6_0) 1939 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI " 1940 "iface0.\n"); 1941 1942 if (!ha->iface_ipv6_1) 1943 /* IPv6 iface-1 */ 1944 ha->iface_ipv6_1 = iscsi_create_iface(ha->host, 1945 &qla4xxx_iscsi_transport, 1946 ISCSI_IFACE_TYPE_IPV6, 1, 1947 0); 1948 if (!ha->iface_ipv6_1) 1949 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI " 1950 "iface1.\n"); 1951 } 1952 1953 static void qla4xxx_create_ifaces(struct scsi_qla_host *ha) 1954 { 1955 if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE) 1956 qla4xxx_create_ipv4_iface(ha); 1957 1958 if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE) 1959 qla4xxx_create_ipv6_iface(ha); 1960 } 1961 1962 static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha) 1963 { 1964 if (ha->iface_ipv4) { 1965 iscsi_destroy_iface(ha->iface_ipv4); 1966 ha->iface_ipv4 = NULL; 1967 } 1968 } 1969 1970 static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha) 1971 { 1972 if (ha->iface_ipv6_0) { 1973 iscsi_destroy_iface(ha->iface_ipv6_0); 1974 ha->iface_ipv6_0 = NULL; 1975 } 1976 if (ha->iface_ipv6_1) { 1977 iscsi_destroy_iface(ha->iface_ipv6_1); 1978 ha->iface_ipv6_1 = NULL; 1979 } 1980 } 1981 1982 static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha) 1983 { 1984 qla4xxx_destroy_ipv4_iface(ha); 1985 qla4xxx_destroy_ipv6_iface(ha); 1986 } 1987 1988 static void qla4xxx_set_ipv6(struct scsi_qla_host *ha, 1989 struct iscsi_iface_param_info *iface_param, 1990 struct addr_ctrl_blk *init_fw_cb) 1991 { 1992 /* 1993 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg. 1994 * iface_num 1 is valid only for IPv6 Addr. 
1995 */ 1996 switch (iface_param->param) { 1997 case ISCSI_NET_PARAM_IPV6_ADDR: 1998 if (iface_param->iface_num & 0x1) 1999 /* IPv6 Addr 1 */ 2000 memcpy(init_fw_cb->ipv6_addr1, iface_param->value, 2001 sizeof(init_fw_cb->ipv6_addr1)); 2002 else 2003 /* IPv6 Addr 0 */ 2004 memcpy(init_fw_cb->ipv6_addr0, iface_param->value, 2005 sizeof(init_fw_cb->ipv6_addr0)); 2006 break; 2007 case ISCSI_NET_PARAM_IPV6_LINKLOCAL: 2008 if (iface_param->iface_num & 0x1) 2009 break; 2010 memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8], 2011 sizeof(init_fw_cb->ipv6_if_id)); 2012 break; 2013 case ISCSI_NET_PARAM_IPV6_ROUTER: 2014 if (iface_param->iface_num & 0x1) 2015 break; 2016 memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value, 2017 sizeof(init_fw_cb->ipv6_dflt_rtr_addr)); 2018 break; 2019 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: 2020 /* Autocfg applies to even interface */ 2021 if (iface_param->iface_num & 0x1) 2022 break; 2023 2024 if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE) 2025 init_fw_cb->ipv6_addtl_opts &= 2026 cpu_to_le16( 2027 ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE); 2028 else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE) 2029 init_fw_cb->ipv6_addtl_opts |= 2030 cpu_to_le16( 2031 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE); 2032 else 2033 ql4_printk(KERN_ERR, ha, 2034 "Invalid autocfg setting for IPv6 addr\n"); 2035 break; 2036 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: 2037 /* Autocfg applies to even interface */ 2038 if (iface_param->iface_num & 0x1) 2039 break; 2040 2041 if (iface_param->value[0] == 2042 ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE) 2043 init_fw_cb->ipv6_addtl_opts |= cpu_to_le16( 2044 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR); 2045 else if (iface_param->value[0] == 2046 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE) 2047 init_fw_cb->ipv6_addtl_opts &= cpu_to_le16( 2048 ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR); 2049 else 2050 ql4_printk(KERN_ERR, ha, 2051 "Invalid autocfg setting for IPv6 linklocal addr\n"); 2052 break; 2053 case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG: 2054 /* Autocfg applies to even interface */ 2055 if (iface_param->iface_num & 0x1) 2056 break; 2057 2058 if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE) 2059 memset(init_fw_cb->ipv6_dflt_rtr_addr, 0, 2060 sizeof(init_fw_cb->ipv6_dflt_rtr_addr)); 2061 break; 2062 case ISCSI_NET_PARAM_IFACE_ENABLE: 2063 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) { 2064 init_fw_cb->ipv6_opts |= 2065 cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE); 2066 qla4xxx_create_ipv6_iface(ha); 2067 } else { 2068 init_fw_cb->ipv6_opts &= 2069 cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE & 2070 0xFFFF); 2071 qla4xxx_destroy_ipv6_iface(ha); 2072 } 2073 break; 2074 case ISCSI_NET_PARAM_VLAN_TAG: 2075 if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag)) 2076 break; 2077 init_fw_cb->ipv6_vlan_tag = 2078 cpu_to_be16(*(uint16_t *)iface_param->value); 2079 break; 2080 case ISCSI_NET_PARAM_VLAN_ENABLED: 2081 if (iface_param->value[0] == ISCSI_VLAN_ENABLE) 2082 init_fw_cb->ipv6_opts |= 2083 cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE); 2084 else 2085 init_fw_cb->ipv6_opts &= 2086 cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE); 2087 break; 2088 case ISCSI_NET_PARAM_MTU: 2089 init_fw_cb->eth_mtu_size = 2090 cpu_to_le16(*(uint16_t *)iface_param->value); 2091 break; 2092 case ISCSI_NET_PARAM_PORT: 2093 /* Autocfg applies to even interface */ 2094 if (iface_param->iface_num & 0x1) 2095 break; 2096 2097 init_fw_cb->ipv6_port = 2098 cpu_to_le16(*(uint16_t *)iface_param->value); 2099 break; 2100 case 
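	/*
	 * Note on the TCP option cases that follow: the firmware stores
	 * these as "disable" bits, so a userspace value of
	 * ISCSI_NET_PARAM_DISABLE *sets* the bit and any other value
	 * clears it.  Schematically (SOME_DISABLE_BIT is a placeholder
	 * for the IPV6_TCPOPT_*_DISABLE constants used in the real
	 * cases):
	 *
	 *	if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
	 *		init_fw_cb->ipv6_tcp_opts |=
	 *			cpu_to_le16(SOME_DISABLE_BIT);
	 *	else
	 *		init_fw_cb->ipv6_tcp_opts &=
	 *			cpu_to_le16(~SOME_DISABLE_BIT);
	 */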
ISCSI_NET_PARAM_DELAYED_ACK_EN: 2101 if (iface_param->iface_num & 0x1) 2102 break; 2103 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2104 init_fw_cb->ipv6_tcp_opts |= 2105 cpu_to_le16(IPV6_TCPOPT_DELAYED_ACK_DISABLE); 2106 else 2107 init_fw_cb->ipv6_tcp_opts &= 2108 cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE & 2109 0xFFFF); 2110 break; 2111 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: 2112 if (iface_param->iface_num & 0x1) 2113 break; 2114 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2115 init_fw_cb->ipv6_tcp_opts |= 2116 cpu_to_le16(IPV6_TCPOPT_NAGLE_ALGO_DISABLE); 2117 else 2118 init_fw_cb->ipv6_tcp_opts &= 2119 cpu_to_le16(~IPV6_TCPOPT_NAGLE_ALGO_DISABLE); 2120 break; 2121 case ISCSI_NET_PARAM_TCP_WSF_DISABLE: 2122 if (iface_param->iface_num & 0x1) 2123 break; 2124 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2125 init_fw_cb->ipv6_tcp_opts |= 2126 cpu_to_le16(IPV6_TCPOPT_WINDOW_SCALE_DISABLE); 2127 else 2128 init_fw_cb->ipv6_tcp_opts &= 2129 cpu_to_le16(~IPV6_TCPOPT_WINDOW_SCALE_DISABLE); 2130 break; 2131 case ISCSI_NET_PARAM_TCP_WSF: 2132 if (iface_param->iface_num & 0x1) 2133 break; 2134 init_fw_cb->ipv6_tcp_wsf = iface_param->value[0]; 2135 break; 2136 case ISCSI_NET_PARAM_TCP_TIMER_SCALE: 2137 if (iface_param->iface_num & 0x1) 2138 break; 2139 init_fw_cb->ipv6_tcp_opts &= 2140 cpu_to_le16(~IPV6_TCPOPT_TIMER_SCALE); 2141 init_fw_cb->ipv6_tcp_opts |= 2142 cpu_to_le16((iface_param->value[0] << 1) & 2143 IPV6_TCPOPT_TIMER_SCALE); 2144 break; 2145 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: 2146 if (iface_param->iface_num & 0x1) 2147 break; 2148 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2149 init_fw_cb->ipv6_tcp_opts |= 2150 cpu_to_le16(IPV6_TCPOPT_TIMESTAMP_EN); 2151 else 2152 init_fw_cb->ipv6_tcp_opts &= 2153 cpu_to_le16(~IPV6_TCPOPT_TIMESTAMP_EN); 2154 break; 2155 case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: 2156 if (iface_param->iface_num & 0x1) 2157 break; 2158 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2159 init_fw_cb->ipv6_opts |= 2160 cpu_to_le16(IPV6_OPT_GRAT_NEIGHBOR_ADV_EN); 2161 else 2162 init_fw_cb->ipv6_opts &= 2163 cpu_to_le16(~IPV6_OPT_GRAT_NEIGHBOR_ADV_EN); 2164 break; 2165 case ISCSI_NET_PARAM_REDIRECT_EN: 2166 if (iface_param->iface_num & 0x1) 2167 break; 2168 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2169 init_fw_cb->ipv6_opts |= 2170 cpu_to_le16(IPV6_OPT_REDIRECT_EN); 2171 else 2172 init_fw_cb->ipv6_opts &= 2173 cpu_to_le16(~IPV6_OPT_REDIRECT_EN); 2174 break; 2175 case ISCSI_NET_PARAM_IPV6_MLD_EN: 2176 if (iface_param->iface_num & 0x1) 2177 break; 2178 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2179 init_fw_cb->ipv6_addtl_opts |= 2180 cpu_to_le16(IPV6_ADDOPT_MLD_EN); 2181 else 2182 init_fw_cb->ipv6_addtl_opts &= 2183 cpu_to_le16(~IPV6_ADDOPT_MLD_EN); 2184 break; 2185 case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: 2186 if (iface_param->iface_num & 0x1) 2187 break; 2188 init_fw_cb->ipv6_flow_lbl = 2189 cpu_to_le16(*(uint16_t *)iface_param->value); 2190 break; 2191 case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: 2192 if (iface_param->iface_num & 0x1) 2193 break; 2194 init_fw_cb->ipv6_traffic_class = iface_param->value[0]; 2195 break; 2196 case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: 2197 if (iface_param->iface_num & 0x1) 2198 break; 2199 init_fw_cb->ipv6_hop_limit = iface_param->value[0]; 2200 break; 2201 case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: 2202 if (iface_param->iface_num & 0x1) 2203 break; 2204 init_fw_cb->ipv6_nd_reach_time = 2205 cpu_to_le32(*(uint32_t *)iface_param->value); 2206 break; 2207 case 
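	/*
	 * The neighbour-discovery timer cases below receive their values
	 * as a raw byte buffer in iface_param->value; the 32-bit
	 * quantities are dereferenced and converted to the firmware's
	 * little-endian layout with cpu_to_le32() before being written
	 * into the local ACB copy.
	 */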
ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: 2208 if (iface_param->iface_num & 0x1) 2209 break; 2210 init_fw_cb->ipv6_nd_rexmit_timer = 2211 cpu_to_le32(*(uint32_t *)iface_param->value); 2212 break; 2213 case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: 2214 if (iface_param->iface_num & 0x1) 2215 break; 2216 init_fw_cb->ipv6_nd_stale_timeout = 2217 cpu_to_le32(*(uint32_t *)iface_param->value); 2218 break; 2219 case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: 2220 if (iface_param->iface_num & 0x1) 2221 break; 2222 init_fw_cb->ipv6_dup_addr_detect_count = iface_param->value[0]; 2223 break; 2224 case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: 2225 if (iface_param->iface_num & 0x1) 2226 break; 2227 init_fw_cb->ipv6_gw_advrt_mtu = 2228 cpu_to_le32(*(uint32_t *)iface_param->value); 2229 break; 2230 default: 2231 ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n", 2232 iface_param->param); 2233 break; 2234 } 2235 } 2236 2237 static void qla4xxx_set_ipv4(struct scsi_qla_host *ha, 2238 struct iscsi_iface_param_info *iface_param, 2239 struct addr_ctrl_blk *init_fw_cb) 2240 { 2241 switch (iface_param->param) { 2242 case ISCSI_NET_PARAM_IPV4_ADDR: 2243 memcpy(init_fw_cb->ipv4_addr, iface_param->value, 2244 sizeof(init_fw_cb->ipv4_addr)); 2245 break; 2246 case ISCSI_NET_PARAM_IPV4_SUBNET: 2247 memcpy(init_fw_cb->ipv4_subnet, iface_param->value, 2248 sizeof(init_fw_cb->ipv4_subnet)); 2249 break; 2250 case ISCSI_NET_PARAM_IPV4_GW: 2251 memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value, 2252 sizeof(init_fw_cb->ipv4_gw_addr)); 2253 break; 2254 case ISCSI_NET_PARAM_IPV4_BOOTPROTO: 2255 if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP) 2256 init_fw_cb->ipv4_tcp_opts |= 2257 cpu_to_le16(TCPOPT_DHCP_ENABLE); 2258 else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC) 2259 init_fw_cb->ipv4_tcp_opts &= 2260 cpu_to_le16(~TCPOPT_DHCP_ENABLE); 2261 else 2262 ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n"); 2263 break; 2264 case ISCSI_NET_PARAM_IFACE_ENABLE: 2265 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) { 2266 init_fw_cb->ipv4_ip_opts |= 2267 cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE); 2268 qla4xxx_create_ipv4_iface(ha); 2269 } else { 2270 init_fw_cb->ipv4_ip_opts &= 2271 cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE & 2272 0xFFFF); 2273 qla4xxx_destroy_ipv4_iface(ha); 2274 } 2275 break; 2276 case ISCSI_NET_PARAM_VLAN_TAG: 2277 if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag)) 2278 break; 2279 init_fw_cb->ipv4_vlan_tag = 2280 cpu_to_be16(*(uint16_t *)iface_param->value); 2281 break; 2282 case ISCSI_NET_PARAM_VLAN_ENABLED: 2283 if (iface_param->value[0] == ISCSI_VLAN_ENABLE) 2284 init_fw_cb->ipv4_ip_opts |= 2285 cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE); 2286 else 2287 init_fw_cb->ipv4_ip_opts &= 2288 cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE); 2289 break; 2290 case ISCSI_NET_PARAM_MTU: 2291 init_fw_cb->eth_mtu_size = 2292 cpu_to_le16(*(uint16_t *)iface_param->value); 2293 break; 2294 case ISCSI_NET_PARAM_PORT: 2295 init_fw_cb->ipv4_port = 2296 cpu_to_le16(*(uint16_t *)iface_param->value); 2297 break; 2298 case ISCSI_NET_PARAM_DELAYED_ACK_EN: 2299 if (iface_param->iface_num & 0x1) 2300 break; 2301 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2302 init_fw_cb->ipv4_tcp_opts |= 2303 cpu_to_le16(TCPOPT_DELAYED_ACK_DISABLE); 2304 else 2305 init_fw_cb->ipv4_tcp_opts &= 2306 cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE & 2307 0xFFFF); 2308 break; 2309 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: 2310 if (iface_param->iface_num & 0x1) 2311 break; 2312 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2313 init_fw_cb->ipv4_tcp_opts 
|= 2314 cpu_to_le16(TCPOPT_NAGLE_ALGO_DISABLE); 2315 else 2316 init_fw_cb->ipv4_tcp_opts &= 2317 cpu_to_le16(~TCPOPT_NAGLE_ALGO_DISABLE); 2318 break; 2319 case ISCSI_NET_PARAM_TCP_WSF_DISABLE: 2320 if (iface_param->iface_num & 0x1) 2321 break; 2322 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2323 init_fw_cb->ipv4_tcp_opts |= 2324 cpu_to_le16(TCPOPT_WINDOW_SCALE_DISABLE); 2325 else 2326 init_fw_cb->ipv4_tcp_opts &= 2327 cpu_to_le16(~TCPOPT_WINDOW_SCALE_DISABLE); 2328 break; 2329 case ISCSI_NET_PARAM_TCP_WSF: 2330 if (iface_param->iface_num & 0x1) 2331 break; 2332 init_fw_cb->ipv4_tcp_wsf = iface_param->value[0]; 2333 break; 2334 case ISCSI_NET_PARAM_TCP_TIMER_SCALE: 2335 if (iface_param->iface_num & 0x1) 2336 break; 2337 init_fw_cb->ipv4_tcp_opts &= cpu_to_le16(~TCPOPT_TIMER_SCALE); 2338 init_fw_cb->ipv4_tcp_opts |= 2339 cpu_to_le16((iface_param->value[0] << 1) & 2340 TCPOPT_TIMER_SCALE); 2341 break; 2342 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: 2343 if (iface_param->iface_num & 0x1) 2344 break; 2345 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2346 init_fw_cb->ipv4_tcp_opts |= 2347 cpu_to_le16(TCPOPT_TIMESTAMP_ENABLE); 2348 else 2349 init_fw_cb->ipv4_tcp_opts &= 2350 cpu_to_le16(~TCPOPT_TIMESTAMP_ENABLE); 2351 break; 2352 case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: 2353 if (iface_param->iface_num & 0x1) 2354 break; 2355 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2356 init_fw_cb->ipv4_tcp_opts |= 2357 cpu_to_le16(TCPOPT_DNS_SERVER_IP_EN); 2358 else 2359 init_fw_cb->ipv4_tcp_opts &= 2360 cpu_to_le16(~TCPOPT_DNS_SERVER_IP_EN); 2361 break; 2362 case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: 2363 if (iface_param->iface_num & 0x1) 2364 break; 2365 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2366 init_fw_cb->ipv4_tcp_opts |= 2367 cpu_to_le16(TCPOPT_SLP_DA_INFO_EN); 2368 else 2369 init_fw_cb->ipv4_tcp_opts &= 2370 cpu_to_le16(~TCPOPT_SLP_DA_INFO_EN); 2371 break; 2372 case ISCSI_NET_PARAM_IPV4_TOS_EN: 2373 if (iface_param->iface_num & 0x1) 2374 break; 2375 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2376 init_fw_cb->ipv4_ip_opts |= 2377 cpu_to_le16(IPOPT_IPV4_TOS_EN); 2378 else 2379 init_fw_cb->ipv4_ip_opts &= 2380 cpu_to_le16(~IPOPT_IPV4_TOS_EN); 2381 break; 2382 case ISCSI_NET_PARAM_IPV4_TOS: 2383 if (iface_param->iface_num & 0x1) 2384 break; 2385 init_fw_cb->ipv4_tos = iface_param->value[0]; 2386 break; 2387 case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: 2388 if (iface_param->iface_num & 0x1) 2389 break; 2390 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2391 init_fw_cb->ipv4_ip_opts |= 2392 cpu_to_le16(IPOPT_GRAT_ARP_EN); 2393 else 2394 init_fw_cb->ipv4_ip_opts &= 2395 cpu_to_le16(~IPOPT_GRAT_ARP_EN); 2396 break; 2397 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: 2398 if (iface_param->iface_num & 0x1) 2399 break; 2400 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2401 init_fw_cb->ipv4_ip_opts |= 2402 cpu_to_le16(IPOPT_ALT_CID_EN); 2403 else 2404 init_fw_cb->ipv4_ip_opts &= 2405 cpu_to_le16(~IPOPT_ALT_CID_EN); 2406 break; 2407 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: 2408 if (iface_param->iface_num & 0x1) 2409 break; 2410 memcpy(init_fw_cb->ipv4_dhcp_alt_cid, iface_param->value, 2411 (sizeof(init_fw_cb->ipv4_dhcp_alt_cid) - 1)); 2412 init_fw_cb->ipv4_dhcp_alt_cid_len = 2413 strlen(init_fw_cb->ipv4_dhcp_alt_cid); 2414 break; 2415 case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: 2416 if (iface_param->iface_num & 0x1) 2417 break; 2418 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2419 init_fw_cb->ipv4_ip_opts |= 2420 
cpu_to_le16(IPOPT_REQ_VID_EN); 2421 else 2422 init_fw_cb->ipv4_ip_opts &= 2423 cpu_to_le16(~IPOPT_REQ_VID_EN); 2424 break; 2425 case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: 2426 if (iface_param->iface_num & 0x1) 2427 break; 2428 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2429 init_fw_cb->ipv4_ip_opts |= 2430 cpu_to_le16(IPOPT_USE_VID_EN); 2431 else 2432 init_fw_cb->ipv4_ip_opts &= 2433 cpu_to_le16(~IPOPT_USE_VID_EN); 2434 break; 2435 case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: 2436 if (iface_param->iface_num & 0x1) 2437 break; 2438 memcpy(init_fw_cb->ipv4_dhcp_vid, iface_param->value, 2439 (sizeof(init_fw_cb->ipv4_dhcp_vid) - 1)); 2440 init_fw_cb->ipv4_dhcp_vid_len = 2441 strlen(init_fw_cb->ipv4_dhcp_vid); 2442 break; 2443 case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: 2444 if (iface_param->iface_num & 0x1) 2445 break; 2446 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2447 init_fw_cb->ipv4_ip_opts |= 2448 cpu_to_le16(IPOPT_LEARN_IQN_EN); 2449 else 2450 init_fw_cb->ipv4_ip_opts &= 2451 cpu_to_le16(~IPOPT_LEARN_IQN_EN); 2452 break; 2453 case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: 2454 if (iface_param->iface_num & 0x1) 2455 break; 2456 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2457 init_fw_cb->ipv4_ip_opts |= 2458 cpu_to_le16(IPOPT_FRAGMENTATION_DISABLE); 2459 else 2460 init_fw_cb->ipv4_ip_opts &= 2461 cpu_to_le16(~IPOPT_FRAGMENTATION_DISABLE); 2462 break; 2463 case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: 2464 if (iface_param->iface_num & 0x1) 2465 break; 2466 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2467 init_fw_cb->ipv4_ip_opts |= 2468 cpu_to_le16(IPOPT_IN_FORWARD_EN); 2469 else 2470 init_fw_cb->ipv4_ip_opts &= 2471 cpu_to_le16(~IPOPT_IN_FORWARD_EN); 2472 break; 2473 case ISCSI_NET_PARAM_REDIRECT_EN: 2474 if (iface_param->iface_num & 0x1) 2475 break; 2476 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2477 init_fw_cb->ipv4_ip_opts |= 2478 cpu_to_le16(IPOPT_ARP_REDIRECT_EN); 2479 else 2480 init_fw_cb->ipv4_ip_opts &= 2481 cpu_to_le16(~IPOPT_ARP_REDIRECT_EN); 2482 break; 2483 case ISCSI_NET_PARAM_IPV4_TTL: 2484 if (iface_param->iface_num & 0x1) 2485 break; 2486 init_fw_cb->ipv4_ttl = iface_param->value[0]; 2487 break; 2488 default: 2489 ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n", 2490 iface_param->param); 2491 break; 2492 } 2493 } 2494 2495 static void qla4xxx_set_iscsi_param(struct scsi_qla_host *ha, 2496 struct iscsi_iface_param_info *iface_param, 2497 struct addr_ctrl_blk *init_fw_cb) 2498 { 2499 switch (iface_param->param) { 2500 case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: 2501 if (iface_param->iface_num & 0x1) 2502 break; 2503 init_fw_cb->def_timeout = 2504 cpu_to_le16(*(uint16_t *)iface_param->value); 2505 break; 2506 case ISCSI_IFACE_PARAM_HDRDGST_EN: 2507 if (iface_param->iface_num & 0x1) 2508 break; 2509 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2510 init_fw_cb->iscsi_opts |= 2511 cpu_to_le16(ISCSIOPTS_HEADER_DIGEST_EN); 2512 else 2513 init_fw_cb->iscsi_opts &= 2514 cpu_to_le16(~ISCSIOPTS_HEADER_DIGEST_EN); 2515 break; 2516 case ISCSI_IFACE_PARAM_DATADGST_EN: 2517 if (iface_param->iface_num & 0x1) 2518 break; 2519 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2520 init_fw_cb->iscsi_opts |= 2521 cpu_to_le16(ISCSIOPTS_DATA_DIGEST_EN); 2522 else 2523 init_fw_cb->iscsi_opts &= 2524 cpu_to_le16(~ISCSIOPTS_DATA_DIGEST_EN); 2525 break; 2526 case ISCSI_IFACE_PARAM_IMM_DATA_EN: 2527 if (iface_param->iface_num & 0x1) 2528 break; 2529 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2530 init_fw_cb->iscsi_opts |= 2531 
cpu_to_le16(ISCSIOPTS_IMMEDIATE_DATA_EN); 2532 else 2533 init_fw_cb->iscsi_opts &= 2534 cpu_to_le16(~ISCSIOPTS_IMMEDIATE_DATA_EN); 2535 break; 2536 case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: 2537 if (iface_param->iface_num & 0x1) 2538 break; 2539 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2540 init_fw_cb->iscsi_opts |= 2541 cpu_to_le16(ISCSIOPTS_INITIAL_R2T_EN); 2542 else 2543 init_fw_cb->iscsi_opts &= 2544 cpu_to_le16(~ISCSIOPTS_INITIAL_R2T_EN); 2545 break; 2546 case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: 2547 if (iface_param->iface_num & 0x1) 2548 break; 2549 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2550 init_fw_cb->iscsi_opts |= 2551 cpu_to_le16(ISCSIOPTS_DATA_SEQ_INORDER_EN); 2552 else 2553 init_fw_cb->iscsi_opts &= 2554 cpu_to_le16(~ISCSIOPTS_DATA_SEQ_INORDER_EN); 2555 break; 2556 case ISCSI_IFACE_PARAM_PDU_INORDER_EN: 2557 if (iface_param->iface_num & 0x1) 2558 break; 2559 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2560 init_fw_cb->iscsi_opts |= 2561 cpu_to_le16(ISCSIOPTS_DATA_PDU_INORDER_EN); 2562 else 2563 init_fw_cb->iscsi_opts &= 2564 cpu_to_le16(~ISCSIOPTS_DATA_PDU_INORDER_EN); 2565 break; 2566 case ISCSI_IFACE_PARAM_ERL: 2567 if (iface_param->iface_num & 0x1) 2568 break; 2569 init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_ERL); 2570 init_fw_cb->iscsi_opts |= cpu_to_le16(iface_param->value[0] & 2571 ISCSIOPTS_ERL); 2572 break; 2573 case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: 2574 if (iface_param->iface_num & 0x1) 2575 break; 2576 init_fw_cb->iscsi_max_pdu_size = 2577 cpu_to_le32(*(uint32_t *)iface_param->value) / 2578 BYTE_UNITS; 2579 break; 2580 case ISCSI_IFACE_PARAM_FIRST_BURST: 2581 if (iface_param->iface_num & 0x1) 2582 break; 2583 init_fw_cb->iscsi_fburst_len = 2584 cpu_to_le32(*(uint32_t *)iface_param->value) / 2585 BYTE_UNITS; 2586 break; 2587 case ISCSI_IFACE_PARAM_MAX_R2T: 2588 if (iface_param->iface_num & 0x1) 2589 break; 2590 init_fw_cb->iscsi_max_outstnd_r2t = 2591 cpu_to_le16(*(uint16_t *)iface_param->value); 2592 break; 2593 case ISCSI_IFACE_PARAM_MAX_BURST: 2594 if (iface_param->iface_num & 0x1) 2595 break; 2596 init_fw_cb->iscsi_max_burst_len = 2597 cpu_to_le32(*(uint32_t *)iface_param->value) / 2598 BYTE_UNITS; 2599 break; 2600 case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: 2601 if (iface_param->iface_num & 0x1) 2602 break; 2603 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2604 init_fw_cb->iscsi_opts |= 2605 cpu_to_le16(ISCSIOPTS_CHAP_AUTH_EN); 2606 else 2607 init_fw_cb->iscsi_opts &= 2608 cpu_to_le16(~ISCSIOPTS_CHAP_AUTH_EN); 2609 break; 2610 case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: 2611 if (iface_param->iface_num & 0x1) 2612 break; 2613 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2614 init_fw_cb->iscsi_opts |= 2615 cpu_to_le16(ISCSIOPTS_BIDI_CHAP_EN); 2616 else 2617 init_fw_cb->iscsi_opts &= 2618 cpu_to_le16(~ISCSIOPTS_BIDI_CHAP_EN); 2619 break; 2620 case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: 2621 if (iface_param->iface_num & 0x1) 2622 break; 2623 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2624 init_fw_cb->iscsi_opts |= 2625 cpu_to_le16(ISCSIOPTS_DISCOVERY_AUTH_EN); 2626 else 2627 init_fw_cb->iscsi_opts &= 2628 cpu_to_le16(~ISCSIOPTS_DISCOVERY_AUTH_EN); 2629 break; 2630 case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: 2631 if (iface_param->iface_num & 0x1) 2632 break; 2633 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2634 init_fw_cb->iscsi_opts |= 2635 cpu_to_le16(ISCSIOPTS_DISCOVERY_LOGOUT_EN); 2636 else 2637 init_fw_cb->iscsi_opts &= 2638 cpu_to_le16(~ISCSIOPTS_DISCOVERY_LOGOUT_EN); 2639 break; 2640 case 
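	/*
	 * The MaxRecvDataSegmentLength, FirstBurstLength and
	 * MaxBurstLength cases above store their values scaled down by
	 * BYTE_UNITS; this mirrors qla4xxx_get_iface_param(), which
	 * multiplies the firmware values by BYTE_UNITS when reporting
	 * them back to the iSCSI transport class.
	 */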
ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: 2641 if (iface_param->iface_num & 0x1) 2642 break; 2643 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2644 init_fw_cb->iscsi_opts |= 2645 cpu_to_le16(ISCSIOPTS_STRICT_LOGIN_COMP_EN); 2646 else 2647 init_fw_cb->iscsi_opts &= 2648 cpu_to_le16(~ISCSIOPTS_STRICT_LOGIN_COMP_EN); 2649 break; 2650 default: 2651 ql4_printk(KERN_ERR, ha, "Unknown iscsi param = %d\n", 2652 iface_param->param); 2653 break; 2654 } 2655 } 2656 2657 static void 2658 qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb) 2659 { 2660 struct addr_ctrl_blk_def *acb; 2661 acb = (struct addr_ctrl_blk_def *)init_fw_cb; 2662 memset(acb->reserved1, 0, sizeof(acb->reserved1)); 2663 memset(acb->reserved2, 0, sizeof(acb->reserved2)); 2664 memset(acb->reserved3, 0, sizeof(acb->reserved3)); 2665 memset(acb->reserved4, 0, sizeof(acb->reserved4)); 2666 memset(acb->reserved5, 0, sizeof(acb->reserved5)); 2667 memset(acb->reserved6, 0, sizeof(acb->reserved6)); 2668 memset(acb->reserved7, 0, sizeof(acb->reserved7)); 2669 memset(acb->reserved8, 0, sizeof(acb->reserved8)); 2670 memset(acb->reserved9, 0, sizeof(acb->reserved9)); 2671 memset(acb->reserved10, 0, sizeof(acb->reserved10)); 2672 memset(acb->reserved11, 0, sizeof(acb->reserved11)); 2673 memset(acb->reserved12, 0, sizeof(acb->reserved12)); 2674 memset(acb->reserved13, 0, sizeof(acb->reserved13)); 2675 memset(acb->reserved14, 0, sizeof(acb->reserved14)); 2676 memset(acb->reserved15, 0, sizeof(acb->reserved15)); 2677 } 2678 2679 static int 2680 qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len) 2681 { 2682 struct scsi_qla_host *ha = to_qla_host(shost); 2683 int rval = 0; 2684 struct iscsi_iface_param_info *iface_param = NULL; 2685 struct addr_ctrl_blk *init_fw_cb = NULL; 2686 dma_addr_t init_fw_cb_dma; 2687 uint32_t mbox_cmd[MBOX_REG_COUNT]; 2688 uint32_t mbox_sts[MBOX_REG_COUNT]; 2689 uint32_t rem = len; 2690 struct nlattr *attr; 2691 2692 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, 2693 sizeof(struct addr_ctrl_blk), 2694 &init_fw_cb_dma, GFP_KERNEL); 2695 if (!init_fw_cb) { 2696 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n", 2697 __func__); 2698 return -ENOMEM; 2699 } 2700 2701 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk)); 2702 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 2703 memset(&mbox_sts, 0, sizeof(mbox_sts)); 2704 2705 if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) { 2706 ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__); 2707 rval = -EIO; 2708 goto exit_init_fw_cb; 2709 } 2710 2711 nla_for_each_attr(attr, data, len, rem) { 2712 iface_param = nla_data(attr); 2713 2714 if (iface_param->param_type == ISCSI_NET_PARAM) { 2715 switch (iface_param->iface_type) { 2716 case ISCSI_IFACE_TYPE_IPV4: 2717 switch (iface_param->iface_num) { 2718 case 0: 2719 qla4xxx_set_ipv4(ha, iface_param, 2720 init_fw_cb); 2721 break; 2722 default: 2723 /* Cannot have more than one IPv4 interface */ 2724 ql4_printk(KERN_ERR, ha, 2725 "Invalid IPv4 iface number = %d\n", 2726 iface_param->iface_num); 2727 break; 2728 } 2729 break; 2730 case ISCSI_IFACE_TYPE_IPV6: 2731 switch (iface_param->iface_num) { 2732 case 0: 2733 case 1: 2734 qla4xxx_set_ipv6(ha, iface_param, 2735 init_fw_cb); 2736 break; 2737 default: 2738 /* Cannot have more than two IPv6 interface */ 2739 ql4_printk(KERN_ERR, ha, 2740 "Invalid IPv6 iface number = %d\n", 2741 iface_param->iface_num); 2742 break; 2743 } 2744 break; 2745 default: 2746 ql4_printk(KERN_ERR, ha, 2747 "Invalid iface type\n"); 2748 break; 2749 } 2750 
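			/*
			 * Each nlattr in this loop carries one struct
			 * iscsi_iface_param_info.  Network parameters were
			 * dispatched above by iface_type and iface_num (one
			 * IPv4 interface, up to two IPv6 interfaces); iSCSI
			 * operational parameters are applied to the same
			 * local ACB copy by qla4xxx_set_iscsi_param() in the
			 * branch below.
			 */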
} else if (iface_param->param_type == ISCSI_IFACE_PARAM) { 2751 qla4xxx_set_iscsi_param(ha, iface_param, 2752 init_fw_cb); 2753 } else { 2754 continue; 2755 } 2756 } 2757 2758 init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A); 2759 2760 rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB, 2761 sizeof(struct addr_ctrl_blk), 2762 FLASH_OPT_RMW_COMMIT); 2763 if (rval != QLA_SUCCESS) { 2764 ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n", 2765 __func__); 2766 rval = -EIO; 2767 goto exit_init_fw_cb; 2768 } 2769 2770 rval = qla4xxx_disable_acb(ha); 2771 if (rval != QLA_SUCCESS) { 2772 ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n", 2773 __func__); 2774 rval = -EIO; 2775 goto exit_init_fw_cb; 2776 } 2777 2778 wait_for_completion_timeout(&ha->disable_acb_comp, 2779 DISABLE_ACB_TOV * HZ); 2780 2781 qla4xxx_initcb_to_acb(init_fw_cb); 2782 2783 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma); 2784 if (rval != QLA_SUCCESS) { 2785 ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n", 2786 __func__); 2787 rval = -EIO; 2788 goto exit_init_fw_cb; 2789 } 2790 2791 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk)); 2792 qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb, 2793 init_fw_cb_dma); 2794 2795 exit_init_fw_cb: 2796 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), 2797 init_fw_cb, init_fw_cb_dma); 2798 2799 return rval; 2800 } 2801 2802 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess, 2803 enum iscsi_param param, char *buf) 2804 { 2805 struct iscsi_session *sess = cls_sess->dd_data; 2806 struct ddb_entry *ddb_entry = sess->dd_data; 2807 struct scsi_qla_host *ha = ddb_entry->ha; 2808 struct iscsi_cls_conn *cls_conn = ddb_entry->conn; 2809 struct ql4_chap_table chap_tbl; 2810 int rval, len; 2811 uint16_t idx; 2812 2813 memset(&chap_tbl, 0, sizeof(chap_tbl)); 2814 switch (param) { 2815 case ISCSI_PARAM_CHAP_IN_IDX: 2816 rval = qla4xxx_get_chap_index(ha, sess->username_in, 2817 sess->password_in, BIDI_CHAP, 2818 &idx); 2819 if (rval) 2820 len = sprintf(buf, "\n"); 2821 else 2822 len = sprintf(buf, "%hu\n", idx); 2823 break; 2824 case ISCSI_PARAM_CHAP_OUT_IDX: 2825 if (ddb_entry->ddb_type == FLASH_DDB) { 2826 if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) { 2827 idx = ddb_entry->chap_tbl_idx; 2828 rval = QLA_SUCCESS; 2829 } else { 2830 rval = QLA_ERROR; 2831 } 2832 } else { 2833 rval = qla4xxx_get_chap_index(ha, sess->username, 2834 sess->password, 2835 LOCAL_CHAP, &idx); 2836 } 2837 if (rval) 2838 len = sprintf(buf, "\n"); 2839 else 2840 len = sprintf(buf, "%hu\n", idx); 2841 break; 2842 case ISCSI_PARAM_USERNAME: 2843 case ISCSI_PARAM_PASSWORD: 2844 /* First, populate session username and password for FLASH DDB, 2845 * if not already done. This happens when session login fails 2846 * for a FLASH DDB. 
2847 */ 2848 if (ddb_entry->ddb_type == FLASH_DDB && 2849 ddb_entry->chap_tbl_idx != INVALID_ENTRY && 2850 !sess->username && !sess->password) { 2851 idx = ddb_entry->chap_tbl_idx; 2852 rval = qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, 2853 chap_tbl.secret, 2854 idx); 2855 if (!rval) { 2856 iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, 2857 (char *)chap_tbl.name, 2858 strlen((char *)chap_tbl.name)); 2859 iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, 2860 (char *)chap_tbl.secret, 2861 chap_tbl.secret_len); 2862 } 2863 } 2864 /* allow fall-through */ 2865 default: 2866 return iscsi_session_get_param(cls_sess, param, buf); 2867 } 2868 2869 return len; 2870 } 2871 2872 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn, 2873 enum iscsi_param param, char *buf) 2874 { 2875 struct iscsi_conn *conn; 2876 struct qla_conn *qla_conn; 2877 struct sockaddr *dst_addr; 2878 2879 conn = cls_conn->dd_data; 2880 qla_conn = conn->dd_data; 2881 dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr; 2882 2883 switch (param) { 2884 case ISCSI_PARAM_CONN_PORT: 2885 case ISCSI_PARAM_CONN_ADDRESS: 2886 return iscsi_conn_get_addr_param((struct sockaddr_storage *) 2887 dst_addr, param, buf); 2888 default: 2889 return iscsi_conn_get_param(cls_conn, param, buf); 2890 } 2891 } 2892 2893 int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index) 2894 { 2895 uint32_t mbx_sts = 0; 2896 uint16_t tmp_ddb_index; 2897 int ret; 2898 2899 get_ddb_index: 2900 tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES); 2901 2902 if (tmp_ddb_index >= MAX_DDB_ENTRIES) { 2903 DEBUG2(ql4_printk(KERN_INFO, ha, 2904 "Free DDB index not available\n")); 2905 ret = QLA_ERROR; 2906 goto exit_get_ddb_index; 2907 } 2908 2909 if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map)) 2910 goto get_ddb_index; 2911 2912 DEBUG2(ql4_printk(KERN_INFO, ha, 2913 "Found a free DDB index at %d\n", tmp_ddb_index)); 2914 ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts); 2915 if (ret == QLA_ERROR) { 2916 if (mbx_sts == MBOX_STS_COMMAND_ERROR) { 2917 ql4_printk(KERN_INFO, ha, 2918 "DDB index = %d not available trying next\n", 2919 tmp_ddb_index); 2920 goto get_ddb_index; 2921 } 2922 DEBUG2(ql4_printk(KERN_INFO, ha, 2923 "Free FW DDB not available\n")); 2924 } 2925 2926 *ddb_index = tmp_ddb_index; 2927 2928 exit_get_ddb_index: 2929 return ret; 2930 } 2931 2932 static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha, 2933 struct ddb_entry *ddb_entry, 2934 char *existing_ipaddr, 2935 char *user_ipaddr) 2936 { 2937 uint8_t dst_ipaddr[IPv6_ADDR_LEN]; 2938 char formatted_ipaddr[DDB_IPADDR_LEN]; 2939 int status = QLA_SUCCESS, ret = 0; 2940 2941 if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) { 2942 ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr, 2943 '\0', NULL); 2944 if (ret == 0) { 2945 status = QLA_ERROR; 2946 goto out_match; 2947 } 2948 ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr); 2949 } else { 2950 ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr, 2951 '\0', NULL); 2952 if (ret == 0) { 2953 status = QLA_ERROR; 2954 goto out_match; 2955 } 2956 ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr); 2957 } 2958 2959 if (strcmp(existing_ipaddr, formatted_ipaddr)) 2960 status = QLA_ERROR; 2961 2962 out_match: 2963 return status; 2964 } 2965 2966 static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha, 2967 struct iscsi_cls_conn *cls_conn) 2968 { 2969 int idx = 0, max_ddbs, rval; 2970 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn); 
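	/*
	 * Guard against logging in to a target the firmware already has a
	 * session with: walk every flash DDB and compare the target IQN,
	 * the portal address (normalized via qla4xxx_match_ipaddress()
	 * above) and the persistent port of the new connection against
	 * each entry.
	 */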
2971 struct iscsi_session *sess, *existing_sess; 2972 struct iscsi_conn *conn, *existing_conn; 2973 struct ddb_entry *ddb_entry; 2974 2975 sess = cls_sess->dd_data; 2976 conn = cls_conn->dd_data; 2977 2978 if (sess->targetname == NULL || 2979 conn->persistent_address == NULL || 2980 conn->persistent_port == 0) 2981 return QLA_ERROR; 2982 2983 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : 2984 MAX_DEV_DB_ENTRIES; 2985 2986 for (idx = 0; idx < max_ddbs; idx++) { 2987 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 2988 if (ddb_entry == NULL) 2989 continue; 2990 2991 if (ddb_entry->ddb_type != FLASH_DDB) 2992 continue; 2993 2994 existing_sess = ddb_entry->sess->dd_data; 2995 existing_conn = ddb_entry->conn->dd_data; 2996 2997 if (existing_sess->targetname == NULL || 2998 existing_conn->persistent_address == NULL || 2999 existing_conn->persistent_port == 0) 3000 continue; 3001 3002 DEBUG2(ql4_printk(KERN_INFO, ha, 3003 "IQN = %s User IQN = %s\n", 3004 existing_sess->targetname, 3005 sess->targetname)); 3006 3007 DEBUG2(ql4_printk(KERN_INFO, ha, 3008 "IP = %s User IP = %s\n", 3009 existing_conn->persistent_address, 3010 conn->persistent_address)); 3011 3012 DEBUG2(ql4_printk(KERN_INFO, ha, 3013 "Port = %d User Port = %d\n", 3014 existing_conn->persistent_port, 3015 conn->persistent_port)); 3016 3017 if (strcmp(existing_sess->targetname, sess->targetname)) 3018 continue; 3019 rval = qla4xxx_match_ipaddress(ha, ddb_entry, 3020 existing_conn->persistent_address, 3021 conn->persistent_address); 3022 if (rval == QLA_ERROR) 3023 continue; 3024 if (existing_conn->persistent_port != conn->persistent_port) 3025 continue; 3026 break; 3027 } 3028 3029 if (idx == max_ddbs) 3030 return QLA_ERROR; 3031 3032 DEBUG2(ql4_printk(KERN_INFO, ha, 3033 "Match found in fwdb sessions\n")); 3034 return QLA_SUCCESS; 3035 } 3036 3037 static struct iscsi_cls_session * 3038 qla4xxx_session_create(struct iscsi_endpoint *ep, 3039 uint16_t cmds_max, uint16_t qdepth, 3040 uint32_t initial_cmdsn) 3041 { 3042 struct iscsi_cls_session *cls_sess; 3043 struct scsi_qla_host *ha; 3044 struct qla_endpoint *qla_ep; 3045 struct ddb_entry *ddb_entry; 3046 uint16_t ddb_index; 3047 struct iscsi_session *sess; 3048 struct sockaddr *dst_addr; 3049 int ret; 3050 3051 if (!ep) { 3052 printk(KERN_ERR "qla4xxx: missing ep.\n"); 3053 return NULL; 3054 } 3055 3056 qla_ep = ep->dd_data; 3057 dst_addr = (struct sockaddr *)&qla_ep->dst_addr; 3058 ha = to_qla_host(qla_ep->host); 3059 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, 3060 ha->host_no)); 3061 3062 ret = qla4xxx_get_ddb_index(ha, &ddb_index); 3063 if (ret == QLA_ERROR) 3064 return NULL; 3065 3066 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host, 3067 cmds_max, sizeof(struct ddb_entry), 3068 sizeof(struct ql4_task_data), 3069 initial_cmdsn, ddb_index); 3070 if (!cls_sess) 3071 return NULL; 3072 3073 sess = cls_sess->dd_data; 3074 ddb_entry = sess->dd_data; 3075 ddb_entry->fw_ddb_index = ddb_index; 3076 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE; 3077 ddb_entry->ha = ha; 3078 ddb_entry->sess = cls_sess; 3079 ddb_entry->unblock_sess = qla4xxx_unblock_ddb; 3080 ddb_entry->ddb_change = qla4xxx_ddb_change; 3081 clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags); 3082 cls_sess->recovery_tmo = ql4xsess_recovery_tmo; 3083 ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry; 3084 ha->tot_ddbs++; 3085 3086 return cls_sess; 3087 } 3088 3089 static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess) 3090 { 3091 
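	/*
	 * Teardown sequence used here: poll the firmware DDB state for up
	 * to LOGOUT_TOV seconds until the device reports no active
	 * connection or a failed session, then clear the firmware DDB,
	 * free the driver's ddb_entry under hardware_lock and finally
	 * tear down the iSCSI class session.
	 */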
struct iscsi_session *sess; 3092 struct ddb_entry *ddb_entry; 3093 struct scsi_qla_host *ha; 3094 unsigned long flags, wtime; 3095 struct dev_db_entry *fw_ddb_entry = NULL; 3096 dma_addr_t fw_ddb_entry_dma; 3097 uint32_t ddb_state; 3098 int ret; 3099 3100 sess = cls_sess->dd_data; 3101 ddb_entry = sess->dd_data; 3102 ha = ddb_entry->ha; 3103 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, 3104 ha->host_no)); 3105 3106 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3107 &fw_ddb_entry_dma, GFP_KERNEL); 3108 if (!fw_ddb_entry) { 3109 ql4_printk(KERN_ERR, ha, 3110 "%s: Unable to allocate dma buffer\n", __func__); 3111 goto destroy_session; 3112 } 3113 3114 wtime = jiffies + (HZ * LOGOUT_TOV); 3115 do { 3116 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 3117 fw_ddb_entry, fw_ddb_entry_dma, 3118 NULL, NULL, &ddb_state, NULL, 3119 NULL, NULL); 3120 if (ret == QLA_ERROR) 3121 goto destroy_session; 3122 3123 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || 3124 (ddb_state == DDB_DS_SESSION_FAILED)) 3125 goto destroy_session; 3126 3127 schedule_timeout_uninterruptible(HZ); 3128 } while ((time_after(wtime, jiffies))); 3129 3130 destroy_session: 3131 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); 3132 if (test_and_clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags)) 3133 clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map); 3134 spin_lock_irqsave(&ha->hardware_lock, flags); 3135 qla4xxx_free_ddb(ha, ddb_entry); 3136 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3137 3138 iscsi_session_teardown(cls_sess); 3139 3140 if (fw_ddb_entry) 3141 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3142 fw_ddb_entry, fw_ddb_entry_dma); 3143 } 3144 3145 static struct iscsi_cls_conn * 3146 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx) 3147 { 3148 struct iscsi_cls_conn *cls_conn; 3149 struct iscsi_session *sess; 3150 struct ddb_entry *ddb_entry; 3151 struct scsi_qla_host *ha; 3152 3153 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), 3154 conn_idx); 3155 if (!cls_conn) { 3156 pr_info("%s: Can not create connection for conn_idx = %u\n", 3157 __func__, conn_idx); 3158 return NULL; 3159 } 3160 3161 sess = cls_sess->dd_data; 3162 ddb_entry = sess->dd_data; 3163 ddb_entry->conn = cls_conn; 3164 3165 ha = ddb_entry->ha; 3166 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: conn_idx = %u\n", __func__, 3167 conn_idx)); 3168 return cls_conn; 3169 } 3170 3171 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session, 3172 struct iscsi_cls_conn *cls_conn, 3173 uint64_t transport_fd, int is_leading) 3174 { 3175 struct iscsi_conn *conn; 3176 struct qla_conn *qla_conn; 3177 struct iscsi_endpoint *ep; 3178 struct ddb_entry *ddb_entry; 3179 struct scsi_qla_host *ha; 3180 struct iscsi_session *sess; 3181 3182 sess = cls_session->dd_data; 3183 ddb_entry = sess->dd_data; 3184 ha = ddb_entry->ha; 3185 3186 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__, 3187 cls_session->sid, cls_conn->cid)); 3188 3189 if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) 3190 return -EINVAL; 3191 ep = iscsi_lookup_endpoint(transport_fd); 3192 conn = cls_conn->dd_data; 3193 qla_conn = conn->dd_data; 3194 qla_conn->qla_ep = ep->dd_data; 3195 return 0; 3196 } 3197 3198 static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn) 3199 { 3200 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn); 3201 struct iscsi_session *sess; 3202 struct ddb_entry *ddb_entry; 3203 struct scsi_qla_host *ha; 3204 struct 
dev_db_entry *fw_ddb_entry = NULL;
	dma_addr_t fw_ddb_entry_dma;
	uint32_t mbx_sts = 0;
	int ret = 0;
	int status = QLA_SUCCESS;

	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__,
			  cls_sess->sid, cls_conn->cid));

	/*
	 * Check if we have a matching FW DDB; if so, do not log in to this
	 * target again, since that could cause the target to log out the
	 * previous connection.
	 */
	ret = qla4xxx_match_fwdb_session(ha, cls_conn);
	if (ret == QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha,
			   "Session already exists in FW.\n");
		ret = -EEXIST;
		goto exit_conn_start;
	}

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (!fw_ddb_entry) {
		ql4_printk(KERN_ERR, ha,
			   "%s: Unable to allocate dma buffer\n", __func__);
		ret = -ENOMEM;
		goto exit_conn_start;
	}

	ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
	if (ret) {
		/*
		 * If iscsid is stopped and started then there is no need to
		 * set the params again, since the ddb state will already be
		 * active and FW does not allow set ddb to an active session.
		 */
		if (mbx_sts)
			if (ddb_entry->fw_ddb_device_state ==
						DDB_DS_SESSION_ACTIVE) {
				ddb_entry->unblock_sess(ddb_entry->sess);
				goto exit_set_param;
			}

		ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
			   __func__, ddb_entry->fw_ddb_index);
		goto exit_conn_start;
	}

	status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
	if (status == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
			   sess->targetname);
		ret = -EINVAL;
		goto exit_conn_start;
	}

	if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
		ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;

	DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
		      ddb_entry->fw_ddb_device_state));

exit_set_param:
	ret = 0;

exit_conn_start:
	if (fw_ddb_entry)
		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
				  fw_ddb_entry, fw_ddb_entry_dma);
	return ret;
}

static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
	struct iscsi_session *sess;
	struct scsi_qla_host *ha;
	struct ddb_entry *ddb_entry;
	int options;

	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: cid = %d\n", __func__,
			  cls_conn->cid));

	options = LOGOUT_OPTION_CLOSE_SESSION;
	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
		ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
}

static void qla4xxx_task_work(struct work_struct *wdata)
{
	struct ql4_task_data *task_data;
	struct scsi_qla_host *ha;
	struct passthru_status *sts;
	struct iscsi_task *task;
	struct iscsi_hdr *hdr;
	uint8_t *data;
	uint32_t data_len;
	struct iscsi_conn *conn;
	int hdr_len;
	itt_t itt;

	task_data = container_of(wdata, struct ql4_task_data, task_work);
	ha = task_data->ha;
	task = task_data->task;
	sts = &task_data->sts;
	hdr_len = sizeof(struct iscsi_hdr);

	DEBUG3(printk(KERN_INFO "Status returned\n"));
	DEBUG3(qla4xxx_dump_buffer(sts, 64));
	DEBUG3(printk(KERN_INFO "Response buffer"));
	DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));

	conn = task->conn;

	switch (sts->completionStatus) {
	case PASSTHRU_STATUS_COMPLETE:
		hdr = (struct iscsi_hdr *)task_data->resp_buffer;
		/* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
		itt = sts->handle;
		hdr->itt = itt;
		data = task_data->resp_buffer + hdr_len;
		data_len = task_data->resp_len - hdr_len;
		iscsi_complete_pdu(conn, hdr, data, data_len);
		break;
	default:
		ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
			   sts->completionStatus);
		break;
	}
	return;
}

static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
{
	struct ql4_task_data *task_data;
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;
	int hdr_len;

	sess = task->conn->session;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;
	task_data = task->dd_data;
	memset(task_data, 0, sizeof(struct ql4_task_data));

	if (task->sc) {
		ql4_printk(KERN_INFO, ha,
			   "%s: SCSI Commands not implemented\n", __func__);
		return -EINVAL;
	}

	hdr_len = sizeof(struct iscsi_hdr);
	task_data->ha = ha;
	task_data->task = task;

	if (task->data_count) {
		task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
						     task->data_count,
						     PCI_DMA_TODEVICE);
	}

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
			  __func__, task->conn->max_recv_dlength, hdr_len));

	task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
	task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
						    task_data->resp_len,
						    &task_data->resp_dma,
						    GFP_ATOMIC);
	if (!task_data->resp_buffer)
		goto exit_alloc_pdu;

	task_data->req_len = task->data_count + hdr_len;
	task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
						   task_data->req_len,
						   &task_data->req_dma,
						   GFP_ATOMIC);
	if (!task_data->req_buffer)
		goto exit_alloc_pdu;

	task->hdr = task_data->req_buffer;

	INIT_WORK(&task_data->task_work, qla4xxx_task_work);

	return 0;

exit_alloc_pdu:
	if (task_data->resp_buffer)
		dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
				  task_data->resp_buffer, task_data->resp_dma);

	if (task_data->req_buffer)
		dma_free_coherent(&ha->pdev->dev, task_data->req_len,
				  task_data->req_buffer, task_data->req_dma);
	return -ENOMEM;
}

static void qla4xxx_task_cleanup(struct iscsi_task *task)
{
	struct ql4_task_data *task_data;
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;
	int hdr_len;

	hdr_len = sizeof(struct iscsi_hdr);
	sess = task->conn->session;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;
	task_data = task->dd_data;

	if (task->data_count) {
		dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
				 task->data_count, PCI_DMA_TODEVICE);
	}

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
			  __func__, task->conn->max_recv_dlength, hdr_len));

	dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
			  task_data->resp_buffer, task_data->resp_dma);
	dma_free_coherent(&ha->pdev->dev,
task_data->req_len, 3434 task_data->req_buffer, task_data->req_dma); 3435 return; 3436 } 3437 3438 static int qla4xxx_task_xmit(struct iscsi_task *task) 3439 { 3440 struct scsi_cmnd *sc = task->sc; 3441 struct iscsi_session *sess = task->conn->session; 3442 struct ddb_entry *ddb_entry = sess->dd_data; 3443 struct scsi_qla_host *ha = ddb_entry->ha; 3444 3445 if (!sc) 3446 return qla4xxx_send_passthru0(task); 3447 3448 ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n", 3449 __func__); 3450 return -ENOSYS; 3451 } 3452 3453 static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess, 3454 struct iscsi_bus_flash_conn *conn, 3455 struct dev_db_entry *fw_ddb_entry) 3456 { 3457 unsigned long options = 0; 3458 int rc = 0; 3459 3460 options = le16_to_cpu(fw_ddb_entry->options); 3461 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options); 3462 if (test_bit(OPT_IPV6_DEVICE, &options)) { 3463 rc = iscsi_switch_str_param(&sess->portal_type, 3464 PORTAL_TYPE_IPV6); 3465 if (rc) 3466 goto exit_copy; 3467 } else { 3468 rc = iscsi_switch_str_param(&sess->portal_type, 3469 PORTAL_TYPE_IPV4); 3470 if (rc) 3471 goto exit_copy; 3472 } 3473 3474 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE, 3475 &options); 3476 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options); 3477 sess->entry_state = test_bit(OPT_ENTRY_STATE, &options); 3478 3479 options = le16_to_cpu(fw_ddb_entry->iscsi_options); 3480 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options); 3481 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options); 3482 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options); 3483 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options); 3484 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER, 3485 &options); 3486 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options); 3487 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options); 3488 conn->snack_req_en = test_bit(ISCSIOPT_SNACK_REQ_EN, &options); 3489 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN, 3490 &options); 3491 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options); 3492 sess->discovery_auth_optional = 3493 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options); 3494 if (test_bit(ISCSIOPT_ERL1, &options)) 3495 sess->erl |= BIT_1; 3496 if (test_bit(ISCSIOPT_ERL0, &options)) 3497 sess->erl |= BIT_0; 3498 3499 options = le16_to_cpu(fw_ddb_entry->tcp_options); 3500 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options); 3501 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options); 3502 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options); 3503 if (test_bit(TCPOPT_TIMER_SCALE3, &options)) 3504 conn->tcp_timer_scale |= BIT_3; 3505 if (test_bit(TCPOPT_TIMER_SCALE2, &options)) 3506 conn->tcp_timer_scale |= BIT_2; 3507 if (test_bit(TCPOPT_TIMER_SCALE1, &options)) 3508 conn->tcp_timer_scale |= BIT_1; 3509 3510 conn->tcp_timer_scale >>= 1; 3511 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options); 3512 3513 options = le16_to_cpu(fw_ddb_entry->ip_options); 3514 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options); 3515 3516 conn->max_recv_dlength = BYTE_UNITS * 3517 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); 3518 conn->max_xmit_dlength = BYTE_UNITS * 3519 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); 3520 sess->first_burst = BYTE_UNITS * 3521 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); 3522 sess->max_burst = 
BYTE_UNITS * 3523 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); 3524 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); 3525 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); 3526 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); 3527 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); 3528 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss); 3529 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf; 3530 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf; 3531 conn->ipv6_flow_label = le16_to_cpu(fw_ddb_entry->ipv6_flow_lbl); 3532 conn->keepalive_timeout = le16_to_cpu(fw_ddb_entry->ka_timeout); 3533 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port); 3534 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn); 3535 conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn); 3536 sess->discovery_parent_idx = le16_to_cpu(fw_ddb_entry->ddb_link); 3537 sess->discovery_parent_type = le16_to_cpu(fw_ddb_entry->ddb_link); 3538 sess->chap_out_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 3539 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid); 3540 3541 sess->default_taskmgmt_timeout = 3542 le16_to_cpu(fw_ddb_entry->def_timeout); 3543 conn->port = le16_to_cpu(fw_ddb_entry->port); 3544 3545 options = le16_to_cpu(fw_ddb_entry->options); 3546 conn->ipaddress = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL); 3547 if (!conn->ipaddress) { 3548 rc = -ENOMEM; 3549 goto exit_copy; 3550 } 3551 3552 conn->redirect_ipaddr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL); 3553 if (!conn->redirect_ipaddr) { 3554 rc = -ENOMEM; 3555 goto exit_copy; 3556 } 3557 3558 memcpy(conn->ipaddress, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN); 3559 memcpy(conn->redirect_ipaddr, fw_ddb_entry->tgt_addr, IPv6_ADDR_LEN); 3560 3561 if (test_bit(OPT_IPV6_DEVICE, &options)) { 3562 conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos; 3563 3564 conn->link_local_ipv6_addr = kmemdup( 3565 fw_ddb_entry->link_local_ipv6_addr, 3566 IPv6_ADDR_LEN, GFP_KERNEL); 3567 if (!conn->link_local_ipv6_addr) { 3568 rc = -ENOMEM; 3569 goto exit_copy; 3570 } 3571 } else { 3572 conn->ipv4_tos = fw_ddb_entry->ipv4_tos; 3573 } 3574 3575 if (fw_ddb_entry->iscsi_name[0]) { 3576 rc = iscsi_switch_str_param(&sess->targetname, 3577 (char *)fw_ddb_entry->iscsi_name); 3578 if (rc) 3579 goto exit_copy; 3580 } 3581 3582 if (fw_ddb_entry->iscsi_alias[0]) { 3583 rc = iscsi_switch_str_param(&sess->targetalias, 3584 (char *)fw_ddb_entry->iscsi_alias); 3585 if (rc) 3586 goto exit_copy; 3587 } 3588 3589 COPY_ISID(sess->isid, fw_ddb_entry->isid); 3590 3591 exit_copy: 3592 return rc; 3593 } 3594 3595 static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess, 3596 struct iscsi_bus_flash_conn *conn, 3597 struct dev_db_entry *fw_ddb_entry) 3598 { 3599 uint16_t options; 3600 int rc = 0; 3601 3602 options = le16_to_cpu(fw_ddb_entry->options); 3603 SET_BITVAL(conn->is_fw_assigned_ipv6, options, BIT_11); 3604 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4)) 3605 options |= BIT_8; 3606 else 3607 options &= ~BIT_8; 3608 3609 SET_BITVAL(sess->auto_snd_tgt_disable, options, BIT_6); 3610 SET_BITVAL(sess->discovery_sess, options, BIT_4); 3611 SET_BITVAL(sess->entry_state, options, BIT_3); 3612 fw_ddb_entry->options = cpu_to_le16(options); 3613 3614 options = le16_to_cpu(fw_ddb_entry->iscsi_options); 3615 SET_BITVAL(conn->hdrdgst_en, options, BIT_13); 3616 SET_BITVAL(conn->datadgst_en, options, BIT_12); 3617 SET_BITVAL(sess->imm_data_en, options, BIT_11); 3618 SET_BITVAL(sess->initial_r2t_en, options, BIT_10); 3619 SET_BITVAL(sess->dataseq_inorder_en, 
options, BIT_9); 3620 SET_BITVAL(sess->pdu_inorder_en, options, BIT_8); 3621 SET_BITVAL(sess->chap_auth_en, options, BIT_7); 3622 SET_BITVAL(conn->snack_req_en, options, BIT_6); 3623 SET_BITVAL(sess->discovery_logout_en, options, BIT_5); 3624 SET_BITVAL(sess->bidi_chap_en, options, BIT_4); 3625 SET_BITVAL(sess->discovery_auth_optional, options, BIT_3); 3626 SET_BITVAL(sess->erl & BIT_1, options, BIT_1); 3627 SET_BITVAL(sess->erl & BIT_0, options, BIT_0); 3628 fw_ddb_entry->iscsi_options = cpu_to_le16(options); 3629 3630 options = le16_to_cpu(fw_ddb_entry->tcp_options); 3631 SET_BITVAL(conn->tcp_timestamp_stat, options, BIT_6); 3632 SET_BITVAL(conn->tcp_nagle_disable, options, BIT_5); 3633 SET_BITVAL(conn->tcp_wsf_disable, options, BIT_4); 3634 SET_BITVAL(conn->tcp_timer_scale & BIT_2, options, BIT_3); 3635 SET_BITVAL(conn->tcp_timer_scale & BIT_1, options, BIT_2); 3636 SET_BITVAL(conn->tcp_timer_scale & BIT_0, options, BIT_1); 3637 SET_BITVAL(conn->tcp_timestamp_en, options, BIT_0); 3638 fw_ddb_entry->tcp_options = cpu_to_le16(options); 3639 3640 options = le16_to_cpu(fw_ddb_entry->ip_options); 3641 SET_BITVAL(conn->fragment_disable, options, BIT_4); 3642 fw_ddb_entry->ip_options = cpu_to_le16(options); 3643 3644 fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t); 3645 fw_ddb_entry->iscsi_max_rcv_data_seg_len = 3646 cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS); 3647 fw_ddb_entry->iscsi_max_snd_data_seg_len = 3648 cpu_to_le16(conn->max_xmit_dlength / BYTE_UNITS); 3649 fw_ddb_entry->iscsi_first_burst_len = 3650 cpu_to_le16(sess->first_burst / BYTE_UNITS); 3651 fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16(sess->max_burst / 3652 BYTE_UNITS); 3653 fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait); 3654 fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain); 3655 fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt); 3656 fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size); 3657 fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf); 3658 fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf); 3659 fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label); 3660 fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout); 3661 fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port); 3662 fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn); 3663 fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn); 3664 fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_idx); 3665 fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx); 3666 fw_ddb_entry->tsid = cpu_to_le16(sess->tsid); 3667 fw_ddb_entry->port = cpu_to_le16(conn->port); 3668 fw_ddb_entry->def_timeout = 3669 cpu_to_le16(sess->default_taskmgmt_timeout); 3670 3671 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4)) 3672 fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class; 3673 else 3674 fw_ddb_entry->ipv4_tos = conn->ipv4_tos; 3675 3676 if (conn->ipaddress) 3677 memcpy(fw_ddb_entry->ip_addr, conn->ipaddress, 3678 sizeof(fw_ddb_entry->ip_addr)); 3679 3680 if (conn->redirect_ipaddr) 3681 memcpy(fw_ddb_entry->tgt_addr, conn->redirect_ipaddr, 3682 sizeof(fw_ddb_entry->tgt_addr)); 3683 3684 if (conn->link_local_ipv6_addr) 3685 memcpy(fw_ddb_entry->link_local_ipv6_addr, 3686 conn->link_local_ipv6_addr, 3687 sizeof(fw_ddb_entry->link_local_ipv6_addr)); 3688 3689 if (sess->targetname) 3690 memcpy(fw_ddb_entry->iscsi_name, sess->targetname, 3691 sizeof(fw_ddb_entry->iscsi_name)); 3692 3693 if (sess->targetalias) 3694 memcpy(fw_ddb_entry->iscsi_alias, 
sess->targetalias, 3695 sizeof(fw_ddb_entry->iscsi_alias)); 3696 3697 COPY_ISID(fw_ddb_entry->isid, sess->isid); 3698 3699 return rc; 3700 } 3701 3702 static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn, 3703 struct iscsi_session *sess, 3704 struct dev_db_entry *fw_ddb_entry) 3705 { 3706 unsigned long options = 0; 3707 uint16_t ddb_link; 3708 uint16_t disc_parent; 3709 char ip_addr[DDB_IPADDR_LEN]; 3710 3711 options = le16_to_cpu(fw_ddb_entry->options); 3712 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options); 3713 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE, 3714 &options); 3715 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options); 3716 3717 options = le16_to_cpu(fw_ddb_entry->iscsi_options); 3718 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options); 3719 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options); 3720 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options); 3721 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options); 3722 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER, 3723 &options); 3724 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options); 3725 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options); 3726 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN, 3727 &options); 3728 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options); 3729 sess->discovery_auth_optional = 3730 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options); 3731 if (test_bit(ISCSIOPT_ERL1, &options)) 3732 sess->erl |= BIT_1; 3733 if (test_bit(ISCSIOPT_ERL0, &options)) 3734 sess->erl |= BIT_0; 3735 3736 options = le16_to_cpu(fw_ddb_entry->tcp_options); 3737 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options); 3738 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options); 3739 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options); 3740 if (test_bit(TCPOPT_TIMER_SCALE3, &options)) 3741 conn->tcp_timer_scale |= BIT_3; 3742 if (test_bit(TCPOPT_TIMER_SCALE2, &options)) 3743 conn->tcp_timer_scale |= BIT_2; 3744 if (test_bit(TCPOPT_TIMER_SCALE1, &options)) 3745 conn->tcp_timer_scale |= BIT_1; 3746 3747 conn->tcp_timer_scale >>= 1; 3748 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options); 3749 3750 options = le16_to_cpu(fw_ddb_entry->ip_options); 3751 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options); 3752 3753 conn->max_recv_dlength = BYTE_UNITS * 3754 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); 3755 conn->max_xmit_dlength = BYTE_UNITS * 3756 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); 3757 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); 3758 sess->first_burst = BYTE_UNITS * 3759 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); 3760 sess->max_burst = BYTE_UNITS * 3761 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); 3762 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); 3763 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); 3764 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); 3765 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss); 3766 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf; 3767 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf; 3768 conn->ipv4_tos = fw_ddb_entry->ipv4_tos; 3769 conn->keepalive_tmo = le16_to_cpu(fw_ddb_entry->ka_timeout); 3770 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port); 3771 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn); 3772 conn->exp_statsn = 
le32_to_cpu(fw_ddb_entry->exp_stat_sn); 3773 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid); 3774 COPY_ISID(sess->isid, fw_ddb_entry->isid); 3775 3776 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 3777 if (ddb_link == DDB_ISNS) 3778 disc_parent = ISCSI_DISC_PARENT_ISNS; 3779 else if (ddb_link == DDB_NO_LINK) 3780 disc_parent = ISCSI_DISC_PARENT_UNKNOWN; 3781 else if (ddb_link < MAX_DDB_ENTRIES) 3782 disc_parent = ISCSI_DISC_PARENT_SENDTGT; 3783 else 3784 disc_parent = ISCSI_DISC_PARENT_UNKNOWN; 3785 3786 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 3787 iscsi_get_discovery_parent_name(disc_parent), 0); 3788 3789 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS, 3790 (char *)fw_ddb_entry->iscsi_alias, 0); 3791 3792 options = le16_to_cpu(fw_ddb_entry->options); 3793 if (options & DDB_OPT_IPV6_DEVICE) { 3794 memset(ip_addr, 0, sizeof(ip_addr)); 3795 sprintf(ip_addr, "%pI6", fw_ddb_entry->link_local_ipv6_addr); 3796 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_LOCAL_IPADDR, 3797 (char *)ip_addr, 0); 3798 } 3799 } 3800 3801 static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha, 3802 struct dev_db_entry *fw_ddb_entry, 3803 struct iscsi_cls_session *cls_sess, 3804 struct iscsi_cls_conn *cls_conn) 3805 { 3806 int buflen = 0; 3807 struct iscsi_session *sess; 3808 struct ddb_entry *ddb_entry; 3809 struct ql4_chap_table chap_tbl; 3810 struct iscsi_conn *conn; 3811 char ip_addr[DDB_IPADDR_LEN]; 3812 uint16_t options = 0; 3813 3814 sess = cls_sess->dd_data; 3815 ddb_entry = sess->dd_data; 3816 conn = cls_conn->dd_data; 3817 memset(&chap_tbl, 0, sizeof(chap_tbl)); 3818 3819 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 3820 3821 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry); 3822 3823 sess->def_taskmgmt_tmo = le16_to_cpu(fw_ddb_entry->def_timeout); 3824 conn->persistent_port = le16_to_cpu(fw_ddb_entry->port); 3825 3826 memset(ip_addr, 0, sizeof(ip_addr)); 3827 options = le16_to_cpu(fw_ddb_entry->options); 3828 if (options & DDB_OPT_IPV6_DEVICE) { 3829 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv6", 4); 3830 3831 memset(ip_addr, 0, sizeof(ip_addr)); 3832 sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr); 3833 } else { 3834 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv4", 4); 3835 sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr); 3836 } 3837 3838 iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS, 3839 (char *)ip_addr, buflen); 3840 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME, 3841 (char *)fw_ddb_entry->iscsi_name, buflen); 3842 iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME, 3843 (char *)ha->name_string, buflen); 3844 3845 if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) { 3846 if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, 3847 chap_tbl.secret, 3848 ddb_entry->chap_tbl_idx)) { 3849 iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, 3850 (char *)chap_tbl.name, 3851 strlen((char *)chap_tbl.name)); 3852 iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, 3853 (char *)chap_tbl.secret, 3854 chap_tbl.secret_len); 3855 } 3856 } 3857 } 3858 3859 void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha, 3860 struct ddb_entry *ddb_entry) 3861 { 3862 struct iscsi_cls_session *cls_sess; 3863 struct iscsi_cls_conn *cls_conn; 3864 uint32_t ddb_state; 3865 dma_addr_t fw_ddb_entry_dma; 3866 struct dev_db_entry *fw_ddb_entry; 3867 3868 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3869 &fw_ddb_entry_dma, GFP_KERNEL); 3870 if (!fw_ddb_entry) { 3871 ql4_printk(KERN_ERR, ha, 3872 
"%s: Unable to allocate dma buffer\n", __func__); 3873 goto exit_session_conn_fwddb_param; 3874 } 3875 3876 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, 3877 fw_ddb_entry_dma, NULL, NULL, &ddb_state, 3878 NULL, NULL, NULL) == QLA_ERROR) { 3879 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " 3880 "get_ddb_entry for fw_ddb_index %d\n", 3881 ha->host_no, __func__, 3882 ddb_entry->fw_ddb_index)); 3883 goto exit_session_conn_fwddb_param; 3884 } 3885 3886 cls_sess = ddb_entry->sess; 3887 3888 cls_conn = ddb_entry->conn; 3889 3890 /* Update params */ 3891 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); 3892 3893 exit_session_conn_fwddb_param: 3894 if (fw_ddb_entry) 3895 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3896 fw_ddb_entry, fw_ddb_entry_dma); 3897 } 3898 3899 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha, 3900 struct ddb_entry *ddb_entry) 3901 { 3902 struct iscsi_cls_session *cls_sess; 3903 struct iscsi_cls_conn *cls_conn; 3904 struct iscsi_session *sess; 3905 struct iscsi_conn *conn; 3906 uint32_t ddb_state; 3907 dma_addr_t fw_ddb_entry_dma; 3908 struct dev_db_entry *fw_ddb_entry; 3909 3910 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3911 &fw_ddb_entry_dma, GFP_KERNEL); 3912 if (!fw_ddb_entry) { 3913 ql4_printk(KERN_ERR, ha, 3914 "%s: Unable to allocate dma buffer\n", __func__); 3915 goto exit_session_conn_param; 3916 } 3917 3918 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, 3919 fw_ddb_entry_dma, NULL, NULL, &ddb_state, 3920 NULL, NULL, NULL) == QLA_ERROR) { 3921 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " 3922 "get_ddb_entry for fw_ddb_index %d\n", 3923 ha->host_no, __func__, 3924 ddb_entry->fw_ddb_index)); 3925 goto exit_session_conn_param; 3926 } 3927 3928 cls_sess = ddb_entry->sess; 3929 sess = cls_sess->dd_data; 3930 3931 cls_conn = ddb_entry->conn; 3932 conn = cls_conn->dd_data; 3933 3934 /* Update timers after login */ 3935 ddb_entry->default_relogin_timeout = 3936 (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) && 3937 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ? 
3938 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV; 3939 ddb_entry->default_time2wait = 3940 le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); 3941 3942 /* Update params */ 3943 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 3944 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry); 3945 3946 memcpy(sess->initiatorname, ha->name_string, 3947 min(sizeof(ha->name_string), sizeof(sess->initiatorname))); 3948 3949 exit_session_conn_param: 3950 if (fw_ddb_entry) 3951 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3952 fw_ddb_entry, fw_ddb_entry_dma); 3953 } 3954 3955 /* 3956 * Timer routines 3957 */ 3958 3959 static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func, 3960 unsigned long interval) 3961 { 3962 DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n", 3963 __func__, ha->host->host_no)); 3964 init_timer(&ha->timer); 3965 ha->timer.expires = jiffies + interval * HZ; 3966 ha->timer.data = (unsigned long)ha; 3967 ha->timer.function = (void (*)(unsigned long))func; 3968 add_timer(&ha->timer); 3969 ha->timer_active = 1; 3970 } 3971 3972 static void qla4xxx_stop_timer(struct scsi_qla_host *ha) 3973 { 3974 del_timer_sync(&ha->timer); 3975 ha->timer_active = 0; 3976 } 3977 3978 /** 3979 * qla4xxx_mark_device_missing - blocks the session 3980 * @cls_session: Pointer to the session to be blocked 3982 * 3983 * This routine marks the device missing by blocking its iSCSI session. 3984 **/ 3985 void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session) 3986 { 3987 iscsi_block_session(cls_session); 3988 } 3989 3990 /** 3991 * qla4xxx_mark_all_devices_missing - mark all devices as missing. 3992 * @ha: Pointer to host adapter structure. 3993 * 3994 * This routine marks every device on the host missing by blocking its session. 3995 **/ 3996 void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha) 3997 { 3998 iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing); 3999 } 4000 4001 static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha, 4002 struct ddb_entry *ddb_entry, 4003 struct scsi_cmnd *cmd) 4004 { 4005 struct srb *srb; 4006 4007 srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC); 4008 if (!srb) 4009 return srb; 4010 4011 kref_init(&srb->srb_ref); 4012 srb->ha = ha; 4013 srb->ddb = ddb_entry; 4014 srb->cmd = cmd; 4015 srb->flags = 0; 4016 CMD_SP(cmd) = (void *)srb; 4017 4018 return srb; 4019 } 4020 4021 static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb) 4022 { 4023 struct scsi_cmnd *cmd = srb->cmd; 4024 4025 if (srb->flags & SRB_DMA_VALID) { 4026 scsi_dma_unmap(cmd); 4027 srb->flags &= ~SRB_DMA_VALID; 4028 } 4029 CMD_SP(cmd) = NULL; 4030 } 4031 4032 void qla4xxx_srb_compl(struct kref *ref) 4033 { 4034 struct srb *srb = container_of(ref, struct srb, srb_ref); 4035 struct scsi_cmnd *cmd = srb->cmd; 4036 struct scsi_qla_host *ha = srb->ha; 4037 4038 qla4xxx_srb_free_dma(ha, srb); 4039 4040 mempool_free(srb, ha->srb_mempool); 4041 4042 cmd->scsi_done(cmd); 4043 } 4044 4045 /** 4046 * qla4xxx_queuecommand - scsi layer issues scsi command to driver. 4047 * @host: scsi host 4048 * @cmd: Pointer to Linux's SCSI command structure 4049 * 4050 * Remarks: 4051 * This routine is invoked by Linux to send a SCSI command to the driver.
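 * Returns 0 once the command has been handed to the ISP (or completed via ->scsi_done() on the early failure paths), and SCSI_MLQUEUE_HOST_BUSY while a reset is pending, the adapter is offline or has no link, or no srb can be allocated.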
4052 * The mid-level driver tries to ensure that queuecommand never gets 4053 * invoked concurrently with itself or the interrupt handler (although 4054 * the interrupt handler may call this routine as part of request- 4055 * completion handling). Unfortunately, it sometimes calls the scheduler 4056 * in interrupt context, which is a big NO! NO! 4057 **/ 4058 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) 4059 { 4060 struct scsi_qla_host *ha = to_qla_host(host); 4061 struct ddb_entry *ddb_entry = cmd->device->hostdata; 4062 struct iscsi_cls_session *sess = ddb_entry->sess; 4063 struct srb *srb; 4064 int rval; 4065 4066 if (test_bit(AF_EEH_BUSY, &ha->flags)) { 4067 if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags)) 4068 cmd->result = DID_NO_CONNECT << 16; 4069 else 4070 cmd->result = DID_REQUEUE << 16; 4071 goto qc_fail_command; 4072 } 4073 4074 if (!sess) { 4075 cmd->result = DID_IMM_RETRY << 16; 4076 goto qc_fail_command; 4077 } 4078 4079 rval = iscsi_session_chkready(sess); 4080 if (rval) { 4081 cmd->result = rval; 4082 goto qc_fail_command; 4083 } 4084 4085 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 4086 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || 4087 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 4088 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) || 4089 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || 4090 !test_bit(AF_ONLINE, &ha->flags) || 4091 !test_bit(AF_LINK_UP, &ha->flags) || 4092 test_bit(AF_LOOPBACK, &ha->flags) || 4093 test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) || 4094 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags) || 4095 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) 4096 goto qc_host_busy; 4097 4098 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd); 4099 if (!srb) 4100 goto qc_host_busy; 4101 4102 rval = qla4xxx_send_command_to_isp(ha, srb); 4103 if (rval != QLA_SUCCESS) 4104 goto qc_host_busy_free_sp; 4105 4106 return 0; 4107 4108 qc_host_busy_free_sp: 4109 qla4xxx_srb_free_dma(ha, srb); 4110 mempool_free(srb, ha->srb_mempool); 4111 4112 qc_host_busy: 4113 return SCSI_MLQUEUE_HOST_BUSY; 4114 4115 qc_fail_command: 4116 cmd->scsi_done(cmd); 4117 4118 return 0; 4119 } 4120 4121 /** 4122 * qla4xxx_mem_free - frees memory allocated to adapter 4123 * @ha: Pointer to host adapter structure. 4124 * 4125 * Frees memory previously allocated by qla4xxx_mem_alloc 4126 **/ 4127 static void qla4xxx_mem_free(struct scsi_qla_host *ha) 4128 { 4129 if (ha->queues) 4130 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues, 4131 ha->queues_dma); 4132 4133 if (ha->fw_dump) 4134 vfree(ha->fw_dump); 4135 4136 ha->queues_len = 0; 4137 ha->queues = NULL; 4138 ha->queues_dma = 0; 4139 ha->request_ring = NULL; 4140 ha->request_dma = 0; 4141 ha->response_ring = NULL; 4142 ha->response_dma = 0; 4143 ha->shadow_regs = NULL; 4144 ha->shadow_regs_dma = 0; 4145 ha->fw_dump = NULL; 4146 ha->fw_dump_size = 0; 4147 4148 /* Free srb pool.
*/ 4149 if (ha->srb_mempool) 4150 mempool_destroy(ha->srb_mempool); 4151 4152 ha->srb_mempool = NULL; 4153 4154 if (ha->chap_dma_pool) 4155 dma_pool_destroy(ha->chap_dma_pool); 4156 4157 if (ha->chap_list) 4158 vfree(ha->chap_list); 4159 ha->chap_list = NULL; 4160 4161 if (ha->fw_ddb_dma_pool) 4162 dma_pool_destroy(ha->fw_ddb_dma_pool); 4163 4164 /* release io space registers */ 4165 if (is_qla8022(ha)) { 4166 if (ha->nx_pcibase) 4167 iounmap( 4168 (struct device_reg_82xx __iomem *)ha->nx_pcibase); 4169 } else if (is_qla8032(ha) || is_qla8042(ha)) { 4170 if (ha->nx_pcibase) 4171 iounmap( 4172 (struct device_reg_83xx __iomem *)ha->nx_pcibase); 4173 } else if (ha->reg) { 4174 iounmap(ha->reg); 4175 } 4176 4177 if (ha->reset_tmplt.buff) 4178 vfree(ha->reset_tmplt.buff); 4179 4180 pci_release_regions(ha->pdev); 4181 } 4182 4183 /** 4184 * qla4xxx_mem_alloc - allocates memory for use by adapter. 4185 * @ha: Pointer to host adapter structure 4186 * 4187 * Allocates DMA memory for request and response queues. Also allocates memory 4188 * for srbs. 4189 **/ 4190 static int qla4xxx_mem_alloc(struct scsi_qla_host *ha) 4191 { 4192 unsigned long align; 4193 4194 /* Allocate contiguous block of DMA memory for queues. */ 4195 ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) + 4196 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) + 4197 sizeof(struct shadow_regs) + 4198 MEM_ALIGN_VALUE + 4199 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 4200 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len, 4201 &ha->queues_dma, GFP_KERNEL); 4202 if (ha->queues == NULL) { 4203 ql4_printk(KERN_WARNING, ha, 4204 "Memory Allocation failed - queues.\n"); 4205 4206 goto mem_alloc_error_exit; 4207 } 4208 memset(ha->queues, 0, ha->queues_len); 4209 4210 /* 4211 * As per RISC alignment requirements -- the bus-address must be a 4212 * multiple of the request-ring size (in bytes). 4213 */ 4214 align = 0; 4215 if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1)) 4216 align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma & 4217 (MEM_ALIGN_VALUE - 1)); 4218 4219 /* Update request and response queue pointers. */ 4220 ha->request_dma = ha->queues_dma + align; 4221 ha->request_ring = (struct queue_entry *) (ha->queues + align); 4222 ha->response_dma = ha->queues_dma + align + 4223 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE); 4224 ha->response_ring = (struct queue_entry *) (ha->queues + align + 4225 (REQUEST_QUEUE_DEPTH * 4226 QUEUE_SIZE)); 4227 ha->shadow_regs_dma = ha->queues_dma + align + 4228 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) + 4229 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE); 4230 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align + 4231 (REQUEST_QUEUE_DEPTH * 4232 QUEUE_SIZE) + 4233 (RESPONSE_QUEUE_DEPTH * 4234 QUEUE_SIZE)); 4235 4236 /* Allocate memory for srb pool. 
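 * The pool draws its objects from the global srb_cachep slab cache.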
*/ 4237 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab, 4238 mempool_free_slab, srb_cachep); 4239 if (ha->srb_mempool == NULL) { 4240 ql4_printk(KERN_WARNING, ha, 4241 "Memory Allocation failed - SRB Pool.\n"); 4242 4243 goto mem_alloc_error_exit; 4244 } 4245 4246 ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev, 4247 CHAP_DMA_BLOCK_SIZE, 8, 0); 4248 4249 if (ha->chap_dma_pool == NULL) { 4250 ql4_printk(KERN_WARNING, ha, 4251 "%s: chap_dma_pool allocation failed..\n", __func__); 4252 goto mem_alloc_error_exit; 4253 } 4254 4255 ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev, 4256 DDB_DMA_BLOCK_SIZE, 8, 0); 4257 4258 if (ha->fw_ddb_dma_pool == NULL) { 4259 ql4_printk(KERN_WARNING, ha, 4260 "%s: fw_ddb_dma_pool allocation failed..\n", 4261 __func__); 4262 goto mem_alloc_error_exit; 4263 } 4264 4265 return QLA_SUCCESS; 4266 4267 mem_alloc_error_exit: 4268 qla4xxx_mem_free(ha); 4269 return QLA_ERROR; 4270 } 4271 4272 /** 4273 * qla4_8xxx_check_temp - Check the ISP82XX temperature. 4274 * @ha: adapter block pointer. 4275 * 4276 * Note: The caller should not hold the idc lock. 4277 **/ 4278 static int qla4_8xxx_check_temp(struct scsi_qla_host *ha) 4279 { 4280 uint32_t temp, temp_state, temp_val; 4281 int status = QLA_SUCCESS; 4282 4283 temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE); 4284 4285 temp_state = qla82xx_get_temp_state(temp); 4286 temp_val = qla82xx_get_temp_val(temp); 4287 4288 if (temp_state == QLA82XX_TEMP_PANIC) { 4289 ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C" 4290 " exceeds maximum allowed. Hardware has been shut" 4291 " down.\n", temp_val); 4292 status = QLA_ERROR; 4293 } else if (temp_state == QLA82XX_TEMP_WARN) { 4294 if (ha->temperature == QLA82XX_TEMP_NORMAL) 4295 ql4_printk(KERN_WARNING, ha, "Device temperature %d" 4296 " degrees C exceeds operating range." 4297 " Immediate action needed.\n", temp_val); 4298 } else { 4299 if (ha->temperature == QLA82XX_TEMP_WARN) 4300 ql4_printk(KERN_INFO, ha, "Device temperature is" 4301 " now %d degrees C in normal range.\n", 4302 temp_val); 4303 } 4304 ha->temperature = temp_state; 4305 return status; 4306 } 4307 4308 /** 4309 * qla4_8xxx_check_fw_alive - Check firmware health 4310 * @ha: Pointer to host adapter structure. 
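 * Returns QLA_SUCCESS while the heartbeat counter is advancing (or while AER/EEH recovery is in progress), and QLA_ERROR once the counter has been static for two consecutive one-second polls, after dumping the PEG registers.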
4311 * 4312 * Context: Interrupt 4313 **/ 4314 static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha) 4315 { 4316 uint32_t fw_heartbeat_counter; 4317 int status = QLA_SUCCESS; 4318 4319 fw_heartbeat_counter = qla4_8xxx_rd_direct(ha, 4320 QLA8XXX_PEG_ALIVE_COUNTER); 4321 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */ 4322 if (fw_heartbeat_counter == 0xffffffff) { 4323 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen " 4324 "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n", 4325 ha->host_no, __func__)); 4326 return status; 4327 } 4328 4329 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) { 4330 ha->seconds_since_last_heartbeat++; 4331 /* FW not alive after 2 seconds */ 4332 if (ha->seconds_since_last_heartbeat == 2) { 4333 ha->seconds_since_last_heartbeat = 0; 4334 qla4_8xxx_dump_peg_reg(ha); 4335 status = QLA_ERROR; 4336 } 4337 } else 4338 ha->seconds_since_last_heartbeat = 0; 4339 4340 ha->fw_heartbeat_counter = fw_heartbeat_counter; 4341 return status; 4342 } 4343 4344 static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha) 4345 { 4346 uint32_t halt_status; 4347 int halt_status_unrecoverable = 0; 4348 4349 halt_status = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1); 4350 4351 if (is_qla8022(ha)) { 4352 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", 4353 __func__); 4354 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, 4355 CRB_NIU_XG_PAUSE_CTL_P0 | 4356 CRB_NIU_XG_PAUSE_CTL_P1); 4357 4358 if (QLA82XX_FWERROR_CODE(halt_status) == 0x67) 4359 ql4_printk(KERN_ERR, ha, "%s: Firmware aborted with error code 0x00006700. Device is being reset\n", 4360 __func__); 4361 if (halt_status & HALT_STATUS_UNRECOVERABLE) 4362 halt_status_unrecoverable = 1; 4363 } else if (is_qla8032(ha) || is_qla8042(ha)) { 4364 if (halt_status & QLA83XX_HALT_STATUS_FW_RESET) 4365 ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n", 4366 __func__); 4367 else if (halt_status & QLA83XX_HALT_STATUS_UNRECOVERABLE) 4368 halt_status_unrecoverable = 1; 4369 } 4370 4371 /* 4372 * Since we cannot change dev_state in interrupt context, 4373 * set appropriate DPC flag then wakeup DPC 4374 */ 4375 if (halt_status_unrecoverable) { 4376 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags); 4377 } else { 4378 ql4_printk(KERN_INFO, ha, "%s: detect abort needed!\n", 4379 __func__); 4380 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4381 } 4382 qla4xxx_mailbox_premature_completion(ha); 4383 qla4xxx_wake_dpc(ha); 4384 } 4385 4386 /** 4387 * qla4_8xxx_watchdog - Poll dev state 4388 * @ha: Pointer to host adapter structure. 
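 * Reads the CRB device state once per second and schedules the DPC for reset, quiescence, over-temperature or firmware-error handling as needed; polling is skipped while a reset is already in progress.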
4389 * 4390 * Context: Interrupt 4391 **/ 4392 void qla4_8xxx_watchdog(struct scsi_qla_host *ha) 4393 { 4394 uint32_t dev_state; 4395 uint32_t idc_ctrl; 4396 4397 if (is_qla8032(ha) && 4398 (qla4_83xx_is_detached(ha) == QLA_SUCCESS)) 4399 WARN_ONCE(1, "%s: iSCSI function %d marked invisible\n", 4400 __func__, ha->func_num); 4401 4402 /* don't poll if reset is going on */ 4403 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || 4404 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 4405 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) { 4406 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE); 4407 4408 if (qla4_8xxx_check_temp(ha)) { 4409 if (is_qla8022(ha)) { 4410 ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n"); 4411 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, 4412 CRB_NIU_XG_PAUSE_CTL_P0 | 4413 CRB_NIU_XG_PAUSE_CTL_P1); 4414 } 4415 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags); 4416 qla4xxx_wake_dpc(ha); 4417 } else if (dev_state == QLA8XXX_DEV_NEED_RESET && 4418 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 4419 4420 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n", 4421 __func__); 4422 4423 if (is_qla8032(ha) || is_qla8042(ha)) { 4424 idc_ctrl = qla4_83xx_rd_reg(ha, 4425 QLA83XX_IDC_DRV_CTRL); 4426 if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) { 4427 ql4_printk(KERN_INFO, ha, "%s: Graceful reset bit is not set\n", 4428 __func__); 4429 qla4xxx_mailbox_premature_completion( 4430 ha); 4431 } 4432 } 4433 4434 if ((is_qla8032(ha) || is_qla8042(ha)) || 4435 (is_qla8022(ha) && !ql4xdontresethba)) { 4436 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4437 qla4xxx_wake_dpc(ha); 4438 } 4439 } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT && 4440 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { 4441 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n", 4442 __func__); 4443 set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags); 4444 qla4xxx_wake_dpc(ha); 4445 } else { 4446 /* Check firmware health */ 4447 if (qla4_8xxx_check_fw_alive(ha)) 4448 qla4_8xxx_process_fw_error(ha); 4449 } 4450 } 4451 } 4452 4453 static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) 4454 { 4455 struct iscsi_session *sess; 4456 struct ddb_entry *ddb_entry; 4457 struct scsi_qla_host *ha; 4458 4459 sess = cls_sess->dd_data; 4460 ddb_entry = sess->dd_data; 4461 ha = ddb_entry->ha; 4462 4463 if (!(ddb_entry->ddb_type == FLASH_DDB)) 4464 return; 4465 4466 if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) && 4467 !iscsi_is_session_online(cls_sess)) { 4468 if (atomic_read(&ddb_entry->retry_relogin_timer) != 4469 INVALID_ENTRY) { 4470 if (atomic_read(&ddb_entry->retry_relogin_timer) == 4471 0) { 4472 atomic_set(&ddb_entry->retry_relogin_timer, 4473 INVALID_ENTRY); 4474 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); 4475 set_bit(DF_RELOGIN, &ddb_entry->flags); 4476 DEBUG2(ql4_printk(KERN_INFO, ha, 4477 "%s: index [%d] login device\n", 4478 __func__, ddb_entry->fw_ddb_index)); 4479 } else 4480 atomic_dec(&ddb_entry->retry_relogin_timer); 4481 } 4482 } 4483 4484 /* Wait for relogin to timeout */ 4485 if (atomic_read(&ddb_entry->relogin_timer) && 4486 (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) { 4487 /* 4488 * If the relogin times out and the device is 4489 * still NOT ONLINE then try and relogin again. 
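 * The retry is rearmed with the DDB's default_time2wait plus a four second pad.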
4490 */ 4491 if (!iscsi_is_session_online(cls_sess)) { 4492 /* Reset retry relogin timer */ 4493 atomic_inc(&ddb_entry->relogin_retry_count); 4494 DEBUG2(ql4_printk(KERN_INFO, ha, 4495 "%s: index[%d] relogin timed out-retrying" 4496 " relogin (%d), retry (%d)\n", __func__, 4497 ddb_entry->fw_ddb_index, 4498 atomic_read(&ddb_entry->relogin_retry_count), 4499 ddb_entry->default_time2wait + 4)); 4500 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); 4501 atomic_set(&ddb_entry->retry_relogin_timer, 4502 ddb_entry->default_time2wait + 4); 4503 } 4504 } 4505 } 4506 4507 /** 4508 * qla4xxx_timer - checks every second for work to do. 4509 * @ha: Pointer to host adapter structure. 4510 **/ 4511 static void qla4xxx_timer(struct scsi_qla_host *ha) 4512 { 4513 int start_dpc = 0; 4514 uint16_t w; 4515 4516 iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb); 4517 4518 /* If we are in the middle of AER/EEH processing 4519 * skip any processing and reschedule the timer 4520 */ 4521 if (test_bit(AF_EEH_BUSY, &ha->flags)) { 4522 mod_timer(&ha->timer, jiffies + HZ); 4523 return; 4524 } 4525 4526 /* Hardware read to trigger an EEH error during mailbox waits. */ 4527 if (!pci_channel_offline(ha->pdev)) 4528 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 4529 4530 if (is_qla80XX(ha)) 4531 qla4_8xxx_watchdog(ha); 4532 4533 if (is_qla40XX(ha)) { 4534 /* Check for heartbeat interval. */ 4535 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE && 4536 ha->heartbeat_interval != 0) { 4537 ha->seconds_since_last_heartbeat++; 4538 if (ha->seconds_since_last_heartbeat > 4539 ha->heartbeat_interval + 2) 4540 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4541 } 4542 } 4543 4544 /* Process any deferred work. */ 4545 if (!list_empty(&ha->work_list)) 4546 start_dpc++; 4547 4548 /* Wakeup the dpc routine for this adapter, if needed. */ 4549 if (start_dpc || 4550 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 4551 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) || 4552 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) || 4553 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) || 4554 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 4555 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) || 4556 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) || 4557 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) || 4558 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || 4559 test_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags) || 4560 test_bit(DPC_AEN, &ha->dpc_flags)) { 4561 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine" 4562 " - dpc flags = 0x%lx\n", 4563 ha->host_no, __func__, ha->dpc_flags)); 4564 qla4xxx_wake_dpc(ha); 4565 } 4566 4567 /* Reschedule timer thread to call us back in one second */ 4568 mod_timer(&ha->timer, jiffies + HZ); 4569 4570 DEBUG2(ha->seconds_since_last_intr++); 4571 } 4572 4573 /** 4574 * qla4xxx_cmd_wait - waits for all outstanding commands to complete 4575 * @ha: Pointer to host adapter structure. 4576 * 4577 * This routine stalls the driver until all outstanding commands are returned. 4578 * Caller must release the Hardware Lock prior to calling this routine. 
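 * Returns QLA_SUCCESS when no active commands remain, or QLA_ERROR if commands are still outstanding when the timeout (WAIT_CMD_TOV seconds on ISP4xxx, half of nx_reset_timeout on ISP8xxx) expires.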
4579 **/ 4580 static int qla4xxx_cmd_wait(struct scsi_qla_host *ha) 4581 { 4582 uint32_t index = 0; 4583 unsigned long flags; 4584 struct scsi_cmnd *cmd; 4585 unsigned long wtime; 4586 uint32_t wtmo; 4587 4588 if (is_qla40XX(ha)) 4589 wtmo = WAIT_CMD_TOV; 4590 else 4591 wtmo = ha->nx_reset_timeout / 2; 4592 4593 wtime = jiffies + (wtmo * HZ); 4594 4595 DEBUG2(ql4_printk(KERN_INFO, ha, 4596 "Wait up to %u seconds for cmds to complete\n", 4597 wtmo)); 4598 4599 while (!time_after_eq(jiffies, wtime)) { 4600 spin_lock_irqsave(&ha->hardware_lock, flags); 4601 /* Find a command that hasn't completed. */ 4602 for (index = 0; index < ha->host->can_queue; index++) { 4603 cmd = scsi_host_find_tag(ha->host, index); 4604 /* 4605 * We cannot just check if the index is valid, 4606 * because if we are run from the SCSI EH, then 4607 * the scsi/block layer is going to prevent 4608 * the tag from being released. 4609 */ 4610 if (cmd != NULL && CMD_SP(cmd)) 4611 break; 4612 } 4613 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4614 4615 /* If no commands are pending, the wait is complete */ 4616 if (index == ha->host->can_queue) 4617 return QLA_SUCCESS; 4618 4619 msleep(1000); 4620 } 4621 /* If we timed out waiting for commands to complete, 4622 * return an error. */ 4623 return QLA_ERROR; 4624 } 4625 4626 int qla4xxx_hw_reset(struct scsi_qla_host *ha) 4627 { 4628 uint32_t ctrl_status; 4629 unsigned long flags = 0; 4630 4631 DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__)); 4632 4633 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS) 4634 return QLA_ERROR; 4635 4636 spin_lock_irqsave(&ha->hardware_lock, flags); 4637 4638 /* 4639 * If the SCSI Reset Interrupt bit is set, clear it. 4640 * Otherwise, the Soft Reset won't work. 4641 */ 4642 ctrl_status = readw(&ha->reg->ctrl_status); 4643 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) 4644 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status); 4645 4646 /* Issue Soft Reset */ 4647 writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status); 4648 readl(&ha->reg->ctrl_status); 4649 4650 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4651 return QLA_SUCCESS; 4652 } 4653 4654 /** 4655 * qla4xxx_soft_reset - performs soft reset. 4656 * @ha: Pointer to host adapter structure.
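 * Issues the soft reset via qla4xxx_hw_reset(), waits for the network and soft reset bits to clear, and escalates to a force soft reset if the firmware does not acknowledge. Returns QLA_SUCCESS or QLA_ERROR.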
4657 **/ 4658 int qla4xxx_soft_reset(struct scsi_qla_host *ha) 4659 { 4660 uint32_t max_wait_time; 4661 unsigned long flags = 0; 4662 int status; 4663 uint32_t ctrl_status; 4664 4665 status = qla4xxx_hw_reset(ha); 4666 if (status != QLA_SUCCESS) 4667 return status; 4668 4669 status = QLA_ERROR; 4670 /* Wait until the Network Reset Intr bit is cleared */ 4671 max_wait_time = RESET_INTR_TOV; 4672 do { 4673 spin_lock_irqsave(&ha->hardware_lock, flags); 4674 ctrl_status = readw(&ha->reg->ctrl_status); 4675 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4676 4677 if ((ctrl_status & CSR_NET_RESET_INTR) == 0) 4678 break; 4679 4680 msleep(1000); 4681 } while ((--max_wait_time)); 4682 4683 if ((ctrl_status & CSR_NET_RESET_INTR) != 0) { 4684 DEBUG2(printk(KERN_WARNING 4685 "scsi%ld: Network Reset Intr not cleared by " 4686 "Network function, clearing it now!\n", 4687 ha->host_no)); 4688 spin_lock_irqsave(&ha->hardware_lock, flags); 4689 writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status); 4690 readl(&ha->reg->ctrl_status); 4691 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4692 } 4693 4694 /* Wait until the firmware tells us the Soft Reset is done */ 4695 max_wait_time = SOFT_RESET_TOV; 4696 do { 4697 spin_lock_irqsave(&ha->hardware_lock, flags); 4698 ctrl_status = readw(&ha->reg->ctrl_status); 4699 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4700 4701 if ((ctrl_status & CSR_SOFT_RESET) == 0) { 4702 status = QLA_SUCCESS; 4703 break; 4704 } 4705 4706 msleep(1000); 4707 } while ((--max_wait_time)); 4708 4709 /* 4710 * Also, make sure that the SCSI Reset Interrupt bit has been cleared 4711 * after the soft reset has taken place. 4712 */ 4713 spin_lock_irqsave(&ha->hardware_lock, flags); 4714 ctrl_status = readw(&ha->reg->ctrl_status); 4715 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) { 4716 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status); 4717 readl(&ha->reg->ctrl_status); 4718 } 4719 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4720 4721 /* If soft reset fails then most probably the bios on other 4722 * function is also enabled. 4723 * Since the initialization is sequential the other fn 4724 * wont be able to acknowledge the soft reset. 4725 * Issue a force soft reset to workaround this scenario. 4726 */ 4727 if (max_wait_time == 0) { 4728 /* Issue Force Soft Reset */ 4729 spin_lock_irqsave(&ha->hardware_lock, flags); 4730 writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status); 4731 readl(&ha->reg->ctrl_status); 4732 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4733 /* Wait until the firmware tells us the Soft Reset is done */ 4734 max_wait_time = SOFT_RESET_TOV; 4735 do { 4736 spin_lock_irqsave(&ha->hardware_lock, flags); 4737 ctrl_status = readw(&ha->reg->ctrl_status); 4738 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4739 4740 if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) { 4741 status = QLA_SUCCESS; 4742 break; 4743 } 4744 4745 msleep(1000); 4746 } while ((--max_wait_time)); 4747 } 4748 4749 return status; 4750 } 4751 4752 /** 4753 * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S. 4754 * @ha: Pointer to host adapter structure. 4755 * @res: returned scsi status 4756 * 4757 * This routine is called just prior to a HARD RESET to return all 4758 * outstanding commands back to the Operating System. 4759 * Caller should make sure that the following locks are released 4760 * before this calling routine: Hardware lock, and io_request_lock. 
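 * Each srb found in the active array is completed with the supplied result and its reference is dropped via qla4xxx_srb_compl().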
4761 **/ 4762 static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res) 4763 { 4764 struct srb *srb; 4765 int i; 4766 unsigned long flags; 4767 4768 spin_lock_irqsave(&ha->hardware_lock, flags); 4769 for (i = 0; i < ha->host->can_queue; i++) { 4770 srb = qla4xxx_del_from_active_array(ha, i); 4771 if (srb != NULL) { 4772 srb->cmd->result = res; 4773 kref_put(&srb->srb_ref, qla4xxx_srb_compl); 4774 } 4775 } 4776 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4777 } 4778 4779 void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha) 4780 { 4781 clear_bit(AF_ONLINE, &ha->flags); 4782 4783 /* Disable the board */ 4784 ql4_printk(KERN_INFO, ha, "Disabling the board\n"); 4785 4786 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); 4787 qla4xxx_mark_all_devices_missing(ha); 4788 clear_bit(AF_INIT_DONE, &ha->flags); 4789 } 4790 4791 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session) 4792 { 4793 struct iscsi_session *sess; 4794 struct ddb_entry *ddb_entry; 4795 4796 sess = cls_session->dd_data; 4797 ddb_entry = sess->dd_data; 4798 ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED; 4799 4800 if (ddb_entry->ddb_type == FLASH_DDB) 4801 iscsi_block_session(ddb_entry->sess); 4802 else 4803 iscsi_session_failure(cls_session->dd_data, 4804 ISCSI_ERR_CONN_FAILED); 4805 } 4806 4807 /** 4808 * qla4xxx_recover_adapter - recovers adapter after a fatal error 4809 * @ha: Pointer to host adapter structure. 4810 **/ 4811 static int qla4xxx_recover_adapter(struct scsi_qla_host *ha) 4812 { 4813 int status = QLA_ERROR; 4814 uint8_t reset_chip = 0; 4815 uint32_t dev_state; 4816 unsigned long wait; 4817 4818 /* Stall incoming I/O until we are done */ 4819 scsi_block_requests(ha->host); 4820 clear_bit(AF_ONLINE, &ha->flags); 4821 clear_bit(AF_LINK_UP, &ha->flags); 4822 4823 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__)); 4824 4825 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 4826 4827 if ((is_qla8032(ha) || is_qla8042(ha)) && 4828 !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) { 4829 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", 4830 __func__); 4831 /* disable pause frame for ISP83xx */ 4832 qla4_83xx_disable_pause(ha); 4833 } 4834 4835 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session); 4836 4837 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) 4838 reset_chip = 1; 4839 4840 /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific) 4841 * do not reset adapter, jump to initialize_adapter */ 4842 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 4843 status = QLA_SUCCESS; 4844 goto recover_ha_init_adapter; 4845 } 4846 4847 /* For the ISP-8xxx adapter, issue a stop_firmware if invoked 4848 * from eh_host_reset or ioctl module */ 4849 if (is_qla80XX(ha) && !reset_chip && 4850 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) { 4851 4852 DEBUG2(ql4_printk(KERN_INFO, ha, 4853 "scsi%ld: %s - Performing stop_firmware...\n", 4854 ha->host_no, __func__)); 4855 status = ha->isp_ops->reset_firmware(ha); 4856 if (status == QLA_SUCCESS) { 4857 ha->isp_ops->disable_intrs(ha); 4858 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 4859 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 4860 } else { 4861 /* If the stop_firmware fails then 4862 * reset the entire chip */ 4863 reset_chip = 1; 4864 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 4865 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4866 } 4867 } 4868 4869 /* Issue full chip reset if recovering from a catastrophic error, 4870 * or if stop_firmware fails for ISP-8xxx. 
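 * For ISP-8xxx the firmware heartbeat is first polled for up to FW_ALIVE_WAIT_TOV seconds, unless AF_FW_RECOVERY is already set.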
4871 * This is the default case for ISP-4xxx */ 4872 if (is_qla40XX(ha) || reset_chip) { 4873 if (is_qla40XX(ha)) 4874 goto chip_reset; 4875 4876 /* Check if 8XXX firmware is alive or not 4877 * We may have arrived here from NEED_RESET 4878 * detection only */ 4879 if (test_bit(AF_FW_RECOVERY, &ha->flags)) 4880 goto chip_reset; 4881 4882 wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ); 4883 while (time_before(jiffies, wait)) { 4884 if (qla4_8xxx_check_fw_alive(ha)) { 4885 qla4xxx_mailbox_premature_completion(ha); 4886 break; 4887 } 4888 4889 set_current_state(TASK_UNINTERRUPTIBLE); 4890 schedule_timeout(HZ); 4891 } 4892 chip_reset: 4893 if (!test_bit(AF_FW_RECOVERY, &ha->flags)) 4894 qla4xxx_cmd_wait(ha); 4895 4896 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 4897 DEBUG2(ql4_printk(KERN_INFO, ha, 4898 "scsi%ld: %s - Performing chip reset..\n", 4899 ha->host_no, __func__)); 4900 status = ha->isp_ops->reset_chip(ha); 4901 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 4902 } 4903 4904 /* Flush any pending ddb changed AENs */ 4905 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 4906 4907 recover_ha_init_adapter: 4908 /* Upon successful firmware/chip reset, re-initialize the adapter */ 4909 if (status == QLA_SUCCESS) { 4910 /* For ISP-4xxx, force function 1 to always initialize 4911 * before function 3 to prevent both functions from 4912 * stepping on top of the other */ 4913 if (is_qla40XX(ha) && (ha->mac_index == 3)) 4914 ssleep(6); 4915 4916 /* NOTE: AF_ONLINE flag set upon successful completion of 4917 * qla4xxx_initialize_adapter */ 4918 status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); 4919 if (is_qla80XX(ha) && (status == QLA_ERROR)) { 4920 status = qla4_8xxx_check_init_adapter_retry(ha); 4921 if (status == QLA_ERROR) { 4922 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Don't retry recover adapter\n", 4923 ha->host_no, __func__); 4924 qla4xxx_dead_adapter_cleanup(ha); 4925 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 4926 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 4927 clear_bit(DPC_RESET_HA_FW_CONTEXT, 4928 &ha->dpc_flags); 4929 goto exit_recover; 4930 } 4931 } 4932 } 4933 4934 /* Retry failed adapter initialization, if necessary 4935 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific) 4936 * case to prevent ping-pong resets between functions */ 4937 if (!test_bit(AF_ONLINE, &ha->flags) && 4938 !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 4939 /* Adapter initialization failed, see if we can retry 4940 * resetting the ha. 4941 * Since we don't want to block the DPC for too long 4942 * with multiple resets in the same thread, 4943 * utilize DPC to retry */ 4944 if (is_qla80XX(ha)) { 4945 ha->isp_ops->idc_lock(ha); 4946 dev_state = qla4_8xxx_rd_direct(ha, 4947 QLA8XXX_CRB_DEV_STATE); 4948 ha->isp_ops->idc_unlock(ha); 4949 if (dev_state == QLA8XXX_DEV_FAILED) { 4950 ql4_printk(KERN_INFO, ha, "%s: don't retry " 4951 "recover adapter.
H/W is in Failed " 4952 "state\n", __func__); 4953 qla4xxx_dead_adapter_cleanup(ha); 4954 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 4955 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 4956 clear_bit(DPC_RESET_HA_FW_CONTEXT, 4957 &ha->dpc_flags); 4958 status = QLA_ERROR; 4959 4960 goto exit_recover; 4961 } 4962 } 4963 4964 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) { 4965 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES; 4966 DEBUG2(printk("scsi%ld: recover adapter - retrying " 4967 "(%d) more times\n", ha->host_no, 4968 ha->retry_reset_ha_cnt)); 4969 set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 4970 status = QLA_ERROR; 4971 } else { 4972 if (ha->retry_reset_ha_cnt > 0) { 4973 /* Schedule another Reset HA--DPC will retry */ 4974 ha->retry_reset_ha_cnt--; 4975 DEBUG2(printk("scsi%ld: recover adapter - " 4976 "retry remaining %d\n", 4977 ha->host_no, 4978 ha->retry_reset_ha_cnt)); 4979 status = QLA_ERROR; 4980 } 4981 4982 if (ha->retry_reset_ha_cnt == 0) { 4983 /* Recover adapter retries have been exhausted. 4984 * Adapter DEAD */ 4985 DEBUG2(printk("scsi%ld: recover adapter " 4986 "failed - board disabled\n", 4987 ha->host_no)); 4988 qla4xxx_dead_adapter_cleanup(ha); 4989 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 4990 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 4991 clear_bit(DPC_RESET_HA_FW_CONTEXT, 4992 &ha->dpc_flags); 4993 status = QLA_ERROR; 4994 } 4995 } 4996 } else { 4997 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 4998 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 4999 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 5000 } 5001 5002 exit_recover: 5003 ha->adapter_error_count++; 5004 5005 if (test_bit(AF_ONLINE, &ha->flags)) 5006 ha->isp_ops->enable_intrs(ha); 5007 5008 scsi_unblock_requests(ha->host); 5009 5010 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 5011 DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no, 5012 status == QLA_ERROR ? 
"FAILED" : "SUCCEEDED")); 5013 5014 return status; 5015 } 5016 5017 static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session) 5018 { 5019 struct iscsi_session *sess; 5020 struct ddb_entry *ddb_entry; 5021 struct scsi_qla_host *ha; 5022 5023 sess = cls_session->dd_data; 5024 ddb_entry = sess->dd_data; 5025 ha = ddb_entry->ha; 5026 if (!iscsi_is_session_online(cls_session)) { 5027 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { 5028 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5029 " unblock session\n", ha->host_no, __func__, 5030 ddb_entry->fw_ddb_index); 5031 iscsi_unblock_session(ddb_entry->sess); 5032 } else { 5033 /* Trigger relogin */ 5034 if (ddb_entry->ddb_type == FLASH_DDB) { 5035 if (!(test_bit(DF_RELOGIN, &ddb_entry->flags) || 5036 test_bit(DF_DISABLE_RELOGIN, 5037 &ddb_entry->flags))) 5038 qla4xxx_arm_relogin_timer(ddb_entry); 5039 } else 5040 iscsi_session_failure(cls_session->dd_data, 5041 ISCSI_ERR_CONN_FAILED); 5042 } 5043 } 5044 } 5045 5046 int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session) 5047 { 5048 struct iscsi_session *sess; 5049 struct ddb_entry *ddb_entry; 5050 struct scsi_qla_host *ha; 5051 5052 sess = cls_session->dd_data; 5053 ddb_entry = sess->dd_data; 5054 ha = ddb_entry->ha; 5055 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5056 " unblock session\n", ha->host_no, __func__, 5057 ddb_entry->fw_ddb_index); 5058 5059 iscsi_unblock_session(ddb_entry->sess); 5060 5061 /* Start scan target */ 5062 if (test_bit(AF_ONLINE, &ha->flags)) { 5063 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5064 " start scan\n", ha->host_no, __func__, 5065 ddb_entry->fw_ddb_index); 5066 scsi_queue_work(ha->host, &ddb_entry->sess->scan_work); 5067 } 5068 return QLA_SUCCESS; 5069 } 5070 5071 int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session) 5072 { 5073 struct iscsi_session *sess; 5074 struct ddb_entry *ddb_entry; 5075 struct scsi_qla_host *ha; 5076 int status = QLA_SUCCESS; 5077 5078 sess = cls_session->dd_data; 5079 ddb_entry = sess->dd_data; 5080 ha = ddb_entry->ha; 5081 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5082 " unblock user space session\n", ha->host_no, __func__, 5083 ddb_entry->fw_ddb_index); 5084 5085 if (!iscsi_is_session_online(cls_session)) { 5086 iscsi_conn_start(ddb_entry->conn); 5087 iscsi_conn_login_event(ddb_entry->conn, 5088 ISCSI_CONN_STATE_LOGGED_IN); 5089 } else { 5090 ql4_printk(KERN_INFO, ha, 5091 "scsi%ld: %s: ddb[%d] session [%d] already logged in\n", 5092 ha->host_no, __func__, ddb_entry->fw_ddb_index, 5093 cls_session->sid); 5094 status = QLA_ERROR; 5095 } 5096 5097 return status; 5098 } 5099 5100 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha) 5101 { 5102 iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices); 5103 } 5104 5105 static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) 5106 { 5107 uint16_t relogin_timer; 5108 struct iscsi_session *sess; 5109 struct ddb_entry *ddb_entry; 5110 struct scsi_qla_host *ha; 5111 5112 sess = cls_sess->dd_data; 5113 ddb_entry = sess->dd_data; 5114 ha = ddb_entry->ha; 5115 5116 relogin_timer = max(ddb_entry->default_relogin_timeout, 5117 (uint16_t)RELOGIN_TOV); 5118 atomic_set(&ddb_entry->relogin_timer, relogin_timer); 5119 5120 DEBUG2(ql4_printk(KERN_INFO, ha, 5121 "scsi%ld: Relogin index [%d]. 
TOV=%d\n", ha->host_no, 5122 ddb_entry->fw_ddb_index, relogin_timer)); 5123 5124 qla4xxx_login_flash_ddb(cls_sess); 5125 } 5126 5127 static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess) 5128 { 5129 struct iscsi_session *sess; 5130 struct ddb_entry *ddb_entry; 5131 struct scsi_qla_host *ha; 5132 5133 sess = cls_sess->dd_data; 5134 ddb_entry = sess->dd_data; 5135 ha = ddb_entry->ha; 5136 5137 if (!(ddb_entry->ddb_type == FLASH_DDB)) 5138 return; 5139 5140 if (test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags)) 5141 return; 5142 5143 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) && 5144 !iscsi_is_session_online(cls_sess)) { 5145 DEBUG2(ql4_printk(KERN_INFO, ha, 5146 "relogin issued\n")); 5147 qla4xxx_relogin_flash_ddb(cls_sess); 5148 } 5149 } 5150 5151 void qla4xxx_wake_dpc(struct scsi_qla_host *ha) 5152 { 5153 if (ha->dpc_thread) 5154 queue_work(ha->dpc_thread, &ha->dpc_work); 5155 } 5156 5157 static struct qla4_work_evt * 5158 qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size, 5159 enum qla4_work_type type) 5160 { 5161 struct qla4_work_evt *e; 5162 uint32_t size = sizeof(struct qla4_work_evt) + data_size; 5163 5164 e = kzalloc(size, GFP_ATOMIC); 5165 if (!e) 5166 return NULL; 5167 5168 INIT_LIST_HEAD(&e->list); 5169 e->type = type; 5170 return e; 5171 } 5172 5173 static void qla4xxx_post_work(struct scsi_qla_host *ha, 5174 struct qla4_work_evt *e) 5175 { 5176 unsigned long flags; 5177 5178 spin_lock_irqsave(&ha->work_lock, flags); 5179 list_add_tail(&e->list, &ha->work_list); 5180 spin_unlock_irqrestore(&ha->work_lock, flags); 5181 qla4xxx_wake_dpc(ha); 5182 } 5183 5184 int qla4xxx_post_aen_work(struct scsi_qla_host *ha, 5185 enum iscsi_host_event_code aen_code, 5186 uint32_t data_size, uint8_t *data) 5187 { 5188 struct qla4_work_evt *e; 5189 5190 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN); 5191 if (!e) 5192 return QLA_ERROR; 5193 5194 e->u.aen.code = aen_code; 5195 e->u.aen.data_size = data_size; 5196 memcpy(e->u.aen.data, data, data_size); 5197 5198 qla4xxx_post_work(ha, e); 5199 5200 return QLA_SUCCESS; 5201 } 5202 5203 int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha, 5204 uint32_t status, uint32_t pid, 5205 uint32_t data_size, uint8_t *data) 5206 { 5207 struct qla4_work_evt *e; 5208 5209 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS); 5210 if (!e) 5211 return QLA_ERROR; 5212 5213 e->u.ping.status = status; 5214 e->u.ping.pid = pid; 5215 e->u.ping.data_size = data_size; 5216 memcpy(e->u.ping.data, data, data_size); 5217 5218 qla4xxx_post_work(ha, e); 5219 5220 return QLA_SUCCESS; 5221 } 5222 5223 static void qla4xxx_do_work(struct scsi_qla_host *ha) 5224 { 5225 struct qla4_work_evt *e, *tmp; 5226 unsigned long flags; 5227 LIST_HEAD(work); 5228 5229 spin_lock_irqsave(&ha->work_lock, flags); 5230 list_splice_init(&ha->work_list, &work); 5231 spin_unlock_irqrestore(&ha->work_lock, flags); 5232 5233 list_for_each_entry_safe(e, tmp, &work, list) { 5234 list_del_init(&e->list); 5235 5236 switch (e->type) { 5237 case QLA4_EVENT_AEN: 5238 iscsi_post_host_event(ha->host_no, 5239 &qla4xxx_iscsi_transport, 5240 e->u.aen.code, 5241 e->u.aen.data_size, 5242 e->u.aen.data); 5243 break; 5244 case QLA4_EVENT_PING_STATUS: 5245 iscsi_ping_comp_event(ha->host_no, 5246 &qla4xxx_iscsi_transport, 5247 e->u.ping.status, 5248 e->u.ping.pid, 5249 e->u.ping.data_size, 5250 e->u.ping.data); 5251 break; 5252 default: 5253 ql4_printk(KERN_WARNING, ha, "event type: 0x%x not " 5254 "supported", e->type); 5255 } 5256 kfree(e); 5257 } 5258 } 5259 5260 
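/* Work events posted via qla4xxx_post_work() are drained by qla4xxx_do_work() above; it runs from the DPC workqueue in process context, not from the interrupt handler. */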
/** 5261 * qla4xxx_do_dpc - dpc routine 5262 * @work: pointer to the dpc_work member of the adapter structure 5263 * 5264 * This routine is scheduled by the interrupt handler to perform the 5265 * background processing for interrupts. It is queued on the adapter's 5266 * DPC workqueue and runs in process context, so it may sleep. In fact, 5267 * the mid-level tries to sleep when it reaches the driver threshold 5268 * "host->can_queue", which would panic if done from interrupt context; 5269 * deferring the work here avoids that. 5270 **/ 5271 static void qla4xxx_do_dpc(struct work_struct *work) 5272 { 5273 struct scsi_qla_host *ha = 5274 container_of(work, struct scsi_qla_host, dpc_work); 5275 int status = QLA_ERROR; 5276 5277 DEBUG2(ql4_printk(KERN_INFO, ha, 5278 "scsi%ld: %s: DPC handler waking up. flags = 0x%08lx, dpc_flags = 0x%08lx\n", 5279 ha->host_no, __func__, ha->flags, ha->dpc_flags)); 5280 5281 /* Initialization not yet finished. Don't do anything yet. */ 5282 if (!test_bit(AF_INIT_DONE, &ha->flags)) 5283 return; 5284 5285 if (test_bit(AF_EEH_BUSY, &ha->flags)) { 5286 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n", 5287 ha->host_no, __func__, ha->flags)); 5288 return; 5289 } 5290 5291 /* post events to application */ 5292 qla4xxx_do_work(ha); 5293 5294 if (is_qla80XX(ha)) { 5295 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) { 5296 if (is_qla8032(ha) || is_qla8042(ha)) { 5297 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", 5298 __func__); 5299 /* disable pause frame for ISP83xx */ 5300 qla4_83xx_disable_pause(ha); 5301 } 5302 5303 ha->isp_ops->idc_lock(ha); 5304 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 5305 QLA8XXX_DEV_FAILED); 5306 ha->isp_ops->idc_unlock(ha); 5307 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n"); 5308 qla4_8xxx_device_state_handler(ha); 5309 } 5310 5311 if (test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) { 5312 if (is_qla8042(ha)) { 5313 if (ha->idc_info.info2 & 5314 ENABLE_INTERNAL_LOOPBACK) { 5315 ql4_printk(KERN_INFO, ha, "%s: Disabling ACB\n", 5316 __func__); 5317 status = qla4_84xx_config_acb(ha, 5318 ACB_CONFIG_DISABLE); 5319 if (status != QLA_SUCCESS) { 5320 ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n", 5321 __func__); 5322 } 5323 } 5324 } 5325 qla4_83xx_post_idc_ack(ha); 5326 clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags); 5327 } 5328 5329 if (is_qla8042(ha) && 5330 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) { 5331 ql4_printk(KERN_INFO, ha, "%s: Restoring ACB\n", 5332 __func__); 5333 if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) != 5334 QLA_SUCCESS) { 5335 ql4_printk(KERN_INFO, ha, "%s: ACB config failed ", 5336 __func__); 5337 } 5338 clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags); 5339 } 5340 5341 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { 5342 qla4_8xxx_need_qsnt_handler(ha); 5343 } 5344 } 5345 5346 if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) && 5347 (test_bit(DPC_RESET_HA, &ha->dpc_flags) || 5348 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 5349 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) { 5350 if ((is_qla8022(ha) && ql4xdontresethba) || 5351 ((is_qla8032(ha) || is_qla8042(ha)) && 5352 qla4_83xx_idc_dontreset(ha))) { 5353 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", 5354 ha->host_no, __func__)); 5355 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 5356 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); 5357 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 5358 goto dpc_post_reset_ha; 5359 } 5360 if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) || 5361
test_bit(DPC_RESET_HA, &ha->dpc_flags)) 5362 qla4xxx_recover_adapter(ha); 5363 5364 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 5365 uint8_t wait_time = RESET_INTR_TOV; 5366 5367 while ((readw(&ha->reg->ctrl_status) & 5368 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) { 5369 if (--wait_time == 0) 5370 break; 5371 msleep(1000); 5372 } 5373 if (wait_time == 0) 5374 DEBUG2(printk("scsi%ld: %s: SR|FSR " 5375 "bit not cleared-- resetting\n", 5376 ha->host_no, __func__)); 5377 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 5378 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) { 5379 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 5380 status = qla4xxx_recover_adapter(ha); 5381 } 5382 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); 5383 if (status == QLA_SUCCESS) 5384 ha->isp_ops->enable_intrs(ha); 5385 } 5386 } 5387 5388 dpc_post_reset_ha: 5389 /* ---- process AEN? --- */ 5390 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) 5391 qla4xxx_process_aen(ha, PROCESS_ALL_AENS); 5392 5393 /* ---- Get DHCP IP Address? --- */ 5394 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags)) 5395 qla4xxx_get_dhcp_ip_address(ha); 5396 5397 /* ---- relogin device? --- */ 5398 if (adapter_up(ha) && 5399 test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) { 5400 iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin); 5401 } 5402 5403 /* ---- link change? --- */ 5404 if (!test_bit(AF_LOOPBACK, &ha->flags) && 5405 test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) { 5406 if (!test_bit(AF_LINK_UP, &ha->flags)) { 5407 /* ---- link down? --- */ 5408 qla4xxx_mark_all_devices_missing(ha); 5409 } else { 5410 /* ---- link up? --- * 5411 * F/W will auto login to all devices ONLY ONCE after 5412 * link up during driver initialization and runtime 5413 * fatal error recovery. Therefore, the driver must 5414 * manually relogin to devices when recovering from 5415 * connection failures, logouts, expired KATO, etc. */ 5416 if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) { 5417 qla4xxx_build_ddb_list(ha, ha->is_reset); 5418 iscsi_host_for_each_session(ha->host, 5419 qla4xxx_login_flash_ddb); 5420 } else 5421 qla4xxx_relogin_all_devices(ha); 5422 } 5423 } 5424 if (test_and_clear_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags)) { 5425 if (qla4xxx_sysfs_ddb_export(ha)) 5426 ql4_printk(KERN_ERR, ha, "%s: Error exporting ddb to sysfs\n", 5427 __func__); 5428 } 5429 } 5430 5431 /** 5432 * qla4xxx_free_adapter - release the adapter 5433 * @ha: pointer to adapter structure 5434 **/ 5435 static void qla4xxx_free_adapter(struct scsi_qla_host *ha) 5436 { 5437 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); 5438 5439 /* Turn-off interrupts on the card. 
*/ 5440 ha->isp_ops->disable_intrs(ha); 5441 5442 if (is_qla40XX(ha)) { 5443 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR), 5444 &ha->reg->ctrl_status); 5445 readl(&ha->reg->ctrl_status); 5446 } else if (is_qla8022(ha)) { 5447 writel(0, &ha->qla4_82xx_reg->host_int); 5448 readl(&ha->qla4_82xx_reg->host_int); 5449 } else if (is_qla8032(ha) || is_qla8042(ha)) { 5450 writel(0, &ha->qla4_83xx_reg->risc_intr); 5451 readl(&ha->qla4_83xx_reg->risc_intr); 5452 } 5453 5454 /* Remove timer thread, if present */ 5455 if (ha->timer_active) 5456 qla4xxx_stop_timer(ha); 5457 5458 /* Kill the kernel thread for this host */ 5459 if (ha->dpc_thread) 5460 destroy_workqueue(ha->dpc_thread); 5461 5462 /* Kill the kernel thread for this host */ 5463 if (ha->task_wq) 5464 destroy_workqueue(ha->task_wq); 5465 5466 /* Put firmware in known state */ 5467 ha->isp_ops->reset_firmware(ha); 5468 5469 if (is_qla80XX(ha)) { 5470 ha->isp_ops->idc_lock(ha); 5471 qla4_8xxx_clear_drv_active(ha); 5472 ha->isp_ops->idc_unlock(ha); 5473 } 5474 5475 /* Detach interrupts */ 5476 qla4xxx_free_irqs(ha); 5477 5478 /* free extra memory */ 5479 qla4xxx_mem_free(ha); 5480 } 5481 5482 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha) 5483 { 5484 int status = 0; 5485 unsigned long mem_base, mem_len, db_base, db_len; 5486 struct pci_dev *pdev = ha->pdev; 5487 5488 status = pci_request_regions(pdev, DRIVER_NAME); 5489 if (status) { 5490 printk(KERN_WARNING 5491 "scsi(%ld) Failed to reserve PIO regions (%s) " 5492 "status=%d\n", ha->host_no, pci_name(pdev), status); 5493 goto iospace_error_exit; 5494 } 5495 5496 DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n", 5497 __func__, pdev->revision)); 5498 ha->revision_id = pdev->revision; 5499 5500 /* remap phys address */ 5501 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ 5502 mem_len = pci_resource_len(pdev, 0); 5503 DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n", 5504 __func__, mem_base, mem_len)); 5505 5506 /* mapping of pcibase pointer */ 5507 ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len); 5508 if (!ha->nx_pcibase) { 5509 printk(KERN_ERR 5510 "cannot remap MMIO (%s), aborting\n", pci_name(pdev)); 5511 pci_release_regions(ha->pdev); 5512 goto iospace_error_exit; 5513 } 5514 5515 /* Mapping of IO base pointer, door bell read and write pointer */ 5516 5517 /* mapping of IO base pointer */ 5518 if (is_qla8022(ha)) { 5519 ha->qla4_82xx_reg = (struct device_reg_82xx __iomem *) 5520 ((uint8_t *)ha->nx_pcibase + 0xbc000 + 5521 (ha->pdev->devfn << 11)); 5522 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 : 5523 QLA82XX_CAM_RAM_DB2); 5524 } else if (is_qla8032(ha) || is_qla8042(ha)) { 5525 ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *) 5526 ((uint8_t *)ha->nx_pcibase); 5527 } 5528 5529 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */ 5530 db_len = pci_resource_len(pdev, 4); 5531 5532 return 0; 5533 iospace_error_exit: 5534 return -ENOMEM; 5535 } 5536 5537 /*** 5538 * qla4xxx_iospace_config - maps registers 5539 * @ha: pointer to adapter structure 5540 * 5541 * This routines maps HBA's registers from the pci address space 5542 * into the kernel virtual address space for memory mapped i/o. 
5543 **/ 5544 int qla4xxx_iospace_config(struct scsi_qla_host *ha) 5545 { 5546 unsigned long pio, pio_len, pio_flags; 5547 unsigned long mmio, mmio_len, mmio_flags; 5548 5549 pio = pci_resource_start(ha->pdev, 0); 5550 pio_len = pci_resource_len(ha->pdev, 0); 5551 pio_flags = pci_resource_flags(ha->pdev, 0); 5552 if (pio_flags & IORESOURCE_IO) { 5553 if (pio_len < MIN_IOBASE_LEN) { 5554 ql4_printk(KERN_WARNING, ha, 5555 "Invalid PCI I/O region size\n"); 5556 pio = 0; 5557 } 5558 } else { 5559 ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n"); 5560 pio = 0; 5561 } 5562 5563 /* Use MMIO operations for all accesses. */ 5564 mmio = pci_resource_start(ha->pdev, 1); 5565 mmio_len = pci_resource_len(ha->pdev, 1); 5566 mmio_flags = pci_resource_flags(ha->pdev, 1); 5567 5568 if (!(mmio_flags & IORESOURCE_MEM)) { 5569 ql4_printk(KERN_ERR, ha, 5570 "region #0 not an MMIO resource, aborting\n"); 5571 5572 goto iospace_error_exit; 5573 } 5574 5575 if (mmio_len < MIN_IOBASE_LEN) { 5576 ql4_printk(KERN_ERR, ha, 5577 "Invalid PCI mem region size, aborting\n"); 5578 goto iospace_error_exit; 5579 } 5580 5581 if (pci_request_regions(ha->pdev, DRIVER_NAME)) { 5582 ql4_printk(KERN_WARNING, ha, 5583 "Failed to reserve PIO/MMIO regions\n"); 5584 5585 goto iospace_error_exit; 5586 } 5587 5588 ha->pio_address = pio; 5589 ha->pio_length = pio_len; 5590 ha->reg = ioremap(mmio, MIN_IOBASE_LEN); 5591 if (!ha->reg) { 5592 ql4_printk(KERN_ERR, ha, 5593 "cannot remap MMIO, aborting\n"); 5594 5595 goto iospace_error_exit; 5596 } 5597 5598 return 0; 5599 5600 iospace_error_exit: 5601 return -ENOMEM; 5602 } 5603 5604 static struct isp_operations qla4xxx_isp_ops = { 5605 .iospace_config = qla4xxx_iospace_config, 5606 .pci_config = qla4xxx_pci_config, 5607 .disable_intrs = qla4xxx_disable_intrs, 5608 .enable_intrs = qla4xxx_enable_intrs, 5609 .start_firmware = qla4xxx_start_firmware, 5610 .intr_handler = qla4xxx_intr_handler, 5611 .interrupt_service_routine = qla4xxx_interrupt_service_routine, 5612 .reset_chip = qla4xxx_soft_reset, 5613 .reset_firmware = qla4xxx_hw_reset, 5614 .queue_iocb = qla4xxx_queue_iocb, 5615 .complete_iocb = qla4xxx_complete_iocb, 5616 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out, 5617 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in, 5618 .get_sys_info = qla4xxx_get_sys_info, 5619 .queue_mailbox_command = qla4xxx_queue_mbox_cmd, 5620 .process_mailbox_interrupt = qla4xxx_process_mbox_intr, 5621 }; 5622 5623 static struct isp_operations qla4_82xx_isp_ops = { 5624 .iospace_config = qla4_8xxx_iospace_config, 5625 .pci_config = qla4_8xxx_pci_config, 5626 .disable_intrs = qla4_82xx_disable_intrs, 5627 .enable_intrs = qla4_82xx_enable_intrs, 5628 .start_firmware = qla4_8xxx_load_risc, 5629 .restart_firmware = qla4_82xx_try_start_fw, 5630 .intr_handler = qla4_82xx_intr_handler, 5631 .interrupt_service_routine = qla4_82xx_interrupt_service_routine, 5632 .need_reset = qla4_8xxx_need_reset, 5633 .reset_chip = qla4_82xx_isp_reset, 5634 .reset_firmware = qla4_8xxx_stop_firmware, 5635 .queue_iocb = qla4_82xx_queue_iocb, 5636 .complete_iocb = qla4_82xx_complete_iocb, 5637 .rd_shdw_req_q_out = qla4_82xx_rd_shdw_req_q_out, 5638 .rd_shdw_rsp_q_in = qla4_82xx_rd_shdw_rsp_q_in, 5639 .get_sys_info = qla4_8xxx_get_sys_info, 5640 .rd_reg_direct = qla4_82xx_rd_32, 5641 .wr_reg_direct = qla4_82xx_wr_32, 5642 .rd_reg_indirect = qla4_82xx_md_rd_32, 5643 .wr_reg_indirect = qla4_82xx_md_wr_32, 5644 .idc_lock = qla4_82xx_idc_lock, 5645 .idc_unlock = qla4_82xx_idc_unlock, 5646 .rom_lock_recovery = 
qla4_82xx_rom_lock_recovery, 5647 .queue_mailbox_command = qla4_82xx_queue_mbox_cmd, 5648 .process_mailbox_interrupt = qla4_82xx_process_mbox_intr, 5649 }; 5650 5651 static struct isp_operations qla4_83xx_isp_ops = { 5652 .iospace_config = qla4_8xxx_iospace_config, 5653 .pci_config = qla4_8xxx_pci_config, 5654 .disable_intrs = qla4_83xx_disable_intrs, 5655 .enable_intrs = qla4_83xx_enable_intrs, 5656 .start_firmware = qla4_8xxx_load_risc, 5657 .restart_firmware = qla4_83xx_start_firmware, 5658 .intr_handler = qla4_83xx_intr_handler, 5659 .interrupt_service_routine = qla4_83xx_interrupt_service_routine, 5660 .need_reset = qla4_8xxx_need_reset, 5661 .reset_chip = qla4_83xx_isp_reset, 5662 .reset_firmware = qla4_8xxx_stop_firmware, 5663 .queue_iocb = qla4_83xx_queue_iocb, 5664 .complete_iocb = qla4_83xx_complete_iocb, 5665 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out, 5666 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in, 5667 .get_sys_info = qla4_8xxx_get_sys_info, 5668 .rd_reg_direct = qla4_83xx_rd_reg, 5669 .wr_reg_direct = qla4_83xx_wr_reg, 5670 .rd_reg_indirect = qla4_83xx_rd_reg_indirect, 5671 .wr_reg_indirect = qla4_83xx_wr_reg_indirect, 5672 .idc_lock = qla4_83xx_drv_lock, 5673 .idc_unlock = qla4_83xx_drv_unlock, 5674 .rom_lock_recovery = qla4_83xx_rom_lock_recovery, 5675 .queue_mailbox_command = qla4_83xx_queue_mbox_cmd, 5676 .process_mailbox_interrupt = qla4_83xx_process_mbox_intr, 5677 }; 5678 5679 uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha) 5680 { 5681 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out); 5682 } 5683 5684 uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha) 5685 { 5686 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out)); 5687 } 5688 5689 uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) 5690 { 5691 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in); 5692 } 5693 5694 uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) 5695 { 5696 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in)); 5697 } 5698 5699 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf) 5700 { 5701 struct scsi_qla_host *ha = data; 5702 char *str = buf; 5703 int rc; 5704 5705 switch (type) { 5706 case ISCSI_BOOT_ETH_FLAGS: 5707 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT); 5708 break; 5709 case ISCSI_BOOT_ETH_INDEX: 5710 rc = sprintf(str, "0\n"); 5711 break; 5712 case ISCSI_BOOT_ETH_MAC: 5713 rc = sysfs_format_mac(str, ha->my_mac, 5714 MAC_ADDR_LEN); 5715 break; 5716 default: 5717 rc = -ENOSYS; 5718 break; 5719 } 5720 return rc; 5721 } 5722 5723 static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type) 5724 { 5725 int rc; 5726 5727 switch (type) { 5728 case ISCSI_BOOT_ETH_FLAGS: 5729 case ISCSI_BOOT_ETH_MAC: 5730 case ISCSI_BOOT_ETH_INDEX: 5731 rc = S_IRUGO; 5732 break; 5733 default: 5734 rc = 0; 5735 break; 5736 } 5737 return rc; 5738 } 5739 5740 static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf) 5741 { 5742 struct scsi_qla_host *ha = data; 5743 char *str = buf; 5744 int rc; 5745 5746 switch (type) { 5747 case ISCSI_BOOT_INI_INITIATOR_NAME: 5748 rc = sprintf(str, "%s\n", ha->name_string); 5749 break; 5750 default: 5751 rc = -ENOSYS; 5752 break; 5753 } 5754 return rc; 5755 } 5756 5757 static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type) 5758 { 5759 int rc; 5760 5761 switch (type) { 5762 case ISCSI_BOOT_INI_INITIATOR_NAME: 5763 rc = S_IRUGO; 5764 break; 5765 default: 5766 rc = 0; 5767 break; 5768 } 5769 return rc; 5770 } 5771 5772 static ssize_t 
5773 qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type, 5774 char *buf) 5775 { 5776 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0]; 5777 char *str = buf; 5778 int rc; 5779 5780 switch (type) { 5781 case ISCSI_BOOT_TGT_NAME: 5782 rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name); 5783 break; 5784 case ISCSI_BOOT_TGT_IP_ADDR: 5785 if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1) 5786 rc = sprintf(buf, "%pI4\n", 5787 &boot_conn->dest_ipaddr.ip_address); 5788 else 5789 rc = sprintf(str, "%pI6\n", 5790 &boot_conn->dest_ipaddr.ip_address); 5791 break; 5792 case ISCSI_BOOT_TGT_PORT: 5793 rc = sprintf(str, "%d\n", boot_conn->dest_port); 5794 break; 5795 case ISCSI_BOOT_TGT_CHAP_NAME: 5796 rc = sprintf(str, "%.*s\n", 5797 boot_conn->chap.target_chap_name_length, 5798 (char *)&boot_conn->chap.target_chap_name); 5799 break; 5800 case ISCSI_BOOT_TGT_CHAP_SECRET: 5801 rc = sprintf(str, "%.*s\n", 5802 boot_conn->chap.target_secret_length, 5803 (char *)&boot_conn->chap.target_secret); 5804 break; 5805 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 5806 rc = sprintf(str, "%.*s\n", 5807 boot_conn->chap.intr_chap_name_length, 5808 (char *)&boot_conn->chap.intr_chap_name); 5809 break; 5810 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 5811 rc = sprintf(str, "%.*s\n", 5812 boot_conn->chap.intr_secret_length, 5813 (char *)&boot_conn->chap.intr_secret); 5814 break; 5815 case ISCSI_BOOT_TGT_FLAGS: 5816 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT); 5817 break; 5818 case ISCSI_BOOT_TGT_NIC_ASSOC: 5819 rc = sprintf(str, "0\n"); 5820 break; 5821 default: 5822 rc = -ENOSYS; 5823 break; 5824 } 5825 return rc; 5826 } 5827 5828 static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf) 5829 { 5830 struct scsi_qla_host *ha = data; 5831 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess); 5832 5833 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf); 5834 } 5835 5836 static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf) 5837 { 5838 struct scsi_qla_host *ha = data; 5839 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess); 5840 5841 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf); 5842 } 5843 5844 static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type) 5845 { 5846 int rc; 5847 5848 switch (type) { 5849 case ISCSI_BOOT_TGT_NAME: 5850 case ISCSI_BOOT_TGT_IP_ADDR: 5851 case ISCSI_BOOT_TGT_PORT: 5852 case ISCSI_BOOT_TGT_CHAP_NAME: 5853 case ISCSI_BOOT_TGT_CHAP_SECRET: 5854 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 5855 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 5856 case ISCSI_BOOT_TGT_NIC_ASSOC: 5857 case ISCSI_BOOT_TGT_FLAGS: 5858 rc = S_IRUGO; 5859 break; 5860 default: 5861 rc = 0; 5862 break; 5863 } 5864 return rc; 5865 } 5866 5867 static void qla4xxx_boot_release(void *data) 5868 { 5869 struct scsi_qla_host *ha = data; 5870 5871 scsi_host_put(ha->host); 5872 } 5873 5874 static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[]) 5875 { 5876 dma_addr_t buf_dma; 5877 uint32_t addr, pri_addr, sec_addr; 5878 uint32_t offset; 5879 uint16_t func_num; 5880 uint8_t val; 5881 uint8_t *buf = NULL; 5882 size_t size = 13 * sizeof(uint8_t); 5883 int ret = QLA_SUCCESS; 5884 5885 func_num = PCI_FUNC(ha->pdev->devfn); 5886 5887 ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n", 5888 __func__, ha->pdev->device, func_num); 5889 5890 if (is_qla40XX(ha)) { 5891 if (func_num == 1) { 5892 addr = NVRAM_PORT0_BOOT_MODE; 5893 pri_addr = NVRAM_PORT0_BOOT_PRI_TGT; 5894 sec_addr = 
NVRAM_PORT0_BOOT_SEC_TGT; 5895 } else if (func_num == 3) { 5896 addr = NVRAM_PORT1_BOOT_MODE; 5897 pri_addr = NVRAM_PORT1_BOOT_PRI_TGT; 5898 sec_addr = NVRAM_PORT1_BOOT_SEC_TGT; 5899 } else { 5900 ret = QLA_ERROR; 5901 goto exit_boot_info; 5902 } 5903 5904 /* Check Boot Mode */ 5905 val = rd_nvram_byte(ha, addr); 5906 if (!(val & 0x07)) { 5907 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot " 5908 "options : 0x%x\n", __func__, val)); 5909 ret = QLA_ERROR; 5910 goto exit_boot_info; 5911 } 5912 5913 /* get primary valid target index */ 5914 val = rd_nvram_byte(ha, pri_addr); 5915 if (val & BIT_7) 5916 ddb_index[0] = (val & 0x7f); 5917 5918 /* get secondary valid target index */ 5919 val = rd_nvram_byte(ha, sec_addr); 5920 if (val & BIT_7) 5921 ddb_index[1] = (val & 0x7f); 5922 5923 } else if (is_qla80XX(ha)) { 5924 buf = dma_alloc_coherent(&ha->pdev->dev, size, 5925 &buf_dma, GFP_KERNEL); 5926 if (!buf) { 5927 DEBUG2(ql4_printk(KERN_ERR, ha, 5928 "%s: Unable to allocate dma buffer\n", 5929 __func__)); 5930 ret = QLA_ERROR; 5931 goto exit_boot_info; 5932 } 5933 5934 if (ha->port_num == 0) 5935 offset = BOOT_PARAM_OFFSET_PORT0; 5936 else if (ha->port_num == 1) 5937 offset = BOOT_PARAM_OFFSET_PORT1; 5938 else { 5939 ret = QLA_ERROR; 5940 goto exit_boot_info_free; 5941 } 5942 addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) + 5943 offset; 5944 if (qla4xxx_get_flash(ha, buf_dma, addr, 5945 13 * sizeof(uint8_t)) != QLA_SUCCESS) { 5946 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash" 5947 " failed\n", ha->host_no, __func__)); 5948 ret = QLA_ERROR; 5949 goto exit_boot_info_free; 5950 } 5951 /* Check Boot Mode */ 5952 if (!(buf[1] & 0x07)) { 5953 DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options" 5954 " : 0x%x\n", buf[1])); 5955 ret = QLA_ERROR; 5956 goto exit_boot_info_free; 5957 } 5958 5959 /* get primary valid target index */ 5960 if (buf[2] & BIT_7) 5961 ddb_index[0] = buf[2] & 0x7f; 5962 5963 /* get secondary valid target index */ 5964 if (buf[11] & BIT_7) 5965 ddb_index[1] = buf[11] & 0x7f; 5966 } else { 5967 ret = QLA_ERROR; 5968 goto exit_boot_info; 5969 } 5970 5971 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary" 5972 " target ID %d\n", __func__, ddb_index[0], 5973 ddb_index[1])); 5974 5975 exit_boot_info_free: 5976 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma); 5977 exit_boot_info: 5978 ha->pri_ddb_idx = ddb_index[0]; 5979 ha->sec_ddb_idx = ddb_index[1]; 5980 return ret; 5981 } 5982 5983 /** 5984 * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password 5985 * @ha: pointer to adapter structure 5986 * @username: CHAP username to be returned 5987 * @password: CHAP password to be returned 5988 * 5989 * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP 5990 * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/. 5991 * So from the CHAP cache find the first BIDI CHAP entry and set it 5992 * to the boot record in sysfs. 
5993 **/ 5994 static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username, 5995 char *password) 5996 { 5997 int i, ret = -EINVAL; 5998 int max_chap_entries = 0; 5999 struct ql4_chap_table *chap_table; 6000 6001 if (is_qla80XX(ha)) 6002 max_chap_entries = (ha->hw.flt_chap_size / 2) / 6003 sizeof(struct ql4_chap_table); 6004 else 6005 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 6006 6007 if (!ha->chap_list) { 6008 ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n"); 6009 return ret; 6010 } 6011 6012 mutex_lock(&ha->chap_sem); 6013 for (i = 0; i < max_chap_entries; i++) { 6014 chap_table = (struct ql4_chap_table *)ha->chap_list + i; 6015 if (chap_table->cookie != 6016 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) { 6017 continue; 6018 } 6019 6020 if (chap_table->flags & BIT_7) /* local */ 6021 continue; 6022 6023 if (!(chap_table->flags & BIT_6)) /* Not BIDI */ 6024 continue; 6025 6026 strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); 6027 strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); 6028 ret = 0; 6029 break; 6030 } 6031 mutex_unlock(&ha->chap_sem); 6032 6033 return ret; 6034 } 6035 6036 6037 static int qla4xxx_get_boot_target(struct scsi_qla_host *ha, 6038 struct ql4_boot_session_info *boot_sess, 6039 uint16_t ddb_index) 6040 { 6041 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0]; 6042 struct dev_db_entry *fw_ddb_entry; 6043 dma_addr_t fw_ddb_entry_dma; 6044 uint16_t idx; 6045 uint16_t options; 6046 int ret = QLA_SUCCESS; 6047 6048 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 6049 &fw_ddb_entry_dma, GFP_KERNEL); 6050 if (!fw_ddb_entry) { 6051 DEBUG2(ql4_printk(KERN_ERR, ha, 6052 "%s: Unable to allocate dma buffer.\n", 6053 __func__)); 6054 ret = QLA_ERROR; 6055 return ret; 6056 } 6057 6058 if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry, 6059 fw_ddb_entry_dma, ddb_index)) { 6060 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at " 6061 "index [%d]\n", __func__, ddb_index)); 6062 ret = QLA_ERROR; 6063 goto exit_boot_target; 6064 } 6065 6066 /* Update target name and IP from DDB */ 6067 memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name, 6068 min(sizeof(boot_sess->target_name), 6069 sizeof(fw_ddb_entry->iscsi_name))); 6070 6071 options = le16_to_cpu(fw_ddb_entry->options); 6072 if (options & DDB_OPT_IPV6_DEVICE) { 6073 memcpy(&boot_conn->dest_ipaddr.ip_address, 6074 &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN); 6075 } else { 6076 boot_conn->dest_ipaddr.ip_type = 0x1; 6077 memcpy(&boot_conn->dest_ipaddr.ip_address, 6078 &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN); 6079 } 6080 6081 boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port); 6082 6083 /* update chap information */ 6084 idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 6085 6086 if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) { 6087 6088 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n")); 6089 6090 ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap. 
6091 target_chap_name, 6092 (char *)&boot_conn->chap.target_secret, 6093 idx); 6094 if (ret) { 6095 ql4_printk(KERN_ERR, ha, "Failed to set chap\n"); 6096 ret = QLA_ERROR; 6097 goto exit_boot_target; 6098 } 6099 6100 boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN; 6101 boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN; 6102 } 6103 6104 if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) { 6105 6106 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n")); 6107 6108 ret = qla4xxx_get_bidi_chap(ha, 6109 (char *)&boot_conn->chap.intr_chap_name, 6110 (char *)&boot_conn->chap.intr_secret); 6111 6112 if (ret) { 6113 ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n"); 6114 ret = QLA_ERROR; 6115 goto exit_boot_target; 6116 } 6117 6118 boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN; 6119 boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN; 6120 } 6121 6122 exit_boot_target: 6123 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 6124 fw_ddb_entry, fw_ddb_entry_dma); 6125 return ret; 6126 } 6127 6128 static int qla4xxx_get_boot_info(struct scsi_qla_host *ha) 6129 { 6130 uint16_t ddb_index[2]; 6131 int ret = QLA_ERROR; 6132 int rval; 6133 6134 memset(ddb_index, 0, sizeof(ddb_index)); 6135 ddb_index[0] = 0xffff; 6136 ddb_index[1] = 0xffff; 6137 ret = get_fw_boot_info(ha, ddb_index); 6138 if (ret != QLA_SUCCESS) { 6139 DEBUG2(ql4_printk(KERN_INFO, ha, 6140 "%s: No boot target configured.\n", __func__)); 6141 return ret; 6142 } 6143 6144 if (ql4xdisablesysfsboot) 6145 return QLA_SUCCESS; 6146 6147 if (ddb_index[0] == 0xffff) 6148 goto sec_target; 6149 6150 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess), 6151 ddb_index[0]); 6152 if (rval != QLA_SUCCESS) { 6153 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not " 6154 "configured\n", __func__)); 6155 } else 6156 ret = QLA_SUCCESS; 6157 6158 sec_target: 6159 if (ddb_index[1] == 0xffff) 6160 goto exit_get_boot_info; 6161 6162 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess), 6163 ddb_index[1]); 6164 if (rval != QLA_SUCCESS) { 6165 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not" 6166 " configured\n", __func__)); 6167 } else 6168 ret = QLA_SUCCESS; 6169 6170 exit_get_boot_info: 6171 return ret; 6172 } 6173 6174 static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha) 6175 { 6176 struct iscsi_boot_kobj *boot_kobj; 6177 6178 if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS) 6179 return QLA_ERROR; 6180 6181 if (ql4xdisablesysfsboot) { 6182 ql4_printk(KERN_INFO, ha, 6183 "%s: syfsboot disabled - driver will trigger login " 6184 "and publish session for discovery .\n", __func__); 6185 return QLA_SUCCESS; 6186 } 6187 6188 6189 ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no); 6190 if (!ha->boot_kset) 6191 goto kset_free; 6192 6193 if (!scsi_host_get(ha->host)) 6194 goto kset_free; 6195 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha, 6196 qla4xxx_show_boot_tgt_pri_info, 6197 qla4xxx_tgt_get_attr_visibility, 6198 qla4xxx_boot_release); 6199 if (!boot_kobj) 6200 goto put_host; 6201 6202 if (!scsi_host_get(ha->host)) 6203 goto kset_free; 6204 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha, 6205 qla4xxx_show_boot_tgt_sec_info, 6206 qla4xxx_tgt_get_attr_visibility, 6207 qla4xxx_boot_release); 6208 if (!boot_kobj) 6209 goto put_host; 6210 6211 if (!scsi_host_get(ha->host)) 6212 goto kset_free; 6213 boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha, 6214 qla4xxx_show_boot_ini_info, 6215 
qla4xxx_ini_get_attr_visibility, 6216 qla4xxx_boot_release); 6217 if (!boot_kobj) 6218 goto put_host; 6219 6220 if (!scsi_host_get(ha->host)) 6221 goto kset_free; 6222 boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha, 6223 qla4xxx_show_boot_eth_info, 6224 qla4xxx_eth_get_attr_visibility, 6225 qla4xxx_boot_release); 6226 if (!boot_kobj) 6227 goto put_host; 6228 6229 return QLA_SUCCESS; 6230 6231 put_host: 6232 scsi_host_put(ha->host); 6233 kset_free: 6234 iscsi_boot_destroy_kset(ha->boot_kset); 6235 return -ENOMEM; 6236 } 6237 6238 6239 static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry, 6240 struct ql4_tuple_ddb *tddb) 6241 { 6242 struct scsi_qla_host *ha; 6243 struct iscsi_cls_session *cls_sess; 6244 struct iscsi_cls_conn *cls_conn; 6245 struct iscsi_session *sess; 6246 struct iscsi_conn *conn; 6247 6248 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); 6249 ha = ddb_entry->ha; 6250 cls_sess = ddb_entry->sess; 6251 sess = cls_sess->dd_data; 6252 cls_conn = ddb_entry->conn; 6253 conn = cls_conn->dd_data; 6254 6255 tddb->tpgt = sess->tpgt; 6256 tddb->port = conn->persistent_port; 6257 strlcpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE); 6258 strlcpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN); 6259 } 6260 6261 static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry, 6262 struct ql4_tuple_ddb *tddb, 6263 uint8_t *flash_isid) 6264 { 6265 uint16_t options = 0; 6266 6267 tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); 6268 memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0], 6269 min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name))); 6270 6271 options = le16_to_cpu(fw_ddb_entry->options); 6272 if (options & DDB_OPT_IPV6_DEVICE) 6273 sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr); 6274 else 6275 sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr); 6276 6277 tddb->port = le16_to_cpu(fw_ddb_entry->port); 6278 6279 if (flash_isid == NULL) 6280 memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0], 6281 sizeof(tddb->isid)); 6282 else 6283 memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid)); 6284 } 6285 6286 static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha, 6287 struct ql4_tuple_ddb *old_tddb, 6288 struct ql4_tuple_ddb *new_tddb, 6289 uint8_t is_isid_compare) 6290 { 6291 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name)) 6292 return QLA_ERROR; 6293 6294 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr)) 6295 return QLA_ERROR; 6296 6297 if (old_tddb->port != new_tddb->port) 6298 return QLA_ERROR; 6299 6300 /* For multi sessions, driver generates the ISID, so do not compare 6301 * ISID in reset path since it would be a comparison between the 6302 * driver generated ISID and firmware generated ISID. This could 6303 * lead to adding duplicated DDBs in the list as driver generated 6304 * ISID would not match firmware generated ISID. 
6305 */ 6306 if (is_isid_compare) { 6307 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x" 6308 "%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n", 6309 __func__, old_tddb->isid[5], old_tddb->isid[4], 6310 old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1], 6311 old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4], 6312 new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1], 6313 new_tddb->isid[0])); 6314 6315 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0], 6316 sizeof(old_tddb->isid))) 6317 return QLA_ERROR; 6318 } 6319 6320 DEBUG2(ql4_printk(KERN_INFO, ha, 6321 "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]", 6322 old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr, 6323 old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt, 6324 new_tddb->ip_addr, new_tddb->iscsi_name)); 6325 6326 return QLA_SUCCESS; 6327 } 6328 6329 static int qla4xxx_is_session_exists(struct scsi_qla_host *ha, 6330 struct dev_db_entry *fw_ddb_entry, 6331 uint32_t *index) 6332 { 6333 struct ddb_entry *ddb_entry; 6334 struct ql4_tuple_ddb *fw_tddb = NULL; 6335 struct ql4_tuple_ddb *tmp_tddb = NULL; 6336 int idx; 6337 int ret = QLA_ERROR; 6338 6339 fw_tddb = vzalloc(sizeof(*fw_tddb)); 6340 if (!fw_tddb) { 6341 DEBUG2(ql4_printk(KERN_WARNING, ha, 6342 "Memory Allocation failed.\n")); 6343 ret = QLA_SUCCESS; 6344 goto exit_check; 6345 } 6346 6347 tmp_tddb = vzalloc(sizeof(*tmp_tddb)); 6348 if (!tmp_tddb) { 6349 DEBUG2(ql4_printk(KERN_WARNING, ha, 6350 "Memory Allocation failed.\n")); 6351 ret = QLA_SUCCESS; 6352 goto exit_check; 6353 } 6354 6355 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL); 6356 6357 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { 6358 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 6359 if (ddb_entry == NULL) 6360 continue; 6361 6362 qla4xxx_get_param_ddb(ddb_entry, tmp_tddb); 6363 if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) { 6364 ret = QLA_SUCCESS; /* found */ 6365 if (index != NULL) 6366 *index = idx; 6367 goto exit_check; 6368 } 6369 } 6370 6371 exit_check: 6372 if (fw_tddb) 6373 vfree(fw_tddb); 6374 if (tmp_tddb) 6375 vfree(tmp_tddb); 6376 return ret; 6377 } 6378 6379 /** 6380 * qla4xxx_check_existing_isid - check if target with same isid exist 6381 * in target list 6382 * @list_nt: list of target 6383 * @isid: isid to check 6384 * 6385 * This routine return QLA_SUCCESS if target with same isid exist 6386 **/ 6387 static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid) 6388 { 6389 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; 6390 struct dev_db_entry *fw_ddb_entry; 6391 6392 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 6393 fw_ddb_entry = &nt_ddb_idx->fw_ddb; 6394 6395 if (memcmp(&fw_ddb_entry->isid[0], &isid[0], 6396 sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) { 6397 return QLA_SUCCESS; 6398 } 6399 } 6400 return QLA_ERROR; 6401 } 6402 6403 /** 6404 * qla4xxx_update_isid - compare ddbs and updated isid 6405 * @ha: Pointer to host adapter structure. 6406 * @list_nt: list of nt target 6407 * @fw_ddb_entry: firmware ddb entry 6408 * 6409 * This routine update isid if ddbs have same iqn, same isid and 6410 * different IP addr. 6411 * Return QLA_SUCCESS if isid is updated. 
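 * The update is done by cycling through the eight possible values of the
 * top three bits of isid[1] until a value that is not already present in
 * list_nt is found.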
6412 **/ 6413 static int qla4xxx_update_isid(struct scsi_qla_host *ha, 6414 struct list_head *list_nt, 6415 struct dev_db_entry *fw_ddb_entry) 6416 { 6417 uint8_t base_value, i; 6418 6419 base_value = fw_ddb_entry->isid[1] & 0x1f; 6420 for (i = 0; i < 8; i++) { 6421 fw_ddb_entry->isid[1] = (base_value | (i << 5)); 6422 if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid)) 6423 break; 6424 } 6425 6426 if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid)) 6427 return QLA_ERROR; 6428 6429 return QLA_SUCCESS; 6430 } 6431 6432 /** 6433 * qla4xxx_should_update_isid - check if isid needs to be updated 6434 * @ha: Pointer to host adapter structure. 6435 * @old_tddb: ddb tuple 6436 * @new_tddb: ddb tuple 6437 * 6438 * Return QLA_SUCCESS if the iqn and isid match but the IP address 6439 * or port differs. 6440 **/ 6441 static int qla4xxx_should_update_isid(struct scsi_qla_host *ha, 6442 struct ql4_tuple_ddb *old_tddb, 6443 struct ql4_tuple_ddb *new_tddb) 6444 { 6445 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) { 6446 /* Same ip */ 6447 if (old_tddb->port == new_tddb->port) 6448 return QLA_ERROR; 6449 } 6450 6451 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name)) 6452 /* different iqn */ 6453 return QLA_ERROR; 6454 6455 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0], 6456 sizeof(old_tddb->isid))) 6457 /* different isid */ 6458 return QLA_ERROR; 6459 6460 return QLA_SUCCESS; 6461 } 6462 6463 /** 6464 * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt 6465 * @ha: Pointer to host adapter structure. 6466 * @list_nt: list of nt target. 6467 * @fw_ddb_entry: firmware ddb entry. 6468 * 6469 * This routine checks if fw_ddb_entry already exists in list_nt to avoid 6470 * adding a duplicate ddb to list_nt. 6471 * Return QLA_SUCCESS if a duplicate ddb exists in list_nt. 6472 * Note: This function also updates the isid of the DDB if required.
6473 **/ 6474 6475 static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha, 6476 struct list_head *list_nt, 6477 struct dev_db_entry *fw_ddb_entry) 6478 { 6479 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; 6480 struct ql4_tuple_ddb *fw_tddb = NULL; 6481 struct ql4_tuple_ddb *tmp_tddb = NULL; 6482 int rval, ret = QLA_ERROR; 6483 6484 fw_tddb = vzalloc(sizeof(*fw_tddb)); 6485 if (!fw_tddb) { 6486 DEBUG2(ql4_printk(KERN_WARNING, ha, 6487 "Memory Allocation failed.\n")); 6488 ret = QLA_SUCCESS; 6489 goto exit_check; 6490 } 6491 6492 tmp_tddb = vzalloc(sizeof(*tmp_tddb)); 6493 if (!tmp_tddb) { 6494 DEBUG2(ql4_printk(KERN_WARNING, ha, 6495 "Memory Allocation failed.\n")); 6496 ret = QLA_SUCCESS; 6497 goto exit_check; 6498 } 6499 6500 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL); 6501 6502 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 6503 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, 6504 nt_ddb_idx->flash_isid); 6505 ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true); 6506 /* found duplicate ddb */ 6507 if (ret == QLA_SUCCESS) 6508 goto exit_check; 6509 } 6510 6511 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 6512 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL); 6513 6514 ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb); 6515 if (ret == QLA_SUCCESS) { 6516 rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry); 6517 if (rval == QLA_SUCCESS) 6518 ret = QLA_ERROR; 6519 else 6520 ret = QLA_SUCCESS; 6521 6522 goto exit_check; 6523 } 6524 } 6525 6526 exit_check: 6527 if (fw_tddb) 6528 vfree(fw_tddb); 6529 if (tmp_tddb) 6530 vfree(tmp_tddb); 6531 return ret; 6532 } 6533 6534 static void qla4xxx_free_ddb_list(struct list_head *list_ddb) 6535 { 6536 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 6537 6538 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { 6539 list_del_init(&ddb_idx->list); 6540 vfree(ddb_idx); 6541 } 6542 } 6543 6544 static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha, 6545 struct dev_db_entry *fw_ddb_entry) 6546 { 6547 struct iscsi_endpoint *ep; 6548 struct sockaddr_in *addr; 6549 struct sockaddr_in6 *addr6; 6550 struct sockaddr *t_addr; 6551 struct sockaddr_storage *dst_addr; 6552 char *ip; 6553 6554 /* TODO: need to destroy on unload iscsi_endpoint*/ 6555 dst_addr = vmalloc(sizeof(*dst_addr)); 6556 if (!dst_addr) 6557 return NULL; 6558 6559 if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) { 6560 t_addr = (struct sockaddr *)dst_addr; 6561 t_addr->sa_family = AF_INET6; 6562 addr6 = (struct sockaddr_in6 *)dst_addr; 6563 ip = (char *)&addr6->sin6_addr; 6564 memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN); 6565 addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port)); 6566 6567 } else { 6568 t_addr = (struct sockaddr *)dst_addr; 6569 t_addr->sa_family = AF_INET; 6570 addr = (struct sockaddr_in *)dst_addr; 6571 ip = (char *)&addr->sin_addr; 6572 memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN); 6573 addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port)); 6574 } 6575 6576 ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0); 6577 vfree(dst_addr); 6578 return ep; 6579 } 6580 6581 static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx) 6582 { 6583 if (ql4xdisablesysfsboot) 6584 return QLA_SUCCESS; 6585 if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx) 6586 return QLA_ERROR; 6587 return QLA_SUCCESS; 6588 } 6589 6590 static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha, 6591 struct ddb_entry 
*ddb_entry, 6592 uint16_t idx) 6593 { 6594 uint16_t def_timeout; 6595 6596 ddb_entry->ddb_type = FLASH_DDB; 6597 ddb_entry->fw_ddb_index = INVALID_ENTRY; 6598 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE; 6599 ddb_entry->ha = ha; 6600 ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb; 6601 ddb_entry->ddb_change = qla4xxx_flash_ddb_change; 6602 ddb_entry->chap_tbl_idx = INVALID_ENTRY; 6603 6604 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); 6605 atomic_set(&ddb_entry->relogin_timer, 0); 6606 atomic_set(&ddb_entry->relogin_retry_count, 0); 6607 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout); 6608 ddb_entry->default_relogin_timeout = 6609 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ? 6610 def_timeout : LOGIN_TOV; 6611 ddb_entry->default_time2wait = 6612 le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait); 6613 6614 if (ql4xdisablesysfsboot && 6615 (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)) 6616 set_bit(DF_BOOT_TGT, &ddb_entry->flags); 6617 } 6618 6619 static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha) 6620 { 6621 uint32_t idx = 0; 6622 uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */ 6623 uint32_t sts[MBOX_REG_COUNT]; 6624 uint32_t ip_state; 6625 unsigned long wtime; 6626 int ret; 6627 6628 wtime = jiffies + (HZ * IP_CONFIG_TOV); 6629 do { 6630 for (idx = 0; idx < IP_ADDR_COUNT; idx++) { 6631 if (ip_idx[idx] == -1) 6632 continue; 6633 6634 ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts); 6635 6636 if (ret == QLA_ERROR) { 6637 ip_idx[idx] = -1; 6638 continue; 6639 } 6640 6641 ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT; 6642 6643 DEBUG2(ql4_printk(KERN_INFO, ha, 6644 "Waiting for IP state for idx = %d, state = 0x%x\n", 6645 ip_idx[idx], ip_state)); 6646 if (ip_state == IP_ADDRSTATE_UNCONFIGURED || 6647 ip_state == IP_ADDRSTATE_INVALID || 6648 ip_state == IP_ADDRSTATE_PREFERRED || 6649 ip_state == IP_ADDRSTATE_DEPRICATED || 6650 ip_state == IP_ADDRSTATE_DISABLING) 6651 ip_idx[idx] = -1; 6652 } 6653 6654 /* Break if all IP states checked */ 6655 if ((ip_idx[0] == -1) && 6656 (ip_idx[1] == -1) && 6657 (ip_idx[2] == -1) && 6658 (ip_idx[3] == -1)) 6659 break; 6660 schedule_timeout_uninterruptible(HZ); 6661 } while (time_after(wtime, jiffies)); 6662 } 6663 6664 static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry, 6665 struct dev_db_entry *flash_ddb_entry) 6666 { 6667 uint16_t options = 0; 6668 size_t ip_len = IP_ADDR_LEN; 6669 6670 options = le16_to_cpu(fw_ddb_entry->options); 6671 if (options & DDB_OPT_IPV6_DEVICE) 6672 ip_len = IPv6_ADDR_LEN; 6673 6674 if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len)) 6675 return QLA_ERROR; 6676 6677 if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0], 6678 sizeof(fw_ddb_entry->isid))) 6679 return QLA_ERROR; 6680 6681 if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port, 6682 sizeof(fw_ddb_entry->port))) 6683 return QLA_ERROR; 6684 6685 return QLA_SUCCESS; 6686 } 6687 6688 static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha, 6689 struct dev_db_entry *fw_ddb_entry, 6690 uint32_t fw_idx, uint32_t *flash_index) 6691 { 6692 struct dev_db_entry *flash_ddb_entry; 6693 dma_addr_t flash_ddb_entry_dma; 6694 uint32_t idx = 0; 6695 int max_ddbs; 6696 int ret = QLA_ERROR, status; 6697 6698 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 6699 MAX_DEV_DB_ENTRIES; 6700 6701 flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 6702 &flash_ddb_entry_dma); 6703 if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) { 6704 ql4_printk(KERN_ERR, ha, "Out of memory\n"); 6705 goto exit_find_st_idx; 6706 } 6707 6708 status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, 6709 flash_ddb_entry_dma, fw_idx); 6710 if (status == QLA_SUCCESS) { 6711 status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); 6712 if (status == QLA_SUCCESS) { 6713 *flash_index = fw_idx; 6714 ret = QLA_SUCCESS; 6715 goto exit_find_st_idx; 6716 } 6717 } 6718 6719 for (idx = 0; idx < max_ddbs; idx++) { 6720 status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, 6721 flash_ddb_entry_dma, idx); 6722 if (status == QLA_ERROR) 6723 continue; 6724 6725 status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); 6726 if (status == QLA_SUCCESS) { 6727 *flash_index = idx; 6728 ret = QLA_SUCCESS; 6729 goto exit_find_st_idx; 6730 } 6731 } 6732 6733 if (idx == max_ddbs) 6734 ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n", 6735 fw_idx); 6736 6737 exit_find_st_idx: 6738 if (flash_ddb_entry) 6739 dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry, 6740 flash_ddb_entry_dma); 6741 6742 return ret; 6743 } 6744 6745 static void qla4xxx_build_st_list(struct scsi_qla_host *ha, 6746 struct list_head *list_st) 6747 { 6748 struct qla_ddb_index *st_ddb_idx; 6749 int max_ddbs; 6750 int fw_idx_size; 6751 struct dev_db_entry *fw_ddb_entry; 6752 dma_addr_t fw_ddb_dma; 6753 int ret; 6754 uint32_t idx = 0, next_idx = 0; 6755 uint32_t state = 0, conn_err = 0; 6756 uint32_t flash_index = -1; 6757 uint16_t conn_id = 0; 6758 6759 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 6760 &fw_ddb_dma); 6761 if (fw_ddb_entry == NULL) { 6762 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 6763 goto exit_st_list; 6764 } 6765 6766 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 6767 MAX_DEV_DB_ENTRIES; 6768 fw_idx_size = sizeof(struct qla_ddb_index); 6769 6770 for (idx = 0; idx < max_ddbs; idx = next_idx) { 6771 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, 6772 NULL, &next_idx, &state, 6773 &conn_err, NULL, &conn_id); 6774 if (ret == QLA_ERROR) 6775 break; 6776 6777 /* Ignore DDB if invalid state (unassigned) */ 6778 if (state == DDB_DS_UNASSIGNED) 6779 goto continue_next_st; 6780 6781 /* Check if ST, add to the list_st */ 6782 if (strlen((char *) fw_ddb_entry->iscsi_name) != 0) 6783 goto continue_next_st; 6784 6785 st_ddb_idx = vzalloc(fw_idx_size); 6786 if (!st_ddb_idx) 6787 break; 6788 6789 ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx, 6790 &flash_index); 6791 if (ret == QLA_ERROR) { 6792 ql4_printk(KERN_ERR, ha, 6793 "No flash entry for ST at idx [%d]\n", idx); 6794 st_ddb_idx->flash_ddb_idx = idx; 6795 } else { 6796 ql4_printk(KERN_INFO, ha, 6797 "ST at idx [%d] is stored at flash [%d]\n", 6798 idx, flash_index); 6799 st_ddb_idx->flash_ddb_idx = flash_index; 6800 } 6801 6802 st_ddb_idx->fw_ddb_idx = idx; 6803 6804 list_add_tail(&st_ddb_idx->list, list_st); 6805 continue_next_st: 6806 if (next_idx == 0) 6807 break; 6808 } 6809 6810 exit_st_list: 6811 if (fw_ddb_entry) 6812 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 6813 } 6814 6815 /** 6816 * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list 6817 * @ha: pointer to adapter structure 6818 * @list_ddb: List from which failed ddb to be removed 6819 * 6820 * Iterate over the list of DDBs and find and remove DDBs that are either in 6821 * no connection active state or failed state 6822 **/ 6823 static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha, 6824 struct list_head *list_ddb) 6825 { 6826 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 6827 uint32_t next_idx = 0; 6828 uint32_t state = 0, conn_err = 0; 6829 int ret; 6830 6831 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { 6832 ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx, 6833 NULL, 0, NULL, &next_idx, &state, 6834 &conn_err, NULL, NULL); 6835 if (ret == QLA_ERROR) 6836 continue; 6837 6838 if (state == DDB_DS_NO_CONNECTION_ACTIVE || 6839 state == DDB_DS_SESSION_FAILED) { 6840 list_del_init(&ddb_idx->list); 6841 vfree(ddb_idx); 6842 } 6843 } 6844 } 6845 6846 static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha, 6847 struct ddb_entry *ddb_entry, 6848 struct dev_db_entry *fw_ddb_entry) 6849 { 6850 struct iscsi_cls_session *cls_sess; 6851 struct iscsi_session *sess; 6852 uint32_t max_ddbs = 0; 6853 uint16_t ddb_link = -1; 6854 6855 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 6856 MAX_DEV_DB_ENTRIES; 6857 6858 cls_sess = ddb_entry->sess; 6859 sess = cls_sess->dd_data; 6860 6861 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 6862 if (ddb_link < max_ddbs) 6863 sess->discovery_parent_idx = ddb_link; 6864 else 6865 sess->discovery_parent_idx = DDB_NO_LINK; 6866 } 6867 6868 static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha, 6869 struct dev_db_entry *fw_ddb_entry, 6870 int is_reset, uint16_t idx) 6871 { 6872 struct iscsi_cls_session *cls_sess; 6873 struct iscsi_session *sess; 6874 struct iscsi_cls_conn *cls_conn; 6875 struct iscsi_endpoint *ep; 6876 uint16_t cmds_max = 32; 6877 uint16_t conn_id = 0; 6878 uint32_t initial_cmdsn = 0; 6879 int ret = QLA_SUCCESS; 6880 6881 struct ddb_entry *ddb_entry = NULL; 6882 6883 /* Create the session object with INVALID_ENTRY; 6884 * the target_id gets set when we issue the login. 6885 */ 6886 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host, 6887 cmds_max, sizeof(struct ddb_entry), 6888 sizeof(struct ql4_task_data), 6889 initial_cmdsn, INVALID_ENTRY); 6890 if (!cls_sess) { 6891 ret = QLA_ERROR; 6892 goto exit_setup; 6893 } 6894 6895 /* 6896 * iscsi_session_setup() takes a reference on the transport module, so 6897 * call module_put() here to drop it and keep the driver unloadable. 6898 **/ 6899 module_put(qla4xxx_iscsi_transport.owner); 6900 sess = cls_sess->dd_data; 6901 ddb_entry = sess->dd_data; 6902 ddb_entry->sess = cls_sess; 6903 6904 cls_sess->recovery_tmo = ql4xsess_recovery_tmo; 6905 memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry, 6906 sizeof(struct dev_db_entry)); 6907 6908 qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx); 6909 6910 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id); 6911 6912 if (!cls_conn) { 6913 ret = QLA_ERROR; 6914 goto exit_setup; 6915 } 6916 6917 ddb_entry->conn = cls_conn; 6918 6919 /* Setup ep, for displaying attributes in sysfs */ 6920 ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry); 6921 if (ep) { 6922 ep->conn = cls_conn; 6923 cls_conn->ep = ep; 6924 } else { 6925 DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n")); 6926 ret = QLA_ERROR; 6927 goto exit_setup; 6928 } 6929 6930 /* Update sess/conn params */ 6931 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); 6932 qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry); 6933 6934 if (is_reset == RESET_ADAPTER) { 6935 iscsi_block_session(cls_sess); 6936 /* Use the relogin path to discover new devices 6937 * by short-circuiting the logic of setting the 6938 * relogin timer - instead set the flags to 6939 * initiate login right away.
6940 */ 6941 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); 6942 set_bit(DF_RELOGIN, &ddb_entry->flags); 6943 } 6944 6945 exit_setup: 6946 return ret; 6947 } 6948 6949 static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha, 6950 struct list_head *list_ddb, 6951 struct dev_db_entry *fw_ddb_entry) 6952 { 6953 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 6954 uint16_t ddb_link; 6955 6956 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 6957 6958 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { 6959 if (ddb_idx->fw_ddb_idx == ddb_link) { 6960 DEBUG2(ql4_printk(KERN_INFO, ha, 6961 "Updating NT parent idx from [%d] to [%d]\n", 6962 ddb_link, ddb_idx->flash_ddb_idx)); 6963 fw_ddb_entry->ddb_link = 6964 cpu_to_le16(ddb_idx->flash_ddb_idx); 6965 return; 6966 } 6967 } 6968 } 6969 6970 static void qla4xxx_build_nt_list(struct scsi_qla_host *ha, 6971 struct list_head *list_nt, 6972 struct list_head *list_st, 6973 int is_reset) 6974 { 6975 struct dev_db_entry *fw_ddb_entry; 6976 struct ddb_entry *ddb_entry = NULL; 6977 dma_addr_t fw_ddb_dma; 6978 int max_ddbs; 6979 int fw_idx_size; 6980 int ret; 6981 uint32_t idx = 0, next_idx = 0; 6982 uint32_t state = 0, conn_err = 0; 6983 uint32_t ddb_idx = -1; 6984 uint16_t conn_id = 0; 6985 uint16_t ddb_link = -1; 6986 struct qla_ddb_index *nt_ddb_idx; 6987 6988 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 6989 &fw_ddb_dma); 6990 if (fw_ddb_entry == NULL) { 6991 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 6992 goto exit_nt_list; 6993 } 6994 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : 6995 MAX_DEV_DB_ENTRIES; 6996 fw_idx_size = sizeof(struct qla_ddb_index); 6997 6998 for (idx = 0; idx < max_ddbs; idx = next_idx) { 6999 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, 7000 NULL, &next_idx, &state, 7001 &conn_err, NULL, &conn_id); 7002 if (ret == QLA_ERROR) 7003 break; 7004 7005 if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS) 7006 goto continue_next_nt; 7007 7008 /* Check if NT, then add to list it */ 7009 if (strlen((char *) fw_ddb_entry->iscsi_name) == 0) 7010 goto continue_next_nt; 7011 7012 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 7013 if (ddb_link < max_ddbs) 7014 qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry); 7015 7016 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE || 7017 state == DDB_DS_SESSION_FAILED) && 7018 (is_reset == INIT_ADAPTER)) 7019 goto continue_next_nt; 7020 7021 DEBUG2(ql4_printk(KERN_INFO, ha, 7022 "Adding DDB to session = 0x%x\n", idx)); 7023 7024 if (is_reset == INIT_ADAPTER) { 7025 nt_ddb_idx = vmalloc(fw_idx_size); 7026 if (!nt_ddb_idx) 7027 break; 7028 7029 nt_ddb_idx->fw_ddb_idx = idx; 7030 7031 /* Copy original isid as it may get updated in function 7032 * qla4xxx_update_isid(). 
We need original isid in 7033 * function qla4xxx_compare_tuple_ddb to find duplicate 7034 * target */ 7035 memcpy(&nt_ddb_idx->flash_isid[0], 7036 &fw_ddb_entry->isid[0], 7037 sizeof(nt_ddb_idx->flash_isid)); 7038 7039 ret = qla4xxx_is_flash_ddb_exists(ha, list_nt, 7040 fw_ddb_entry); 7041 if (ret == QLA_SUCCESS) { 7042 /* free nt_ddb_idx and do not add to list_nt */ 7043 vfree(nt_ddb_idx); 7044 goto continue_next_nt; 7045 } 7046 7047 /* Copy updated isid */ 7048 memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry, 7049 sizeof(struct dev_db_entry)); 7050 7051 list_add_tail(&nt_ddb_idx->list, list_nt); 7052 } else if (is_reset == RESET_ADAPTER) { 7053 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, 7054 &ddb_idx); 7055 if (ret == QLA_SUCCESS) { 7056 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, 7057 ddb_idx); 7058 if (ddb_entry != NULL) 7059 qla4xxx_update_sess_disc_idx(ha, 7060 ddb_entry, 7061 fw_ddb_entry); 7062 goto continue_next_nt; 7063 } 7064 } 7065 7066 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx); 7067 if (ret == QLA_ERROR) 7068 goto exit_nt_list; 7069 7070 continue_next_nt: 7071 if (next_idx == 0) 7072 break; 7073 } 7074 7075 exit_nt_list: 7076 if (fw_ddb_entry) 7077 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 7078 } 7079 7080 static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha, 7081 struct list_head *list_nt, 7082 uint16_t target_id) 7083 { 7084 struct dev_db_entry *fw_ddb_entry; 7085 dma_addr_t fw_ddb_dma; 7086 int max_ddbs; 7087 int fw_idx_size; 7088 int ret; 7089 uint32_t idx = 0, next_idx = 0; 7090 uint32_t state = 0, conn_err = 0; 7091 uint16_t conn_id = 0; 7092 struct qla_ddb_index *nt_ddb_idx; 7093 7094 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 7095 &fw_ddb_dma); 7096 if (fw_ddb_entry == NULL) { 7097 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 7098 goto exit_new_nt_list; 7099 } 7100 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 7101 MAX_DEV_DB_ENTRIES; 7102 fw_idx_size = sizeof(struct qla_ddb_index); 7103 7104 for (idx = 0; idx < max_ddbs; idx = next_idx) { 7105 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, 7106 NULL, &next_idx, &state, 7107 &conn_err, NULL, &conn_id); 7108 if (ret == QLA_ERROR) 7109 break; 7110 7111 /* Check if NT, then add it to list */ 7112 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0) 7113 goto continue_next_new_nt; 7114 7115 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE)) 7116 goto continue_next_new_nt; 7117 7118 DEBUG2(ql4_printk(KERN_INFO, ha, 7119 "Adding DDB to session = 0x%x\n", idx)); 7120 7121 nt_ddb_idx = vmalloc(fw_idx_size); 7122 if (!nt_ddb_idx) 7123 break; 7124 7125 nt_ddb_idx->fw_ddb_idx = idx; 7126 7127 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL); 7128 if (ret == QLA_SUCCESS) { 7129 /* free nt_ddb_idx and do not add to list_nt */ 7130 vfree(nt_ddb_idx); 7131 goto continue_next_new_nt; 7132 } 7133 7134 if (target_id < max_ddbs) 7135 fw_ddb_entry->ddb_link = cpu_to_le16(target_id); 7136 7137 list_add_tail(&nt_ddb_idx->list, list_nt); 7138 7139 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, 7140 idx); 7141 if (ret == QLA_ERROR) 7142 goto exit_new_nt_list; 7143 7144 continue_next_new_nt: 7145 if (next_idx == 0) 7146 break; 7147 } 7148 7149 exit_new_nt_list: 7150 if (fw_ddb_entry) 7151 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 7152 } 7153 7154 /** 7155 * qla4xxx_sysfs_ddb_is_non_persistent - check for non-persistence of ddb entry 7156 * @dev: dev associated with the sysfs entry 7157 * @data: pointer to flashnode session object 7158 * 7159 * Returns: 7160 * 1: if flashnode entry is non-persistent 7161 * 0: if flashnode entry is persistent 7162 **/ 7163 static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data) 7164 { 7165 struct iscsi_bus_flash_session *fnode_sess; 7166 7167 if (!iscsi_flashnode_bus_match(dev, NULL)) 7168 return 0; 7169 7170 fnode_sess = iscsi_dev_to_flash_session(dev); 7171 7172 return (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT); 7173 } 7174 7175 /** 7176 * qla4xxx_sysfs_ddb_tgt_create - Create sysfs entry for target 7177 * @ha: pointer to host 7178 * @fw_ddb_entry: flash ddb data 7179 * @idx: target index 7180 * @user: if set then this call is made from userland else from kernel 7181 * 7182 * Returns: 7183 * On success: QLA_SUCCESS 7184 * On failure: QLA_ERROR 7185 * 7186 * This creates separate sysfs entries for session and connection attributes of 7187 * the given fw ddb entry. 7188 * If this is invoked as a result of a userspace call then the entry is marked 7189 * as nonpersistent using flash_state field.
7190 **/ 7191 static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, 7192 struct dev_db_entry *fw_ddb_entry, 7193 uint16_t *idx, int user) 7194 { 7195 struct iscsi_bus_flash_session *fnode_sess = NULL; 7196 struct iscsi_bus_flash_conn *fnode_conn = NULL; 7197 int rc = QLA_ERROR; 7198 7199 fnode_sess = iscsi_create_flashnode_sess(ha->host, *idx, 7200 &qla4xxx_iscsi_transport, 0); 7201 if (!fnode_sess) { 7202 ql4_printk(KERN_ERR, ha, 7203 "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n", 7204 __func__, *idx, ha->host_no); 7205 goto exit_tgt_create; 7206 } 7207 7208 fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess, 7209 &qla4xxx_iscsi_transport, 0); 7210 if (!fnode_conn) { 7211 ql4_printk(KERN_ERR, ha, 7212 "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n", 7213 __func__, *idx, ha->host_no); 7214 goto free_sess; 7215 } 7216 7217 if (user) { 7218 fnode_sess->flash_state = DEV_DB_NON_PERSISTENT; 7219 } else { 7220 fnode_sess->flash_state = DEV_DB_PERSISTENT; 7221 7222 if (*idx == ha->pri_ddb_idx || *idx == ha->sec_ddb_idx) 7223 fnode_sess->is_boot_target = 1; 7224 else 7225 fnode_sess->is_boot_target = 0; 7226 } 7227 7228 rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn, 7229 fw_ddb_entry); 7230 7231 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", 7232 __func__, fnode_sess->dev.kobj.name); 7233 7234 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", 7235 __func__, fnode_conn->dev.kobj.name); 7236 7237 return QLA_SUCCESS; 7238 7239 free_sess: 7240 iscsi_destroy_flashnode_sess(fnode_sess); 7241 7242 exit_tgt_create: 7243 return QLA_ERROR; 7244 } 7245 7246 /** 7247 * qla4xxx_sysfs_ddb_add - Add new ddb entry in flash 7248 * @shost: pointer to host 7249 * @buf: type of ddb entry (ipv4/ipv6) 7250 * @len: length of buf 7251 * 7252 * This creates new ddb entry in the flash by finding first free index and 7253 * storing default ddb there. And then create sysfs entry for the new ddb entry. 7254 **/ 7255 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf, 7256 int len) 7257 { 7258 struct scsi_qla_host *ha = to_qla_host(shost); 7259 struct dev_db_entry *fw_ddb_entry = NULL; 7260 dma_addr_t fw_ddb_entry_dma; 7261 struct device *dev; 7262 uint16_t idx = 0; 7263 uint16_t max_ddbs = 0; 7264 uint32_t options = 0; 7265 uint32_t rval = QLA_ERROR; 7266 7267 if (strncasecmp(PORTAL_TYPE_IPV4, buf, 4) && 7268 strncasecmp(PORTAL_TYPE_IPV6, buf, 4)) { 7269 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid portal type\n", 7270 __func__)); 7271 goto exit_ddb_add; 7272 } 7273 7274 max_ddbs = is_qla40XX(ha) ? 
MAX_PRST_DEV_DB_ENTRIES : 7275 MAX_DEV_DB_ENTRIES; 7276 7277 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7278 &fw_ddb_entry_dma, GFP_KERNEL); 7279 if (!fw_ddb_entry) { 7280 DEBUG2(ql4_printk(KERN_ERR, ha, 7281 "%s: Unable to allocate dma buffer\n", 7282 __func__)); 7283 goto exit_ddb_add; 7284 } 7285 7286 dev = iscsi_find_flashnode_sess(ha->host, NULL, 7287 qla4xxx_sysfs_ddb_is_non_persistent); 7288 if (dev) { 7289 ql4_printk(KERN_ERR, ha, 7290 "%s: A non-persistent entry %s found\n", 7291 __func__, dev->kobj.name); 7292 put_device(dev); 7293 goto exit_ddb_add; 7294 } 7295 7296 /* Index 0 and 1 are reserved for boot target entries */ 7297 for (idx = 2; idx < max_ddbs; idx++) { 7298 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, 7299 fw_ddb_entry_dma, idx)) 7300 break; 7301 } 7302 7303 if (idx == max_ddbs) 7304 goto exit_ddb_add; 7305 7306 if (!strncasecmp("ipv6", buf, 4)) 7307 options |= IPV6_DEFAULT_DDB_ENTRY; 7308 7309 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); 7310 if (rval == QLA_ERROR) 7311 goto exit_ddb_add; 7312 7313 rval = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 1); 7314 7315 exit_ddb_add: 7316 if (fw_ddb_entry) 7317 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7318 fw_ddb_entry, fw_ddb_entry_dma); 7319 if (rval == QLA_SUCCESS) 7320 return idx; 7321 else 7322 return -EIO; 7323 } 7324 7325 /** 7326 * qla4xxx_sysfs_ddb_apply - write the target ddb contents to Flash 7327 * @fnode_sess: pointer to session attrs of flash ddb entry 7328 * @fnode_conn: pointer to connection attrs of flash ddb entry 7329 * 7330 * This writes the contents of target ddb buffer to Flash with a valid cookie 7331 * value in order to make the ddb entry persistent. 7332 **/ 7333 static int qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess, 7334 struct iscsi_bus_flash_conn *fnode_conn) 7335 { 7336 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7337 struct scsi_qla_host *ha = to_qla_host(shost); 7338 uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO; 7339 struct dev_db_entry *fw_ddb_entry = NULL; 7340 dma_addr_t fw_ddb_entry_dma; 7341 uint32_t options = 0; 7342 int rval = 0; 7343 7344 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7345 &fw_ddb_entry_dma, GFP_KERNEL); 7346 if (!fw_ddb_entry) { 7347 DEBUG2(ql4_printk(KERN_ERR, ha, 7348 "%s: Unable to allocate dma buffer\n", 7349 __func__)); 7350 rval = -ENOMEM; 7351 goto exit_ddb_apply; 7352 } 7353 7354 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7355 options |= IPV6_DEFAULT_DDB_ENTRY; 7356 7357 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); 7358 if (rval == QLA_ERROR) 7359 goto exit_ddb_apply; 7360 7361 dev_db_start_offset += (fnode_sess->target_id * 7362 sizeof(*fw_ddb_entry)); 7363 7364 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); 7365 fw_ddb_entry->cookie = DDB_VALID_COOKIE; 7366 7367 rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, 7368 sizeof(*fw_ddb_entry), FLASH_OPT_RMW_COMMIT); 7369 7370 if (rval == QLA_SUCCESS) { 7371 fnode_sess->flash_state = DEV_DB_PERSISTENT; 7372 ql4_printk(KERN_INFO, ha, 7373 "%s: flash node %u of host %lu written to flash\n", 7374 __func__, fnode_sess->target_id, ha->host_no); 7375 } else { 7376 rval = -EIO; 7377 ql4_printk(KERN_ERR, ha, 7378 "%s: Error while writing flash node %u of host %lu to flash\n", 7379 __func__, fnode_sess->target_id, ha->host_no); 7380 } 7381 7382 exit_ddb_apply: 7383 if (fw_ddb_entry) 7384 
dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7385 fw_ddb_entry, fw_ddb_entry_dma); 7386 return rval; 7387 } 7388 7389 static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha, 7390 struct dev_db_entry *fw_ddb_entry, 7391 uint16_t idx) 7392 { 7393 struct dev_db_entry *ddb_entry = NULL; 7394 dma_addr_t ddb_entry_dma; 7395 unsigned long wtime; 7396 uint32_t mbx_sts = 0; 7397 uint32_t state = 0, conn_err = 0; 7398 uint16_t tmo = 0; 7399 int ret = 0; 7400 7401 ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ddb_entry), 7402 &ddb_entry_dma, GFP_KERNEL); 7403 if (!ddb_entry) { 7404 DEBUG2(ql4_printk(KERN_ERR, ha, 7405 "%s: Unable to allocate dma buffer\n", 7406 __func__)); 7407 return QLA_ERROR; 7408 } 7409 7410 memcpy(ddb_entry, fw_ddb_entry, sizeof(*ddb_entry)); 7411 7412 ret = qla4xxx_set_ddb_entry(ha, idx, ddb_entry_dma, &mbx_sts); 7413 if (ret != QLA_SUCCESS) { 7414 DEBUG2(ql4_printk(KERN_ERR, ha, 7415 "%s: Unable to set ddb entry for index %d\n", 7416 __func__, idx)); 7417 goto exit_ddb_conn_open; 7418 } 7419 7420 qla4xxx_conn_open(ha, idx); 7421 7422 /* To ensure that sendtargets is done, wait for at least 12 secs */ 7423 tmo = ((ha->def_timeout > LOGIN_TOV) && 7424 (ha->def_timeout < LOGIN_TOV * 10) ? 7425 ha->def_timeout : LOGIN_TOV); 7426 7427 DEBUG2(ql4_printk(KERN_INFO, ha, 7428 "Default time to wait for login to ddb %d\n", tmo)); 7429 7430 wtime = jiffies + (HZ * tmo); 7431 do { 7432 ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL, 7433 NULL, &state, &conn_err, NULL, 7434 NULL); 7435 if (ret == QLA_ERROR) 7436 continue; 7437 7438 if (state == DDB_DS_NO_CONNECTION_ACTIVE || 7439 state == DDB_DS_SESSION_FAILED) 7440 break; 7441 7442 schedule_timeout_uninterruptible(HZ / 10); 7443 } while (time_after(wtime, jiffies)); 7444 7445 exit_ddb_conn_open: 7446 if (ddb_entry) 7447 dma_free_coherent(&ha->pdev->dev, sizeof(*ddb_entry), 7448 ddb_entry, ddb_entry_dma); 7449 return ret; 7450 } 7451 7452 static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha, 7453 struct dev_db_entry *fw_ddb_entry, 7454 uint16_t target_id) 7455 { 7456 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 7457 struct list_head list_nt; 7458 uint16_t ddb_index; 7459 int ret = 0; 7460 7461 if (test_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags)) { 7462 ql4_printk(KERN_WARNING, ha, 7463 "%s: A discovery already in progress!\n", __func__); 7464 return QLA_ERROR; 7465 } 7466 7467 INIT_LIST_HEAD(&list_nt); 7468 7469 set_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags); 7470 7471 ret = qla4xxx_get_ddb_index(ha, &ddb_index); 7472 if (ret == QLA_ERROR) 7473 goto exit_login_st_clr_bit; 7474 7475 ret = qla4xxx_sysfs_ddb_conn_open(ha, fw_ddb_entry, ddb_index); 7476 if (ret == QLA_ERROR) 7477 goto exit_login_st; 7478 7479 qla4xxx_build_new_nt_list(ha, &list_nt, target_id); 7480 7481 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) { 7482 list_del_init(&ddb_idx->list); 7483 qla4xxx_clear_ddb_entry(ha, ddb_idx->fw_ddb_idx); 7484 vfree(ddb_idx); 7485 } 7486 7487 exit_login_st: 7488 if (qla4xxx_clear_ddb_entry(ha, ddb_index) == QLA_ERROR) { 7489 ql4_printk(KERN_ERR, ha, 7490 "Unable to clear DDB index = 0x%x\n", ddb_index); 7491 } 7492 7493 clear_bit(ddb_index, ha->ddb_idx_map); 7494 7495 exit_login_st_clr_bit: 7496 clear_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags); 7497 return ret; 7498 } 7499 7500 static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha, 7501 struct dev_db_entry *fw_ddb_entry, 7502 uint16_t idx) 7503 { 7504 int ret = QLA_ERROR; 7505 7506 ret = qla4xxx_is_session_exists(ha, 
fw_ddb_entry, NULL); 7507 if (ret != QLA_SUCCESS) 7508 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, 7509 idx); 7510 else 7511 ret = -EPERM; 7512 7513 return ret; 7514 } 7515 7516 /** 7517 * qla4xxx_sysfs_ddb_login - Login to the specified target 7518 * @fnode_sess: pointer to session attrs of flash ddb entry 7519 * @fnode_conn: pointer to connection attrs of flash ddb entry 7520 * 7521 * This logs in to the specified target 7522 **/ 7523 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess, 7524 struct iscsi_bus_flash_conn *fnode_conn) 7525 { 7526 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7527 struct scsi_qla_host *ha = to_qla_host(shost); 7528 struct dev_db_entry *fw_ddb_entry = NULL; 7529 dma_addr_t fw_ddb_entry_dma; 7530 uint32_t options = 0; 7531 int ret = 0; 7532 7533 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) { 7534 ql4_printk(KERN_ERR, ha, 7535 "%s: Target info is not persistent\n", __func__); 7536 ret = -EIO; 7537 goto exit_ddb_login; 7538 } 7539 7540 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7541 &fw_ddb_entry_dma, GFP_KERNEL); 7542 if (!fw_ddb_entry) { 7543 DEBUG2(ql4_printk(KERN_ERR, ha, 7544 "%s: Unable to allocate dma buffer\n", 7545 __func__)); 7546 ret = -ENOMEM; 7547 goto exit_ddb_login; 7548 } 7549 7550 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7551 options |= IPV6_DEFAULT_DDB_ENTRY; 7552 7553 ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); 7554 if (ret == QLA_ERROR) 7555 goto exit_ddb_login; 7556 7557 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); 7558 fw_ddb_entry->cookie = DDB_VALID_COOKIE; 7559 7560 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0) 7561 ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry, 7562 fnode_sess->target_id); 7563 else 7564 ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry, 7565 fnode_sess->target_id); 7566 7567 if (ret > 0) 7568 ret = -EIO; 7569 7570 exit_ddb_login: 7571 if (fw_ddb_entry) 7572 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7573 fw_ddb_entry, fw_ddb_entry_dma); 7574 return ret; 7575 } 7576 7577 /** 7578 * qla4xxx_sysfs_ddb_logout_sid - Logout session for the specified target 7579 * @cls_sess: pointer to session to be logged out 7580 * 7581 * This performs session log out from the specified target 7582 **/ 7583 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess) 7584 { 7585 struct iscsi_session *sess; 7586 struct ddb_entry *ddb_entry = NULL; 7587 struct scsi_qla_host *ha; 7588 struct dev_db_entry *fw_ddb_entry = NULL; 7589 dma_addr_t fw_ddb_entry_dma; 7590 unsigned long flags; 7591 unsigned long wtime; 7592 uint32_t ddb_state; 7593 int options; 7594 int ret = 0; 7595 7596 sess = cls_sess->dd_data; 7597 ddb_entry = sess->dd_data; 7598 ha = ddb_entry->ha; 7599 7600 if (ddb_entry->ddb_type != FLASH_DDB) { 7601 ql4_printk(KERN_ERR, ha, "%s: Not a flash node session\n", 7602 __func__); 7603 ret = -ENXIO; 7604 goto exit_ddb_logout; 7605 } 7606 7607 if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) { 7608 ql4_printk(KERN_ERR, ha, 7609 "%s: Logout from boot target entry is not permitted.\n", 7610 __func__); 7611 ret = -EPERM; 7612 goto exit_ddb_logout; 7613 } 7614 7615 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7616 &fw_ddb_entry_dma, GFP_KERNEL); 7617 if (!fw_ddb_entry) { 7618 ql4_printk(KERN_ERR, ha, 7619 "%s: Unable to allocate dma buffer\n", __func__); 7620 ret = -ENOMEM; 7621 goto exit_ddb_logout; 7622 } 7623 7624 if 
(test_and_set_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags)) 7625 goto ddb_logout_init; 7626 7627 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 7628 fw_ddb_entry, fw_ddb_entry_dma, 7629 NULL, NULL, &ddb_state, NULL, 7630 NULL, NULL); 7631 if (ret == QLA_ERROR) 7632 goto ddb_logout_init; 7633 7634 if (ddb_state == DDB_DS_SESSION_ACTIVE) 7635 goto ddb_logout_init; 7636 7637 /* wait until next relogin is triggered using DF_RELOGIN and 7638 * clear DF_RELOGIN to avoid invocation of further relogin 7639 */ 7640 wtime = jiffies + (HZ * RELOGIN_TOV); 7641 do { 7642 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags)) 7643 goto ddb_logout_init; 7644 7645 schedule_timeout_uninterruptible(HZ); 7646 } while ((time_after(wtime, jiffies))); 7647 7648 ddb_logout_init: 7649 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); 7650 atomic_set(&ddb_entry->relogin_timer, 0); 7651 7652 options = LOGOUT_OPTION_CLOSE_SESSION; 7653 qla4xxx_session_logout_ddb(ha, ddb_entry, options); 7654 7655 memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry)); 7656 wtime = jiffies + (HZ * LOGOUT_TOV); 7657 do { 7658 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 7659 fw_ddb_entry, fw_ddb_entry_dma, 7660 NULL, NULL, &ddb_state, NULL, 7661 NULL, NULL); 7662 if (ret == QLA_ERROR) 7663 goto ddb_logout_clr_sess; 7664 7665 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || 7666 (ddb_state == DDB_DS_SESSION_FAILED)) 7667 goto ddb_logout_clr_sess; 7668 7669 schedule_timeout_uninterruptible(HZ); 7670 } while ((time_after(wtime, jiffies))); 7671 7672 ddb_logout_clr_sess: 7673 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); 7674 /* 7675 * we have decremented the reference count of the driver 7676 * when we setup the session to have the driver unload 7677 * to be seamless without actually destroying the 7678 * session 7679 **/ 7680 try_module_get(qla4xxx_iscsi_transport.owner); 7681 iscsi_destroy_endpoint(ddb_entry->conn->ep); 7682 7683 spin_lock_irqsave(&ha->hardware_lock, flags); 7684 qla4xxx_free_ddb(ha, ddb_entry); 7685 clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map); 7686 spin_unlock_irqrestore(&ha->hardware_lock, flags); 7687 7688 iscsi_session_teardown(ddb_entry->sess); 7689 7690 clear_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags); 7691 ret = QLA_SUCCESS; 7692 7693 exit_ddb_logout: 7694 if (fw_ddb_entry) 7695 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7696 fw_ddb_entry, fw_ddb_entry_dma); 7697 return ret; 7698 } 7699 7700 /** 7701 * qla4xxx_sysfs_ddb_logout - Logout from the specified target 7702 * @fnode_sess: pointer to session attrs of flash ddb entry 7703 * @fnode_conn: pointer to connection attrs of flash ddb entry 7704 * 7705 * This performs log out from the specified target 7706 **/ 7707 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess, 7708 struct iscsi_bus_flash_conn *fnode_conn) 7709 { 7710 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7711 struct scsi_qla_host *ha = to_qla_host(shost); 7712 struct ql4_tuple_ddb *flash_tddb = NULL; 7713 struct ql4_tuple_ddb *tmp_tddb = NULL; 7714 struct dev_db_entry *fw_ddb_entry = NULL; 7715 struct ddb_entry *ddb_entry = NULL; 7716 dma_addr_t fw_ddb_dma; 7717 uint32_t next_idx = 0; 7718 uint32_t state = 0, conn_err = 0; 7719 uint16_t conn_id = 0; 7720 int idx, index; 7721 int status, ret = 0; 7722 7723 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 7724 &fw_ddb_dma); 7725 if (fw_ddb_entry == NULL) { 7726 ql4_printk(KERN_ERR, ha, "%s:Out of memory\n", __func__); 7727 ret = 
-ENOMEM; 7728 goto exit_ddb_logout; 7729 } 7730 7731 flash_tddb = vzalloc(sizeof(*flash_tddb)); 7732 if (!flash_tddb) { 7733 ql4_printk(KERN_WARNING, ha, 7734 "%s:Memory Allocation failed.\n", __func__); 7735 ret = -ENOMEM; 7736 goto exit_ddb_logout; 7737 } 7738 7739 tmp_tddb = vzalloc(sizeof(*tmp_tddb)); 7740 if (!tmp_tddb) { 7741 ql4_printk(KERN_WARNING, ha, 7742 "%s:Memory Allocation failed.\n", __func__); 7743 ret = -ENOMEM; 7744 goto exit_ddb_logout; 7745 } 7746 7747 if (!fnode_sess->targetname) { 7748 ql4_printk(KERN_ERR, ha, 7749 "%s:Cannot logout from SendTarget entry\n", 7750 __func__); 7751 ret = -EPERM; 7752 goto exit_ddb_logout; 7753 } 7754 7755 if (fnode_sess->is_boot_target) { 7756 ql4_printk(KERN_ERR, ha, 7757 "%s: Logout from boot target entry is not permitted.\n", 7758 __func__); 7759 ret = -EPERM; 7760 goto exit_ddb_logout; 7761 } 7762 7763 strlcpy(flash_tddb->iscsi_name, fnode_sess->targetname, 7764 ISCSI_NAME_SIZE); 7765 7766 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7767 sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress); 7768 else 7769 sprintf(flash_tddb->ip_addr, "%pI4", fnode_conn->ipaddress); 7770 7771 flash_tddb->tpgt = fnode_sess->tpgt; 7772 flash_tddb->port = fnode_conn->port; 7773 7774 COPY_ISID(flash_tddb->isid, fnode_sess->isid); 7775 7776 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { 7777 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 7778 if (ddb_entry == NULL) 7779 continue; 7780 7781 if (ddb_entry->ddb_type != FLASH_DDB) 7782 continue; 7783 7784 index = ddb_entry->sess->target_id; 7785 status = qla4xxx_get_fwddb_entry(ha, index, fw_ddb_entry, 7786 fw_ddb_dma, NULL, &next_idx, 7787 &state, &conn_err, NULL, 7788 &conn_id); 7789 if (status == QLA_ERROR) { 7790 ret = -ENOMEM; 7791 break; 7792 } 7793 7794 qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, NULL); 7795 7796 status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb, 7797 true); 7798 if (status == QLA_SUCCESS) { 7799 ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess); 7800 break; 7801 } 7802 } 7803 7804 if (idx == MAX_DDB_ENTRIES) 7805 ret = -ESRCH; 7806 7807 exit_ddb_logout: 7808 if (flash_tddb) 7809 vfree(flash_tddb); 7810 if (tmp_tddb) 7811 vfree(tmp_tddb); 7812 if (fw_ddb_entry) 7813 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 7814 7815 return ret; 7816 } 7817 7818 static int 7819 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, 7820 int param, char *buf) 7821 { 7822 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7823 struct scsi_qla_host *ha = to_qla_host(shost); 7824 struct iscsi_bus_flash_conn *fnode_conn; 7825 struct ql4_chap_table chap_tbl; 7826 struct device *dev; 7827 int parent_type; 7828 int rc = 0; 7829 7830 dev = iscsi_find_flashnode_conn(fnode_sess); 7831 if (!dev) 7832 return -EIO; 7833 7834 fnode_conn = iscsi_dev_to_flash_conn(dev); 7835 7836 switch (param) { 7837 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: 7838 rc = sprintf(buf, "%u\n", fnode_conn->is_fw_assigned_ipv6); 7839 break; 7840 case ISCSI_FLASHNODE_PORTAL_TYPE: 7841 rc = sprintf(buf, "%s\n", fnode_sess->portal_type); 7842 break; 7843 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: 7844 rc = sprintf(buf, "%u\n", fnode_sess->auto_snd_tgt_disable); 7845 break; 7846 case ISCSI_FLASHNODE_DISCOVERY_SESS: 7847 rc = sprintf(buf, "%u\n", fnode_sess->discovery_sess); 7848 break; 7849 case ISCSI_FLASHNODE_ENTRY_EN: 7850 rc = sprintf(buf, "%u\n", fnode_sess->entry_state); 7851 break; 7852 case ISCSI_FLASHNODE_HDR_DGST_EN: 7853 rc = 
sprintf(buf, "%u\n", fnode_conn->hdrdgst_en); 7854 break; 7855 case ISCSI_FLASHNODE_DATA_DGST_EN: 7856 rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en); 7857 break; 7858 case ISCSI_FLASHNODE_IMM_DATA_EN: 7859 rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en); 7860 break; 7861 case ISCSI_FLASHNODE_INITIAL_R2T_EN: 7862 rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en); 7863 break; 7864 case ISCSI_FLASHNODE_DATASEQ_INORDER: 7865 rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en); 7866 break; 7867 case ISCSI_FLASHNODE_PDU_INORDER: 7868 rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en); 7869 break; 7870 case ISCSI_FLASHNODE_CHAP_AUTH_EN: 7871 rc = sprintf(buf, "%u\n", fnode_sess->chap_auth_en); 7872 break; 7873 case ISCSI_FLASHNODE_SNACK_REQ_EN: 7874 rc = sprintf(buf, "%u\n", fnode_conn->snack_req_en); 7875 break; 7876 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: 7877 rc = sprintf(buf, "%u\n", fnode_sess->discovery_logout_en); 7878 break; 7879 case ISCSI_FLASHNODE_BIDI_CHAP_EN: 7880 rc = sprintf(buf, "%u\n", fnode_sess->bidi_chap_en); 7881 break; 7882 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: 7883 rc = sprintf(buf, "%u\n", fnode_sess->discovery_auth_optional); 7884 break; 7885 case ISCSI_FLASHNODE_ERL: 7886 rc = sprintf(buf, "%u\n", fnode_sess->erl); 7887 break; 7888 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: 7889 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_stat); 7890 break; 7891 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: 7892 rc = sprintf(buf, "%u\n", fnode_conn->tcp_nagle_disable); 7893 break; 7894 case ISCSI_FLASHNODE_TCP_WSF_DISABLE: 7895 rc = sprintf(buf, "%u\n", fnode_conn->tcp_wsf_disable); 7896 break; 7897 case ISCSI_FLASHNODE_TCP_TIMER_SCALE: 7898 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timer_scale); 7899 break; 7900 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: 7901 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_en); 7902 break; 7903 case ISCSI_FLASHNODE_IP_FRAG_DISABLE: 7904 rc = sprintf(buf, "%u\n", fnode_conn->fragment_disable); 7905 break; 7906 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: 7907 rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength); 7908 break; 7909 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: 7910 rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength); 7911 break; 7912 case ISCSI_FLASHNODE_FIRST_BURST: 7913 rc = sprintf(buf, "%u\n", fnode_sess->first_burst); 7914 break; 7915 case ISCSI_FLASHNODE_DEF_TIME2WAIT: 7916 rc = sprintf(buf, "%u\n", fnode_sess->time2wait); 7917 break; 7918 case ISCSI_FLASHNODE_DEF_TIME2RETAIN: 7919 rc = sprintf(buf, "%u\n", fnode_sess->time2retain); 7920 break; 7921 case ISCSI_FLASHNODE_MAX_R2T: 7922 rc = sprintf(buf, "%u\n", fnode_sess->max_r2t); 7923 break; 7924 case ISCSI_FLASHNODE_KEEPALIVE_TMO: 7925 rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout); 7926 break; 7927 case ISCSI_FLASHNODE_ISID: 7928 rc = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n", 7929 fnode_sess->isid[0], fnode_sess->isid[1], 7930 fnode_sess->isid[2], fnode_sess->isid[3], 7931 fnode_sess->isid[4], fnode_sess->isid[5]); 7932 break; 7933 case ISCSI_FLASHNODE_TSID: 7934 rc = sprintf(buf, "%u\n", fnode_sess->tsid); 7935 break; 7936 case ISCSI_FLASHNODE_PORT: 7937 rc = sprintf(buf, "%d\n", fnode_conn->port); 7938 break; 7939 case ISCSI_FLASHNODE_MAX_BURST: 7940 rc = sprintf(buf, "%u\n", fnode_sess->max_burst); 7941 break; 7942 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: 7943 rc = sprintf(buf, "%u\n", 7944 fnode_sess->default_taskmgmt_timeout); 7945 break; 7946 case ISCSI_FLASHNODE_IPADDR: 7947 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7948 rc 
= sprintf(buf, "%pI6\n", fnode_conn->ipaddress); 7949 else 7950 rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress); 7951 break; 7952 case ISCSI_FLASHNODE_ALIAS: 7953 if (fnode_sess->targetalias) 7954 rc = sprintf(buf, "%s\n", fnode_sess->targetalias); 7955 else 7956 rc = sprintf(buf, "\n"); 7957 break; 7958 case ISCSI_FLASHNODE_REDIRECT_IPADDR: 7959 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7960 rc = sprintf(buf, "%pI6\n", 7961 fnode_conn->redirect_ipaddr); 7962 else 7963 rc = sprintf(buf, "%pI4\n", 7964 fnode_conn->redirect_ipaddr); 7965 break; 7966 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: 7967 rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size); 7968 break; 7969 case ISCSI_FLASHNODE_LOCAL_PORT: 7970 rc = sprintf(buf, "%u\n", fnode_conn->local_port); 7971 break; 7972 case ISCSI_FLASHNODE_IPV4_TOS: 7973 rc = sprintf(buf, "%u\n", fnode_conn->ipv4_tos); 7974 break; 7975 case ISCSI_FLASHNODE_IPV6_TC: 7976 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7977 rc = sprintf(buf, "%u\n", 7978 fnode_conn->ipv6_traffic_class); 7979 else 7980 rc = sprintf(buf, "\n"); 7981 break; 7982 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: 7983 rc = sprintf(buf, "%u\n", fnode_conn->ipv6_flow_label); 7984 break; 7985 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: 7986 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7987 rc = sprintf(buf, "%pI6\n", 7988 fnode_conn->link_local_ipv6_addr); 7989 else 7990 rc = sprintf(buf, "\n"); 7991 break; 7992 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: 7993 rc = sprintf(buf, "%u\n", fnode_sess->discovery_parent_idx); 7994 break; 7995 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE: 7996 if (fnode_sess->discovery_parent_type == DDB_ISNS) 7997 parent_type = ISCSI_DISC_PARENT_ISNS; 7998 else if (fnode_sess->discovery_parent_type == DDB_NO_LINK) 7999 parent_type = ISCSI_DISC_PARENT_UNKNOWN; 8000 else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES) 8001 parent_type = ISCSI_DISC_PARENT_SENDTGT; 8002 else 8003 parent_type = ISCSI_DISC_PARENT_UNKNOWN; 8004 8005 rc = sprintf(buf, "%s\n", 8006 iscsi_get_discovery_parent_name(parent_type)); 8007 break; 8008 case ISCSI_FLASHNODE_NAME: 8009 if (fnode_sess->targetname) 8010 rc = sprintf(buf, "%s\n", fnode_sess->targetname); 8011 else 8012 rc = sprintf(buf, "\n"); 8013 break; 8014 case ISCSI_FLASHNODE_TPGT: 8015 rc = sprintf(buf, "%u\n", fnode_sess->tpgt); 8016 break; 8017 case ISCSI_FLASHNODE_TCP_XMIT_WSF: 8018 rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf); 8019 break; 8020 case ISCSI_FLASHNODE_TCP_RECV_WSF: 8021 rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf); 8022 break; 8023 case ISCSI_FLASHNODE_CHAP_OUT_IDX: 8024 rc = sprintf(buf, "%u\n", fnode_sess->chap_out_idx); 8025 break; 8026 case ISCSI_FLASHNODE_USERNAME: 8027 if (fnode_sess->chap_auth_en) { 8028 qla4xxx_get_uni_chap_at_index(ha, 8029 chap_tbl.name, 8030 chap_tbl.secret, 8031 fnode_sess->chap_out_idx); 8032 rc = sprintf(buf, "%s\n", chap_tbl.name); 8033 } else { 8034 rc = sprintf(buf, "\n"); 8035 } 8036 break; 8037 case ISCSI_FLASHNODE_PASSWORD: 8038 if (fnode_sess->chap_auth_en) { 8039 qla4xxx_get_uni_chap_at_index(ha, 8040 chap_tbl.name, 8041 chap_tbl.secret, 8042 fnode_sess->chap_out_idx); 8043 rc = sprintf(buf, "%s\n", chap_tbl.secret); 8044 } else { 8045 rc = sprintf(buf, "\n"); 8046 } 8047 break; 8048 case ISCSI_FLASHNODE_STATSN: 8049 rc = sprintf(buf, "%u\n", fnode_conn->statsn); 8050 break; 8051 case ISCSI_FLASHNODE_EXP_STATSN: 8052 rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn); 8053 break; 8054 case ISCSI_FLASHNODE_IS_BOOT_TGT: 
8055 rc = sprintf(buf, "%u\n", fnode_sess->is_boot_target); 8056 break; 8057 default: 8058 rc = -ENOSYS; 8059 break; 8060 } 8061 8062 put_device(dev); 8063 return rc; 8064 } 8065 8066 /** 8067 * qla4xxx_sysfs_ddb_set_param - Set parameter for firmware DDB entry 8068 * @fnode_sess: pointer to session attrs of flash ddb entry 8069 * @fnode_conn: pointer to connection attrs of flash ddb entry 8070 * @data: Parameters and their values to update 8071 * @len: len of data 8072 * 8073 * This sets the parameter of flash ddb entry and writes them to flash 8074 **/ 8075 static int 8076 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, 8077 struct iscsi_bus_flash_conn *fnode_conn, 8078 void *data, int len) 8079 { 8080 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 8081 struct scsi_qla_host *ha = to_qla_host(shost); 8082 struct iscsi_flashnode_param_info *fnode_param; 8083 struct ql4_chap_table chap_tbl; 8084 struct nlattr *attr; 8085 uint16_t chap_out_idx = INVALID_ENTRY; 8086 int rc = QLA_ERROR; 8087 uint32_t rem = len; 8088 8089 memset((void *)&chap_tbl, 0, sizeof(chap_tbl)); 8090 nla_for_each_attr(attr, data, len, rem) { 8091 fnode_param = nla_data(attr); 8092 8093 switch (fnode_param->param) { 8094 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: 8095 fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0]; 8096 break; 8097 case ISCSI_FLASHNODE_PORTAL_TYPE: 8098 memcpy(fnode_sess->portal_type, fnode_param->value, 8099 strlen(fnode_sess->portal_type)); 8100 break; 8101 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: 8102 fnode_sess->auto_snd_tgt_disable = 8103 fnode_param->value[0]; 8104 break; 8105 case ISCSI_FLASHNODE_DISCOVERY_SESS: 8106 fnode_sess->discovery_sess = fnode_param->value[0]; 8107 break; 8108 case ISCSI_FLASHNODE_ENTRY_EN: 8109 fnode_sess->entry_state = fnode_param->value[0]; 8110 break; 8111 case ISCSI_FLASHNODE_HDR_DGST_EN: 8112 fnode_conn->hdrdgst_en = fnode_param->value[0]; 8113 break; 8114 case ISCSI_FLASHNODE_DATA_DGST_EN: 8115 fnode_conn->datadgst_en = fnode_param->value[0]; 8116 break; 8117 case ISCSI_FLASHNODE_IMM_DATA_EN: 8118 fnode_sess->imm_data_en = fnode_param->value[0]; 8119 break; 8120 case ISCSI_FLASHNODE_INITIAL_R2T_EN: 8121 fnode_sess->initial_r2t_en = fnode_param->value[0]; 8122 break; 8123 case ISCSI_FLASHNODE_DATASEQ_INORDER: 8124 fnode_sess->dataseq_inorder_en = fnode_param->value[0]; 8125 break; 8126 case ISCSI_FLASHNODE_PDU_INORDER: 8127 fnode_sess->pdu_inorder_en = fnode_param->value[0]; 8128 break; 8129 case ISCSI_FLASHNODE_CHAP_AUTH_EN: 8130 fnode_sess->chap_auth_en = fnode_param->value[0]; 8131 /* Invalidate chap index if chap auth is disabled */ 8132 if (!fnode_sess->chap_auth_en) 8133 fnode_sess->chap_out_idx = INVALID_ENTRY; 8134 8135 break; 8136 case ISCSI_FLASHNODE_SNACK_REQ_EN: 8137 fnode_conn->snack_req_en = fnode_param->value[0]; 8138 break; 8139 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: 8140 fnode_sess->discovery_logout_en = fnode_param->value[0]; 8141 break; 8142 case ISCSI_FLASHNODE_BIDI_CHAP_EN: 8143 fnode_sess->bidi_chap_en = fnode_param->value[0]; 8144 break; 8145 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: 8146 fnode_sess->discovery_auth_optional = 8147 fnode_param->value[0]; 8148 break; 8149 case ISCSI_FLASHNODE_ERL: 8150 fnode_sess->erl = fnode_param->value[0]; 8151 break; 8152 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: 8153 fnode_conn->tcp_timestamp_stat = fnode_param->value[0]; 8154 break; 8155 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: 8156 fnode_conn->tcp_nagle_disable = fnode_param->value[0]; 8157 break; 
8158 case ISCSI_FLASHNODE_TCP_WSF_DISABLE: 8159 fnode_conn->tcp_wsf_disable = fnode_param->value[0]; 8160 break; 8161 case ISCSI_FLASHNODE_TCP_TIMER_SCALE: 8162 fnode_conn->tcp_timer_scale = fnode_param->value[0]; 8163 break; 8164 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: 8165 fnode_conn->tcp_timestamp_en = fnode_param->value[0]; 8166 break; 8167 case ISCSI_FLASHNODE_IP_FRAG_DISABLE: 8168 fnode_conn->fragment_disable = fnode_param->value[0]; 8169 break; 8170 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: 8171 fnode_conn->max_recv_dlength = 8172 *(unsigned *)fnode_param->value; 8173 break; 8174 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: 8175 fnode_conn->max_xmit_dlength = 8176 *(unsigned *)fnode_param->value; 8177 break; 8178 case ISCSI_FLASHNODE_FIRST_BURST: 8179 fnode_sess->first_burst = 8180 *(unsigned *)fnode_param->value; 8181 break; 8182 case ISCSI_FLASHNODE_DEF_TIME2WAIT: 8183 fnode_sess->time2wait = *(uint16_t *)fnode_param->value; 8184 break; 8185 case ISCSI_FLASHNODE_DEF_TIME2RETAIN: 8186 fnode_sess->time2retain = 8187 *(uint16_t *)fnode_param->value; 8188 break; 8189 case ISCSI_FLASHNODE_MAX_R2T: 8190 fnode_sess->max_r2t = 8191 *(uint16_t *)fnode_param->value; 8192 break; 8193 case ISCSI_FLASHNODE_KEEPALIVE_TMO: 8194 fnode_conn->keepalive_timeout = 8195 *(uint16_t *)fnode_param->value; 8196 break; 8197 case ISCSI_FLASHNODE_ISID: 8198 memcpy(fnode_sess->isid, fnode_param->value, 8199 sizeof(fnode_sess->isid)); 8200 break; 8201 case ISCSI_FLASHNODE_TSID: 8202 fnode_sess->tsid = *(uint16_t *)fnode_param->value; 8203 break; 8204 case ISCSI_FLASHNODE_PORT: 8205 fnode_conn->port = *(uint16_t *)fnode_param->value; 8206 break; 8207 case ISCSI_FLASHNODE_MAX_BURST: 8208 fnode_sess->max_burst = *(unsigned *)fnode_param->value; 8209 break; 8210 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: 8211 fnode_sess->default_taskmgmt_timeout = 8212 *(uint16_t *)fnode_param->value; 8213 break; 8214 case ISCSI_FLASHNODE_IPADDR: 8215 memcpy(fnode_conn->ipaddress, fnode_param->value, 8216 IPv6_ADDR_LEN); 8217 break; 8218 case ISCSI_FLASHNODE_ALIAS: 8219 rc = iscsi_switch_str_param(&fnode_sess->targetalias, 8220 (char *)fnode_param->value); 8221 break; 8222 case ISCSI_FLASHNODE_REDIRECT_IPADDR: 8223 memcpy(fnode_conn->redirect_ipaddr, fnode_param->value, 8224 IPv6_ADDR_LEN); 8225 break; 8226 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: 8227 fnode_conn->max_segment_size = 8228 *(unsigned *)fnode_param->value; 8229 break; 8230 case ISCSI_FLASHNODE_LOCAL_PORT: 8231 fnode_conn->local_port = 8232 *(uint16_t *)fnode_param->value; 8233 break; 8234 case ISCSI_FLASHNODE_IPV4_TOS: 8235 fnode_conn->ipv4_tos = fnode_param->value[0]; 8236 break; 8237 case ISCSI_FLASHNODE_IPV6_TC: 8238 fnode_conn->ipv6_traffic_class = fnode_param->value[0]; 8239 break; 8240 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: 8241 fnode_conn->ipv6_flow_label = fnode_param->value[0]; 8242 break; 8243 case ISCSI_FLASHNODE_NAME: 8244 rc = iscsi_switch_str_param(&fnode_sess->targetname, 8245 (char *)fnode_param->value); 8246 break; 8247 case ISCSI_FLASHNODE_TPGT: 8248 fnode_sess->tpgt = *(uint16_t *)fnode_param->value; 8249 break; 8250 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: 8251 memcpy(fnode_conn->link_local_ipv6_addr, 8252 fnode_param->value, IPv6_ADDR_LEN); 8253 break; 8254 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: 8255 fnode_sess->discovery_parent_idx = 8256 *(uint16_t *)fnode_param->value; 8257 break; 8258 case ISCSI_FLASHNODE_TCP_XMIT_WSF: 8259 fnode_conn->tcp_xmit_wsf = 8260 *(uint8_t *)fnode_param->value; 8261 break; 8262 case ISCSI_FLASHNODE_TCP_RECV_WSF: 8263 
fnode_conn->tcp_recv_wsf = 8264 *(uint8_t *)fnode_param->value; 8265 break; 8266 case ISCSI_FLASHNODE_STATSN: 8267 fnode_conn->statsn = *(uint32_t *)fnode_param->value; 8268 break; 8269 case ISCSI_FLASHNODE_EXP_STATSN: 8270 fnode_conn->exp_statsn = 8271 *(uint32_t *)fnode_param->value; 8272 break; 8273 case ISCSI_FLASHNODE_CHAP_OUT_IDX: 8274 chap_out_idx = *(uint16_t *)fnode_param->value; 8275 if (!qla4xxx_get_uni_chap_at_index(ha, 8276 chap_tbl.name, 8277 chap_tbl.secret, 8278 chap_out_idx)) { 8279 fnode_sess->chap_out_idx = chap_out_idx; 8280 /* Enable chap auth if chap index is valid */ 8281 fnode_sess->chap_auth_en = QL4_PARAM_ENABLE; 8282 } 8283 break; 8284 default: 8285 ql4_printk(KERN_ERR, ha, 8286 "%s: No such sysfs attribute\n", __func__); 8287 rc = -ENOSYS; 8288 goto exit_set_param; 8289 } 8290 } 8291 8292 rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn); 8293 8294 exit_set_param: 8295 return rc; 8296 } 8297 8298 /** 8299 * qla4xxx_sysfs_ddb_delete - Delete firmware DDB entry 8300 * @fnode_sess: pointer to session attrs of flash ddb entry 8301 * 8302 * This invalidates the flash ddb entry at the given index 8303 **/ 8304 static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess) 8305 { 8306 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 8307 struct scsi_qla_host *ha = to_qla_host(shost); 8308 uint32_t dev_db_start_offset; 8309 uint32_t dev_db_end_offset; 8310 struct dev_db_entry *fw_ddb_entry = NULL; 8311 dma_addr_t fw_ddb_entry_dma; 8312 uint16_t *ddb_cookie = NULL; 8313 size_t ddb_size = 0; 8314 void *pddb = NULL; 8315 int target_id; 8316 int rc = 0; 8317 8318 if (fnode_sess->is_boot_target) { 8319 rc = -EPERM; 8320 DEBUG2(ql4_printk(KERN_ERR, ha, 8321 "%s: Deletion of boot target entry is not permitted.\n", 8322 __func__)); 8323 goto exit_ddb_del; 8324 } 8325 8326 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) 8327 goto sysfs_ddb_del; 8328 8329 if (is_qla40XX(ha)) { 8330 dev_db_start_offset = FLASH_OFFSET_DB_INFO; 8331 dev_db_end_offset = FLASH_OFFSET_DB_END; 8332 dev_db_start_offset += (fnode_sess->target_id * 8333 sizeof(*fw_ddb_entry)); 8334 ddb_size = sizeof(*fw_ddb_entry); 8335 } else { 8336 dev_db_start_offset = FLASH_RAW_ACCESS_ADDR + 8337 (ha->hw.flt_region_ddb << 2); 8338 /* flt_ddb_size is DDB table size for both ports 8339 * so divide it by 2 to calculate the offset for second port 8340 */ 8341 if (ha->port_num == 1) 8342 dev_db_start_offset += (ha->hw.flt_ddb_size / 2); 8343 8344 dev_db_end_offset = dev_db_start_offset + 8345 (ha->hw.flt_ddb_size / 2); 8346 8347 dev_db_start_offset += (fnode_sess->target_id * 8348 sizeof(*fw_ddb_entry)); 8349 dev_db_start_offset += offsetof(struct dev_db_entry, cookie); 8350 8351 ddb_size = sizeof(*ddb_cookie); 8352 } 8353 8354 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: start offset=%u, end offset=%u\n", 8355 __func__, dev_db_start_offset, dev_db_end_offset)); 8356 8357 if (dev_db_start_offset > dev_db_end_offset) { 8358 rc = -EIO; 8359 DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %u\n", 8360 __func__, fnode_sess->target_id)); 8361 goto exit_ddb_del; 8362 } 8363 8364 pddb = dma_alloc_coherent(&ha->pdev->dev, ddb_size, 8365 &fw_ddb_entry_dma, GFP_KERNEL); 8366 if (!pddb) { 8367 rc = -ENOMEM; 8368 DEBUG2(ql4_printk(KERN_ERR, ha, 8369 "%s: Unable to allocate dma buffer\n", 8370 __func__)); 8371 goto exit_ddb_del; 8372 } 8373 8374 if (is_qla40XX(ha)) { 8375 fw_ddb_entry = pddb; 8376 memset(fw_ddb_entry, 0, ddb_size); 8377 ddb_cookie = &fw_ddb_entry->cookie; 8378 } else { 8379 
ddb_cookie = pddb; 8380 } 8381 8382 /* invalidate the cookie */ 8383 *ddb_cookie = 0xFFEE; 8384 qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, 8385 ddb_size, FLASH_OPT_RMW_COMMIT); 8386 8387 sysfs_ddb_del: 8388 target_id = fnode_sess->target_id; 8389 iscsi_destroy_flashnode_sess(fnode_sess); 8390 ql4_printk(KERN_INFO, ha, 8391 "%s: session and conn entries for flashnode %u of host %lu deleted\n", 8392 __func__, target_id, ha->host_no); 8393 exit_ddb_del: 8394 if (pddb) 8395 dma_free_coherent(&ha->pdev->dev, ddb_size, pddb, 8396 fw_ddb_entry_dma); 8397 return rc; 8398 } 8399 8400 /** 8401 * qla4xxx_sysfs_ddb_export - Create sysfs entries for firmware DDBs 8402 * @ha: pointer to adapter structure 8403 * 8404 * Export the firmware DDB for all send targets and normal targets to sysfs. 8405 **/ 8406 int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha) 8407 { 8408 struct dev_db_entry *fw_ddb_entry = NULL; 8409 dma_addr_t fw_ddb_entry_dma; 8410 uint16_t max_ddbs; 8411 uint16_t idx = 0; 8412 int ret = QLA_SUCCESS; 8413 8414 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, 8415 sizeof(*fw_ddb_entry), 8416 &fw_ddb_entry_dma, GFP_KERNEL); 8417 if (!fw_ddb_entry) { 8418 DEBUG2(ql4_printk(KERN_ERR, ha, 8419 "%s: Unable to allocate dma buffer\n", 8420 __func__)); 8421 return -ENOMEM; 8422 } 8423 8424 max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES : 8425 MAX_DEV_DB_ENTRIES; 8426 8427 for (idx = 0; idx < max_ddbs; idx++) { 8428 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma, 8429 idx)) 8430 continue; 8431 8432 ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 0); 8433 if (ret) { 8434 ret = -EIO; 8435 break; 8436 } 8437 } 8438 8439 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, 8440 fw_ddb_entry_dma); 8441 8442 return ret; 8443 } 8444 8445 static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha) 8446 { 8447 iscsi_destroy_all_flashnode(ha->host); 8448 } 8449 8450 /** 8451 * qla4xxx_build_ddb_list - Build ddb list and setup sessions 8452 * @ha: pointer to adapter structure 8453 * @is_reset: Is this init path or reset path 8454 * 8455 * Create a list of sendtargets (st) from firmware DDBs, issue send targets 8456 * using connection open, then create the list of normal targets (nt) 8457 * from firmware DDBs. Based on the list of nt setup session and connection 8458 * objects. 8459 **/ 8460 void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset) 8461 { 8462 uint16_t tmo = 0; 8463 struct list_head list_st, list_nt; 8464 struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp; 8465 unsigned long wtime; 8466 8467 if (!test_bit(AF_LINK_UP, &ha->flags)) { 8468 set_bit(AF_BUILD_DDB_LIST, &ha->flags); 8469 ha->is_reset = is_reset; 8470 return; 8471 } 8472 8473 INIT_LIST_HEAD(&list_st); 8474 INIT_LIST_HEAD(&list_nt); 8475 8476 qla4xxx_build_st_list(ha, &list_st); 8477 8478 /* Before issuing conn open mbox, ensure all IPs states are configured 8479 * Note, conn open fails if IPs are not configured 8480 */ 8481 qla4xxx_wait_for_ip_configuration(ha); 8482 8483 /* Go thru the STs and fire the sendtargets by issuing conn open mbx */ 8484 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) { 8485 qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx); 8486 } 8487 8488 /* Wait to ensure all sendtargets are done for min 12 sec wait */ 8489 tmo = ((ha->def_timeout > LOGIN_TOV) && 8490 (ha->def_timeout < LOGIN_TOV * 10) ? 
	       ha->def_timeout : LOGIN_TOV);

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Default time to wait for build ddb %d\n", tmo));

	wtime = jiffies + (HZ * tmo);
	do {
		if (list_empty(&list_st))
			break;

		qla4xxx_remove_failed_ddb(ha, &list_st);
		schedule_timeout_uninterruptible(HZ / 10);
	} while (time_after(wtime, jiffies));


	qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset);

	qla4xxx_free_ddb_list(&list_st);
	qla4xxx_free_ddb_list(&list_nt);

	qla4xxx_free_ddb_index(ha);
}

/**
 * qla4xxx_wait_login_resp_boot_tgt - Wait for iSCSI boot target login
 * response.
 * @ha: pointer to adapter structure
 *
 * When the boot entry is a normal iSCSI target, the DF_BOOT_TGT flag is set
 * in its DDB and the driver waits for the login response of the boot targets
 * during probe.
 **/
static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha)
{
	struct ddb_entry *ddb_entry;
	struct dev_db_entry *fw_ddb_entry = NULL;
	dma_addr_t fw_ddb_entry_dma;
	unsigned long wtime;
	uint32_t ddb_state;
	int max_ddbs, idx, ret;

	max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
				    MAX_DEV_DB_ENTRIES;

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (!fw_ddb_entry) {
		ql4_printk(KERN_ERR, ha,
			   "%s: Unable to allocate dma buffer\n", __func__);
		goto exit_login_resp;
	}

	wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV);

	for (idx = 0; idx < max_ddbs; idx++) {
		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
		if (ddb_entry == NULL)
			continue;

		if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "%s: DDB index [%d]\n", __func__,
					  ddb_entry->fw_ddb_index));
			do {
				ret = qla4xxx_get_fwddb_entry(ha,
						ddb_entry->fw_ddb_index,
						fw_ddb_entry, fw_ddb_entry_dma,
						NULL, NULL, &ddb_state, NULL,
						NULL, NULL);
				if (ret == QLA_ERROR)
					goto exit_login_resp;

				if ((ddb_state == DDB_DS_SESSION_ACTIVE) ||
				    (ddb_state == DDB_DS_SESSION_FAILED))
					break;

				schedule_timeout_uninterruptible(HZ);

			} while ((time_after(wtime, jiffies)));

			if (!time_after(wtime, jiffies)) {
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "%s: Login response wait timer expired\n",
						  __func__));
				goto exit_login_resp;
			}
		}
	}

exit_login_resp:
	if (fw_ddb_entry)
		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
				  fw_ddb_entry, fw_ddb_entry_dma);
}

/**
 * qla4xxx_probe_adapter - callback function to probe HBA
 * @pdev: pointer to pci_dev structure
 * @ent: pointer to the matched pci_device_id entry
 *
 * This routine will probe for QLogic 4xxx iSCSI host adapters.
 * It returns zero if successful. It also initializes all data necessary for
 * the driver.
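 *
 * For orientation only, a sketch of how this probe routine and the matching
 * remove routine are typically wired into the PCI core; the driver's actual
 * struct pci_driver is defined elsewhere in this file (with more fields) and
 * the name below is illustrative:
 *
 *	static struct pci_driver qla4xxx_pci_driver = {
 *		.probe	= qla4xxx_probe_adapter,
 *		.remove	= qla4xxx_remove_adapter,
 *	};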
8594 **/ 8595 static int qla4xxx_probe_adapter(struct pci_dev *pdev, 8596 const struct pci_device_id *ent) 8597 { 8598 int ret = -ENODEV, status; 8599 struct Scsi_Host *host; 8600 struct scsi_qla_host *ha; 8601 uint8_t init_retry_count = 0; 8602 char buf[34]; 8603 struct qla4_8xxx_legacy_intr_set *nx_legacy_intr; 8604 uint32_t dev_state; 8605 8606 if (pci_enable_device(pdev)) 8607 return -1; 8608 8609 host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0); 8610 if (host == NULL) { 8611 printk(KERN_WARNING 8612 "qla4xxx: Couldn't allocate host from scsi layer!\n"); 8613 goto probe_disable_device; 8614 } 8615 8616 /* Clear our data area */ 8617 ha = to_qla_host(host); 8618 memset(ha, 0, sizeof(*ha)); 8619 8620 /* Save the information from PCI BIOS. */ 8621 ha->pdev = pdev; 8622 ha->host = host; 8623 ha->host_no = host->host_no; 8624 ha->func_num = PCI_FUNC(ha->pdev->devfn); 8625 8626 pci_enable_pcie_error_reporting(pdev); 8627 8628 /* Setup Runtime configurable options */ 8629 if (is_qla8022(ha)) { 8630 ha->isp_ops = &qla4_82xx_isp_ops; 8631 ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl; 8632 ha->qdr_sn_window = -1; 8633 ha->ddr_mn_window = -1; 8634 ha->curr_window = 255; 8635 nx_legacy_intr = &legacy_intr[ha->func_num]; 8636 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit; 8637 ha->nx_legacy_intr.tgt_status_reg = 8638 nx_legacy_intr->tgt_status_reg; 8639 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg; 8640 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; 8641 } else if (is_qla8032(ha) || is_qla8042(ha)) { 8642 ha->isp_ops = &qla4_83xx_isp_ops; 8643 ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl; 8644 } else { 8645 ha->isp_ops = &qla4xxx_isp_ops; 8646 } 8647 8648 if (is_qla80XX(ha)) { 8649 rwlock_init(&ha->hw_lock); 8650 ha->pf_bit = ha->func_num << 16; 8651 /* Set EEH reset type to fundamental if required by hba */ 8652 pdev->needs_freset = 1; 8653 } 8654 8655 /* Configure PCI I/O space. */ 8656 ret = ha->isp_ops->iospace_config(ha); 8657 if (ret) 8658 goto probe_failed_ioconfig; 8659 8660 ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n", 8661 pdev->device, pdev->irq, ha->reg); 8662 8663 qla4xxx_config_dma_addressing(ha); 8664 8665 /* Initialize lists and spinlocks. 
*/
	INIT_LIST_HEAD(&ha->free_srb_q);

	mutex_init(&ha->mbox_sem);
	mutex_init(&ha->chap_sem);
	init_completion(&ha->mbx_intr_comp);
	init_completion(&ha->disable_acb_comp);
	init_completion(&ha->idc_comp);
	init_completion(&ha->link_up_comp);
	init_completion(&ha->disable_acb_comp);

	spin_lock_init(&ha->hardware_lock);
	spin_lock_init(&ha->work_lock);

	/* Initialize work list */
	INIT_LIST_HEAD(&ha->work_list);

	/* Allocate dma buffers */
	if (qla4xxx_mem_alloc(ha)) {
		ql4_printk(KERN_WARNING, ha,
			   "[ERROR] Failed to allocate memory for adapter\n");

		ret = -ENOMEM;
		goto probe_failed;
	}

	host->cmd_per_lun = 3;
	host->max_channel = 0;
	host->max_lun = MAX_LUNS - 1;
	host->max_id = MAX_TARGETS;
	host->max_cmd_len = IOCB_MAX_CDB_LEN;
	host->can_queue = MAX_SRBS;
	host->transportt = qla4xxx_scsi_transport;

	pci_set_drvdata(pdev, ha);

	ret = scsi_add_host(host, &pdev->dev);
	if (ret)
		goto probe_failed;

	if (is_qla80XX(ha))
		qla4_8xxx_get_flash_info(ha);

	if (is_qla8032(ha) || is_qla8042(ha)) {
		qla4_83xx_read_reset_template(ha);
		/*
		 * NOTE: If ql4xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
		 * If DONTRESET_BIT0 is set, drivers should not set dev_state
		 * to NEED_RESET. But if NEED_RESET is set, drivers should
		 * honor the reset.
		 */
		if (ql4xdontresethba == 1)
			qla4_83xx_set_idc_dontreset(ha);
	}

	/*
	 * Initialize the Host adapter request/response queues and
	 * firmware
	 * NOTE: interrupts enabled upon successful completion
	 */
	status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);

	/* Don't retry adapter initialization if IRQ allocation failed */
	if (is_qla80XX(ha) && (status == QLA_ERROR))
		goto skip_retry_init;

	while ((!test_bit(AF_ONLINE, &ha->flags)) &&
	    init_retry_count++ < MAX_INIT_RETRIES) {

		if (is_qla80XX(ha)) {
			ha->isp_ops->idc_lock(ha);
			dev_state = qla4_8xxx_rd_direct(ha,
							QLA8XXX_CRB_DEV_STATE);
			ha->isp_ops->idc_unlock(ha);
			if (dev_state == QLA8XXX_DEV_FAILED) {
				ql4_printk(KERN_WARNING, ha, "%s: don't retry "
					   "initialize adapter. H/W is in failed state\n",
					   __func__);
				break;
			}
		}
		DEBUG2(printk("scsi: %s: retrying adapter initialization "
			      "(%d)\n", __func__, init_retry_count));

		if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
			continue;

		status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
		if (is_qla80XX(ha) && (status == QLA_ERROR)) {
			if (qla4_8xxx_check_init_adapter_retry(ha) == QLA_ERROR)
				goto skip_retry_init;
		}
	}

skip_retry_init:
	if (!test_bit(AF_ONLINE, &ha->flags)) {
		ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");

		if ((is_qla8022(ha) && ql4xdontresethba) ||
		    ((is_qla8032(ha) || is_qla8042(ha)) &&
		     qla4_83xx_idc_dontreset(ha))) {
			/* Put the device in failed state. */
			DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
			ha->isp_ops->idc_lock(ha);
			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
					    QLA8XXX_DEV_FAILED);
			ha->isp_ops->idc_unlock(ha);
		}
		ret = -ENODEV;
		goto remove_host;
	}

	/* Startup the kernel thread for this host adapter. */
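	/*
	 * The per-host workqueue created below runs qla4xxx_do_dpc(), which
	 * services deferred adapter work (for example recovery and relogin
	 * requests flagged through ha->dpc_flags) outside of interrupt
	 * context.
	 */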
*/ 8778 DEBUG2(printk("scsi: %s: Starting kernel thread for " 8779 "qla4xxx_dpc\n", __func__)); 8780 sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no); 8781 ha->dpc_thread = create_singlethread_workqueue(buf); 8782 if (!ha->dpc_thread) { 8783 ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n"); 8784 ret = -ENODEV; 8785 goto remove_host; 8786 } 8787 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc); 8788 8789 ha->task_wq = alloc_workqueue("qla4xxx_%lu_task", WQ_MEM_RECLAIM, 1, 8790 ha->host_no); 8791 if (!ha->task_wq) { 8792 ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n"); 8793 ret = -ENODEV; 8794 goto remove_host; 8795 } 8796 8797 /* 8798 * For ISP-8XXX, request_irqs is called in qla4_8xxx_load_risc 8799 * (which is called indirectly by qla4xxx_initialize_adapter), 8800 * so that irqs will be registered after crbinit but before 8801 * mbx_intr_enable. 8802 */ 8803 if (is_qla40XX(ha)) { 8804 ret = qla4xxx_request_irqs(ha); 8805 if (ret) { 8806 ql4_printk(KERN_WARNING, ha, "Failed to reserve " 8807 "interrupt %d already in use.\n", pdev->irq); 8808 goto remove_host; 8809 } 8810 } 8811 8812 pci_save_state(ha->pdev); 8813 ha->isp_ops->enable_intrs(ha); 8814 8815 /* Start timer thread. */ 8816 qla4xxx_start_timer(ha, qla4xxx_timer, 1); 8817 8818 set_bit(AF_INIT_DONE, &ha->flags); 8819 8820 qla4_8xxx_alloc_sysfs_attr(ha); 8821 8822 printk(KERN_INFO 8823 " QLogic iSCSI HBA Driver version: %s\n" 8824 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", 8825 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev), 8826 ha->host_no, ha->fw_info.fw_major, ha->fw_info.fw_minor, 8827 ha->fw_info.fw_patch, ha->fw_info.fw_build); 8828 8829 /* Set the driver version */ 8830 if (is_qla80XX(ha)) 8831 qla4_8xxx_set_param(ha, SET_DRVR_VERSION); 8832 8833 if (qla4xxx_setup_boot_info(ha)) 8834 ql4_printk(KERN_ERR, ha, 8835 "%s: No iSCSI boot target configured\n", __func__); 8836 8837 set_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags); 8838 /* Perform the build ddb list and login to each */ 8839 qla4xxx_build_ddb_list(ha, INIT_ADAPTER); 8840 iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb); 8841 qla4xxx_wait_login_resp_boot_tgt(ha); 8842 8843 qla4xxx_create_chap_list(ha); 8844 8845 qla4xxx_create_ifaces(ha); 8846 return 0; 8847 8848 remove_host: 8849 scsi_remove_host(ha->host); 8850 8851 probe_failed: 8852 qla4xxx_free_adapter(ha); 8853 8854 probe_failed_ioconfig: 8855 pci_disable_pcie_error_reporting(pdev); 8856 scsi_host_put(ha->host); 8857 8858 probe_disable_device: 8859 pci_disable_device(pdev); 8860 8861 return ret; 8862 } 8863 8864 /** 8865 * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize 8866 * @ha: pointer to adapter structure 8867 * 8868 * Mark the other ISP-4xxx port to indicate that the driver is being removed, 8869 * so that the other port will not re-initialize while in the process of 8870 * removing the ha due to driver unload or hba hotplug. 
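 *
 * As a concrete example of the pairing relied on below: the two iSCSI
 * functions of an ISP4xxx adapter live at PCI functions 1 and 3 of the same
 * slot, so a host at function 1 marks its peer at function 3 (and vice
 * versa) with AF_HA_REMOVAL.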
 **/
static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
{
	struct scsi_qla_host *other_ha = NULL;
	struct pci_dev *other_pdev = NULL;
	int fn = ISP4XXX_PCI_FN_2;

	/* iSCSI function numbers for ISP4xxx are 1 and 3 */
	if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
		fn = ISP4XXX_PCI_FN_1;

	other_pdev =
		pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
		ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
		fn));

	/* Get other_ha if other_pdev is valid and its state is enabled */
	if (other_pdev) {
		if (atomic_read(&other_pdev->enable_cnt)) {
			other_ha = pci_get_drvdata(other_pdev);
			if (other_ha) {
				set_bit(AF_HA_REMOVAL, &other_ha->flags);
				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
						  "Prevent %s reinit\n", __func__,
						  dev_name(&other_ha->pdev->dev)));
			}
		}
		pci_dev_put(other_pdev);
	}
}

static void qla4xxx_destroy_ddb(struct scsi_qla_host *ha,
				struct ddb_entry *ddb_entry)
{
	struct dev_db_entry *fw_ddb_entry = NULL;
	dma_addr_t fw_ddb_entry_dma;
	unsigned long wtime;
	uint32_t ddb_state;
	int options;
	int status;

	options = LOGOUT_OPTION_CLOSE_SESSION;
	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
		goto clear_ddb;
	}

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (!fw_ddb_entry) {
		ql4_printk(KERN_ERR, ha,
			   "%s: Unable to allocate dma buffer\n", __func__);
		goto clear_ddb;
	}

	wtime = jiffies + (HZ * LOGOUT_TOV);
	do {
		status = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
						 fw_ddb_entry, fw_ddb_entry_dma,
						 NULL, NULL, &ddb_state, NULL,
						 NULL, NULL);
		if (status == QLA_ERROR)
			goto free_ddb;

		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
		    (ddb_state == DDB_DS_SESSION_FAILED))
			goto free_ddb;

		schedule_timeout_uninterruptible(HZ);
	} while ((time_after(wtime, jiffies)));

free_ddb:
	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
			  fw_ddb_entry, fw_ddb_entry_dma);
clear_ddb:
	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
}

static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
{
	struct ddb_entry *ddb_entry;
	int idx;

	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {

		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
		if ((ddb_entry != NULL) &&
		    (ddb_entry->ddb_type == FLASH_DDB)) {

			qla4xxx_destroy_ddb(ha, ddb_entry);
			/*
			 * The driver's module reference count was dropped when
			 * the session was set up so that driver unload could
			 * be seamless without actually destroying the session;
			 * take that reference back before tearing it down.
			 */
			try_module_get(qla4xxx_iscsi_transport.owner);
			iscsi_destroy_endpoint(ddb_entry->conn->ep);
			qla4xxx_free_ddb(ha, ddb_entry);
			iscsi_session_teardown(ddb_entry->sess);
		}
	}
}

/**
 * qla4xxx_remove_adapter - callback function to remove adapter.
 * @pdev: PCI device pointer
 **/
static void qla4xxx_remove_adapter(struct pci_dev *pdev)
{
	struct scsi_qla_host *ha;

	/*
	 * If the PCI device is disabled then it means probe_adapter had
	 * failed and resources already cleaned up on probe_adapter exit.
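	 * In that case there is nothing left to undo here, so the
	 * pci_is_enabled() check below simply returns early.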
8985 */ 8986 if (!pci_is_enabled(pdev)) 8987 return; 8988 8989 ha = pci_get_drvdata(pdev); 8990 8991 if (is_qla40XX(ha)) 8992 qla4xxx_prevent_other_port_reinit(ha); 8993 8994 /* destroy iface from sysfs */ 8995 qla4xxx_destroy_ifaces(ha); 8996 8997 if ((!ql4xdisablesysfsboot) && ha->boot_kset) 8998 iscsi_boot_destroy_kset(ha->boot_kset); 8999 9000 qla4xxx_destroy_fw_ddb_session(ha); 9001 qla4_8xxx_free_sysfs_attr(ha); 9002 9003 qla4xxx_sysfs_ddb_remove(ha); 9004 scsi_remove_host(ha->host); 9005 9006 qla4xxx_free_adapter(ha); 9007 9008 scsi_host_put(ha->host); 9009 9010 pci_disable_pcie_error_reporting(pdev); 9011 pci_disable_device(pdev); 9012 } 9013 9014 /** 9015 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method. 9016 * @ha: HA context 9017 * 9018 * At exit, the @ha's flags.enable_64bit_addressing set to indicated 9019 * supported addressing method. 9020 */ 9021 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha) 9022 { 9023 int retval; 9024 9025 /* Update our PCI device dma_mask for full 64 bit mask */ 9026 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) { 9027 if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) { 9028 dev_dbg(&ha->pdev->dev, 9029 "Failed to set 64 bit PCI consistent mask; " 9030 "using 32 bit.\n"); 9031 retval = pci_set_consistent_dma_mask(ha->pdev, 9032 DMA_BIT_MASK(32)); 9033 } 9034 } else 9035 retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32)); 9036 } 9037 9038 static int qla4xxx_slave_alloc(struct scsi_device *sdev) 9039 { 9040 struct iscsi_cls_session *cls_sess; 9041 struct iscsi_session *sess; 9042 struct ddb_entry *ddb; 9043 int queue_depth = QL4_DEF_QDEPTH; 9044 9045 cls_sess = starget_to_session(sdev->sdev_target); 9046 sess = cls_sess->dd_data; 9047 ddb = sess->dd_data; 9048 9049 sdev->hostdata = ddb; 9050 9051 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU) 9052 queue_depth = ql4xmaxqdepth; 9053 9054 scsi_change_queue_depth(sdev, queue_depth); 9055 return 0; 9056 } 9057 9058 /** 9059 * qla4xxx_del_from_active_array - returns an active srb 9060 * @ha: Pointer to host adapter structure. 9061 * @index: index into the active_array 9062 * 9063 * This routine removes and returns the srb at the specified index 9064 **/ 9065 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha, 9066 uint32_t index) 9067 { 9068 struct srb *srb = NULL; 9069 struct scsi_cmnd *cmd = NULL; 9070 9071 cmd = scsi_host_find_tag(ha->host, index); 9072 if (!cmd) 9073 return srb; 9074 9075 srb = (struct srb *)CMD_SP(cmd); 9076 if (!srb) 9077 return srb; 9078 9079 /* update counters */ 9080 if (srb->flags & SRB_DMA_VALID) { 9081 ha->iocb_cnt -= srb->iocb_cnt; 9082 if (srb->cmd) 9083 srb->cmd->host_scribble = 9084 (unsigned char *)(unsigned long) MAX_SRBS; 9085 } 9086 return srb; 9087 } 9088 9089 /** 9090 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware 9091 * @ha: Pointer to host adapter structure. 9092 * @cmd: Scsi Command to wait on. 9093 * 9094 * This routine waits for the command to be returned by the Firmware 9095 * for some max time. 
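 *
 * Rough timing, taken from the loop below: CMD_SP(cmd) is polled about once
 * every 2 seconds (msleep(2000)) for up to EH_WAIT_CMD_TOV iterations, so
 * the worst-case wait is roughly 2 * EH_WAIT_CMD_TOV seconds.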
9096 **/ 9097 static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha, 9098 struct scsi_cmnd *cmd) 9099 { 9100 int done = 0; 9101 struct srb *rp; 9102 uint32_t max_wait_time = EH_WAIT_CMD_TOV; 9103 int ret = SUCCESS; 9104 9105 /* Don't wait on the command if a PCI error is being handled 9106 * by the PCI AER driver 9107 */ 9108 if (unlikely(pci_channel_offline(ha->pdev)) || 9109 (test_bit(AF_EEH_BUSY, &ha->flags))) { 9110 ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n", 9111 ha->host_no, __func__); 9112 return ret; 9113 } 9114 9115 do { 9116 /* Check to see if it has been returned to the OS */ 9117 rp = (struct srb *) CMD_SP(cmd); 9118 if (rp == NULL) { 9119 done++; 9120 break; 9121 } 9122 9123 msleep(2000); 9124 } while (max_wait_time--); 9125 9126 return done; 9127 } 9128 9129 /** 9130 * qla4xxx_wait_for_hba_online - waits for HBA to come online 9131 * @ha: Pointer to host adapter structure 9132 **/ 9133 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha) 9134 { 9135 unsigned long wait_online; 9136 9137 wait_online = jiffies + (HBA_ONLINE_TOV * HZ); 9138 while (time_before(jiffies, wait_online)) { 9139 9140 if (adapter_up(ha)) 9141 return QLA_SUCCESS; 9142 9143 msleep(2000); 9144 } 9145 9146 return QLA_ERROR; 9147 } 9148 9149 /** 9150 * qla4xxx_eh_wait_for_commands - wait for active cmds to finish. 9151 * @ha: pointer to HBA 9152 * @stgt: pointer to the SCSI target 9153 * @sdev: pointer to the SCSI device, or NULL to wait on the whole target 9154 * 9155 * This function waits for all outstanding commands to the target (or to the 9156 * device, if @sdev is set) to complete. It returns 0 if all pending commands are returned and 1 otherwise. 9157 **/ 9158 static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha, 9159 struct scsi_target *stgt, 9160 struct scsi_device *sdev) 9161 { 9162 int cnt; 9163 int status = 0; 9164 struct scsi_cmnd *cmd; 9165 9166 /* 9167 * Wait for all commands to the designated target or device 9168 * in the active array 9169 */ 9170 for (cnt = 0; cnt < ha->host->can_queue; cnt++) { 9171 cmd = scsi_host_find_tag(ha->host, cnt); 9172 if (cmd && stgt == scsi_target(cmd->device) && 9173 (!sdev || sdev == cmd->device)) { 9174 if (!qla4xxx_eh_wait_on_command(ha, cmd)) { 9175 status++; 9176 break; 9177 } 9178 } 9179 } 9180 return status; 9181 } 9182 9183 /** 9184 * qla4xxx_eh_abort - callback for abort task. 9185 * @cmd: Pointer to Linux's SCSI command structure 9186 * 9187 * This routine is called by the Linux OS to abort the specified 9188 * command.
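 *
 * Returns SUCCESS if the command was successfully aborted or had already
 * completed, FAILED otherwise.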
9189 **/ 9190 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd) 9191 { 9192 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 9193 unsigned int id = cmd->device->id; 9194 uint64_t lun = cmd->device->lun; 9195 unsigned long flags; 9196 struct srb *srb = NULL; 9197 int ret = SUCCESS; 9198 int wait = 0; 9199 9200 ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n", 9201 ha->host_no, id, lun, cmd, cmd->cmnd[0]); 9202 9203 spin_lock_irqsave(&ha->hardware_lock, flags); 9204 srb = (struct srb *) CMD_SP(cmd); 9205 if (!srb) { 9206 spin_unlock_irqrestore(&ha->hardware_lock, flags); 9207 ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Specified command has already completed.\n", 9208 ha->host_no, id, lun); 9209 return SUCCESS; 9210 } 9211 kref_get(&srb->srb_ref); 9212 spin_unlock_irqrestore(&ha->hardware_lock, flags); 9213 9214 if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) { 9215 DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx failed.\n", 9216 ha->host_no, id, lun)); 9217 ret = FAILED; 9218 } else { 9219 DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx success.\n", 9220 ha->host_no, id, lun)); 9221 wait = 1; 9222 } 9223 9224 kref_put(&srb->srb_ref, qla4xxx_srb_compl); 9225 9226 /* Wait for command to complete */ 9227 if (wait) { 9228 if (!qla4xxx_eh_wait_on_command(ha, cmd)) { 9229 DEBUG2(printk("scsi%ld:%d:%llu: Abort handler timed out\n", 9230 ha->host_no, id, lun)); 9231 ret = FAILED; 9232 } 9233 } 9234 9235 ql4_printk(KERN_INFO, ha, 9236 "scsi%ld:%d:%llu: Abort command - %s\n", 9237 ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed"); 9238 9239 return ret; 9240 } 9241 9242 /** 9243 * qla4xxx_eh_device_reset - callback for device (LUN) reset. 9244 * @cmd: Pointer to Linux's SCSI command structure 9245 * 9246 * This routine is called by the Linux OS to reset the LUN addressed by 9247 * the specified command. 9248 **/ 9249 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd) 9250 { 9251 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 9252 struct ddb_entry *ddb_entry = cmd->device->hostdata; 9253 int ret = FAILED, stat; 9254 9255 if (!ddb_entry) 9256 return ret; 9257 9258 ret = iscsi_block_scsi_eh(cmd); 9259 if (ret) 9260 return ret; 9261 ret = FAILED; 9262 9263 ql4_printk(KERN_INFO, ha, 9264 "scsi%ld:%d:%d:%llu: DEVICE RESET ISSUED.\n", ha->host_no, 9265 cmd->device->channel, cmd->device->id, cmd->device->lun); 9266 9267 DEBUG2(printk(KERN_INFO 9268 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x," 9269 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no, 9270 cmd, jiffies, cmd->request->timeout / HZ, 9271 ha->dpc_flags, cmd->result, cmd->allowed)); 9272 9273 /* FIXME: wait for hba to go online */ 9274 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun); 9275 if (stat != QLA_SUCCESS) { 9276 ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat); 9277 goto eh_dev_reset_done; 9278 } 9279 9280 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device), 9281 cmd->device)) { 9282 ql4_printk(KERN_INFO, ha, 9283 "DEVICE RESET FAILED - waiting for " 9284 "commands.\n"); 9285 goto eh_dev_reset_done; 9286 } 9287 9288 /* Send marker.
*/ 9289 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun, 9290 MM_LUN_RESET) != QLA_SUCCESS) 9291 goto eh_dev_reset_done; 9292 9293 ql4_printk(KERN_INFO, ha, 9294 "scsi(%ld:%d:%d:%llu): DEVICE RESET SUCCEEDED.\n", 9295 ha->host_no, cmd->device->channel, cmd->device->id, 9296 cmd->device->lun); 9297 9298 ret = SUCCESS; 9299 9300 eh_dev_reset_done: 9301 9302 return ret; 9303 } 9304 9305 /** 9306 * qla4xxx_eh_target_reset - callback for target reset. 9307 * @cmd: Pointer to Linux's SCSI command structure 9308 * 9309 * This routine is called by the Linux OS to reset the target. 9310 **/ 9311 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd) 9312 { 9313 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 9314 struct ddb_entry *ddb_entry = cmd->device->hostdata; 9315 int stat, ret; 9316 9317 if (!ddb_entry) 9318 return FAILED; 9319 9320 ret = iscsi_block_scsi_eh(cmd); 9321 if (ret) 9322 return ret; 9323 9324 starget_printk(KERN_INFO, scsi_target(cmd->device), 9325 "WARM TARGET RESET ISSUED.\n"); 9326 9327 DEBUG2(printk(KERN_INFO 9328 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, " 9329 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n", 9330 ha->host_no, cmd, jiffies, cmd->request->timeout / HZ, 9331 ha->dpc_flags, cmd->result, cmd->allowed)); 9332 9333 stat = qla4xxx_reset_target(ha, ddb_entry); 9334 if (stat != QLA_SUCCESS) { 9335 starget_printk(KERN_INFO, scsi_target(cmd->device), 9336 "WARM TARGET RESET FAILED.\n"); 9337 return FAILED; 9338 } 9339 9340 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device), 9341 NULL)) { 9342 starget_printk(KERN_INFO, scsi_target(cmd->device), 9343 "WARM TARGET DEVICE RESET FAILED - " 9344 "waiting for commands.\n"); 9345 return FAILED; 9346 } 9347 9348 /* Send marker. */ 9349 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun, 9350 MM_TGT_WARM_RESET) != QLA_SUCCESS) { 9351 starget_printk(KERN_INFO, scsi_target(cmd->device), 9352 "WARM TARGET DEVICE RESET FAILED - " 9353 "marker iocb failed.\n"); 9354 return FAILED; 9355 } 9356 9357 starget_printk(KERN_INFO, scsi_target(cmd->device), 9358 "WARM TARGET RESET SUCCEEDED.\n"); 9359 return SUCCESS; 9360 } 9361 9362 /** 9363 * qla4xxx_is_eh_active - check if error handler is running 9364 * @shost: Pointer to SCSI Host struct 9365 * 9366 * This routine determines whether the host reset was invoked from the 9367 * EH thread or from an application such as sg_reset 9368 **/ 9369 static int qla4xxx_is_eh_active(struct Scsi_Host *shost) 9370 { 9371 if (shost->shost_state == SHOST_RECOVERY) 9372 return 1; 9373 return 0; 9374 } 9375 9376 /** 9377 * qla4xxx_eh_host_reset - callback for host reset 9378 * @cmd: Pointer to Linux's SCSI command structure 9379 * 9380 * This routine is invoked by the Linux kernel to perform fatal error 9381 * recovery on the specified adapter.
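 *
 * Returns SUCCESS if the adapter was recovered, FAILED otherwise.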
9382 **/ 9383 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd) 9384 { 9385 int return_status = FAILED; 9386 struct scsi_qla_host *ha; 9387 9388 ha = to_qla_host(cmd->device->host); 9389 9390 if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba) 9391 qla4_83xx_set_idc_dontreset(ha); 9392 9393 /* 9394 * For ISP8324 and ISP8042, if IDC_CTRL DONTRESET_BIT0 is set by other 9395 * protocol drivers, we should not set device_state to NEED_RESET 9396 */ 9397 if (ql4xdontresethba || 9398 ((is_qla8032(ha) || is_qla8042(ha)) && 9399 qla4_83xx_idc_dontreset(ha))) { 9400 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", 9401 ha->host_no, __func__)); 9402 9403 /* Clear outstanding srb in queues */ 9404 if (qla4xxx_is_eh_active(cmd->device->host)) 9405 qla4xxx_abort_active_cmds(ha, DID_ABORT << 16); 9406 9407 return FAILED; 9408 } 9409 9410 ql4_printk(KERN_INFO, ha, 9411 "scsi(%ld:%d:%d:%llu): HOST RESET ISSUED.\n", ha->host_no, 9412 cmd->device->channel, cmd->device->id, cmd->device->lun); 9413 9414 if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) { 9415 DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter " 9416 "DEAD.\n", ha->host_no, cmd->device->channel, 9417 __func__)); 9418 9419 return FAILED; 9420 } 9421 9422 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 9423 if (is_qla80XX(ha)) 9424 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 9425 else 9426 set_bit(DPC_RESET_HA, &ha->dpc_flags); 9427 } 9428 9429 if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS) 9430 return_status = SUCCESS; 9431 9432 ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n", 9433 return_status == FAILED ? "FAILED" : "SUCCEEDED"); 9434 9435 return return_status; 9436 } 9437 9438 static int qla4xxx_context_reset(struct scsi_qla_host *ha) 9439 { 9440 uint32_t mbox_cmd[MBOX_REG_COUNT]; 9441 uint32_t mbox_sts[MBOX_REG_COUNT]; 9442 struct addr_ctrl_blk_def *acb = NULL; 9443 uint32_t acb_len = sizeof(struct addr_ctrl_blk_def); 9444 int rval = QLA_SUCCESS; 9445 dma_addr_t acb_dma; 9446 9447 acb = dma_alloc_coherent(&ha->pdev->dev, 9448 sizeof(struct addr_ctrl_blk_def), 9449 &acb_dma, GFP_KERNEL); 9450 if (!acb) { 9451 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", 9452 __func__); 9453 rval = -ENOMEM; 9454 goto exit_port_reset; 9455 } 9456 9457 memset(acb, 0, acb_len); 9458 9459 rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len); 9460 if (rval != QLA_SUCCESS) { 9461 rval = -EIO; 9462 goto exit_free_acb; 9463 } 9464 9465 rval = qla4xxx_disable_acb(ha); 9466 if (rval != QLA_SUCCESS) { 9467 rval = -EIO; 9468 goto exit_free_acb; 9469 } 9470 9471 wait_for_completion_timeout(&ha->disable_acb_comp, 9472 DISABLE_ACB_TOV * HZ); 9473 9474 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma); 9475 if (rval != QLA_SUCCESS) { 9476 rval = -EIO; 9477 goto exit_free_acb; 9478 } 9479 9480 exit_free_acb: 9481 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def), 9482 acb, acb_dma); 9483 exit_port_reset: 9484 DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__, 9485 rval == QLA_SUCCESS ? 
"SUCCEEDED" : "FAILED")); 9486 return rval; 9487 } 9488 9489 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type) 9490 { 9491 struct scsi_qla_host *ha = to_qla_host(shost); 9492 int rval = QLA_SUCCESS; 9493 uint32_t idc_ctrl; 9494 9495 if (ql4xdontresethba) { 9496 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n", 9497 __func__)); 9498 rval = -EPERM; 9499 goto exit_host_reset; 9500 } 9501 9502 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) 9503 goto recover_adapter; 9504 9505 switch (reset_type) { 9506 case SCSI_ADAPTER_RESET: 9507 set_bit(DPC_RESET_HA, &ha->dpc_flags); 9508 break; 9509 case SCSI_FIRMWARE_RESET: 9510 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 9511 if (is_qla80XX(ha)) 9512 /* set firmware context reset */ 9513 set_bit(DPC_RESET_HA_FW_CONTEXT, 9514 &ha->dpc_flags); 9515 else { 9516 rval = qla4xxx_context_reset(ha); 9517 goto exit_host_reset; 9518 } 9519 } 9520 break; 9521 } 9522 9523 recover_adapter: 9524 /* For ISP8324 and ISP8042 set graceful reset bit in IDC_DRV_CTRL if 9525 * reset is issued by application */ 9526 if ((is_qla8032(ha) || is_qla8042(ha)) && 9527 test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 9528 idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); 9529 qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, 9530 (idc_ctrl | GRACEFUL_RESET_BIT1)); 9531 } 9532 9533 rval = qla4xxx_recover_adapter(ha); 9534 if (rval != QLA_SUCCESS) { 9535 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n", 9536 __func__)); 9537 rval = -EIO; 9538 } 9539 9540 exit_host_reset: 9541 return rval; 9542 } 9543 9544 /* PCI AER driver recovers from all correctable errors w/o 9545 * driver intervention. For uncorrectable errors PCI AER 9546 * driver calls the following device driver's callbacks 9547 * 9548 * - Fatal Errors - link_reset 9549 * - Non-Fatal Errors - driver's pci_error_detected() which 9550 * returns CAN_RECOVER, NEED_RESET or DISCONNECT. 9551 * 9552 * PCI AER driver calls 9553 * CAN_RECOVER - driver's pci_mmio_enabled(), mmio_enabled 9554 * returns RECOVERED or NEED_RESET if fw_hung 9555 * NEED_RESET - driver's slot_reset() 9556 * DISCONNECT - device is dead & cannot recover 9557 * RECOVERED - driver's pci_resume() 9558 */ 9559 static pci_ers_result_t 9560 qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 9561 { 9562 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9563 9564 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n", 9565 ha->host_no, __func__, state); 9566 9567 if (!is_aer_supported(ha)) 9568 return PCI_ERS_RESULT_NONE; 9569 9570 switch (state) { 9571 case pci_channel_io_normal: 9572 clear_bit(AF_EEH_BUSY, &ha->flags); 9573 return PCI_ERS_RESULT_CAN_RECOVER; 9574 case pci_channel_io_frozen: 9575 set_bit(AF_EEH_BUSY, &ha->flags); 9576 qla4xxx_mailbox_premature_completion(ha); 9577 qla4xxx_free_irqs(ha); 9578 pci_disable_device(pdev); 9579 /* Return back all IOs */ 9580 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 9581 return PCI_ERS_RESULT_NEED_RESET; 9582 case pci_channel_io_perm_failure: 9583 set_bit(AF_EEH_BUSY, &ha->flags); 9584 set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags); 9585 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); 9586 return PCI_ERS_RESULT_DISCONNECT; 9587 } 9588 return PCI_ERS_RESULT_NEED_RESET; 9589 } 9590 9591 /** 9592 * qla4xxx_pci_mmio_enabled() gets called if 9593 * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER 9594 * and read/write to the device still works. 
9595 **/ 9596 static pci_ers_result_t 9597 qla4xxx_pci_mmio_enabled(struct pci_dev *pdev) 9598 { 9599 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9600 9601 if (!is_aer_supported(ha)) 9602 return PCI_ERS_RESULT_NONE; 9603 9604 return PCI_ERS_RESULT_RECOVERED; 9605 } 9606 9607 static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha) 9608 { 9609 uint32_t rval = QLA_ERROR; 9610 int fn; 9611 struct pci_dev *other_pdev = NULL; 9612 9613 ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__); 9614 9615 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 9616 9617 if (test_bit(AF_ONLINE, &ha->flags)) { 9618 clear_bit(AF_ONLINE, &ha->flags); 9619 clear_bit(AF_LINK_UP, &ha->flags); 9620 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session); 9621 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 9622 } 9623 9624 fn = PCI_FUNC(ha->pdev->devfn); 9625 if (is_qla8022(ha)) { 9626 while (fn > 0) { 9627 fn--; 9628 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at func %x\n", 9629 ha->host_no, __func__, fn); 9630 /* Get the pci device given the domain, bus, 9631 * slot/function number */ 9632 other_pdev = pci_get_domain_bus_and_slot( 9633 pci_domain_nr(ha->pdev->bus), 9634 ha->pdev->bus->number, 9635 PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), 9636 fn)); 9637 9638 if (!other_pdev) 9639 continue; 9640 9641 if (atomic_read(&other_pdev->enable_cnt)) { 9642 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI func in enabled state%x\n", 9643 ha->host_no, __func__, fn); 9644 pci_dev_put(other_pdev); 9645 break; 9646 } 9647 pci_dev_put(other_pdev); 9648 } 9649 } else { 9650 /* this case is meant for ISP83xx/ISP84xx only */ 9651 if (qla4_83xx_can_perform_reset(ha)) { 9652 /* reset fn as iSCSI is going to perform the reset */ 9653 fn = 0; 9654 } 9655 } 9656 9657 /* The first function on the card, the reset owner will 9658 * start & initialize the firmware. 
The other functions 9659 * on the card will reset the firmware context 9660 */ 9661 if (!fn) { 9662 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset " 9663 "0x%x is the owner\n", ha->host_no, __func__, 9664 ha->pdev->devfn); 9665 9666 ha->isp_ops->idc_lock(ha); 9667 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9668 QLA8XXX_DEV_COLD); 9669 ha->isp_ops->idc_unlock(ha); 9670 9671 rval = qla4_8xxx_update_idc_reg(ha); 9672 if (rval == QLA_ERROR) { 9673 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n", 9674 ha->host_no, __func__); 9675 ha->isp_ops->idc_lock(ha); 9676 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9677 QLA8XXX_DEV_FAILED); 9678 ha->isp_ops->idc_unlock(ha); 9679 goto exit_error_recovery; 9680 } 9681 9682 clear_bit(AF_FW_RECOVERY, &ha->flags); 9683 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); 9684 9685 if (rval != QLA_SUCCESS) { 9686 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: " 9687 "FAILED\n", ha->host_no, __func__); 9688 qla4xxx_free_irqs(ha); 9689 ha->isp_ops->idc_lock(ha); 9690 qla4_8xxx_clear_drv_active(ha); 9691 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9692 QLA8XXX_DEV_FAILED); 9693 ha->isp_ops->idc_unlock(ha); 9694 } else { 9695 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: " 9696 "READY\n", ha->host_no, __func__); 9697 ha->isp_ops->idc_lock(ha); 9698 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9699 QLA8XXX_DEV_READY); 9700 /* Clear driver state register */ 9701 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0); 9702 qla4_8xxx_set_drv_active(ha); 9703 ha->isp_ops->idc_unlock(ha); 9704 ha->isp_ops->enable_intrs(ha); 9705 } 9706 } else { 9707 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not " 9708 "the reset owner\n", ha->host_no, __func__, 9709 ha->pdev->devfn); 9710 if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) == 9711 QLA8XXX_DEV_READY)) { 9712 clear_bit(AF_FW_RECOVERY, &ha->flags); 9713 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); 9714 if (rval == QLA_SUCCESS) 9715 ha->isp_ops->enable_intrs(ha); 9716 else 9717 qla4xxx_free_irqs(ha); 9718 9719 ha->isp_ops->idc_lock(ha); 9720 qla4_8xxx_set_drv_active(ha); 9721 ha->isp_ops->idc_unlock(ha); 9722 } 9723 } 9724 exit_error_recovery: 9725 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 9726 return rval; 9727 } 9728 9729 static pci_ers_result_t 9730 qla4xxx_pci_slot_reset(struct pci_dev *pdev) 9731 { 9732 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; 9733 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9734 int rc; 9735 9736 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n", 9737 ha->host_no, __func__); 9738 9739 if (!is_aer_supported(ha)) 9740 return PCI_ERS_RESULT_NONE; 9741 9742 /* Restore the saved state of PCIe device - 9743 * BAR registers, PCI Config space, PCIX, MSI, 9744 * IOV states 9745 */ 9746 pci_restore_state(pdev); 9747 9748 /* pci_restore_state() clears the saved_state flag of the device 9749 * save restored state which resets saved_state flag 9750 */ 9751 pci_save_state(pdev); 9752 9753 /* Initialize device or resume if in suspended state */ 9754 rc = pci_enable_device(pdev); 9755 if (rc) { 9756 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable " 9757 "device after reset\n", ha->host_no, __func__); 9758 goto exit_slot_reset; 9759 } 9760 9761 ha->isp_ops->disable_intrs(ha); 9762 9763 if (is_qla80XX(ha)) { 9764 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) { 9765 ret = PCI_ERS_RESULT_RECOVERED; 9766 goto exit_slot_reset; 9767 } else 9768 goto exit_slot_reset; 9769 } 9770 9771 exit_slot_reset: 9772 ql4_printk(KERN_WARNING, 
ha, "scsi%ld: %s: Return=%x\n" 9773 "device after reset\n", ha->host_no, __func__, ret); 9774 return ret; 9775 } 9776 9777 static void 9778 qla4xxx_pci_resume(struct pci_dev *pdev) 9779 { 9780 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9781 int ret; 9782 9783 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n", 9784 ha->host_no, __func__); 9785 9786 ret = qla4xxx_wait_for_hba_online(ha); 9787 if (ret != QLA_SUCCESS) { 9788 ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to " 9789 "resume I/O from slot/link_reset\n", ha->host_no, 9790 __func__); 9791 } 9792 9793 pci_cleanup_aer_uncorrect_error_status(pdev); 9794 clear_bit(AF_EEH_BUSY, &ha->flags); 9795 } 9796 9797 static const struct pci_error_handlers qla4xxx_err_handler = { 9798 .error_detected = qla4xxx_pci_error_detected, 9799 .mmio_enabled = qla4xxx_pci_mmio_enabled, 9800 .slot_reset = qla4xxx_pci_slot_reset, 9801 .resume = qla4xxx_pci_resume, 9802 }; 9803 9804 static struct pci_device_id qla4xxx_pci_tbl[] = { 9805 { 9806 .vendor = PCI_VENDOR_ID_QLOGIC, 9807 .device = PCI_DEVICE_ID_QLOGIC_ISP4010, 9808 .subvendor = PCI_ANY_ID, 9809 .subdevice = PCI_ANY_ID, 9810 }, 9811 { 9812 .vendor = PCI_VENDOR_ID_QLOGIC, 9813 .device = PCI_DEVICE_ID_QLOGIC_ISP4022, 9814 .subvendor = PCI_ANY_ID, 9815 .subdevice = PCI_ANY_ID, 9816 }, 9817 { 9818 .vendor = PCI_VENDOR_ID_QLOGIC, 9819 .device = PCI_DEVICE_ID_QLOGIC_ISP4032, 9820 .subvendor = PCI_ANY_ID, 9821 .subdevice = PCI_ANY_ID, 9822 }, 9823 { 9824 .vendor = PCI_VENDOR_ID_QLOGIC, 9825 .device = PCI_DEVICE_ID_QLOGIC_ISP8022, 9826 .subvendor = PCI_ANY_ID, 9827 .subdevice = PCI_ANY_ID, 9828 }, 9829 { 9830 .vendor = PCI_VENDOR_ID_QLOGIC, 9831 .device = PCI_DEVICE_ID_QLOGIC_ISP8324, 9832 .subvendor = PCI_ANY_ID, 9833 .subdevice = PCI_ANY_ID, 9834 }, 9835 { 9836 .vendor = PCI_VENDOR_ID_QLOGIC, 9837 .device = PCI_DEVICE_ID_QLOGIC_ISP8042, 9838 .subvendor = PCI_ANY_ID, 9839 .subdevice = PCI_ANY_ID, 9840 }, 9841 {0, 0}, 9842 }; 9843 MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl); 9844 9845 static struct pci_driver qla4xxx_pci_driver = { 9846 .name = DRIVER_NAME, 9847 .id_table = qla4xxx_pci_tbl, 9848 .probe = qla4xxx_probe_adapter, 9849 .remove = qla4xxx_remove_adapter, 9850 .err_handler = &qla4xxx_err_handler, 9851 }; 9852 9853 static int __init qla4xxx_module_init(void) 9854 { 9855 int ret; 9856 9857 if (ql4xqfulltracking) 9858 qla4xxx_driver_template.track_queue_depth = 1; 9859 9860 /* Allocate cache for SRBs. */ 9861 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0, 9862 SLAB_HWCACHE_ALIGN, NULL); 9863 if (srb_cachep == NULL) { 9864 printk(KERN_ERR 9865 "%s: Unable to allocate SRB cache..." 9866 "Failing load!\n", DRIVER_NAME); 9867 ret = -ENOMEM; 9868 goto no_srp_cache; 9869 } 9870 9871 /* Derive version string. 
*/ 9872 strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION); 9873 if (ql4xextended_error_logging) 9874 strcat(qla4xxx_version_str, "-debug"); 9875 9876 qla4xxx_scsi_transport = 9877 iscsi_register_transport(&qla4xxx_iscsi_transport); 9878 if (!qla4xxx_scsi_transport){ 9879 ret = -ENODEV; 9880 goto release_srb_cache; 9881 } 9882 9883 ret = pci_register_driver(&qla4xxx_pci_driver); 9884 if (ret) 9885 goto unregister_transport; 9886 9887 printk(KERN_INFO "QLogic iSCSI HBA Driver\n"); 9888 return 0; 9889 9890 unregister_transport: 9891 iscsi_unregister_transport(&qla4xxx_iscsi_transport); 9892 release_srb_cache: 9893 kmem_cache_destroy(srb_cachep); 9894 no_srp_cache: 9895 return ret; 9896 } 9897 9898 static void __exit qla4xxx_module_exit(void) 9899 { 9900 pci_unregister_driver(&qla4xxx_pci_driver); 9901 iscsi_unregister_transport(&qla4xxx_iscsi_transport); 9902 kmem_cache_destroy(srb_cachep); 9903 } 9904 9905 module_init(qla4xxx_module_init); 9906 module_exit(qla4xxx_module_exit); 9907 9908 MODULE_AUTHOR("QLogic Corporation"); 9909 MODULE_DESCRIPTION("QLogic iSCSI HBA Driver"); 9910 MODULE_LICENSE("GPL"); 9911 MODULE_VERSION(QLA4XXX_DRIVER_VERSION); 9912
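
/*
 * Illustrative sketch only (not built): roughly the same
 * 64-bit-with-32-bit-fallback DMA configuration that
 * qla4xxx_config_dma_addressing() above performs, expressed with the
 * generic DMA API from <linux/dma-mapping.h>.  The helper name below is
 * hypothetical and is not part of the driver.
 */
#if 0
static void example_config_dma_addressing(struct scsi_qla_host *ha)
{
	/* Try full 64-bit streaming and coherent DMA masks first ... */
	if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64)))
		/* ... and fall back to 32-bit addressing if that fails. */
		dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(32));
}
#endif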