/* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver.
 * This file contains the low level functions that interact with the
 * 57712 FCoE firmware.
 *
 * Copyright (c) 2008 - 2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);

static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *new_cqe_kcqe);
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
						struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *conn_destroy);
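
/**
 * bnx2fc_send_stat_req - send a statistics request KWQE to the firmware
 *
 * @hba: adapter structure pointer
 *
 * Builds a FCOE_KWQE_OPCODE_STAT work queue entry carrying the DMA address
 * of the adapter statistics buffer and submits it through the cnic
 * submit_kwqes() interface.
 */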
int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_stat stat_req;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = 0;

	memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
	stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
	stat_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
	stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);

	kwqe_arr[0] = (struct kwqe *) &stat_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
 *
 * @hba: adapter structure pointer
 *
 * Send down FCoE firmware init KWQEs which initiate the initial handshake
 * with the f/w.
 *
 */
int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_init1 fcoe_init1;
	struct fcoe_kwqe_init2 fcoe_init2;
	struct fcoe_kwqe_init3 fcoe_init3;
	struct kwqe *kwqe_arr[3];
	int num_kwqes = 3;
	int rc = 0;

	if (!hba->cnic) {
		printk(KERN_ALERT PFX "hba->cnic NULL during fcoe fw init\n");
		return -ENODEV;
	}

	/* fill init1 KWQE */
	memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
	fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
	fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	fcoe_init1.num_tasks = BNX2FC_MAX_TASKS;
	fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
	fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
	fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
	fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
	fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
	fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
	fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
	fcoe_init1.task_list_pbl_addr_hi =
				(u32) ((u64) hba->task_ctx_bd_dma >> 32);
	fcoe_init1.mtu = hba->netdev->mtu;

	fcoe_init1.flags = (PAGE_SHIFT <<
				FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);

	fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;

	/* fill init2 KWQE */
	memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
	fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
	fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
	fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
					((u64) hba->hash_tbl_pbl_dma >> 32);

	fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
	fcoe_init2.t2_hash_tbl_addr_hi = (u32)
					((u64) hba->t2_hash_tbl_dma >> 32);

	fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
	fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
					((u64) hba->t2_hash_tbl_ptr_dma >> 32);

	fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;

	/* fill init3 KWQE */
	memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
	fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
	fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
	fcoe_init3.error_bit_map_lo = 0xffffffff;
	fcoe_init3.error_bit_map_hi = 0xffffffff;


	kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
	kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
	kwqe_arr[2] = (struct kwqe *) &fcoe_init3;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}
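
/**
 * bnx2fc_send_fw_fcoe_destroy_msg - tear down the FCoE function in firmware
 *
 * @hba: adapter structure pointer
 *
 * Sends a single FCOE_KWQE_OPCODE_DESTROY KWQE, undoing the init handshake.
 */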
int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_destroy fcoe_destroy;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = -1;

	/* fill destroy KWQE */
	memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
	fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
	fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
	kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
	return rc;
}

/**
 * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
 *
 * @port: port structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
					struct bnx2fc_rport *tgt)
{
	struct fc_lport *lport = port->lport;
	struct bnx2fc_hba *hba = port->priv;
	struct kwqe *kwqe_arr[4];
	struct fcoe_kwqe_conn_offload1 ofld_req1;
	struct fcoe_kwqe_conn_offload2 ofld_req2;
	struct fcoe_kwqe_conn_offload3 ofld_req3;
	struct fcoe_kwqe_conn_offload4 ofld_req4;
	struct fc_rport_priv *rdata = tgt->rdata;
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 4;
	u32 port_id;
	int rc = 0;
	u16 conn_id;
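
	/*
	 * Session offload is a 4-KWQE sequence: ofld_req1 carries the SQ/RQ
	 * DMA addresses, ofld_req2 the CQ/XFERQ/connection doorbell
	 * addresses, ofld_req3 the FC addresses, VLAN and service
	 * parameters, and ofld_req4 the MAC addresses and LCQ/CONFQ PBL
	 * addresses.
	 */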
	/* Initialize offload request 1 structure */
	memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));

	ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
	ofld_req1.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);


	conn_id = (u16)tgt->fcoe_conn_id;
	ofld_req1.fcoe_conn_id = conn_id;


	ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
	ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);

	ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
	ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);

	ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
	ofld_req1.rq_first_pbe_addr_hi =
					(u32)((u64) tgt->rq_dma >> 32);

	ofld_req1.rq_prod = 0x8000;

	/* Initialize offload request 2 structure */
	memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));

	ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
	ofld_req2.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;

	ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
	ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);

	ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
	ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);

	ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
	ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);

	/* Initialize offload request 3 structure */
	memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));

	ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
	ofld_req3.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req3.vlan_tag = hba->vlan_id <<
				FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
	ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;

	port_id = fc_host_port_id(lport->host);
	if (port_id == 0) {
		BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
		return -EINVAL;
	}

	/*
	 * Store the s_id of the initiator for later reference. It is used
	 * during disable/destroy in linkdown processing, because the
	 * port_id is also reset to 0 when the lport is reset.
	 */
	tgt->sid = port_id;
	ofld_req3.s_id[0] = (port_id & 0x000000FF);
	ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
	ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	ofld_req3.d_id[0] = (port_id & 0x000000FF);
	ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
	ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;

	ofld_req3.tx_total_conc_seqs = rdata->max_seq;

	ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
	ofld_req3.rx_max_fc_pay_len = lport->mfs;

	ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
	ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
	ofld_req3.rx_open_seqs_exch_c3 = 1;

	ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
	ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);

	/* set mul_n_port_ids supported flag to 0, until it is supported */
	ofld_req3.flags = 0;
	/*
	ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
	*/
	/* Info from PLOGI response */
	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);

	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);

	/* vlan flag */
	ofld_req3.flags |= (hba->vlan_enabled <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);

	/* C2_VALID and ACK flags are not set as they are not supported */


	/* Initialize offload request 4 structure */
	memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
	ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
	ofld_req4.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;


	ofld_req4.src_mac_addr_lo32[0] = port->data_src_addr[5];
							/* local mac */
	ofld_req4.src_mac_addr_lo32[1] = port->data_src_addr[4];
	ofld_req4.src_mac_addr_lo32[2] = port->data_src_addr[3];
	ofld_req4.src_mac_addr_lo32[3] = port->data_src_addr[2];
	ofld_req4.src_mac_addr_hi16[0] = port->data_src_addr[1];
	ofld_req4.src_mac_addr_hi16[1] = port->data_src_addr[0];
	ofld_req4.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
	ofld_req4.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
	ofld_req4.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
	ofld_req4.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
	ofld_req4.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
	ofld_req4.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];

	ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
	ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);

	ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
	ofld_req4.confq_pbl_base_addr_hi =
					(u32)((u64) tgt->confq_pbl_dma >> 32);

	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
	kwqe_arr[2] = (struct kwqe *) &ofld_req3;
	kwqe_arr[3] = (struct kwqe *) &ofld_req4;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
 *
 * @port: port structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
					struct bnx2fc_rport *tgt)
{
	struct kwqe *kwqe_arr[2];
	struct bnx2fc_hba *hba = port->priv;
	struct fcoe_kwqe_conn_enable_disable enbl_req;
	struct fc_lport *lport = port->lport;
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 1;
	int rc = 0;
	u32 port_id;

	memset(&enbl_req, 0x00,
	       sizeof(struct fcoe_kwqe_conn_enable_disable));
	enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
	enbl_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	enbl_req.src_mac_addr_lo32[0] = port->data_src_addr[5];
							/* local mac */
	enbl_req.src_mac_addr_lo32[1] = port->data_src_addr[4];
	enbl_req.src_mac_addr_lo32[2] = port->data_src_addr[3];
	enbl_req.src_mac_addr_lo32[3] = port->data_src_addr[2];
	enbl_req.src_mac_addr_hi16[0] = port->data_src_addr[1];
	enbl_req.src_mac_addr_hi16[1] = port->data_src_addr[0];

	enbl_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
	enbl_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
	enbl_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
	enbl_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
	enbl_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
	enbl_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];

	port_id = fc_host_port_id(lport->host);
	if (port_id != tgt->sid) {
		printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x,"
				"sid = 0x%x\n", port_id, tgt->sid);
		port_id = tgt->sid;
	}
	enbl_req.s_id[0] = (port_id & 0x000000FF);
	enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
	enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	enbl_req.d_id[0] = (port_id & 0x000000FF);
	enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
	enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
	enbl_req.vlan_tag = hba->vlan_id <<
				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
	enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
	enbl_req.vlan_flag = hba->vlan_enabled;
	enbl_req.context_id = tgt->context_id;
	enbl_req.conn_id = tgt->fcoe_conn_id;

	kwqe_arr[0] = (struct kwqe *) &enbl_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
	return rc;
}

/**
 * bnx2fc_send_session_disable_req - initiates FCoE Session disable
 *
 * @port: port structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_disable_req(struct fcoe_port *port,
				    struct bnx2fc_rport *tgt)
{
	struct bnx2fc_hba *hba = port->priv;
	struct fcoe_kwqe_conn_enable_disable disable_req;
	struct kwqe *kwqe_arr[2];
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 1;
	int rc = 0;
	u32 port_id;

	memset(&disable_req, 0x00,
	       sizeof(struct fcoe_kwqe_conn_enable_disable));
	disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
	disable_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	disable_req.src_mac_addr_lo32[0] = port->data_src_addr[5];
	disable_req.src_mac_addr_lo32[1] = port->data_src_addr[4];
	disable_req.src_mac_addr_lo32[2] = port->data_src_addr[3];
	disable_req.src_mac_addr_lo32[3] = port->data_src_addr[2];
	disable_req.src_mac_addr_hi16[0] = port->data_src_addr[1];
	disable_req.src_mac_addr_hi16[1] = port->data_src_addr[0];

	disable_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
	disable_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
	disable_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
	disable_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
	disable_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
	disable_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];

	port_id = tgt->sid;
	disable_req.s_id[0] = (port_id & 0x000000FF);
	disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
	disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;


	port_id = rport->port_id;
	disable_req.d_id[0] = (port_id & 0x000000FF);
	disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
	disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
	disable_req.context_id = tgt->context_id;
	disable_req.conn_id = tgt->fcoe_conn_id;
	disable_req.vlan_tag = hba->vlan_id <<
				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
	disable_req.vlan_tag |=
		3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
	disable_req.vlan_flag = hba->vlan_enabled;

	kwqe_arr[0] = (struct kwqe *) &disable_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
 *
 * @hba: adapter structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
					struct bnx2fc_rport *tgt)
{
	struct fcoe_kwqe_conn_destroy destroy_req;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = 0;

	memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
	destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
	destroy_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	destroy_req.context_id = tgt->context_id;
	destroy_req.conn_id = tgt->fcoe_conn_id;

	kwqe_arr[0] = (struct kwqe *) &destroy_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}
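
/**
 * bnx2fc_unsol_els_work - deferred delivery of an unsolicited L2 frame
 *
 * @work: work struct embedded in struct bnx2fc_unsol_els
 *
 * Runs from the bnx2fc_wq workqueue; hands the reconstructed frame to
 * libfc via fc_exch_recv() and frees the work item.
 */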
static void bnx2fc_unsol_els_work(struct work_struct *work)
{
	struct bnx2fc_unsol_els *unsol_els;
	struct fc_lport *lport;
	struct fc_frame *fp;

	unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
	lport = unsol_els->lport;
	fp = unsol_els->fp;
	fc_exch_recv(lport, fp);
	kfree(unsol_els);
}

void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
				   unsigned char *buf,
				   u32 frame_len, u16 l2_oxid)
{
	struct fcoe_port *port = tgt->port;
	struct fc_lport *lport = port->lport;
	struct bnx2fc_unsol_els *unsol_els;
	struct fc_frame_header *fh;
	struct fc_frame *fp;
	struct sk_buff *skb;
	u32 payload_len;
	u32 crc;
	u8 op;


	unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
	if (!unsol_els) {
		BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
		l2_oxid, frame_len);

	payload_len = frame_len - sizeof(struct fc_frame_header);

	fp = fc_frame_alloc(lport, payload_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		kfree(unsol_els);
		return;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, frame_len);

	if (l2_oxid != FC_XID_UNKNOWN)
		fh->fh_ox_id = htons(l2_oxid);

	skb = fp_skb(fp);

	if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
	    (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {

		if (fh->fh_type == FC_TYPE_ELS) {
			op = fc_frame_payload_op(fp);
			if ((op == ELS_TEST) || (op == ELS_ESTC) ||
			    (op == ELS_FAN) || (op == ELS_CSU)) {
				/*
				 * No need to reply for these
				 * ELS requests
				 */
				printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
				kfree_skb(skb);
				kfree(unsol_els);
				return;
			}
		}
		crc = fcoe_fc_crc(fp);
		fc_frame_init(fp);
		fr_dev(fp) = lport;
		fr_sof(fp) = FC_SOF_I3;
		fr_eof(fp) = FC_EOF_T;
		fr_crc(fp) = cpu_to_le32(~crc);
		unsol_els->lport = lport;
		unsol_els->fp = fp;
		INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
		queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
	} else {
		BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
		kfree_skb(skb);
		kfree(unsol_els);
	}
}
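
/**
 * bnx2fc_process_unsol_compl - handle an unsolicited CQE
 *
 * @tgt: bnx2fc_rport on whose CQ the entry was found
 * @wqe: the unsolicited CQE
 *
 * Unsolicited CQEs carry either a raw L2 FC frame (copied out of the RQ
 * buffers and passed up via bnx2fc_process_l2_frame_compl()) or an
 * error/warning report entry for an outstanding exchange.
 */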
static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
{
	u8 num_rq;
	struct fcoe_err_report_entry *err_entry;
	unsigned char *rq_data;
	unsigned char *buf = NULL, *buf1;
	int i;
	u16 xid;
	u32 frame_len, len;
	struct bnx2fc_cmd *io_req = NULL;
	struct fcoe_task_ctx_entry *task, *task_page;
	struct bnx2fc_hba *hba = tgt->port->priv;
	int task_idx, index;
	int rc = 0;


	BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
	switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
	case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
		frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
			     FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;

		num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;

		rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
		if (rq_data) {
			buf = rq_data;
		} else {
			buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
					     GFP_ATOMIC);

			if (!buf1) {
				BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
				break;
			}

			for (i = 0; i < num_rq; i++) {
				rq_data = (unsigned char *)
					   bnx2fc_get_next_rqe(tgt, 1);
				len = BNX2FC_RQ_BUF_SZ;
				memcpy(buf1, rq_data, len);
				buf1 += len;
			}
		}
		bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
					      FC_XID_UNKNOWN);

		if (buf != rq_data)
			kfree(buf);
		bnx2fc_return_rqe(tgt, num_rq);
		break;

	case FCOE_ERROR_DETECTION_CQE_TYPE:
		/*
		 * In case of an error reporting CQE a single RQ entry
		 * is consumed.
		 */
		spin_lock_bh(&tgt->tgt_lock);
		num_rq = 1;
		err_entry = (struct fcoe_err_report_entry *)
			     bnx2fc_get_next_rqe(tgt, 1);
		xid = err_entry->fc_hdr.ox_id;
		BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
			err_entry->err_warn_bitmap_hi,
			err_entry->err_warn_bitmap_lo);
		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
			err_entry->tx_buf_off, err_entry->rx_buf_off);

		bnx2fc_return_rqe(tgt, 1);

		if (xid > BNX2FC_MAX_XID) {
			BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
				   xid);
			spin_unlock_bh(&tgt->tgt_lock);
			break;
		}

		task_idx = xid / BNX2FC_TASKS_PER_PAGE;
		index = xid % BNX2FC_TASKS_PER_PAGE;
		task_page = (struct fcoe_task_ctx_entry *)
						hba->task_ctx[task_idx];
		task = &(task_page[index]);

		io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
		if (!io_req) {
			spin_unlock_bh(&tgt->tgt_lock);
			break;
		}

		if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
			printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
			spin_unlock_bh(&tgt->tgt_lock);
			break;
		}

		if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
				       &io_req->req_flags)) {
			BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
					    "progress.. ignore unsol err\n");
			spin_unlock_bh(&tgt->tgt_lock);
			break;
		}

		/*
		 * If ABTS is already in progress, and FW error is
		 * received after that, do not cancel the timeout_work
		 * and let the error recovery continue by explicitly
		 * logging out the target, when the ABTS eventually
		 * times out.
		 */
		if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
				      &io_req->req_flags)) {
			/*
			 * Cancel the timeout_work, as we received IO
			 * completion with FW error.
			 */
			if (cancel_delayed_work(&io_req->timeout_work))
				kref_put(&io_req->refcount,
					 bnx2fc_cmd_release); /* timer hold */

			rc = bnx2fc_initiate_abts(io_req);
			if (rc != SUCCESS) {
				BNX2FC_IO_DBG(io_req, "err_warn: initiate_abts "
					"failed. issue cleanup\n");
				rc = bnx2fc_initiate_cleanup(io_req);
				BUG_ON(rc);
			}
		} else
			printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
					    "in ABTS processing\n", xid);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	case FCOE_WARNING_DETECTION_CQE_TYPE:
		/*
		 * In case of a warning reporting CQE a single RQ entry
		 * is consumed.
		 */
		num_rq = 1;
		err_entry = (struct fcoe_err_report_entry *)
			     bnx2fc_get_next_rqe(tgt, 1);
		xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
		BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
			err_entry->err_warn_bitmap_hi,
			err_entry->err_warn_bitmap_lo);
		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
			err_entry->tx_buf_off, err_entry->rx_buf_off);

		bnx2fc_return_rqe(tgt, 1);
		break;

	default:
		printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
		break;
	}
}
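
/**
 * bnx2fc_process_cq_compl - process a single work completion CQE
 *
 * @tgt: bnx2fc_rport on whose CQ the entry was found
 * @wqe: the CQE, carrying the task id (xid) of the completed request
 *
 * Looks up the task context and io_req for the xid and dispatches to the
 * SCSI, task management, ABTS, ELS or cleanup completion handler.
 */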
void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
{
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_hba *hba = port->priv;
	struct bnx2fc_cmd *io_req;
	int task_idx, index;
	u16 xid;
	u8 cmd_type;
	u8 rx_state = 0;
	u8 num_rq;

	spin_lock_bh(&tgt->tgt_lock);
	xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
	if (xid >= BNX2FC_MAX_TASKS) {
		printk(KERN_ALERT PFX "ERROR:xid out of range\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return;
	}
	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;
	task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
	task = &(task_page[index]);

	num_rq = ((task->rx_wr_tx_rd.rx_flags &
		   FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE) >>
		   FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT);

	io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];

	if (io_req == NULL) {
		printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return;
	}

	/* Timestamp IO completion time */
	cmd_type = io_req->cmd_type;

	/* optimized completion path */
	if (cmd_type == BNX2FC_SCSI_CMD) {
		rx_state = ((task->rx_wr_tx_rd.rx_flags &
			    FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE) >>
			    FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT);

		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
			bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
			spin_unlock_bh(&tgt->tgt_lock);
			return;
		}
	}

	/* Process other IO completion types */
	switch (cmd_type) {
	case BNX2FC_SCSI_CMD:
		if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
			bnx2fc_process_abts_compl(io_req, task, num_rq);
		else if (rx_state ==
			 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
			bnx2fc_process_cleanup_compl(io_req, task, num_rq);
		else
			printk(KERN_ERR PFX "Invalid rx state - %d\n",
				rx_state);
		break;

	case BNX2FC_TASK_MGMT_CMD:
		BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
		bnx2fc_process_tm_compl(io_req, task, num_rq);
		break;

	case BNX2FC_ABTS:
		/*
		 * ABTS request received by firmware. ABTS response
		 * will be delivered to the task belonging to the IO
		 * that was aborted
		 */
		BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		break;

	case BNX2FC_ELS:
		BNX2FC_IO_DBG(io_req, "cq_compl - call process_els_compl\n");
		bnx2fc_process_els_compl(io_req, task, num_rq);
		break;

	case BNX2FC_CLEANUP:
		BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		break;

	default:
		printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
		break;
	}
	spin_unlock_bh(&tgt->tgt_lock);
}
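
/**
 * bnx2fc_alloc_work - allocate a work item to defer CQE processing
 *
 * @tgt: bnx2fc_rport the CQE belongs to
 * @wqe: the CQE to process later
 *
 * The work item records the target and the CQE so that a per-CPU I/O
 * thread can call bnx2fc_process_cq_compl() for it later.
 */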
struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
{
	struct bnx2fc_work *work;
	work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
	if (!work)
		return NULL;

	INIT_LIST_HEAD(&work->list);
	work->tgt = tgt;
	work->wqe = wqe;
	return work;
}
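
/**
 * bnx2fc_process_new_cqes - drain newly arrived CQEs for a target
 *
 * @tgt: bnx2fc_rport whose CQ is to be drained
 *
 * Walks the connection's CQ using the toggle bit to detect new entries.
 * Unsolicited CQEs are handled inline; work completions are queued to a
 * per-CPU I/O thread (chosen as wqe % num_possible_cpus()) when one is
 * running, otherwise they are processed inline as well. The CQ is
 * re-armed after each pass.
 */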
int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
{
	struct fcoe_cqe *cq;
	u32 cq_cons;
	struct fcoe_cqe *cqe;
	u16 wqe;
	bool more_cqes_found = false;

	/*
	 * cq_lock is a low contention lock used to protect
	 * the CQ data structure from being freed up during
	 * the upload operation
	 */
	spin_lock_bh(&tgt->cq_lock);

	if (!tgt->cq) {
		printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
		spin_unlock_bh(&tgt->cq_lock);
		return 0;
	}
	cq = tgt->cq;
	cq_cons = tgt->cq_cons_idx;
	cqe = &cq[cq_cons];

	do {
		more_cqes_found ^= true;

		while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
		       (tgt->cq_curr_toggle_bit <<
		       FCOE_CQE_TOGGLE_BIT_SHIFT)) {

			/* new entry on the cq */
			if (wqe & FCOE_CQE_CQE_TYPE) {
				/* Unsolicited event notification */
				bnx2fc_process_unsol_compl(tgt, wqe);
			} else {
				struct bnx2fc_work *work = NULL;
				struct bnx2fc_percpu_s *fps = NULL;
				unsigned int cpu = wqe % num_possible_cpus();

				fps = &per_cpu(bnx2fc_percpu, cpu);
				spin_lock_bh(&fps->fp_work_lock);
				if (unlikely(!fps->iothread))
					goto unlock;

				work = bnx2fc_alloc_work(tgt, wqe);
				if (work)
					list_add_tail(&work->list,
						      &fps->work_list);
unlock:
				spin_unlock_bh(&fps->fp_work_lock);

				/* Pending work request completion */
				if (fps->iothread && work)
					wake_up_process(fps->iothread);
				else
					bnx2fc_process_cq_compl(tgt, wqe);
			}
			cqe++;
			tgt->cq_cons_idx++;

			if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
				tgt->cq_cons_idx = 0;
				cqe = cq;
				tgt->cq_curr_toggle_bit =
					1 - tgt->cq_curr_toggle_bit;
			}
		}
		/* Re-arm CQ */
		if (more_cqes_found) {
			tgt->conn_db->cq_arm.lo = -1;
			wmb();
		}
	} while (more_cqes_found);

	/*
	 * Commit tgt->cq_cons_idx change to memory. spin_lock implies a
	 * full memory barrier, so there is no need for smp_wmb().
	 */

	spin_unlock_bh(&tgt->cq_lock);
	return 0;
}

/**
 * bnx2fc_fastpath_notification - process global event queue (KCQ)
 *
 * @hba: adapter structure pointer
 * @new_cqe_kcqe: pointer to newly DMA'd KCQ entry
 *
 * Fast path event notification handler
 */
static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *new_cqe_kcqe)
{
	u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
	struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];

	if (!tgt) {
		printk(KERN_ALERT PFX "conn_id 0x%x not valid\n", conn_id);
		return;
	}

	bnx2fc_process_new_cqes(tgt);
}

/**
 * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
 *
 * @hba: adapter structure pointer
 * @ofld_kcqe: connection offload kcqe pointer
 *
 * handle session offload completion, enable the session if offload is
 * successful.
 */
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *ofld_kcqe)
{
	struct bnx2fc_rport *tgt;
	struct fcoe_port *port;
	u32 conn_id;
	u32 context_id;
	int rc;

	conn_id = ofld_kcqe->fcoe_conn_id;
	context_id = ofld_kcqe->fcoe_conn_context_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
		return;
	}
	BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
		ofld_kcqe->fcoe_conn_context_id);
	port = tgt->port;
	if (hba != tgt->port->priv) {
		printk(KERN_ALERT PFX "ERROR:ofld_cmpl: HBA mis-match\n");
		goto ofld_cmpl_err;
	}
	/*
	 * cnic has allocated a context_id for this session; use this
	 * while enabling the session.
	 */
	tgt->context_id = context_id;
	if (ofld_kcqe->completion_status) {
		if (ofld_kcqe->completion_status ==
		    FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
			printk(KERN_ERR PFX "unable to allocate FCoE context "
				"resources\n");
			set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
		}
		goto ofld_cmpl_err;
	} else {

		/* now enable the session */
		rc = bnx2fc_send_session_enable_req(port, tgt);
		if (rc) {
			printk(KERN_ALERT PFX "enable session failed\n");
			goto ofld_cmpl_err;
		}
	}
	return;
ofld_cmpl_err:
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}

/**
 * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
 *
 * @hba: adapter structure pointer
 * @ofld_kcqe: connection enable kcqe pointer
 *
 * handle session enable completion, mark the rport as ready
 */

static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
						struct fcoe_kcqe *ofld_kcqe)
{
	struct bnx2fc_rport *tgt;
	u32 conn_id;
	u32 context_id;

	context_id = ofld_kcqe->fcoe_conn_context_id;
	conn_id = ofld_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ALERT PFX "ERROR:enbl_cmpl: No pending ofld req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
		ofld_kcqe->fcoe_conn_context_id);

	/*
	 * context_id should be the same for this target during offload
	 * and enable
	 */
	if (tgt->context_id != context_id) {
		printk(KERN_ALERT PFX "context id mis-match\n");
		return;
	}
	if (hba != tgt->port->priv) {
		printk(KERN_ALERT PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
		goto enbl_cmpl_err;
	}
	if (ofld_kcqe->completion_status) {
		goto enbl_cmpl_err;
	} else {
		/* enable successful - rport ready for issuing IOs */
		set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
		set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
		wake_up_interruptible(&tgt->ofld_wait);
	}
	return;

enbl_cmpl_err:
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}
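
/**
 * bnx2fc_process_conn_disable_cmpl - process FCoE session disable completion
 *
 * @hba: adapter structure pointer
 * @disable_kcqe: connection disable kcqe pointer
 *
 * On success, clears the OFFLOADED flag, marks the target DISABLED and
 * wakes up the upload waiter.
 */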
	} else {
		/* enable successful - rport ready for issuing IOs */
		set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
		set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
		wake_up_interruptible(&tgt->ofld_wait);
	}
	return;

enbl_cmpl_err:
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}

static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *disable_kcqe)
{
	struct bnx2fc_rport	*tgt;
	u32			conn_id;

	conn_id = disable_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ALERT PFX "ERROR: disable_cmpl: No disable req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);

	if (disable_kcqe->completion_status) {
		printk(KERN_ALERT PFX "ERROR: Disable failed with cmpl status %d\n",
			disable_kcqe->completion_status);
		return;
	} else {
		/* disable successful */
		BNX2FC_TGT_DBG(tgt, "disable successful\n");
		clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
		set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	}
}

static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *destroy_kcqe)
{
	struct bnx2fc_rport	*tgt;
	u32			conn_id;

	conn_id = destroy_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ALERT PFX "destroy_cmpl: No destroy req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);

	if (destroy_kcqe->completion_status) {
		printk(KERN_ALERT PFX "Destroy conn failed, cmpl status %d\n",
			destroy_kcqe->completion_status);
		return;
	} else {
		/* destroy successful */
		BNX2FC_TGT_DBG(tgt, "upload successful\n");
		clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
		set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	}
}

static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
{
	switch (err_code) {
	case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
		printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
		break;

	case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
		printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
		break;

	case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
		printk(KERN_ERR PFX "init_failure due to NIC error\n");
		break;

	default:
		printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
	}
}

/**
 * bnx2fc_indicate_kcqe - process KCQE
 *
 * @context:	adapter structure pointer
 * @kcq:	kcqe pointer
 * @num_cqe:	Number of completion queue elements
 *
 * Generic KCQ event handler
 */
void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
					u32 num_cqe)
{
	struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
	int i = 0;
	struct fcoe_kcqe *kcqe = NULL;

	while (i < num_cqe) {
		kcqe = (struct fcoe_kcqe *) kcq[i++];

		switch (kcqe->op_code) {
		case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
			bnx2fc_fastpath_notification(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
			bnx2fc_process_ofld_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_ENABLE_CONN:
			bnx2fc_process_enable_conn_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_INIT_FUNC:
			if (kcqe->completion_status !=
					FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
				bnx2fc_init_failure(hba,
						kcqe->completion_status);
			} else {
				set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
				bnx2fc_get_link_state(hba);
				printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
					(u8)hba->pcidev->bus->number);
			}
			break;

		case FCOE_KCQE_OPCODE_DESTROY_FUNC:
			if (kcqe->completion_status !=
					FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
				printk(KERN_ERR PFX "DESTROY failed\n");
			} else {
				printk(KERN_ERR PFX "DESTROY success\n");
			}
			hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
			wake_up_interruptible(&hba->destroy_wait);
			break;

		case FCOE_KCQE_OPCODE_DISABLE_CONN:
			bnx2fc_process_conn_disable_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_DESTROY_CONN:
			bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_STAT_FUNC:
			if (kcqe->completion_status !=
			    FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
				printk(KERN_ERR PFX "STAT failed\n");
			complete(&hba->stat_req_done);
			break;

		case FCOE_KCQE_OPCODE_FCOE_ERROR:
			/* fall thru */
		default:
			printk(KERN_ALERT PFX "unknown opcode 0x%x\n",
				kcqe->op_code);
		}
	}
}

void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
{
	struct fcoe_sqe *sqe;

	sqe = &tgt->sq[tgt->sq_prod_idx];

	/* Fill SQ WQE */
	sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
	sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;

	/* Advance SQ Prod Idx */
	if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
		tgt->sq_prod_idx = 0;
		tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
	}
}

void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
{
	struct b577xx_doorbell_set_prod ev_doorbell;
	u32 msg;

	wmb();

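	/*
	 * Descriptive note (added): the doorbell message built below packs
	 * the SQ producer index into bits 0-14, the current toggle bit into
	 * bit 15, and marks the header as an FCoE connection doorbell. The
	 * resulting 32-bit message is written to the per-connection doorbell
	 * window that bnx2fc_map_doorbell() maps into tgt->ctx_base.
	 */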
	memset(&ev_doorbell, 0, sizeof(struct b577xx_doorbell_set_prod));
	ev_doorbell.header.header = B577XX_DOORBELL_HDR_DB_TYPE;

	ev_doorbell.prod = tgt->sq_prod_idx |
				(tgt->sq_curr_toggle_bit << 15);
	ev_doorbell.header.header |= B577XX_FCOE_CONNECTION_TYPE <<
				B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
	msg = *((u32 *)&ev_doorbell);
	writel(cpu_to_le32(msg), tgt->ctx_base);

	mmiowb();
}

int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
{
	u32 context_id = tgt->context_id;
	struct fcoe_port *port = tgt->port;
	u32 reg_off;
	resource_size_t reg_base;
	struct bnx2fc_hba *hba = port->priv;

	reg_base = pci_resource_start(hba->pcidev,
					BNX2X_DOORBELL_PCI_BAR);
	reg_off = BNX2FC_5771X_DB_PAGE_SIZE *
			(context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
	tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
	if (!tgt->ctx_base)
		return -ENOMEM;
	return 0;
}

char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
{
	char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);

	if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
		return NULL;

	tgt->rq_cons_idx += num_items;

	if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
		tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;

	return buf;
}

void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
{
	/* return the rq buffer */
	u32 next_prod_idx = tgt->rq_prod_idx + num_items;
	if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
		/* Wrap around RQ */
		next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
	}
	tgt->rq_prod_idx = next_prod_idx;
	tgt->conn_db->rq_prod = tgt->rq_prod_idx;
}
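
/*
 * Illustrative sketch (added, not part of the driver): a typical consumer of
 * the two RQ helpers above reads one or more receive buffers and then hands
 * them back to the producer ring, roughly:
 *
 *	buf = bnx2fc_get_next_rqe(tgt, num_rq);
 *	if (buf)
 *		memcpy(local_copy, buf, num_rq * BNX2FC_RQ_BUF_SZ);
 *	bnx2fc_return_rqe(tgt, num_rq);
 *
 * num_rq and local_copy are placeholder names. Serialization of the RQ
 * indices is assumed to be provided by the caller; the helpers themselves do
 * not take any lock.
 */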

void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
			      struct fcoe_task_ctx_entry *task,
			      u16 orig_xid)
{
	u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
	struct bnx2fc_rport *tgt = io_req->tgt;
	u32 context_id = tgt->context_id;

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Tx Write Rx Read */
	task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
	task->tx_wr_rx_rd.init_flags = task_type <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
	/* Common */
	task->cmn.common_flags = context_id <<
				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
	task->cmn.general.cleanup_info.task_id = orig_xid;
}

void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
			 struct fcoe_task_ctx_entry *task)
{
	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fc_frame_header *fc_hdr;
	u8 task_type = 0;
	u64 *hdr;
	u64 temp_hdr[3];
	u32 context_id;

	/* Obtain task_type */
	if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
	    (io_req->cmd_type == BNX2FC_ELS)) {
		task_type = FCOE_TASK_TYPE_MIDPATH;
	} else if (io_req->cmd_type == BNX2FC_ABTS) {
		task_type = FCOE_TASK_TYPE_ABTS;
	}

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Setup the task from io_req for easy reference */
	io_req->task = task;

	BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
		io_req->cmd_type, task_type);

	/* Tx only */
	if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
	    (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
		task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
				(u32)mp_req->mp_req_bd_dma;
		task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
				(u32)((u64)mp_req->mp_req_bd_dma >> 32);
		task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
		BNX2FC_IO_DBG(io_req, "init_mp_task - bd_dma = 0x%llx\n",
			(unsigned long long)mp_req->mp_req_bd_dma);
	}

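	/*
	 * Descriptive note (added): only midpath and unsolicited task types
	 * carry the Tx SGL set up above; an ABTS task has no request payload
	 * and skips it.
	 */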
	/* Tx Write Rx Read */
	task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_INIT <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
	task->tx_wr_rx_rd.init_flags = task_type <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;

	/* Common */
	task->cmn.data_2_trns = io_req->data_xfer_len;
	context_id = tgt->context_id;
	task->cmn.common_flags = context_id <<
				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
	task->cmn.common_flags |= 1 <<
				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
	task->cmn.common_flags |= 1 <<
			FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;

	/* Rx Write Tx Read */
	fc_hdr = &(mp_req->req_fc_hdr);
	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
		fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
		fc_hdr->fh_rx_id = htons(0xffff);
		task->rx_wr_tx_rd.rx_id = 0xffff;
	} else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
		fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
	}

	/* Fill FC Header into middle path buffer */
	hdr = (u64 *) &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
	memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	/* Rx Only */
	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
		task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
				(u32)mp_req->mp_resp_bd_dma;
		task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
				(u32)((u64)mp_req->mp_resp_bd_dma >> 32);
		task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
	}
}

void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
		      struct fcoe_task_ctx_entry *task)
{
	u8 task_type;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct io_bdt *bd_tbl = io_req->bd_tbl;
	struct bnx2fc_rport *tgt = io_req->tgt;
	u64 *fcp_cmnd;
	u64 tmp_fcp_cmnd[4];
	u32 context_id;
	int cnt, i;
	int bd_count;

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Setup the task from io_req for easy reference */
	io_req->task = task;

	if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
		task_type = FCOE_TASK_TYPE_WRITE;
	else
		task_type = FCOE_TASK_TYPE_READ;

	/* Tx only */
	if (task_type == FCOE_TASK_TYPE_WRITE) {
		task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
				(u32)bd_tbl->bd_tbl_dma;
		task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
				(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
		task->tx_wr_only.sgl_ctx.mul_sges.sgl_size =
				bd_tbl->bd_valid;
	}

	/* Tx Write Rx Read */
	/* Init state to NORMAL */
	task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
	task->tx_wr_rx_rd.init_flags = task_type <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;

	/* Common */
	task->cmn.data_2_trns = io_req->data_xfer_len;
	context_id = tgt->context_id;
	task->cmn.common_flags = context_id <<
				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
	task->cmn.common_flags |= 1 <<
				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
	task->cmn.common_flags |= 1 <<
			FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;

	/* Set initiative ownership */
	task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT;

	/* Set initial seq counter */
	task->cmn.tx_low_seq_cnt = 1;

	/* Set state to "waiting for the first packet" */
	task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME;

	/* Fill FCP_CMND IU */
	fcp_cmnd = (u64 *)
		     task->cmn.general.cmd_info.fcp_cmd_payload.opaque;
	bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
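	/*
	 * Descriptive note (added): the FCP_CMND IU is built above in CPU
	 * byte order in tmp_fcp_cmnd and copied below into the task context
	 * one u64 at a time through cpu_to_be64(). struct fcp_cmnd is 32
	 * bytes, so cnt works out to 4 words, matching tmp_fcp_cmnd[4].
	 */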

	/* swap fcp_cmnd */
	cnt = sizeof(struct fcp_cmnd) / sizeof(u64);

	for (i = 0; i < cnt; i++) {
		*fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
		fcp_cmnd++;
	}

	/* Rx Write Tx Read */
	task->rx_wr_tx_rd.rx_id = 0xffff;

	/* Rx Only */
	if (task_type == FCOE_TASK_TYPE_READ) {

		bd_count = bd_tbl->bd_valid;
		if (bd_count == 1) {

			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;

			task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.lo =
					fcoe_bd_tbl->buf_addr_lo;
			task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.hi =
					fcoe_bd_tbl->buf_addr_hi;
			task->rx_wr_only.sgl_ctx.single_sge.cur_buf_rem =
					fcoe_bd_tbl->buf_len;
			task->tx_wr_rx_rd.init_flags |= 1 <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT;
		} else {

			task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
					(u32)bd_tbl->bd_tbl_dma;
			task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
					(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
			task->rx_wr_only.sgl_ctx.mul_sges.sgl_size =
					bd_tbl->bd_valid;
		}
	}
}

/**
 * bnx2fc_setup_task_ctx - allocate and map task context
 *
 * @hba:	pointer to adapter structure
 *
 * Allocate memory for the task contexts and the associated BD table to be
 * used by the firmware.
 */
int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
{
	int rc = 0;
	struct regpair *task_ctx_bdt;
	dma_addr_t addr;
	int i;

	/*
	 * Allocate task context bd table. A page size of bd table
	 * can map 256 buffers. Each buffer contains 32 task context
	 * entries. Hence the limit with one page is 8192 task context
	 * entries.
	 */
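	/*
	 * Illustrative sketch (added, not part of this function): with this
	 * two-level layout a task is located from its xid by a page index and
	 * an offset within the page, roughly
	 *
	 *	task_idx = xid / tasks_per_page;
	 *	index    = xid % tasks_per_page;
	 *	task     = &((struct fcoe_task_ctx_entry *)
	 *			hba->task_ctx[task_idx])[index];
	 *
	 * where tasks_per_page is a placeholder for the number of task
	 * context entries that fit in one page; the driver's own constant is
	 * not shown here.
	 */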
	hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
						  PAGE_SIZE,
						  &hba->task_ctx_bd_dma,
						  GFP_KERNEL);
	if (!hba->task_ctx_bd_tbl) {
		printk(KERN_ERR PFX "unable to allocate task context BDT\n");
		rc = -1;
		goto out;
	}
	memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);

	/*
	 * Allocate task_ctx which is an array of pointers pointing to
	 * a page containing 32 task contexts
	 */
	hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)),
				 GFP_KERNEL);
	if (!hba->task_ctx) {
		printk(KERN_ERR PFX "unable to allocate task context array\n");
		rc = -1;
		goto out1;
	}

	/*
	 * Allocate task_ctx_dma which is an array of dma addresses
	 */
	hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ *
					sizeof(dma_addr_t)), GFP_KERNEL);
	if (!hba->task_ctx_dma) {
		printk(KERN_ERR PFX "unable to alloc context mapping array\n");
		rc = -1;
		goto out2;
	}

	task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
	for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {

		hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
						      PAGE_SIZE,
						      &hba->task_ctx_dma[i],
						      GFP_KERNEL);
		if (!hba->task_ctx[i]) {
			printk(KERN_ERR PFX "unable to alloc task context\n");
			rc = -1;
			goto out3;
		}
		memset(hba->task_ctx[i], 0, PAGE_SIZE);
		addr = (u64)hba->task_ctx_dma[i];
		task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
		task_ctx_bdt->lo = cpu_to_le32((u32)addr);
		task_ctx_bdt++;
	}
	return 0;

out3:
	for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
		if (hba->task_ctx[i]) {
			dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				hba->task_ctx[i], hba->task_ctx_dma[i]);
			hba->task_ctx[i] = NULL;
		}
	}

	kfree(hba->task_ctx_dma);
	hba->task_ctx_dma = NULL;
out2:
	kfree(hba->task_ctx);
	hba->task_ctx = NULL;
out1:
	dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
			  hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
	hba->task_ctx_bd_tbl = NULL;
out:
	return rc;
}

void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
{
	int i;

	if (hba->task_ctx_bd_tbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->task_ctx_bd_tbl,
				  hba->task_ctx_bd_dma);
		hba->task_ctx_bd_tbl = NULL;
	}

	if (hba->task_ctx) {
		for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
			if (hba->task_ctx[i]) {
				dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
						  hba->task_ctx[i],
						  hba->task_ctx_dma[i]);
				hba->task_ctx[i] = NULL;
			}
		}
		kfree(hba->task_ctx);
		hba->task_ctx = NULL;
	}

	kfree(hba->task_ctx_dma);
	hba->task_ctx_dma = NULL;
}

static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
{
	int i;
	int segment_count;
	int hash_table_size;
	u32 *pbl;

	segment_count = hba->hash_tbl_segment_count;
	hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
		sizeof(struct fcoe_hash_table_entry);

	pbl = hba->hash_tbl_pbl;
	for (i = 0; i < segment_count; ++i) {
		dma_addr_t dma_address;

		dma_address = le32_to_cpu(*pbl);
		++pbl;
		dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
		++pbl;
		dma_free_coherent(&hba->pcidev->dev,
				  BNX2FC_HASH_TBL_CHUNK_SIZE,
				  hba->hash_tbl_segments[i],
				  dma_address);
	}

	if (hba->hash_tbl_pbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->hash_tbl_pbl,
				  hba->hash_tbl_pbl_dma);
		hba->hash_tbl_pbl = NULL;
	}
}
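
/*
 * Descriptive note (added): the hash table PBL written by
 * bnx2fc_allocate_hash_table() below stores, for each
 * BNX2FC_HASH_TBL_CHUNK_SIZE segment, the segment's DMA address as two
 * little-endian 32-bit words (low word first, then high word). This is the
 * same encoding bnx2fc_free_hash_table() above walks when it reconstructs the
 * addresses to free the segments.
 */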
static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
{
	int i;
	int hash_table_size;
	int segment_count;
	int segment_array_size;
	int dma_segment_array_size;
	dma_addr_t *dma_segment_array;
	u32 *pbl;

	hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
		sizeof(struct fcoe_hash_table_entry);

	segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
	segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
	hba->hash_tbl_segment_count = segment_count;

	segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
	hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
	if (!hba->hash_tbl_segments) {
		printk(KERN_ERR PFX "hash table pointers alloc failed\n");
		return -ENOMEM;
	}
	dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
	dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
	if (!dma_segment_array) {
		printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < segment_count; ++i) {
		hba->hash_tbl_segments[i] =
			dma_alloc_coherent(&hba->pcidev->dev,
					   BNX2FC_HASH_TBL_CHUNK_SIZE,
					   &dma_segment_array[i],
					   GFP_KERNEL);
		if (!hba->hash_tbl_segments[i]) {
			printk(KERN_ERR PFX "hash segment alloc failed\n");
			while (--i >= 0) {
				dma_free_coherent(&hba->pcidev->dev,
						  BNX2FC_HASH_TBL_CHUNK_SIZE,
						  hba->hash_tbl_segments[i],
						  dma_segment_array[i]);
				hba->hash_tbl_segments[i] = NULL;
			}
			kfree(dma_segment_array);
			return -ENOMEM;
		}
		memset(hba->hash_tbl_segments[i], 0,
		       BNX2FC_HASH_TBL_CHUNK_SIZE);
	}

	hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE,
					       &hba->hash_tbl_pbl_dma,
					       GFP_KERNEL);
	if (!hba->hash_tbl_pbl) {
		printk(KERN_ERR PFX "hash table pbl alloc failed\n");
		kfree(dma_segment_array);
		return -ENOMEM;
	}
	memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);

	pbl = hba->hash_tbl_pbl;
	for (i = 0; i < segment_count; ++i) {
		u64 paddr = dma_segment_array[i];
		*pbl = cpu_to_le32((u32) paddr);
		++pbl;
		*pbl = cpu_to_le32((u32) (paddr >> 32));
		++pbl;
	}
	pbl = hba->hash_tbl_pbl;
	i = 0;
	while (*pbl && *(pbl + 1)) {
		u32 lo;
		u32 hi;
		lo = *pbl;
		++pbl;
		hi = *pbl;
		++pbl;
		++i;
	}
	kfree(dma_segment_array);
	return 0;
}

/**
 * bnx2fc_setup_fw_resc - Allocate and map the hash tables, dummy buffer and
 *			  stats buffer
 *
 * @hba:	Pointer to adapter structure
 *
 */
int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
{
	u64 addr;
	u32 mem_size;
	int i;

	if (bnx2fc_allocate_hash_table(hba))
		return -ENOMEM;

	mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
	hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
						  &hba->t2_hash_tbl_ptr_dma,
						  GFP_KERNEL);
	if (!hba->t2_hash_tbl_ptr) {
		printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);

	mem_size = BNX2FC_NUM_MAX_SESS *
			sizeof(struct fcoe_t2_hash_table_entry);
	hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
					      &hba->t2_hash_tbl_dma,
					      GFP_KERNEL);
	if (!hba->t2_hash_tbl) {
		printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->t2_hash_tbl, 0x00, mem_size);
	for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
		addr = (unsigned long) hba->t2_hash_tbl_dma +
			 ((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
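		/*
		 * Descriptive note (added): point each entry's "next" field
		 * at the DMA address of the following entry, so the T2 hash
		 * entries form a singly linked chain for the firmware.
		 */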
		hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
		hba->t2_hash_tbl[i].next.hi = addr >> 32;
	}

	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE, &hba->dummy_buf_dma,
					       GFP_KERNEL);
	if (!hba->dummy_buffer) {
		printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}

	hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE,
					       &hba->stats_buf_dma,
					       GFP_KERNEL);
	if (!hba->stats_buffer) {
		printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->stats_buffer, 0x00, PAGE_SIZE);

	return 0;
}

void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
{
	u32 mem_size;

	if (hba->stats_buffer) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->stats_buffer, hba->stats_buf_dma);
		hba->stats_buffer = NULL;
	}

	if (hba->dummy_buffer) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->dummy_buffer, hba->dummy_buf_dma);
		hba->dummy_buffer = NULL;
	}

	if (hba->t2_hash_tbl_ptr) {
		mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
		dma_free_coherent(&hba->pcidev->dev, mem_size,
				  hba->t2_hash_tbl_ptr,
				  hba->t2_hash_tbl_ptr_dma);
		hba->t2_hash_tbl_ptr = NULL;
	}

	if (hba->t2_hash_tbl) {
		mem_size = BNX2FC_NUM_MAX_SESS *
			    sizeof(struct fcoe_t2_hash_table_entry);
		dma_free_coherent(&hba->pcidev->dev, mem_size,
				  hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
		hba->t2_hash_tbl = NULL;
	}
	bnx2fc_free_hash_table(hba);
}
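
/*
 * Illustrative sketch (added, not part of the driver): the resource helpers
 * above are intended to be used in matched pairs by the init and teardown
 * paths, along the lines of
 *
 *	if (bnx2fc_setup_task_ctx(hba) || bnx2fc_setup_fw_resc(hba))
 *		goto free_resources;
 *	...
 * free_resources:
 *	bnx2fc_free_fw_resc(hba);
 *	bnx2fc_free_task_ctx(hba);
 *
 * The exact call sites and error handling live outside this file; the pairing
 * shown here is an assumption, not a description of them.
 */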