// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "fnic_fip.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"

static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
struct workqueue_struct *fnic_fip_queue;
struct workqueue_struct *fnic_event_queue;

static void fnic_set_eth_mode(struct fnic *);
static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);

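/**
 * fnic_handle_link() - process a link event reported by the firmware.
 * @work: the link_work member of the fnic instance.
 *
 * Refreshes link status, link-down count and port speed from the vNIC
 * device, then notifies the FCoE controller of UP/DOWN transitions.
 * When the adapter is FIP capable, a link-up transition restarts FCoE
 * VLAN discovery instead of signalling link-up directly.
 */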
void fnic_handle_link(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, link_work);
	unsigned long flags;
	int old_link_status;
	u32 old_link_down_cnt;
	u64 old_port_speed, new_port_speed;

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	fnic->link_events = 1;	/* less work to just set every time */

	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	old_link_down_cnt = fnic->link_down_cnt;
	old_link_status = fnic->link_status;
	old_port_speed = atomic64_read(
			&fnic->fnic_stats.misc_stats.current_port_speed);

	fnic->link_status = vnic_dev_link_status(fnic->vdev);
	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

	new_port_speed = vnic_dev_port_speed(fnic->vdev);
	atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed,
		     new_port_speed);
	if (old_port_speed != new_port_speed)
		FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host,
			      "Current vnic speed set to: %llu\n",
			      new_port_speed);

	switch (vnic_dev_port_speed(fnic->vdev)) {
	case DCEM_PORTSPEED_10G:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT;
		break;
	case DCEM_PORTSPEED_20G:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_20GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT;
		break;
	case DCEM_PORTSPEED_25G:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT;
		break;
	case DCEM_PORTSPEED_40G:
	case DCEM_PORTSPEED_4x10G:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_40GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT;
		break;
	case DCEM_PORTSPEED_100G:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_100GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT;
		break;
	default:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_UNKNOWN;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
		break;
	}

	if (old_link_status == fnic->link_status) {
		if (!fnic->link_status) {
			/* DOWN -> DOWN */
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			fnic_fc_trace_set_data(fnic->lport->host->host_no,
				FNIC_FC_LE, "Link Status: DOWN->DOWN",
				strlen("Link Status: DOWN->DOWN"));
		} else {
			if (old_link_down_cnt != fnic->link_down_cnt) {
				/* UP -> DOWN -> UP */
				fnic->lport->host_stats.link_failure_count++;
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				fnic_fc_trace_set_data(
					fnic->lport->host->host_no,
					FNIC_FC_LE,
					"Link Status: UP_DOWN_UP",
					strlen("Link Status: UP_DOWN_UP"));
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link down\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				if (fnic->config.flags & VFCF_FIP_CAPABLE) {
					/* start FCoE VLAN discovery */
					fnic_fc_trace_set_data(
						fnic->lport->host->host_no,
						FNIC_FC_LE,
						"Link Status: UP_DOWN_UP_VLAN",
						strlen("Link Status: UP_DOWN_UP_VLAN"));
					fnic_fcoe_send_vlan_req(fnic);
					return;
				}
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link up\n");
				fcoe_ctlr_link_up(&fnic->ctlr);
			} else {
				/* UP -> UP */
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				fnic_fc_trace_set_data(
					fnic->lport->host->host_no, FNIC_FC_LE,
					"Link Status: UP_UP",
					strlen("Link Status: UP_UP"));
			}
		}
	} else if (fnic->link_status) {
		/* DOWN -> UP */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
			/* start FCoE VLAN discovery */
			fnic_fc_trace_set_data(
				fnic->lport->host->host_no,
				FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
				strlen("Link Status: DOWN_UP_VLAN"));
			fnic_fcoe_send_vlan_req(fnic);
			return;
		}
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
		fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
			"Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
		fcoe_ctlr_link_up(&fnic->ctlr);
	} else {
		/* UP -> DOWN */
		fnic->lport->host_stats.link_failure_count++;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
		fnic_fc_trace_set_data(
			fnic->lport->host->host_no, FNIC_FC_LE,
			"Link Status: UP_DOWN",
			strlen("Link Status: UP_DOWN"));
		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "deleting fip-timer during link-down\n");
			del_timer_sync(&fnic->fip_timer);
		}
		fcoe_ctlr_link_down(&fnic->ctlr);
	}
}

/*
 * This function passes incoming fabric frames to libFC.
 */
void fnic_handle_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, frame_work);
	struct fc_lport *lp = fnic->lport;
	unsigned long flags;
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->frame_queue))) {

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		fp = (struct fc_frame *)skb;

		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		fc_exch_recv(lp, fp);
	}
}

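/**
 * fnic_fcoe_evlist_free() - free all queued fnic events.
 * @fnic: fnic instance.
 *
 * Drains fnic->evlist under the fnic lock without processing the events.
 */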
void fnic_fcoe_evlist_free(struct fnic *fnic)
{
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		list_del(&fevt->list);
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

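/**
 * fnic_handle_event() - work handler that services queued fnic events.
 * @work: the event_work member of the fnic instance.
 *
 * Dispatches FNIC_EVT_START_VLAN_DISC and FNIC_EVT_START_FCF_DISC events.
 * Events are left on the list while the fnic is in a transitional state;
 * they are dropped once stop_rx_link_events is set.
 */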
void fnic_handle_event(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, event_work);
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		if (fnic->stop_rx_link_events) {
			list_del(&fevt->list);
			kfree(fevt);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}

		list_del(&fevt->list);
		switch (fevt->event) {
		case FNIC_EVT_START_VLAN_DISC:
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			fnic_fcoe_send_vlan_req(fnic);
			spin_lock_irqsave(&fnic->fnic_lock, flags);
			break;
		case FNIC_EVT_START_FCF_DISC:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Start FCF Discovery\n");
			fnic_fcoe_start_fcf_disc(fnic);
			break;
		default:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Unknown event 0x%x\n", fevt->event);
			break;
		}
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

/**
 * is_fnic_fip_flogi_reject() - check if a received FIP FLOGI frame is a reject.
 * @fip: The FCoE controller that received the frame
 * @skb: The received FIP frame
 *
 * Returns non-zero if the frame is a rejected FLOGI reply (LS_RJT, e.g.
 * unsupported command with an insufficient-resources ELS explanation).
 */
static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
					   struct sk_buff *skb)
{
	struct fc_lport *lport = fip->lp;
	struct fip_header *fiph;
	struct fc_frame_header *fh = NULL;
	struct fip_desc *desc;
	struct fip_encaps *els;
	u16 op;
	u8 els_op;
	u8 sub;

	size_t rlen;
	size_t dlen = 0;

	if (skb_linearize(skb))
		return 0;

	if (skb->len < sizeof(*fiph))
		return 0;

	fiph = (struct fip_header *)skb->data;
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	if (op != FIP_OP_LS)
		return 0;

	if (sub != FIP_SC_REP)
		return 0;

	rlen = ntohs(fiph->fip_dl_len) * 4;
	if (rlen + sizeof(*fiph) > skb->len)
		return 0;

	desc = (struct fip_desc *)(fiph + 1);
	dlen = desc->fip_dlen * FIP_BPW;

	if (desc->fip_dtype == FIP_DT_FLOGI) {

		if (dlen < sizeof(*els) + sizeof(*fh) + 1)
			return 0;

		els = (struct fip_encaps *)desc;
		fh = (struct fc_frame_header *)(els + 1);

		if (!fh)
			return 0;

		/*
		 * ELS command code, reason and explanation should be = Reject,
		 * unsupported command and insufficient resource
		 */
		els_op = *(u8 *)(fh + 1);
		if (els_op == ELS_LS_RJT) {
			shost_printk(KERN_INFO, lport->host,
				     "FLOGI request rejected by switch\n");
			return 1;
		}
		shost_printk(KERN_INFO, lport->host,
			     "FLOGI request accepted by switch\n");
	}
	return 0;
}

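/**
 * fnic_fcoe_send_vlan_req() - send a FIP VLAN discovery request.
 * @fnic: fnic instance.
 *
 * Resets any previously discovered VLANs, builds a FIP VLAN request
 * (MAC and WWNN descriptors) addressed to the all-FCFs multicast MAC,
 * hands it to the FCoE controller's send routine, and arms the FIP
 * timer so the request can be retried if no response arrives.
 */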
static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct sk_buff *skb;
	char *eth_fr;
	struct fip_vlan *vlan;
	u64 vlan_tov;

	fnic_fcoe_reset_vlans(fnic);
	fnic->set_vlan(fnic, 0);

	if (printk_ratelimit())
		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
			     "Sending VLAN request...\n");

	skb = dev_alloc_skb(sizeof(struct fip_vlan));
	if (!skb)
		return;

	eth_fr = (char *)skb->data;
	vlan = (struct fip_vlan *)eth_fr;

	memset(vlan, 0, sizeof(*vlan));
	memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
	memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
	vlan->eth.h_proto = htons(ETH_P_FIP);

	vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
	vlan->fip.fip_op = htons(FIP_OP_VLAN);
	vlan->fip.fip_subcode = FIP_SC_VL_REQ;
	vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);

	vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
	vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
	memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);

	vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
	vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
	put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
	atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);

	skb_put(skb, sizeof(*vlan));
	skb->protocol = htons(ETH_P_FIP);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	fip->send(fip, skb);

	/* set a timer so that we can retry if there is no response */
	vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
	mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
}

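/**
 * fnic_fcoe_process_vlan_resp() - parse a FIP VLAN notification.
 * @fnic: fnic instance.
 * @skb: received FIP VLAN response frame.
 *
 * Collects the VLAN descriptors from the response into fnic->vlans,
 * programs the first discovered VLAN into the hardware and kicks off
 * FCF solicitation. If no VLAN descriptor is present, the FIP timer
 * is left to retry discovery.
 */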
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct fip_header *fiph;
	struct fip_desc *desc;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u16 vid;
	size_t rlen;
	size_t dlen;
	struct fcoe_vlan *vlan;
	u64 sol_time;
	unsigned long flags;

	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		     "Received VLAN response...\n");

	fiph = (struct fip_header *)skb->data;

	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		     "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
		     ntohs(fiph->fip_op), fiph->fip_subcode);

	rlen = ntohs(fiph->fip_dl_len) * 4;
	fnic_fcoe_reset_vlans(fnic);
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	desc = (struct fip_desc *)(fiph + 1);
	while (rlen > 0) {
		dlen = desc->fip_dlen * FIP_BPW;
		switch (desc->fip_dtype) {
		case FIP_DT_VLAN:
			vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
			shost_printk(KERN_INFO, fnic->lport->host,
				     "process_vlan_resp: FIP VLAN %d\n", vid);
			vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
			if (!vlan) {
				/* retry from timer */
				spin_unlock_irqrestore(&fnic->vlans_lock,
						       flags);
				goto out;
			}
			vlan->vid = vid & 0x0fff;
			vlan->state = FIP_VLAN_AVAIL;
			list_add_tail(&vlan->list, &fnic->vlans);
			break;
		}
		desc = (struct fip_desc *)((char *)desc + dlen);
		rlen -= dlen;
	}

	/* any VLAN descriptors present? */
	if (list_empty(&fnic->vlans)) {
		/* retry from timer */
		atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
			     "No VLAN descriptors in FIP VLAN response\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		goto out;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT; /* sent now */
	vlan->sol_count++;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(fip);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
out:
	return;
}

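/**
 * fnic_fcoe_start_fcf_disc() - start FCF discovery on the selected VLAN.
 * @fnic: fnic instance.
 *
 * Programs the first discovered VLAN into the hardware, marks it as
 * sent, signals link-up to the FCoE controller to begin solicitation,
 * and arms the FIP timer for the retry path.
 */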
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	u64 sol_time;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT; /* sent now */
	vlan->sol_count = 1;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(&fnic->ctlr);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
}

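/**
 * fnic_fcoe_vlan_check() - validate the currently selected VLAN.
 * @fnic: fnic instance.
 * @flag: FIP flags from the received advertisement (currently unused).
 *
 * Returns 0 if the first VLAN on the list is already in use or has a
 * solicitation outstanding (promoting it to FIP_VLAN_USED), -EINVAL
 * otherwise.
 */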
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
{
	unsigned long flags;
	struct fcoe_vlan *fvlan;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return -EINVAL;
	}

	fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	if (fvlan->state == FIP_VLAN_USED) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return 0;
	}

	if (fvlan->state == FIP_VLAN_SENT) {
		fvlan->state = FIP_VLAN_USED;
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
	return -EINVAL;
}

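/**
 * fnic_event_enq() - queue an event for fnic_handle_event().
 * @fnic: fnic instance.
 * @ev: event to queue.
 *
 * Allocation is GFP_ATOMIC since this can be called from atomic
 * context; the event is silently dropped if the allocation fails.
 */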
static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
{
	struct fnic_event *fevt;
	unsigned long flags;

	fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
	if (!fevt)
		return;

	fevt->fnic = fnic;
	fevt->event = ev;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	list_add_tail(&fevt->list, &fnic->evlist);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	schedule_work(&fnic->event_work);
}

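/**
 * fnic_fcoe_handle_fip_frame() - triage a received FIP frame.
 * @fnic: fnic instance.
 * @skb: received FIP frame, Ethernet header already pulled.
 *
 * Returns 0 if the frame was consumed here (VLAN notification), a
 * negative value if @skb is invalid, and 1 if the frame should be
 * passed on to the FCoE controller.
 */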
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
{
	struct fip_header *fiph;
	int ret = 1;
	u16 op;
	u8 sub;

	if (!skb || !(skb->data))
		return -1;

	if (skb_linearize(skb))
		goto drop;

	fiph = (struct fip_header *)skb->data;
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
		goto drop;

	if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
		goto drop;

	if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
		if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
			goto drop;
		/* pass it on to fcoe */
		ret = 1;
	} else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
		/* set the vlan as used */
		fnic_fcoe_process_vlan_resp(fnic, skb);
		ret = 0;
	} else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
		/* received CVL request, restart vlan disc */
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		/* pass it on to fcoe */
		ret = 1;
	}
drop:
	return ret;
}

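/**
 * fnic_handle_fip_frame() - work handler for queued FIP frames.
 * @work: the fip_frame_work member of the fnic instance.
 *
 * Dequeues FIP frames, triages them with fnic_fcoe_handle_fip_frame(),
 * and forwards the remainder to the FCoE controller. A FLOGI reject
 * triggers a link bounce and a fresh VLAN discovery.
 */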
void fnic_handle_fip_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned long flags;
	struct sk_buff *skb;
	struct ethhdr *eh;

	while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->fip_frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		eh = (struct ethhdr *)skb->data;
		if (eh->h_proto == htons(ETH_P_FIP)) {
			skb_pull(skb, sizeof(*eh));
			if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
				dev_kfree_skb(skb);
				continue;
			}
			/*
			 * If there are FLOGI rejects, clear all
			 * FCFs and restart from scratch.
			 */
			if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
				atomic64_inc(
					&fnic_stats->vlan_stats.flogi_rejects);
				shost_printk(KERN_INFO, fnic->lport->host,
					     "Trigger a link down - VLAN disc\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				/* start FCoE VLAN discovery */
				fnic_fcoe_send_vlan_req(fnic);
				dev_kfree_skb(skb);
				continue;
			}
			fcoe_ctlr_recv(&fnic->ctlr, skb);
			continue;
		}
	}
}

/**
 * fnic_import_rq_eth_pkt() - handle a received FCoE or FIP frame.
 * @fnic: fnic instance.
 * @skb: Ethernet Frame.
 */
static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
{
	struct fc_frame *fp;
	struct ethhdr *eh;
	struct fcoe_hdr *fcoe_hdr;
	struct fcoe_crc_eof *ft;

	/*
	 * Undo VLAN encapsulation if present.
	 */
	eh = (struct ethhdr *)skb->data;
	if (eh->h_proto == htons(ETH_P_8021Q)) {
		memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
		eh = skb_pull(skb, VLAN_HLEN);
		skb_reset_mac_header(skb);
	}
	if (eh->h_proto == htons(ETH_P_FIP)) {
		if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
			printk(KERN_ERR "Dropped FIP frame: firmware "
					"is in non-FIP mode; enable FIP "
					"using UCSM\n");
			goto drop;
		}
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
		skb_queue_tail(&fnic->fip_frame_queue, skb);
		queue_work(fnic_fip_queue, &fnic->fip_frame_work);
		return 1;		/* let caller know packet was used */
	}
	if (eh->h_proto != htons(ETH_P_FCOE))
		goto drop;
	skb_set_network_header(skb, sizeof(*eh));
	skb_pull(skb, sizeof(*eh));

	fcoe_hdr = (struct fcoe_hdr *)skb->data;
	if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
		goto drop;

	fp = (struct fc_frame *)skb;
	fc_frame_init(fp);
	fr_sof(fp) = fcoe_hdr->fcoe_sof;
	skb_pull(skb, sizeof(struct fcoe_hdr));
	skb_reset_transport_header(skb);

	ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
	fr_eof(fp) = ft->fcoe_eof;
	skb_trim(skb, skb->len - sizeof(*ft));
	return 0;
drop:
	dev_kfree_skb_irq(skb);
	return -1;
}

/**
 * fnic_update_mac_locked() - set data MAC address and filters.
 * @fnic: fnic instance.
 * @new: newly-assigned FCoE MAC address.
 *
 * Called with the fnic lock held.
 */
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
	u8 *ctl = fnic->ctlr.ctl_src_addr;
	u8 *data = fnic->data_src_addr;

	if (is_zero_ether_addr(new))
		new = ctl;
	if (ether_addr_equal(data, new))
		return;
	FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
	if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
		vnic_dev_del_addr(fnic->vdev, data);
	memcpy(data, new, ETH_ALEN);
	if (!ether_addr_equal(new, ctl))
		vnic_dev_add_addr(fnic->vdev, new);
}

/**
 * fnic_update_mac() - set data MAC address and filters.
 * @lport: local port.
 * @new: newly-assigned FCoE MAC address.
 */
void fnic_update_mac(struct fc_lport *lport, u8 *new)
{
	struct fnic *fnic = lport_priv(lport);

	spin_lock_irq(&fnic->fnic_lock);
	fnic_update_mac_locked(fnic, new);
	spin_unlock_irq(&fnic->fnic_lock);
}

/**
 * fnic_set_port_id() - set the port_ID after successful FLOGI.
 * @lport: local port.
 * @port_id: assigned FC_ID.
 * @fp: received frame containing the FLOGI accept or NULL.
 *
 * This is called from libfc when a new FC_ID has been assigned.
 * This causes us to reset the firmware to FC_MODE and set up the new MAC
 * address and FC_ID.
 *
 * It is also called with FC_ID 0 when we're logged off.
 *
 * If the FC_ID is due to point-to-point, fp may be NULL.
 */
void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lport);
	u8 *mac;
	int ret;

	FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
		     port_id, fp);

	/*
	 * If we're clearing the FC_ID, change to use the ctl_src_addr.
	 * Set ethernet mode to send FLOGI.
	 */
	if (!port_id) {
		fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
		fnic_set_eth_mode(fnic);
		return;
	}

	if (fp) {
		mac = fr_cb(fp)->granted_mac;
		if (is_zero_ether_addr(mac)) {
			/* non-FIP - FLOGI already accepted - ignore return */
			fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
		}
		fnic_update_mac(lport, mac);
	}

	/* Change state to reflect transition to FC mode */
	spin_lock_irq(&fnic->fnic_lock);
	if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
	else {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unexpected fnic state %s while"
			     " processing FLOGI response\n",
			     fnic_state_to_str(fnic->state));
		spin_unlock_irq(&fnic->fnic_lock);
		return;
	}
	spin_unlock_irq(&fnic->fnic_lock);

	/*
	 * Send FLOGI registration to firmware to set up FC mode.
	 * The new address will be set up when registration completes.
	 */
	ret = fnic_flogi_reg_handler(fnic, port_id);

	if (ret < 0) {
		spin_lock_irq(&fnic->fnic_lock);
		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
			fnic->state = FNIC_IN_ETH_MODE;
		spin_unlock_irq(&fnic->fnic_lock);
	}
}

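/**
 * fnic_rq_cmpl_frame_recv() - per-descriptor receive completion handler.
 * @rq: receive queue the buffer came from.
 * @cq_desc: completion descriptor.
 * @buf: receive buffer holding the frame.
 * @skipped: unused.
 * @opaque: unused.
 *
 * Decodes the completion (FCP or enet type), trims the skb to the bytes
 * actually written, drops frames with CRC/encapsulation errors, and
 * queues good FC frames for fnic_handle_frame(). FIP/FCoE triage for
 * enet completions is done by fnic_import_rq_eth_pkt().
 */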
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
				    *cq_desc, struct vnic_rq_buf *buf,
				    int skipped __attribute__((unused)),
				    void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	struct fc_frame *fp;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
	u32 rss_hash;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	unsigned long flags;

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_FROM_DEVICE);
	skb = buf->os_buf;
	fp = (struct fc_frame *)skb;
	buf->os_buf = NULL;

	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
				   &type, &color, &q_number, &completed_index,
				   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
				   &tmpl, &fcp_bytes_written, &sof, &eof,
				   &ingress_port, &packet_error,
				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
				   &vlan);
		skb_trim(skb, fcp_bytes_written);
		fr_sof(fp) = sof;
		fr_eof(fp) = eof;

	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
				    &type, &color, &q_number, &completed_index,
				    &ingress_port, &fcoe, &eop, &sop,
				    &rss_type, &csum_not_calc, &rss_hash,
				    &bytes_written, &packet_error,
				    &vlan_stripped, &vlan, &checksum,
				    &fcoe_sof, &fcoe_fc_crc_ok,
				    &fcoe_enc_error, &fcoe_eof,
				    &tcp_udp_csum_ok, &udp, &tcp,
				    &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);
		skb_trim(skb, bytes_written);
		if (!fcs_ok) {
			atomic64_inc(&fnic_stats->misc_stats.frame_errors);
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "fcs error. dropping packet.\n");
			goto drop;
		}
		if (fnic_import_rq_eth_pkt(fnic, skb))
			return;

	} else {
		/* wrong CQ type */
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic rq_cmpl wrong cq type x%x\n", type);
		goto drop;
	}

	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
		atomic64_inc(&fnic_stats->misc_stats.frame_errors);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "fnic rq_cmpl fcoe x%x fcsok x%x"
			     " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
			     " x%x\n",
			     fcoe, fcs_ok, packet_error,
			     fcoe_fc_crc_ok, fcoe_enc_error);
		goto drop;
	}

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto drop;
	}
	fr_dev(fp) = fnic->lport;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
				    (char *)skb->data, skb->len)) != 0) {
		printk(KERN_ERR "fnic ctlr frame trace error!!!");
	}

	skb_queue_tail(&fnic->frame_queue, skb);
	queue_work(fnic_event_queue, &fnic->frame_work);

	return;
drop:
	dev_kfree_skb_irq(skb);
}

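/**
 * fnic_rq_cmpl_handler_cont() - per-completion callback for the receive CQ.
 * @vdev: vnic device.
 * @cq_desc: completion descriptor.
 * @type: completion type.
 * @q_number: receive queue index.
 * @completed_index: last completed descriptor.
 * @opaque: unused.
 *
 * Hands the completed descriptor to fnic_rq_cmpl_frame_recv() via
 * vnic_rq_service().
 */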
static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
			VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
			NULL);
	return 0;
}

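/**
 * fnic_rq_cmpl_handler() - service all receive completion queues.
 * @fnic: fnic instance.
 * @rq_work_to_do: maximum completions to process per queue.
 *
 * Returns the total number of completions processed. Each serviced
 * queue is refilled with fresh receive buffers afterwards.
 */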
int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
	unsigned int tot_rq_work_done = 0, cur_work_done;
	unsigned int i;
	int err;

	for (i = 0; i < fnic->rq_count; i++) {
		cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
						fnic_rq_cmpl_handler_cont,
						NULL);
		if (cur_work_done) {
			err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
			if (err)
				shost_printk(KERN_ERR, fnic->lport->host,
					     "fnic_alloc_rq_frame can't alloc"
					     " frame\n");
		}
		tot_rq_work_done += cur_work_done;
	}

	return tot_rq_work_done;
}

/*
 * This function is called once at init time to allocate and fill RQ
 * buffers. Subsequently, it is called in the interrupt context after RQ
 * buffer processing to replenish the buffers in the RQ.
 */
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	u16 len;
	dma_addr_t pa;
	int r;

	len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
	skb = dev_alloc_skb(len);
	if (!skb) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unable to allocate RQ sk_buff\n");
		return -ENOMEM;
	}
	skb_reset_mac_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	skb_put(skb, len);
	pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
		r = -ENOMEM;
		printk(KERN_ERR "PCI mapping failed with error %d\n", r);
		goto free_skb;
	}

	fnic_queue_rq_desc(rq, skb, pa, len);
	return 0;

free_skb:
	kfree_skb(skb);
	return r;
}

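/**
 * fnic_free_rq_buf() - release a receive buffer.
 * @rq: receive queue the buffer belongs to.
 * @buf: buffer to free.
 *
 * Unmaps the DMA mapping and frees the underlying sk_buff.
 */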
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(rq->vdev);

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_FROM_DEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}

/**
 * fnic_eth_send() - send an Ethernet frame.
 * @fip: fcoe_ctlr instance.
 * @skb: Ethernet Frame, FIP, without VLAN encapsulation.
 */
void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	struct fnic *fnic = fnic_from_ctlr(fip);
	struct vnic_wq *wq = &fnic->wq[0];
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	unsigned long flags;

	if (!fnic->vlan_hw_insert) {
		eth_hdr = (struct ethhdr *)skb_mac_header(skb);
		vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr));
		memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
	} else {
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
	}

	pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len,
			    DMA_TO_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
		printk(KERN_ERR "DMA mapping failed\n");
		goto free_skb;
	}

	spin_lock_irqsave(&fnic->wq_lock[0], flags);
	if (!vnic_wq_desc_avail(wq))
		goto irq_restore;

	fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
			       0 /* hw inserts cos value */,
			       fnic->vlan_id, 1);
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
	return;

irq_restore:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
	dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE);
free_skb:
	kfree_skb(skb);
}

/*
 * Send FC frame.
 */
static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
	struct vnic_wq *wq = &fnic->wq[0];
	struct sk_buff *skb;
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	struct fcoe_hdr *fcoe_hdr;
	struct fc_frame_header *fh;
	u32 tot_len, eth_hdr_len;
	int ret = 0;
	unsigned long flags;

	fh = fc_frame_header_get(fp);
	skb = fp_skb(fp);

	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
	    fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
		return 0;

	if (!fnic->vlan_hw_insert) {
		eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
		vlan_hdr = skb_push(skb, eth_hdr_len);
		eth_hdr = (struct ethhdr *)vlan_hdr;
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
	} else {
		eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
		eth_hdr = skb_push(skb, eth_hdr_len);
		eth_hdr->h_proto = htons(ETH_P_FCOE);
		fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
	}

	if (fnic->ctlr.map_dest)
		fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
	else
		memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
	memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);

	tot_len = skb->len;
	BUG_ON(tot_len % 4);

	memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
	fcoe_hdr->fcoe_sof = fr_sof(fp);
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);

	pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
		ret = -ENOMEM;
		printk(KERN_ERR "DMA map failed with error %d\n", ret);
		goto free_skb_on_err;
	}

	if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
				    (char *)eth_hdr, tot_len)) != 0) {
		printk(KERN_ERR "fnic ctlr frame trace error!!!");
	}

	spin_lock_irqsave(&fnic->wq_lock[0], flags);

	if (!vnic_wq_desc_avail(wq)) {
		dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE);
		ret = -1;
		goto irq_restore;
	}

	fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
			   0 /* hw inserts cos value */,
			   fnic->vlan_id, 1, 1, 1);

irq_restore:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);

free_skb_on_err:
	if (ret)
		dev_kfree_skb_any(fp_skb(fp));

	return ret;
}

/*
 * fnic_send
 * Routine to send a raw frame
 */
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lp);
	unsigned long flags;

	if (fnic->in_remove) {
		dev_kfree_skb(fp_skb(fp));
		return -1;
	}

	/*
	 * Queue frame if in a transitional state.
	 * This occurs while registering the Port_ID / MAC address after FLOGI.
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
		skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	return fnic_send_frame(fnic, fp);
}

/**
 * fnic_flush_tx() - send queued frames.
 * @fnic: fnic device
 *
 * Send frames that were waiting to go out in FC or Ethernet mode.
 * Whenever changing modes we purge queued frames, so these frames should
 * be queued for the stable mode that we're in, either FC or Ethernet.
 *
 * Called without fnic_lock held.
 */
void fnic_flush_tx(struct fnic *fnic)
{
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->tx_queue))) {
		fp = (struct fc_frame *)skb;
		fnic_send_frame(fnic, fp);
	}
}

/**
 * fnic_set_eth_mode() - put fnic into ethernet mode.
 * @fnic: fnic device
 *
 * Called without fnic lock held.
 */
static void fnic_set_eth_mode(struct fnic *fnic)
{
	unsigned long flags;
	enum fnic_state old_state;
	int ret;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
	old_state = fnic->state;
	switch (old_state) {
	case FNIC_IN_FC_MODE:
	case FNIC_IN_ETH_TRANS_FC_MODE:
	default:
		fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		ret = fnic_fw_reset_handler(fnic);

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
			goto again;
		if (ret)
			fnic->state = old_state;
		break;

	case FNIC_IN_FC_TRANS_ETH_MODE:
	case FNIC_IN_ETH_MODE:
		break;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

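/**
 * fnic_wq_complete_frame_send() - reclaim a transmitted frame.
 * @wq: work queue the buffer was posted on.
 * @cq_desc: completion descriptor.
 * @buf: transmit buffer that completed.
 * @opaque: unused.
 *
 * Unmaps the DMA mapping and frees the skb once the hardware is done
 * with it.
 */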
static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
					struct cq_desc *cq_desc,
					struct vnic_wq_buf *buf, void *opaque)
{
	struct sk_buff *skb = buf->os_buf;
	struct fc_frame *fp = (struct fc_frame *)skb;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_TO_DEVICE);
	dev_kfree_skb_irq(fp_skb(fp));
	buf->os_buf = NULL;
}

static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
	vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
			fnic_wq_complete_frame_send, NULL);
	spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);

	return 0;
}

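/**
 * fnic_wq_cmpl_handler() - service the raw work queue completion queues.
 * @fnic: fnic instance.
 * @work_to_do: maximum completions to process per queue.
 *
 * Returns the total number of transmit completions processed.
 */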
int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i;

	for (i = 0; i < fnic->raw_wq_count; i++) {
		wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
						work_to_do,
						fnic_wq_cmpl_handler_cont,
						NULL);
	}

	return wq_work_done;
}

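/**
 * fnic_free_wq_buf() - release an unsent transmit buffer.
 * @wq: work queue the buffer belongs to.
 * @buf: buffer to free.
 *
 * Unmaps the DMA mapping and frees the underlying frame.
 */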
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_TO_DEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}

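/**
 * fnic_fcoe_reset_vlans() - discard all discovered VLANs.
 * @fnic: fnic instance.
 *
 * Empties fnic->vlans under the vlans lock so that VLAN discovery can
 * start from a clean slate.
 */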
void fnic_fcoe_reset_vlans(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	struct fcoe_vlan *next;

	/*
	 * Indicating a link down to fcoe here (so that all FCFs are
	 * freed) might not be required, since we do this before sending
	 * the VLAN discovery request.
	 */
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (!list_empty(&fnic->vlans)) {
		list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
			list_del(&vlan->list);
			kfree(vlan);
		}
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
}

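/**
 * fnic_handle_fip_timer() - FIP timer expiry handler.
 * @fnic: fnic instance.
 *
 * Drives the VLAN discovery retry state machine: restarts discovery
 * when no usable VLAN exists, and re-solicits (or moves to the next
 * VLAN) when a solicitation on the current VLAN has gone unanswered.
 */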
void fnic_handle_fip_timer(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u64 sol_time;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (fnic->ctlr.mode == FIP_MODE_NON_FIP)
		return;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		/* no vlans available, try again */
		if (unlikely(fnic_log_level & FNIC_FCS_LOGGING))
			if (printk_ratelimit())
				shost_printk(KERN_DEBUG, fnic->lport->host,
					     "Start VLAN Discovery\n");
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		return;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
		     "fip_timer: vlan %d state %d sol_count %d\n",
		     vlan->vid, vlan->state, vlan->sol_count);
	switch (vlan->state) {
	case FIP_VLAN_USED:
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "FIP VLAN is selected for FC transaction\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		break;
	case FIP_VLAN_FAILED:
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		/* if all vlans are in failed state, restart vlan disc */
		if (unlikely(fnic_log_level & FNIC_FCS_LOGGING))
			if (printk_ratelimit())
				shost_printk(KERN_DEBUG, fnic->lport->host,
					     "Start VLAN Discovery\n");
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		break;
	case FIP_VLAN_SENT:
		if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
			/*
			 * no response on this vlan, remove from the list.
			 * Try the next vlan
			 */
			FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
				     "Dequeue this VLAN ID %d from list\n",
				     vlan->vid);
			list_del(&vlan->list);
			kfree(vlan);
			vlan = NULL;
			if (list_empty(&fnic->vlans)) {
				/* we exhausted all vlans, restart vlan disc */
				spin_unlock_irqrestore(&fnic->vlans_lock,
						       flags);
				FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
					     "fip_timer: vlan list empty, "
					     "trigger vlan disc\n");
				fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
				return;
			}
			/* check the next vlan */
			vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
						list);
			fnic->set_vlan(fnic, vlan->vid);
			vlan->state = FIP_VLAN_SENT; /* sent now */
		}
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
		vlan->sol_count++;
		sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
		mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
		break;
	}
}