1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *    Copyright IBM Corp. 2007, 2009
4  *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
5  *		 Frank Pavlic <fpavlic@de.ibm.com>,
6  *		 Thomas Spatzier <tspat@de.ibm.com>,
7  *		 Frank Blaschka <frank.blaschka@de.ibm.com>
8  */
9 
10 #define KMSG_COMPONENT "qeth"
11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12 
13 #include <linux/compat.h>
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/string.h>
17 #include <linux/errno.h>
18 #include <linux/kernel.h>
19 #include <linux/log2.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/mii.h>
23 #include <linux/mm.h>
24 #include <linux/kthread.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/netdevice.h>
28 #include <linux/netdev_features.h>
29 #include <linux/rcutree.h>
30 #include <linux/skbuff.h>
31 #include <linux/vmalloc.h>
32 
33 #include <net/iucv/af_iucv.h>
34 #include <net/dsfield.h>
35 
36 #include <asm/ebcdic.h>
37 #include <asm/chpid.h>
38 #include <asm/io.h>
39 #include <asm/sysinfo.h>
40 #include <asm/diag.h>
41 #include <asm/cio.h>
42 #include <asm/ccwdev.h>
43 #include <asm/cpcmd.h>
44 
45 #include "qeth_core.h"
46 
47 struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
48 	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
49 	/*                   N  P  A    M  L  V                      H  */
50 	[QETH_DBF_SETUP] = {"qeth_setup",
51 				8, 1,   8, 5, &debug_hex_ascii_view, NULL},
52 	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
53 			    &debug_sprintf_view, NULL},
54 	[QETH_DBF_CTRL]  = {"qeth_control",
55 		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
56 };
57 EXPORT_SYMBOL_GPL(qeth_dbf);
58 
59 struct kmem_cache *qeth_core_header_cache;
60 EXPORT_SYMBOL_GPL(qeth_core_header_cache);
61 static struct kmem_cache *qeth_qdio_outbuf_cache;
62 
63 static struct device *qeth_core_root_dev;
64 static struct dentry *qeth_debugfs_root;
65 static struct lock_class_key qdio_out_skb_queue_key;
66 
67 static void qeth_issue_next_read_cb(struct qeth_card *card,
68 				    struct qeth_cmd_buffer *iob,
69 				    unsigned int data_length);
70 static int qeth_qdio_establish(struct qeth_card *);
71 static void qeth_free_qdio_queues(struct qeth_card *card);
72 static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
73 		struct qeth_qdio_out_buffer *buf,
74 		enum iucv_tx_notify notification);
75 static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
76 				 int budget);
77 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
78 
79 static void qeth_close_dev_handler(struct work_struct *work)
80 {
81 	struct qeth_card *card;
82 
83 	card = container_of(work, struct qeth_card, close_dev_work);
84 	QETH_CARD_TEXT(card, 2, "cldevhdl");
85 	ccwgroup_set_offline(card->gdev);
86 }
87 
88 static const char *qeth_get_cardname(struct qeth_card *card)
89 {
90 	if (IS_VM_NIC(card)) {
91 		switch (card->info.type) {
92 		case QETH_CARD_TYPE_OSD:
93 			return " Virtual NIC QDIO";
94 		case QETH_CARD_TYPE_IQD:
95 			return " Virtual NIC Hiper";
96 		case QETH_CARD_TYPE_OSM:
97 			return " Virtual NIC QDIO - OSM";
98 		case QETH_CARD_TYPE_OSX:
99 			return " Virtual NIC QDIO - OSX";
100 		default:
101 			return " unknown";
102 		}
103 	} else {
104 		switch (card->info.type) {
105 		case QETH_CARD_TYPE_OSD:
106 			return " OSD Express";
107 		case QETH_CARD_TYPE_IQD:
108 			return " HiperSockets";
109 		case QETH_CARD_TYPE_OSN:
110 			return " OSN QDIO";
111 		case QETH_CARD_TYPE_OSM:
112 			return " OSM QDIO";
113 		case QETH_CARD_TYPE_OSX:
114 			return " OSX QDIO";
115 		default:
116 			return " unknown";
117 		}
118 	}
119 	return " n/a";
120 }
121 
122 /* max length to be returned: 14 */
123 const char *qeth_get_cardname_short(struct qeth_card *card)
124 {
125 	if (IS_VM_NIC(card)) {
126 		switch (card->info.type) {
127 		case QETH_CARD_TYPE_OSD:
128 			return "Virt.NIC QDIO";
129 		case QETH_CARD_TYPE_IQD:
130 			return "Virt.NIC Hiper";
131 		case QETH_CARD_TYPE_OSM:
132 			return "Virt.NIC OSM";
133 		case QETH_CARD_TYPE_OSX:
134 			return "Virt.NIC OSX";
135 		default:
136 			return "unknown";
137 		}
138 	} else {
139 		switch (card->info.type) {
140 		case QETH_CARD_TYPE_OSD:
141 			switch (card->info.link_type) {
142 			case QETH_LINK_TYPE_FAST_ETH:
143 				return "OSD_100";
144 			case QETH_LINK_TYPE_HSTR:
145 				return "HSTR";
146 			case QETH_LINK_TYPE_GBIT_ETH:
147 				return "OSD_1000";
148 			case QETH_LINK_TYPE_10GBIT_ETH:
149 				return "OSD_10GIG";
150 			case QETH_LINK_TYPE_25GBIT_ETH:
151 				return "OSD_25GIG";
152 			case QETH_LINK_TYPE_LANE_ETH100:
153 				return "OSD_FE_LANE";
154 			case QETH_LINK_TYPE_LANE_TR:
155 				return "OSD_TR_LANE";
156 			case QETH_LINK_TYPE_LANE_ETH1000:
157 				return "OSD_GbE_LANE";
158 			case QETH_LINK_TYPE_LANE:
159 				return "OSD_ATM_LANE";
160 			default:
161 				return "OSD_Express";
162 			}
163 		case QETH_CARD_TYPE_IQD:
164 			return "HiperSockets";
165 		case QETH_CARD_TYPE_OSN:
166 			return "OSN";
167 		case QETH_CARD_TYPE_OSM:
168 			return "OSM_1000";
169 		case QETH_CARD_TYPE_OSX:
170 			return "OSX_10GIG";
171 		default:
172 			return "unknown";
173 		}
174 	}
175 	return "n/a";
176 }
177 
178 void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
179 			 int clear_start_mask)
180 {
181 	unsigned long flags;
182 
183 	spin_lock_irqsave(&card->thread_mask_lock, flags);
184 	card->thread_allowed_mask = threads;
185 	if (clear_start_mask)
186 		card->thread_start_mask &= threads;
187 	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
188 	wake_up(&card->wait_q);
189 }
190 EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);
191 
192 int qeth_threads_running(struct qeth_card *card, unsigned long threads)
193 {
194 	unsigned long flags;
195 	int rc = 0;
196 
197 	spin_lock_irqsave(&card->thread_mask_lock, flags);
198 	rc = (card->thread_running_mask & threads);
199 	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
200 	return rc;
201 }
202 EXPORT_SYMBOL_GPL(qeth_threads_running);
203 
204 static void qeth_clear_working_pool_list(struct qeth_card *card)
205 {
206 	struct qeth_buffer_pool_entry *pool_entry, *tmp;
207 	struct qeth_qdio_q *queue = card->qdio.in_q;
208 	unsigned int i;
209 
210 	QETH_CARD_TEXT(card, 5, "clwrklst");
211 	list_for_each_entry_safe(pool_entry, tmp,
212 			    &card->qdio.in_buf_pool.entry_list, list){
213 			list_del(&pool_entry->list);
214 	}
215 
216 	for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
217 		queue->bufs[i].pool_entry = NULL;
218 }
219 
220 static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
221 {
222 	unsigned int i;
223 
224 	for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
225 		if (entry->elements[i])
226 			__free_page(entry->elements[i]);
227 	}
228 
229 	kfree(entry);
230 }
231 
232 static void qeth_free_buffer_pool(struct qeth_card *card)
233 {
234 	struct qeth_buffer_pool_entry *entry, *tmp;
235 
236 	list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
237 				 init_list) {
238 		list_del(&entry->init_list);
239 		qeth_free_pool_entry(entry);
240 	}
241 }
242 
243 static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
244 {
245 	struct qeth_buffer_pool_entry *entry;
246 	unsigned int i;
247 
248 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
249 	if (!entry)
250 		return NULL;
251 
252 	for (i = 0; i < pages; i++) {
253 		entry->elements[i] = __dev_alloc_page(GFP_KERNEL);
254 
255 		if (!entry->elements[i]) {
256 			qeth_free_pool_entry(entry);
257 			return NULL;
258 		}
259 	}
260 
261 	return entry;
262 }
263 
264 static int qeth_alloc_buffer_pool(struct qeth_card *card)
265 {
266 	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
267 	unsigned int i;
268 
269 	QETH_CARD_TEXT(card, 5, "alocpool");
270 	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
271 		struct qeth_buffer_pool_entry *entry;
272 
273 		entry = qeth_alloc_pool_entry(buf_elements);
274 		if (!entry) {
275 			qeth_free_buffer_pool(card);
276 			return -ENOMEM;
277 		}
278 
279 		list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
280 	}
281 	return 0;
282 }
283 
284 int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
285 {
286 	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
287 	struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
288 	struct qeth_buffer_pool_entry *entry, *tmp;
289 	int delta = count - pool->buf_count;
290 	LIST_HEAD(entries);
291 
292 	QETH_CARD_TEXT(card, 2, "realcbp");
293 
294 	/* Defer until queue is allocated: */
295 	if (!card->qdio.in_q)
296 		goto out;
297 
298 	/* Remove entries from the pool: */
299 	while (delta < 0) {
300 		entry = list_first_entry(&pool->entry_list,
301 					 struct qeth_buffer_pool_entry,
302 					 init_list);
303 		list_del(&entry->init_list);
304 		qeth_free_pool_entry(entry);
305 
306 		delta++;
307 	}
308 
309 	/* Allocate additional entries: */
310 	while (delta > 0) {
311 		entry = qeth_alloc_pool_entry(buf_elements);
312 		if (!entry) {
313 			list_for_each_entry_safe(entry, tmp, &entries,
314 						 init_list) {
315 				list_del(&entry->init_list);
316 				qeth_free_pool_entry(entry);
317 			}
318 
319 			return -ENOMEM;
320 		}
321 
322 		list_add(&entry->init_list, &entries);
323 
324 		delta--;
325 	}
326 
327 	list_splice(&entries, &pool->entry_list);
328 
329 out:
330 	card->qdio.in_buf_pool.buf_count = count;
331 	pool->buf_count = count;
332 	return 0;
333 }
334 EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);
335 
336 static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
337 {
338 	if (!q)
339 		return;
340 
341 	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
342 	kfree(q);
343 }
344 
345 static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
346 {
347 	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
348 	int i;
349 
350 	if (!q)
351 		return NULL;
352 
353 	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
354 		kfree(q);
355 		return NULL;
356 	}
357 
358 	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
359 		q->bufs[i].buffer = q->qdio_bufs[i];
360 
361 	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
362 	return q;
363 }
364 
365 static int qeth_cq_init(struct qeth_card *card)
366 {
367 	int rc;
368 
369 	if (card->options.cq == QETH_CQ_ENABLED) {
370 		QETH_CARD_TEXT(card, 2, "cqinit");
371 		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
372 				   QDIO_MAX_BUFFERS_PER_Q);
373 		card->qdio.c_q->next_buf_to_init = 127;
374 		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
375 			     card->qdio.no_in_queues - 1, 0,
376 			     127);
377 		if (rc) {
378 			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
379 			goto out;
380 		}
381 	}
382 	rc = 0;
383 out:
384 	return rc;
385 }
386 
387 static int qeth_alloc_cq(struct qeth_card *card)
388 {
389 	int rc;
390 
391 	if (card->options.cq == QETH_CQ_ENABLED) {
392 		int i;
393 		struct qdio_outbuf_state *outbuf_states;
394 
395 		QETH_CARD_TEXT(card, 2, "cqon");
396 		card->qdio.c_q = qeth_alloc_qdio_queue();
397 		if (!card->qdio.c_q) {
398 			rc = -1;
399 			goto kmsg_out;
400 		}
401 		card->qdio.no_in_queues = 2;
402 		card->qdio.out_bufstates =
403 			kcalloc(card->qdio.no_out_queues *
404 					QDIO_MAX_BUFFERS_PER_Q,
405 				sizeof(struct qdio_outbuf_state),
406 				GFP_KERNEL);
407 		outbuf_states = card->qdio.out_bufstates;
408 		if (outbuf_states == NULL) {
409 			rc = -1;
410 			goto free_cq_out;
411 		}
412 		for (i = 0; i < card->qdio.no_out_queues; ++i) {
413 			card->qdio.out_qs[i]->bufstates = outbuf_states;
414 			outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
415 		}
416 	} else {
417 		QETH_CARD_TEXT(card, 2, "nocq");
418 		card->qdio.c_q = NULL;
419 		card->qdio.no_in_queues = 1;
420 	}
421 	QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
422 	rc = 0;
423 out:
424 	return rc;
425 free_cq_out:
426 	qeth_free_qdio_queue(card->qdio.c_q);
427 	card->qdio.c_q = NULL;
428 kmsg_out:
429 	dev_err(&card->gdev->dev, "Failed to create completion queue\n");
430 	goto out;
431 }
432 
433 static void qeth_free_cq(struct qeth_card *card)
434 {
435 	if (card->qdio.c_q) {
436 		--card->qdio.no_in_queues;
437 		qeth_free_qdio_queue(card->qdio.c_q);
438 		card->qdio.c_q = NULL;
439 	}
440 	kfree(card->qdio.out_bufstates);
441 	card->qdio.out_bufstates = NULL;
442 }
443 
444 static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
445 							int delayed)
446 {
447 	enum iucv_tx_notify n;
448 
449 	switch (sbalf15) {
450 	case 0:
451 		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
452 		break;
453 	case 4:
454 	case 16:
455 	case 17:
456 	case 18:
457 		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
458 			TX_NOTIFY_UNREACHABLE;
459 		break;
460 	default:
461 		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
462 			TX_NOTIFY_GENERALERROR;
463 		break;
464 	}
465 
466 	return n;
467 }
468 
469 static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
470 					 int forced_cleanup)
471 {
472 	if (q->card->options.cq != QETH_CQ_ENABLED)
473 		return;
474 
475 	if (q->bufs[bidx]->next_pending != NULL) {
476 		struct qeth_qdio_out_buffer *head = q->bufs[bidx];
477 		struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
478 
479 		while (c) {
480 			if (forced_cleanup ||
481 			    atomic_read(&c->state) ==
482 			      QETH_QDIO_BUF_HANDLED_DELAYED) {
483 				struct qeth_qdio_out_buffer *f = c;
484 				QETH_CARD_TEXT(f->q->card, 5, "fp");
485 				QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
486 				/* release here to avoid interleaving between
487 				   outbound tasklet and inbound tasklet
488 				   regarding notifications and lifecycle */
489 				qeth_tx_complete_buf(c, forced_cleanup, 0);
490 
491 				c = f->next_pending;
492 				WARN_ON_ONCE(head->next_pending != f);
493 				head->next_pending = c;
494 				kmem_cache_free(qeth_qdio_outbuf_cache, f);
495 			} else {
496 				head = c;
497 				c = c->next_pending;
498 			}
499 
500 		}
501 	}
502 	if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
503 					QETH_QDIO_BUF_HANDLED_DELAYED)) {
504 		/* for recovery situations */
505 		qeth_init_qdio_out_buf(q, bidx);
506 		QETH_CARD_TEXT(q->card, 2, "clprecov");
507 	}
508 }
509 
510 
511 static void qeth_qdio_handle_aob(struct qeth_card *card,
512 				 unsigned long phys_aob_addr)
513 {
514 	struct qaob *aob;
515 	struct qeth_qdio_out_buffer *buffer;
516 	enum iucv_tx_notify notification;
517 	unsigned int i;
518 
519 	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
520 	QETH_CARD_TEXT(card, 5, "haob");
521 	QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
522 	buffer = (struct qeth_qdio_out_buffer *) aob->user1;
523 	QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
524 
525 	if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
526 			   QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
527 		notification = TX_NOTIFY_OK;
528 	} else {
529 		WARN_ON_ONCE(atomic_read(&buffer->state) !=
530 							QETH_QDIO_BUF_PENDING);
531 		atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
532 		notification = TX_NOTIFY_DELAYED_OK;
533 	}
534 
535 	if (aob->aorc != 0)  {
536 		QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
537 		notification = qeth_compute_cq_notification(aob->aorc, 1);
538 	}
539 	qeth_notify_skbs(buffer->q, buffer, notification);
540 
541 	/* Free dangling allocations. The attached skbs are handled by
542 	 * qeth_cleanup_handled_pending().
543 	 */
544 	for (i = 0;
545 	     i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
546 	     i++) {
547 		void *data = phys_to_virt(aob->sba[i]);
548 
549 		if (data && buffer->is_header[i])
550 			kmem_cache_free(qeth_core_header_cache, data);
551 	}
552 	atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
553 
554 	qdio_release_aob(aob);
555 }
556 
557 static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
558 			   void *data)
559 {
560 	ccw->cmd_code = cmd_code;
561 	ccw->flags = flags | CCW_FLAG_SLI;
562 	ccw->count = len;
563 	ccw->cda = (__u32) __pa(data);
564 }
565 
566 static int __qeth_issue_next_read(struct qeth_card *card)
567 {
568 	struct qeth_cmd_buffer *iob = card->read_cmd;
569 	struct qeth_channel *channel = iob->channel;
570 	struct ccw1 *ccw = __ccw_from_cmd(iob);
571 	int rc;
572 
573 	QETH_CARD_TEXT(card, 5, "issnxrd");
574 	if (channel->state != CH_STATE_UP)
575 		return -EIO;
576 
577 	memset(iob->data, 0, iob->length);
578 	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
579 	iob->callback = qeth_issue_next_read_cb;
580 	/* keep the cmd alive after completion: */
581 	qeth_get_cmd(iob);
582 
583 	QETH_CARD_TEXT(card, 6, "noirqpnd");
584 	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
585 	if (!rc) {
586 		channel->active_cmd = iob;
587 	} else {
588 		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
589 				 rc, CARD_DEVID(card));
590 		qeth_unlock_channel(card, channel);
591 		qeth_put_cmd(iob);
592 		card->read_or_write_problem = 1;
593 		qeth_schedule_recovery(card);
594 	}
595 	return rc;
596 }
597 
598 static int qeth_issue_next_read(struct qeth_card *card)
599 {
600 	int ret;
601 
602 	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
603 	ret = __qeth_issue_next_read(card);
604 	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
605 
606 	return ret;
607 }
608 
609 static void qeth_enqueue_cmd(struct qeth_card *card,
610 			     struct qeth_cmd_buffer *iob)
611 {
612 	spin_lock_irq(&card->lock);
613 	list_add_tail(&iob->list, &card->cmd_waiter_list);
614 	spin_unlock_irq(&card->lock);
615 }
616 
617 static void qeth_dequeue_cmd(struct qeth_card *card,
618 			     struct qeth_cmd_buffer *iob)
619 {
620 	spin_lock_irq(&card->lock);
621 	list_del(&iob->list);
622 	spin_unlock_irq(&card->lock);
623 }
624 
625 void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
626 {
627 	iob->rc = reason;
628 	complete(&iob->done);
629 }
630 EXPORT_SYMBOL_GPL(qeth_notify_cmd);
631 
632 static void qeth_flush_local_addrs4(struct qeth_card *card)
633 {
634 	struct qeth_local_addr *addr;
635 	struct hlist_node *tmp;
636 	unsigned int i;
637 
638 	spin_lock_irq(&card->local_addrs4_lock);
639 	hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
640 		hash_del_rcu(&addr->hnode);
641 		kfree_rcu(addr, rcu);
642 	}
643 	spin_unlock_irq(&card->local_addrs4_lock);
644 }
645 
646 static void qeth_flush_local_addrs6(struct qeth_card *card)
647 {
648 	struct qeth_local_addr *addr;
649 	struct hlist_node *tmp;
650 	unsigned int i;
651 
652 	spin_lock_irq(&card->local_addrs6_lock);
653 	hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
654 		hash_del_rcu(&addr->hnode);
655 		kfree_rcu(addr, rcu);
656 	}
657 	spin_unlock_irq(&card->local_addrs6_lock);
658 }
659 
660 static void qeth_flush_local_addrs(struct qeth_card *card)
661 {
662 	qeth_flush_local_addrs4(card);
663 	qeth_flush_local_addrs6(card);
664 }
665 
666 static void qeth_add_local_addrs4(struct qeth_card *card,
667 				  struct qeth_ipacmd_local_addrs4 *cmd)
668 {
669 	unsigned int i;
670 
671 	if (cmd->addr_length !=
672 	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
673 		dev_err_ratelimited(&card->gdev->dev,
674 				    "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
675 				    cmd->addr_length);
676 		return;
677 	}
678 
679 	spin_lock(&card->local_addrs4_lock);
680 	for (i = 0; i < cmd->count; i++) {
681 		unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
682 		struct qeth_local_addr *addr;
683 		bool duplicate = false;
684 
685 		hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
686 			if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
687 				duplicate = true;
688 				break;
689 			}
690 		}
691 
692 		if (duplicate)
693 			continue;
694 
695 		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
696 		if (!addr) {
697 			dev_err(&card->gdev->dev,
698 				"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
699 				&cmd->addrs[i].addr);
700 			continue;
701 		}
702 
703 		ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
704 		hash_add_rcu(card->local_addrs4, &addr->hnode, key);
705 	}
706 	spin_unlock(&card->local_addrs4_lock);
707 }
708 
709 static void qeth_add_local_addrs6(struct qeth_card *card,
710 				  struct qeth_ipacmd_local_addrs6 *cmd)
711 {
712 	unsigned int i;
713 
714 	if (cmd->addr_length !=
715 	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
716 		dev_err_ratelimited(&card->gdev->dev,
717 				    "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
718 				    cmd->addr_length);
719 		return;
720 	}
721 
722 	spin_lock(&card->local_addrs6_lock);
723 	for (i = 0; i < cmd->count; i++) {
724 		u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
725 		struct qeth_local_addr *addr;
726 		bool duplicate = false;
727 
728 		hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
729 			if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
730 				duplicate = true;
731 				break;
732 			}
733 		}
734 
735 		if (duplicate)
736 			continue;
737 
738 		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
739 		if (!addr) {
740 			dev_err(&card->gdev->dev,
741 				"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
742 				&cmd->addrs[i].addr);
743 			continue;
744 		}
745 
746 		addr->addr = cmd->addrs[i].addr;
747 		hash_add_rcu(card->local_addrs6, &addr->hnode, key);
748 	}
749 	spin_unlock(&card->local_addrs6_lock);
750 }
751 
752 static void qeth_del_local_addrs4(struct qeth_card *card,
753 				  struct qeth_ipacmd_local_addrs4 *cmd)
754 {
755 	unsigned int i;
756 
757 	if (cmd->addr_length !=
758 	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
759 		dev_err_ratelimited(&card->gdev->dev,
760 				    "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
761 				    cmd->addr_length);
762 		return;
763 	}
764 
765 	spin_lock(&card->local_addrs4_lock);
766 	for (i = 0; i < cmd->count; i++) {
767 		struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
768 		unsigned int key = ipv4_addr_hash(addr->addr);
769 		struct qeth_local_addr *tmp;
770 
771 		hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
772 			if (tmp->addr.s6_addr32[3] == addr->addr) {
773 				hash_del_rcu(&tmp->hnode);
774 				kfree_rcu(tmp, rcu);
775 				break;
776 			}
777 		}
778 	}
779 	spin_unlock(&card->local_addrs4_lock);
780 }
781 
782 static void qeth_del_local_addrs6(struct qeth_card *card,
783 				  struct qeth_ipacmd_local_addrs6 *cmd)
784 {
785 	unsigned int i;
786 
787 	if (cmd->addr_length !=
788 	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
789 		dev_err_ratelimited(&card->gdev->dev,
790 				    "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
791 				    cmd->addr_length);
792 		return;
793 	}
794 
795 	spin_lock(&card->local_addrs6_lock);
796 	for (i = 0; i < cmd->count; i++) {
797 		struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
798 		u32 key = ipv6_addr_hash(&addr->addr);
799 		struct qeth_local_addr *tmp;
800 
801 		hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
802 			if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
803 				hash_del_rcu(&tmp->hnode);
804 				kfree_rcu(tmp, rcu);
805 				break;
806 			}
807 		}
808 	}
809 	spin_unlock(&card->local_addrs6_lock);
810 }
811 
812 static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
813 				      struct sk_buff *skb)
814 {
815 	struct qeth_local_addr *tmp;
816 	bool is_local = false;
817 	unsigned int key;
818 	__be32 next_hop;
819 
820 	if (hash_empty(card->local_addrs4))
821 		return false;
822 
823 	rcu_read_lock();
824 	next_hop = qeth_next_hop_v4_rcu(skb, qeth_dst_check_rcu(skb, 4));
825 	key = ipv4_addr_hash(next_hop);
826 
827 	hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
828 		if (tmp->addr.s6_addr32[3] == next_hop) {
829 			is_local = true;
830 			break;
831 		}
832 	}
833 	rcu_read_unlock();
834 
835 	return is_local;
836 }
837 
838 static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
839 				      struct sk_buff *skb)
840 {
841 	struct qeth_local_addr *tmp;
842 	struct in6_addr *next_hop;
843 	bool is_local = false;
844 	u32 key;
845 
846 	if (hash_empty(card->local_addrs6))
847 		return false;
848 
849 	rcu_read_lock();
850 	next_hop = qeth_next_hop_v6_rcu(skb, qeth_dst_check_rcu(skb, 6));
851 	key = ipv6_addr_hash(next_hop);
852 
853 	hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
854 		if (ipv6_addr_equal(&tmp->addr, next_hop)) {
855 			is_local = true;
856 			break;
857 		}
858 	}
859 	rcu_read_unlock();
860 
861 	return is_local;
862 }
863 
864 static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
865 {
866 	struct qeth_card *card = m->private;
867 	struct qeth_local_addr *tmp;
868 	unsigned int i;
869 
870 	rcu_read_lock();
871 	hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
872 		seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
873 	hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
874 		seq_printf(m, "%pI6c\n", &tmp->addr);
875 	rcu_read_unlock();
876 
877 	return 0;
878 }
879 
880 DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);
881 
882 static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
883 		struct qeth_card *card)
884 {
885 	const char *ipa_name;
886 	int com = cmd->hdr.command;
887 	ipa_name = qeth_get_ipa_cmd_name(com);
888 
889 	if (rc)
890 		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
891 				 ipa_name, com, CARD_DEVID(card), rc,
892 				 qeth_get_ipa_msg(rc));
893 	else
894 		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
895 				 ipa_name, com, CARD_DEVID(card));
896 }
897 
898 static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
899 						struct qeth_ipa_cmd *cmd)
900 {
901 	QETH_CARD_TEXT(card, 5, "chkipad");
902 
903 	if (IS_IPA_REPLY(cmd)) {
904 		if (cmd->hdr.command != IPA_CMD_SETCCID &&
905 		    cmd->hdr.command != IPA_CMD_DELCCID &&
906 		    cmd->hdr.command != IPA_CMD_MODCCID &&
907 		    cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
908 			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
909 		return cmd;
910 	}
911 
912 	/* handle unsolicited event: */
913 	switch (cmd->hdr.command) {
914 	case IPA_CMD_STOPLAN:
915 		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
916 			dev_err(&card->gdev->dev,
917 				"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
918 				QETH_CARD_IFNAME(card));
919 			schedule_work(&card->close_dev_work);
920 		} else {
921 			dev_warn(&card->gdev->dev,
922 				 "The link for interface %s on CHPID 0x%X failed\n",
923 				 QETH_CARD_IFNAME(card), card->info.chpid);
924 			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
925 			netif_carrier_off(card->dev);
926 		}
927 		return NULL;
928 	case IPA_CMD_STARTLAN:
929 		dev_info(&card->gdev->dev,
930 			 "The link for %s on CHPID 0x%X has been restored\n",
931 			 QETH_CARD_IFNAME(card), card->info.chpid);
932 		if (card->info.hwtrap)
933 			card->info.hwtrap = 2;
934 		qeth_schedule_recovery(card);
935 		return NULL;
936 	case IPA_CMD_SETBRIDGEPORT_IQD:
937 	case IPA_CMD_SETBRIDGEPORT_OSA:
938 	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
939 		if (card->discipline->control_event_handler(card, cmd))
940 			return cmd;
941 		return NULL;
942 	case IPA_CMD_MODCCID:
943 		return cmd;
944 	case IPA_CMD_REGISTER_LOCAL_ADDR:
945 		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
946 			qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
947 		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
948 			qeth_add_local_addrs6(card, &cmd->data.local_addrs6);
949 
950 		QETH_CARD_TEXT(card, 3, "irla");
951 		return NULL;
952 	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
953 		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
954 			qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
955 		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
956 			qeth_del_local_addrs6(card, &cmd->data.local_addrs6);
957 
958 		QETH_CARD_TEXT(card, 3, "urla");
959 		return NULL;
960 	default:
961 		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
962 		return cmd;
963 	}
964 }
965 
966 static void qeth_clear_ipacmd_list(struct qeth_card *card)
967 {
968 	struct qeth_cmd_buffer *iob;
969 	unsigned long flags;
970 
971 	QETH_CARD_TEXT(card, 4, "clipalst");
972 
973 	spin_lock_irqsave(&card->lock, flags);
974 	list_for_each_entry(iob, &card->cmd_waiter_list, list)
975 		qeth_notify_cmd(iob, -ECANCELED);
976 	spin_unlock_irqrestore(&card->lock, flags);
977 }
978 
979 static int qeth_check_idx_response(struct qeth_card *card,
980 	unsigned char *buffer)
981 {
982 	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
983 	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
984 		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
985 				 buffer[4]);
986 		QETH_CARD_TEXT(card, 2, "ckidxres");
987 		QETH_CARD_TEXT(card, 2, " idxterm");
988 		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
989 		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
990 		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
991 			dev_err(&card->gdev->dev,
992 				"The device does not support the configured transport mode\n");
993 			return -EPROTONOSUPPORT;
994 		}
995 		return -EIO;
996 	}
997 	return 0;
998 }
999 
1000 void qeth_put_cmd(struct qeth_cmd_buffer *iob)
1001 {
1002 	if (refcount_dec_and_test(&iob->ref_count)) {
1003 		kfree(iob->data);
1004 		kfree(iob);
1005 	}
1006 }
1007 EXPORT_SYMBOL_GPL(qeth_put_cmd);
1008 
1009 static void qeth_release_buffer_cb(struct qeth_card *card,
1010 				   struct qeth_cmd_buffer *iob,
1011 				   unsigned int data_length)
1012 {
1013 	qeth_put_cmd(iob);
1014 }
1015 
1016 static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
1017 {
1018 	qeth_notify_cmd(iob, rc);
1019 	qeth_put_cmd(iob);
1020 }
1021 
1022 struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
1023 				       unsigned int length, unsigned int ccws,
1024 				       long timeout)
1025 {
1026 	struct qeth_cmd_buffer *iob;
1027 
1028 	if (length > QETH_BUFSIZE)
1029 		return NULL;
1030 
1031 	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
1032 	if (!iob)
1033 		return NULL;
1034 
1035 	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
1036 			    GFP_KERNEL | GFP_DMA);
1037 	if (!iob->data) {
1038 		kfree(iob);
1039 		return NULL;
1040 	}
1041 
1042 	init_completion(&iob->done);
1043 	spin_lock_init(&iob->lock);
1044 	INIT_LIST_HEAD(&iob->list);
1045 	refcount_set(&iob->ref_count, 1);
1046 	iob->channel = channel;
1047 	iob->timeout = timeout;
1048 	iob->length = length;
1049 	return iob;
1050 }
1051 EXPORT_SYMBOL_GPL(qeth_alloc_cmd);
1052 
1053 static void qeth_issue_next_read_cb(struct qeth_card *card,
1054 				    struct qeth_cmd_buffer *iob,
1055 				    unsigned int data_length)
1056 {
1057 	struct qeth_cmd_buffer *request = NULL;
1058 	struct qeth_ipa_cmd *cmd = NULL;
1059 	struct qeth_reply *reply = NULL;
1060 	struct qeth_cmd_buffer *tmp;
1061 	unsigned long flags;
1062 	int rc = 0;
1063 
1064 	QETH_CARD_TEXT(card, 4, "sndctlcb");
1065 	rc = qeth_check_idx_response(card, iob->data);
1066 	switch (rc) {
1067 	case 0:
1068 		break;
1069 	case -EIO:
1070 		qeth_schedule_recovery(card);
1071 		fallthrough;
1072 	default:
1073 		qeth_clear_ipacmd_list(card);
1074 		goto err_idx;
1075 	}
1076 
1077 	cmd = __ipa_reply(iob);
1078 	if (cmd) {
1079 		cmd = qeth_check_ipa_data(card, cmd);
1080 		if (!cmd)
1081 			goto out;
1082 		if (IS_OSN(card) && card->osn_info.assist_cb &&
1083 		    cmd->hdr.command != IPA_CMD_STARTLAN) {
1084 			card->osn_info.assist_cb(card->dev, cmd);
1085 			goto out;
1086 		}
1087 	}
1088 
1089 	/* match against pending cmd requests */
1090 	spin_lock_irqsave(&card->lock, flags);
1091 	list_for_each_entry(tmp, &card->cmd_waiter_list, list) {
1092 		if (tmp->match && tmp->match(tmp, iob)) {
1093 			request = tmp;
1094 			/* take the object outside the lock */
1095 			qeth_get_cmd(request);
1096 			break;
1097 		}
1098 	}
1099 	spin_unlock_irqrestore(&card->lock, flags);
1100 
1101 	if (!request)
1102 		goto out;
1103 
1104 	reply = &request->reply;
1105 	if (!reply->callback) {
1106 		rc = 0;
1107 		goto no_callback;
1108 	}
1109 
1110 	spin_lock_irqsave(&request->lock, flags);
1111 	if (request->rc)
1112 		/* Bail out when the requestor has already left: */
1113 		rc = request->rc;
1114 	else
1115 		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
1116 							(unsigned long)iob);
1117 	spin_unlock_irqrestore(&request->lock, flags);
1118 
1119 no_callback:
1120 	if (rc <= 0)
1121 		qeth_notify_cmd(request, rc);
1122 	qeth_put_cmd(request);
1123 out:
1124 	memcpy(&card->seqno.pdu_hdr_ack,
1125 		QETH_PDU_HEADER_SEQ_NO(iob->data),
1126 		QETH_SEQ_NO_LENGTH);
1127 	__qeth_issue_next_read(card);
1128 err_idx:
1129 	qeth_put_cmd(iob);
1130 }
1131 
1132 static int qeth_set_thread_start_bit(struct qeth_card *card,
1133 		unsigned long thread)
1134 {
1135 	unsigned long flags;
1136 	int rc = 0;
1137 
1138 	spin_lock_irqsave(&card->thread_mask_lock, flags);
1139 	if (!(card->thread_allowed_mask & thread))
1140 		rc = -EPERM;
1141 	else if (card->thread_start_mask & thread)
1142 		rc = -EBUSY;
1143 	else
1144 		card->thread_start_mask |= thread;
1145 	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1146 
1147 	return rc;
1148 }
1149 
1150 static void qeth_clear_thread_start_bit(struct qeth_card *card,
1151 					unsigned long thread)
1152 {
1153 	unsigned long flags;
1154 
1155 	spin_lock_irqsave(&card->thread_mask_lock, flags);
1156 	card->thread_start_mask &= ~thread;
1157 	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1158 	wake_up(&card->wait_q);
1159 }
1160 
1161 static void qeth_clear_thread_running_bit(struct qeth_card *card,
1162 					  unsigned long thread)
1163 {
1164 	unsigned long flags;
1165 
1166 	spin_lock_irqsave(&card->thread_mask_lock, flags);
1167 	card->thread_running_mask &= ~thread;
1168 	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1169 	wake_up_all(&card->wait_q);
1170 }
1171 
1172 static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
1173 {
1174 	unsigned long flags;
1175 	int rc = 0;
1176 
1177 	spin_lock_irqsave(&card->thread_mask_lock, flags);
1178 	if (card->thread_start_mask & thread) {
1179 		if ((card->thread_allowed_mask & thread) &&
1180 		    !(card->thread_running_mask & thread)) {
1181 			rc = 1;
1182 			card->thread_start_mask &= ~thread;
1183 			card->thread_running_mask |= thread;
1184 		} else
1185 			rc = -EPERM;
1186 	}
1187 	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1188 	return rc;
1189 }
1190 
1191 static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
1192 {
1193 	int rc = 0;
1194 
1195 	wait_event(card->wait_q,
1196 		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
1197 	return rc;
1198 }
1199 
1200 int qeth_schedule_recovery(struct qeth_card *card)
1201 {
1202 	int rc;
1203 
1204 	QETH_CARD_TEXT(card, 2, "startrec");
1205 
1206 	rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
1207 	if (!rc)
1208 		schedule_work(&card->kernel_thread_starter);
1209 
1210 	return rc;
1211 }
1212 
1213 static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
1214 			    struct irb *irb)
1215 {
1216 	int dstat, cstat;
1217 	char *sense;
1218 
1219 	sense = (char *) irb->ecw;
1220 	cstat = irb->scsw.cmd.cstat;
1221 	dstat = irb->scsw.cmd.dstat;
1222 
1223 	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
1224 		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
1225 		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
1226 		QETH_CARD_TEXT(card, 2, "CGENCHK");
1227 		dev_warn(&cdev->dev, "The qeth device driver "
1228 			"failed to recover an error on the device\n");
1229 		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
1230 				 CCW_DEVID(cdev), dstat, cstat);
1231 		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
1232 				16, 1, irb, 64, 1);
1233 		return -EIO;
1234 	}
1235 
1236 	if (dstat & DEV_STAT_UNIT_CHECK) {
1237 		if (sense[SENSE_RESETTING_EVENT_BYTE] &
1238 		    SENSE_RESETTING_EVENT_FLAG) {
1239 			QETH_CARD_TEXT(card, 2, "REVIND");
1240 			return -EIO;
1241 		}
1242 		if (sense[SENSE_COMMAND_REJECT_BYTE] &
1243 		    SENSE_COMMAND_REJECT_FLAG) {
1244 			QETH_CARD_TEXT(card, 2, "CMDREJi");
1245 			return -EIO;
1246 		}
1247 		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
1248 			QETH_CARD_TEXT(card, 2, "AFFE");
1249 			return -EIO;
1250 		}
1251 		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
1252 			QETH_CARD_TEXT(card, 2, "ZEROSEN");
1253 			return 0;
1254 		}
1255 		QETH_CARD_TEXT(card, 2, "DGENCHK");
1256 			return -EIO;
1257 	}
1258 	return 0;
1259 }
1260 
1261 static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
1262 				struct irb *irb)
1263 {
1264 	if (!IS_ERR(irb))
1265 		return 0;
1266 
1267 	switch (PTR_ERR(irb)) {
1268 	case -EIO:
1269 		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
1270 				 CCW_DEVID(cdev));
1271 		QETH_CARD_TEXT(card, 2, "ckirberr");
1272 		QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
1273 		return -EIO;
1274 	case -ETIMEDOUT:
1275 		dev_warn(&cdev->dev, "A hardware operation timed out"
1276 			" on the device\n");
1277 		QETH_CARD_TEXT(card, 2, "ckirberr");
1278 		QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
1279 		return -ETIMEDOUT;
1280 	default:
1281 		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
1282 				 PTR_ERR(irb), CCW_DEVID(cdev));
1283 		QETH_CARD_TEXT(card, 2, "ckirberr");
1284 		QETH_CARD_TEXT(card, 2, "  rc???");
1285 		return PTR_ERR(irb);
1286 	}
1287 }
1288 
1289 static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
1290 		struct irb *irb)
1291 {
1292 	int rc;
1293 	int cstat, dstat;
1294 	struct qeth_cmd_buffer *iob = NULL;
1295 	struct ccwgroup_device *gdev;
1296 	struct qeth_channel *channel;
1297 	struct qeth_card *card;
1298 
1299 	/* while we hold the ccwdev lock, this stays valid: */
1300 	gdev = dev_get_drvdata(&cdev->dev);
1301 	card = dev_get_drvdata(&gdev->dev);
1302 
1303 	QETH_CARD_TEXT(card, 5, "irq");
1304 
1305 	if (card->read.ccwdev == cdev) {
1306 		channel = &card->read;
1307 		QETH_CARD_TEXT(card, 5, "read");
1308 	} else if (card->write.ccwdev == cdev) {
1309 		channel = &card->write;
1310 		QETH_CARD_TEXT(card, 5, "write");
1311 	} else {
1312 		channel = &card->data;
1313 		QETH_CARD_TEXT(card, 5, "data");
1314 	}
1315 
1316 	if (intparm == 0) {
1317 		QETH_CARD_TEXT(card, 5, "irqunsol");
1318 	} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
1319 		QETH_CARD_TEXT(card, 5, "irqunexp");
1320 
1321 		dev_err(&cdev->dev,
1322 			"Received IRQ with intparm %lx, expected %px\n",
1323 			intparm, channel->active_cmd);
1324 		if (channel->active_cmd)
1325 			qeth_cancel_cmd(channel->active_cmd, -EIO);
1326 	} else {
1327 		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
1328 	}
1329 
1330 	channel->active_cmd = NULL;
1331 	qeth_unlock_channel(card, channel);
1332 
1333 	rc = qeth_check_irb_error(card, cdev, irb);
1334 	if (rc) {
1335 		/* IO was terminated, free its resources. */
1336 		if (iob)
1337 			qeth_cancel_cmd(iob, rc);
1338 		return;
1339 	}
1340 
1341 	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
1342 		channel->state = CH_STATE_STOPPED;
1343 		wake_up(&card->wait_q);
1344 	}
1345 
1346 	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
1347 		channel->state = CH_STATE_HALTED;
1348 		wake_up(&card->wait_q);
1349 	}
1350 
1351 	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
1352 					  SCSW_FCTL_HALT_FUNC))) {
1353 		qeth_cancel_cmd(iob, -ECANCELED);
1354 		iob = NULL;
1355 	}
1356 
1357 	cstat = irb->scsw.cmd.cstat;
1358 	dstat = irb->scsw.cmd.dstat;
1359 
1360 	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
1361 	    (dstat & DEV_STAT_UNIT_CHECK) ||
1362 	    (cstat)) {
1363 		if (irb->esw.esw0.erw.cons) {
1364 			dev_warn(&channel->ccwdev->dev,
1365 				"The qeth device driver failed to recover "
1366 				"an error on the device\n");
1367 			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
1368 					 CCW_DEVID(channel->ccwdev), cstat,
1369 					 dstat);
1370 			print_hex_dump(KERN_WARNING, "qeth: irb ",
1371 				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
1372 			print_hex_dump(KERN_WARNING, "qeth: sense data ",
1373 				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
1374 		}
1375 
1376 		rc = qeth_get_problem(card, cdev, irb);
1377 		if (rc) {
1378 			card->read_or_write_problem = 1;
1379 			if (iob)
1380 				qeth_cancel_cmd(iob, rc);
1381 			qeth_clear_ipacmd_list(card);
1382 			qeth_schedule_recovery(card);
1383 			return;
1384 		}
1385 	}
1386 
1387 	if (iob) {
1388 		/* sanity check: */
1389 		if (irb->scsw.cmd.count > iob->length) {
1390 			qeth_cancel_cmd(iob, -EIO);
1391 			return;
1392 		}
1393 		if (iob->callback)
1394 			iob->callback(card, iob,
1395 				      iob->length - irb->scsw.cmd.count);
1396 	}
1397 }
1398 
1399 static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
1400 		struct qeth_qdio_out_buffer *buf,
1401 		enum iucv_tx_notify notification)
1402 {
1403 	struct sk_buff *skb;
1404 
1405 	skb_queue_walk(&buf->skb_list, skb) {
1406 		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
1407 		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
1408 		if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
1409 			iucv_sk(skb->sk)->sk_txnotify(skb, notification);
1410 	}
1411 }
1412 
1413 static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
1414 				 int budget)
1415 {
1416 	struct qeth_qdio_out_q *queue = buf->q;
1417 	struct sk_buff *skb;
1418 
1419 	/* release may never happen from within CQ tasklet scope */
1420 	WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
1421 
1422 	if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
1423 		qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);
1424 
1425 	/* Empty buffer? */
1426 	if (buf->next_element_to_fill == 0)
1427 		return;
1428 
1429 	QETH_TXQ_STAT_INC(queue, bufs);
1430 	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
1431 	if (error) {
1432 		QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
1433 	} else {
1434 		QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
1435 		QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
1436 	}
1437 
1438 	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
1439 		unsigned int bytes = qdisc_pkt_len(skb);
1440 		bool is_tso = skb_is_gso(skb);
1441 		unsigned int packets;
1442 
1443 		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
1444 		if (!error) {
1445 			if (skb->ip_summed == CHECKSUM_PARTIAL)
1446 				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
1447 			if (skb_is_nonlinear(skb))
1448 				QETH_TXQ_STAT_INC(queue, skbs_sg);
1449 			if (is_tso) {
1450 				QETH_TXQ_STAT_INC(queue, skbs_tso);
1451 				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
1452 			}
1453 		}
1454 
1455 		napi_consume_skb(skb, budget);
1456 	}
1457 }
1458 
1459 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
1460 				     struct qeth_qdio_out_buffer *buf,
1461 				     bool error, int budget)
1462 {
1463 	int i;
1464 
1465 	/* is PCI flag set on buffer? */
1466 	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
1467 		atomic_dec(&queue->set_pci_flags_count);
1468 
1469 	qeth_tx_complete_buf(buf, error, budget);
1470 
1471 	for (i = 0; i < queue->max_elements; ++i) {
1472 		void *data = phys_to_virt(buf->buffer->element[i].addr);
1473 
1474 		if (data && buf->is_header[i])
1475 			kmem_cache_free(qeth_core_header_cache, data);
1476 		buf->is_header[i] = 0;
1477 	}
1478 
1479 	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
1480 	buf->next_element_to_fill = 0;
1481 	buf->frames = 0;
1482 	buf->bytes = 0;
1483 	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
1484 }
1485 
1486 static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
1487 {
1488 	int j;
1489 
1490 	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
1491 		if (!q->bufs[j])
1492 			continue;
1493 		qeth_cleanup_handled_pending(q, j, 1);
1494 		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
1495 		if (free) {
1496 			kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
1497 			q->bufs[j] = NULL;
1498 		}
1499 	}
1500 }
1501 
1502 static void qeth_drain_output_queues(struct qeth_card *card)
1503 {
1504 	int i;
1505 
1506 	QETH_CARD_TEXT(card, 2, "clearqdbf");
1507 	/* clear outbound buffers to free skbs */
1508 	for (i = 0; i < card->qdio.no_out_queues; ++i) {
1509 		if (card->qdio.out_qs[i])
1510 			qeth_drain_output_queue(card->qdio.out_qs[i], false);
1511 	}
1512 }
1513 
1514 static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
1515 {
1516 	unsigned int max = single ? 1 : card->dev->num_tx_queues;
1517 	unsigned int count;
1518 	int rc;
1519 
1520 	count = IS_VM_NIC(card) ? min(max, card->dev->real_num_tx_queues) : max;
1521 
1522 	rtnl_lock();
1523 	rc = netif_set_real_num_tx_queues(card->dev, count);
1524 	rtnl_unlock();
1525 
1526 	if (rc)
1527 		return rc;
1528 
1529 	if (card->qdio.no_out_queues == max)
1530 		return 0;
1531 
1532 	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
1533 		qeth_free_qdio_queues(card);
1534 
1535 	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
1536 		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
1537 
1538 	card->qdio.no_out_queues = max;
1539 	return 0;
1540 }
1541 
1542 static int qeth_update_from_chp_desc(struct qeth_card *card)
1543 {
1544 	struct ccw_device *ccwdev;
1545 	struct channel_path_desc_fmt0 *chp_dsc;
1546 	int rc = 0;
1547 
1548 	QETH_CARD_TEXT(card, 2, "chp_desc");
1549 
1550 	ccwdev = card->data.ccwdev;
1551 	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
1552 	if (!chp_dsc)
1553 		return -ENOMEM;
1554 
1555 	card->info.func_level = 0x4100 + chp_dsc->desc;
1556 
1557 	if (IS_OSD(card) || IS_OSX(card))
1558 		/* CHPP field bit 6 == 1 -> single queue */
1559 		rc = qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
1560 
1561 	kfree(chp_dsc);
1562 	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
1563 	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
1564 	return rc;
1565 }
1566 
1567 static void qeth_init_qdio_info(struct qeth_card *card)
1568 {
1569 	QETH_CARD_TEXT(card, 4, "intqdinf");
1570 	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
1571 	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
1572 	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
1573 
1574 	/* inbound */
1575 	card->qdio.no_in_queues = 1;
1576 	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
1577 	if (IS_IQD(card))
1578 		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
1579 	else
1580 		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
1581 	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
1582 	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
1583 	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
1584 }
1585 
1586 static void qeth_set_initial_options(struct qeth_card *card)
1587 {
1588 	card->options.route4.type = NO_ROUTER;
1589 	card->options.route6.type = NO_ROUTER;
1590 	card->options.isolation = ISOLATION_MODE_NONE;
1591 	card->options.cq = QETH_CQ_DISABLED;
1592 	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
1593 }
1594 
1595 static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
1596 {
1597 	unsigned long flags;
1598 	int rc = 0;
1599 
1600 	spin_lock_irqsave(&card->thread_mask_lock, flags);
1601 	QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
1602 			(u8) card->thread_start_mask,
1603 			(u8) card->thread_allowed_mask,
1604 			(u8) card->thread_running_mask);
1605 	rc = (card->thread_start_mask & thread);
1606 	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1607 	return rc;
1608 }
1609 
1610 static int qeth_do_reset(void *data);
1611 static void qeth_start_kernel_thread(struct work_struct *work)
1612 {
1613 	struct task_struct *ts;
1614 	struct qeth_card *card = container_of(work, struct qeth_card,
1615 					kernel_thread_starter);
1616 	QETH_CARD_TEXT(card , 2, "strthrd");
1617 
1618 	if (card->read.state != CH_STATE_UP &&
1619 	    card->write.state != CH_STATE_UP)
1620 		return;
1621 	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
1622 		ts = kthread_run(qeth_do_reset, card, "qeth_recover");
1623 		if (IS_ERR(ts)) {
1624 			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
1625 			qeth_clear_thread_running_bit(card,
1626 				QETH_RECOVER_THREAD);
1627 		}
1628 	}
1629 }
1630 
1631 static void qeth_buffer_reclaim_work(struct work_struct *);
1632 static void qeth_setup_card(struct qeth_card *card)
1633 {
1634 	QETH_CARD_TEXT(card, 2, "setupcrd");
1635 
1636 	card->info.type = CARD_RDEV(card)->id.driver_info;
1637 	card->state = CARD_STATE_DOWN;
1638 	spin_lock_init(&card->lock);
1639 	spin_lock_init(&card->thread_mask_lock);
1640 	mutex_init(&card->conf_mutex);
1641 	mutex_init(&card->discipline_mutex);
1642 	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
1643 	INIT_LIST_HEAD(&card->cmd_waiter_list);
1644 	init_waitqueue_head(&card->wait_q);
1645 	qeth_set_initial_options(card);
1646 	/* IP address takeover */
1647 	INIT_LIST_HEAD(&card->ipato.entries);
1648 	qeth_init_qdio_info(card);
1649 	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
1650 	INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
1651 	hash_init(card->rx_mode_addrs);
1652 	hash_init(card->local_addrs4);
1653 	hash_init(card->local_addrs6);
1654 	spin_lock_init(&card->local_addrs4_lock);
1655 	spin_lock_init(&card->local_addrs6_lock);
1656 }
1657 
1658 static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
1659 {
1660 	struct qeth_card *card = container_of(slr, struct qeth_card,
1661 					qeth_service_level);
1662 	if (card->info.mcl_level[0])
1663 		seq_printf(m, "qeth: %s firmware level %s\n",
1664 			CARD_BUS_ID(card), card->info.mcl_level);
1665 }
1666 
1667 static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
1668 {
1669 	struct qeth_card *card;
1670 
1671 	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
1672 	card = kzalloc(sizeof(*card), GFP_KERNEL);
1673 	if (!card)
1674 		goto out;
1675 	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
1676 
1677 	card->gdev = gdev;
1678 	dev_set_drvdata(&gdev->dev, card);
1679 	CARD_RDEV(card) = gdev->cdev[0];
1680 	CARD_WDEV(card) = gdev->cdev[1];
1681 	CARD_DDEV(card) = gdev->cdev[2];
1682 
1683 	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
1684 						 dev_name(&gdev->dev));
1685 	if (!card->event_wq)
1686 		goto out_wq;
1687 
1688 	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
1689 	if (!card->read_cmd)
1690 		goto out_read_cmd;
1691 
1692 	card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
1693 					   qeth_debugfs_root);
1694 	debugfs_create_file("local_addrs", 0400, card->debugfs, card,
1695 			    &qeth_debugfs_local_addr_fops);
1696 
1697 	card->qeth_service_level.seq_print = qeth_core_sl_print;
1698 	register_service_level(&card->qeth_service_level);
1699 	return card;
1700 
1701 out_read_cmd:
1702 	destroy_workqueue(card->event_wq);
1703 out_wq:
1704 	dev_set_drvdata(&gdev->dev, NULL);
1705 	kfree(card);
1706 out:
1707 	return NULL;
1708 }
1709 
1710 static int qeth_clear_channel(struct qeth_card *card,
1711 			      struct qeth_channel *channel)
1712 {
1713 	int rc;
1714 
1715 	QETH_CARD_TEXT(card, 3, "clearch");
1716 	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1717 	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
1718 	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1719 
1720 	if (rc)
1721 		return rc;
1722 	rc = wait_event_interruptible_timeout(card->wait_q,
1723 			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
1724 	if (rc == -ERESTARTSYS)
1725 		return rc;
1726 	if (channel->state != CH_STATE_STOPPED)
1727 		return -ETIME;
1728 	channel->state = CH_STATE_DOWN;
1729 	return 0;
1730 }
1731 
1732 static int qeth_halt_channel(struct qeth_card *card,
1733 			     struct qeth_channel *channel)
1734 {
1735 	int rc;
1736 
1737 	QETH_CARD_TEXT(card, 3, "haltch");
1738 	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1739 	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
1740 	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1741 
1742 	if (rc)
1743 		return rc;
1744 	rc = wait_event_interruptible_timeout(card->wait_q,
1745 			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
1746 	if (rc == -ERESTARTSYS)
1747 		return rc;
1748 	if (channel->state != CH_STATE_HALTED)
1749 		return -ETIME;
1750 	return 0;
1751 }
1752 
1753 static int qeth_stop_channel(struct qeth_channel *channel)
1754 {
1755 	struct ccw_device *cdev = channel->ccwdev;
1756 	int rc;
1757 
1758 	rc = ccw_device_set_offline(cdev);
1759 
1760 	spin_lock_irq(get_ccwdev_lock(cdev));
1761 	if (channel->active_cmd) {
1762 		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
1763 			channel->active_cmd);
1764 		channel->active_cmd = NULL;
1765 	}
1766 	cdev->handler = NULL;
1767 	spin_unlock_irq(get_ccwdev_lock(cdev));
1768 
1769 	return rc;
1770 }
1771 
1772 static int qeth_start_channel(struct qeth_channel *channel)
1773 {
1774 	struct ccw_device *cdev = channel->ccwdev;
1775 	int rc;
1776 
1777 	channel->state = CH_STATE_DOWN;
1778 	atomic_set(&channel->irq_pending, 0);
1779 
1780 	spin_lock_irq(get_ccwdev_lock(cdev));
1781 	cdev->handler = qeth_irq;
1782 	spin_unlock_irq(get_ccwdev_lock(cdev));
1783 
1784 	rc = ccw_device_set_online(cdev);
1785 	if (rc)
1786 		goto err;
1787 
1788 	return 0;
1789 
1790 err:
1791 	spin_lock_irq(get_ccwdev_lock(cdev));
1792 	cdev->handler = NULL;
1793 	spin_unlock_irq(get_ccwdev_lock(cdev));
1794 	return rc;
1795 }
1796 
1797 static int qeth_halt_channels(struct qeth_card *card)
1798 {
1799 	int rc1 = 0, rc2 = 0, rc3 = 0;
1800 
1801 	QETH_CARD_TEXT(card, 3, "haltchs");
1802 	rc1 = qeth_halt_channel(card, &card->read);
1803 	rc2 = qeth_halt_channel(card, &card->write);
1804 	rc3 = qeth_halt_channel(card, &card->data);
1805 	if (rc1)
1806 		return rc1;
1807 	if (rc2)
1808 		return rc2;
1809 	return rc3;
1810 }
1811 
1812 static int qeth_clear_channels(struct qeth_card *card)
1813 {
1814 	int rc1 = 0, rc2 = 0, rc3 = 0;
1815 
1816 	QETH_CARD_TEXT(card, 3, "clearchs");
1817 	rc1 = qeth_clear_channel(card, &card->read);
1818 	rc2 = qeth_clear_channel(card, &card->write);
1819 	rc3 = qeth_clear_channel(card, &card->data);
1820 	if (rc1)
1821 		return rc1;
1822 	if (rc2)
1823 		return rc2;
1824 	return rc3;
1825 }
1826 
1827 static int qeth_clear_halt_card(struct qeth_card *card, int halt)
1828 {
1829 	int rc = 0;
1830 
1831 	QETH_CARD_TEXT(card, 3, "clhacrd");
1832 
1833 	if (halt)
1834 		rc = qeth_halt_channels(card);
1835 	if (rc)
1836 		return rc;
1837 	return qeth_clear_channels(card);
1838 }
1839 
1840 static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
1841 {
1842 	int rc = 0;
1843 
1844 	QETH_CARD_TEXT(card, 3, "qdioclr");
1845 	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
1846 		QETH_QDIO_CLEANING)) {
1847 	case QETH_QDIO_ESTABLISHED:
1848 		if (IS_IQD(card))
1849 			rc = qdio_shutdown(CARD_DDEV(card),
1850 				QDIO_FLAG_CLEANUP_USING_HALT);
1851 		else
1852 			rc = qdio_shutdown(CARD_DDEV(card),
1853 				QDIO_FLAG_CLEANUP_USING_CLEAR);
1854 		if (rc)
1855 			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
1856 		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
1857 		break;
1858 	case QETH_QDIO_CLEANING:
1859 		return rc;
1860 	default:
1861 		break;
1862 	}
1863 	rc = qeth_clear_halt_card(card, use_halt);
1864 	if (rc)
1865 		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
1866 	return rc;
1867 }
1868 
1869 static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
1870 {
1871 	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
1872 	struct diag26c_vnic_resp *response = NULL;
1873 	struct diag26c_vnic_req *request = NULL;
1874 	struct ccw_dev_id id;
1875 	char userid[80];
1876 	int rc = 0;
1877 
1878 	QETH_CARD_TEXT(card, 2, "vmlayer");
1879 
1880 	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
1881 	if (rc)
1882 		goto out;
1883 
1884 	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
1885 	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
1886 	if (!request || !response) {
1887 		rc = -ENOMEM;
1888 		goto out;
1889 	}
1890 
1891 	ccw_device_get_id(CARD_RDEV(card), &id);
1892 	request->resp_buf_len = sizeof(*response);
1893 	request->resp_version = DIAG26C_VERSION6_VM65918;
1894 	request->req_format = DIAG26C_VNIC_INFO;
1895 	ASCEBC(userid, 8);
1896 	memcpy(&request->sys_name, userid, 8);
1897 	request->devno = id.devno;
1898 
1899 	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
1900 	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
1901 	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
1902 	if (rc)
1903 		goto out;
1904 	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
1905 
1906 	if (request->resp_buf_len < sizeof(*response) ||
1907 	    response->version != request->resp_version) {
1908 		rc = -EIO;
1909 		goto out;
1910 	}
1911 
1912 	if (response->protocol == VNIC_INFO_PROT_L2)
1913 		disc = QETH_DISCIPLINE_LAYER2;
1914 	else if (response->protocol == VNIC_INFO_PROT_L3)
1915 		disc = QETH_DISCIPLINE_LAYER3;
1916 
1917 out:
1918 	kfree(response);
1919 	kfree(request);
1920 	if (rc)
1921 		QETH_CARD_TEXT_(card, 2, "err%x", rc);
1922 	return disc;
1923 }
1924 
1925 /* Determine whether the device requires a specific layer discipline */
1926 static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
1927 {
1928 	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
1929 
1930 	if (IS_OSM(card) || IS_OSN(card))
1931 		disc = QETH_DISCIPLINE_LAYER2;
1932 	else if (IS_VM_NIC(card))
1933 		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
1934 				      qeth_vm_detect_layer(card);
1935 
1936 	switch (disc) {
1937 	case QETH_DISCIPLINE_LAYER2:
1938 		QETH_CARD_TEXT(card, 3, "force l2");
1939 		break;
1940 	case QETH_DISCIPLINE_LAYER3:
1941 		QETH_CARD_TEXT(card, 3, "force l3");
1942 		break;
1943 	default:
1944 		QETH_CARD_TEXT(card, 3, "force no");
1945 	}
1946 
1947 	return disc;
1948 }
1949 
1950 static void qeth_set_blkt_defaults(struct qeth_card *card)
1951 {
1952 	QETH_CARD_TEXT(card, 2, "cfgblkt");
1953 
1954 	if (card->info.use_v1_blkt) {
1955 		card->info.blkt.time_total = 0;
1956 		card->info.blkt.inter_packet = 0;
1957 		card->info.blkt.inter_packet_jumbo = 0;
1958 	} else {
1959 		card->info.blkt.time_total = 250;
1960 		card->info.blkt.inter_packet = 5;
1961 		card->info.blkt.inter_packet_jumbo = 15;
1962 	}
1963 }
1964 
1965 static void qeth_idx_init(struct qeth_card *card)
1966 {
1967 	memset(&card->seqno, 0, sizeof(card->seqno));
1968 
1969 	card->token.issuer_rm_w = 0x00010103UL;
1970 	card->token.cm_filter_w = 0x00010108UL;
1971 	card->token.cm_connection_w = 0x0001010aUL;
1972 	card->token.ulp_filter_w = 0x0001010bUL;
1973 	card->token.ulp_connection_w = 0x0001010dUL;
1974 
1975 	switch (card->info.type) {
1976 	case QETH_CARD_TYPE_IQD:
1977 		card->info.func_level =	QETH_IDX_FUNC_LEVEL_IQD;
1978 		break;
1979 	case QETH_CARD_TYPE_OSD:
1980 	case QETH_CARD_TYPE_OSN:
1981 		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
1982 		break;
1983 	default:
1984 		break;
1985 	}
1986 }
1987 
1988 static void qeth_idx_finalize_cmd(struct qeth_card *card,
1989 				  struct qeth_cmd_buffer *iob)
1990 {
1991 	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
1992 	       QETH_SEQ_NO_LENGTH);
1993 	if (iob->channel == &card->write)
1994 		card->seqno.trans_hdr++;
1995 }
1996 
1997 static int qeth_peer_func_level(int level)
1998 {
1999 	if ((level & 0xff) == 8)
2000 		return (level & 0xff) + 0x400;
2001 	if (((level >> 8) & 3) == 1)
2002 		return (level & 0xff) + 0x200;
2003 	return level;
2004 }
2005 
2006 static void qeth_mpc_finalize_cmd(struct qeth_card *card,
2007 				  struct qeth_cmd_buffer *iob)
2008 {
2009 	qeth_idx_finalize_cmd(card, iob);
2010 
2011 	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
2012 	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
2013 	card->seqno.pdu_hdr++;
2014 	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
2015 	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
2016 
2017 	iob->callback = qeth_release_buffer_cb;
2018 }
2019 
2020 static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
2021 				 struct qeth_cmd_buffer *reply)
2022 {
2023 	/* MPC cmds are issued strictly in sequence. */
2024 	return !IS_IPA(reply->data);
2025 }
2026 
2027 static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
2028 						  const void *data,
2029 						  unsigned int data_length)
2030 {
2031 	struct qeth_cmd_buffer *iob;
2032 
2033 	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
2034 	if (!iob)
2035 		return NULL;
2036 
2037 	memcpy(iob->data, data, data_length);
2038 	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
2039 		       iob->data);
2040 	iob->finalize = qeth_mpc_finalize_cmd;
2041 	iob->match = qeth_mpc_match_reply;
2042 	return iob;
2043 }
2044 
2045 /**
2046  * qeth_send_control_data() -	send control command to the card
2047  * @card:			qeth_card structure pointer
2048  * @iob:			qeth_cmd_buffer pointer
2049  * @reply_cb:			callback function pointer
2050  * @cb_card:			pointer to the qeth_card structure
2051  * @cb_reply:			pointer to the qeth_reply structure
2052  * @cb_cmd:			pointer to the original iob for non-IPA
2053  *				commands, or to the qeth_ipa_cmd structure
2054  *				for the IPA commands.
2055  * @reply_param:		private pointer passed to the callback
2056  *
2057  * Callback function gets called one or more times, with cb_cmd
2058  * pointing to the response returned by the hardware. Callback
2059  * function must return
2060  *   > 0 if more reply blocks are expected,
2061  *     0 if the last or only reply block is received, and
2062  *   < 0 on error.
2063  * Callback function can get the value of the reply_param pointer from the
2064  * field 'param' of the structure qeth_reply.
2065  */
2066 
2067 static int qeth_send_control_data(struct qeth_card *card,
2068 				  struct qeth_cmd_buffer *iob,
2069 				  int (*reply_cb)(struct qeth_card *cb_card,
2070 						  struct qeth_reply *cb_reply,
2071 						  unsigned long cb_cmd),
2072 				  void *reply_param)
2073 {
2074 	struct qeth_channel *channel = iob->channel;
2075 	struct qeth_reply *reply = &iob->reply;
2076 	long timeout = iob->timeout;
2077 	int rc;
2078 
2079 	QETH_CARD_TEXT(card, 2, "sendctl");
2080 
2081 	reply->callback = reply_cb;
2082 	reply->param = reply_param;
2083 
2084 	timeout = wait_event_interruptible_timeout(card->wait_q,
2085 						   qeth_trylock_channel(channel),
2086 						   timeout);
2087 	if (timeout <= 0) {
2088 		qeth_put_cmd(iob);
2089 		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
2090 	}
2091 
2092 	if (iob->finalize)
2093 		iob->finalize(card, iob);
2094 	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));
2095 
2096 	qeth_enqueue_cmd(card, iob);
2097 
2098 	/* This pairs with iob->callback, and keeps the iob alive after IO: */
2099 	qeth_get_cmd(iob);
2100 
2101 	QETH_CARD_TEXT(card, 6, "noirqpnd");
2102 	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
2103 	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
2104 				      (addr_t) iob, 0, 0, timeout);
2105 	if (!rc)
2106 		channel->active_cmd = iob;
2107 	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
2108 	if (rc) {
2109 		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
2110 				 CARD_DEVID(card), rc);
2111 		QETH_CARD_TEXT_(card, 2, " err%d", rc);
2112 		qeth_dequeue_cmd(card, iob);
2113 		qeth_put_cmd(iob);
2114 		qeth_unlock_channel(card, channel);
2115 		goto out;
2116 	}
2117 
2118 	timeout = wait_for_completion_interruptible_timeout(&iob->done,
2119 							    timeout);
2120 	if (timeout <= 0)
2121 		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
2122 
2123 	qeth_dequeue_cmd(card, iob);
2124 
2125 	if (reply_cb) {
2126 		/* Wait until the callback for a late reply has completed: */
2127 		spin_lock_irq(&iob->lock);
2128 		if (rc)
2129 			/* Zap any callback that's still pending: */
2130 			iob->rc = rc;
2131 		spin_unlock_irq(&iob->lock);
2132 	}
2133 
2134 	if (!rc)
2135 		rc = iob->rc;
2136 
2137 out:
2138 	qeth_put_cmd(iob);
2139 	return rc;
2140 }
2141 
2142 struct qeth_node_desc {
2143 	struct node_descriptor nd1;
2144 	struct node_descriptor nd2;
2145 	struct node_descriptor nd3;
2146 };
2147 
2148 static void qeth_read_conf_data_cb(struct qeth_card *card,
2149 				   struct qeth_cmd_buffer *iob,
2150 				   unsigned int data_length)
2151 {
2152 	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
2153 	int rc = 0;
2154 	u8 *tag;
2155 
2156 	QETH_CARD_TEXT(card, 2, "cfgunit");
2157 
2158 	if (data_length < sizeof(*nd)) {
2159 		rc = -EINVAL;
2160 		goto out;
2161 	}
2162 
2163 	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
2164 			       nd->nd1.plant[1] == _ascebc['M'];
2165 	tag = (u8 *)&nd->nd1.tag;
2166 	card->info.chpid = tag[0];
2167 	card->info.unit_addr2 = tag[1];
2168 
2169 	tag = (u8 *)&nd->nd2.tag;
2170 	card->info.cula = tag[1];
2171 
2172 	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
2173 				 nd->nd3.model[1] == 0xF0 &&
2174 				 nd->nd3.model[2] >= 0xF1 &&
2175 				 nd->nd3.model[2] <= 0xF4;
2176 
2177 out:
2178 	qeth_notify_cmd(iob, rc);
2179 	qeth_put_cmd(iob);
2180 }
2181 
2182 static int qeth_read_conf_data(struct qeth_card *card)
2183 {
2184 	struct qeth_channel *channel = &card->data;
2185 	struct qeth_cmd_buffer *iob;
2186 	struct ciw *ciw;
2187 
2188 	/* scan for RCD command in extended SenseID data */
2189 	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
2190 	if (!ciw || ciw->cmd == 0)
2191 		return -EOPNOTSUPP;
2192 	if (ciw->count < sizeof(struct qeth_node_desc))
2193 		return -EINVAL;
2194 
2195 	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
2196 	if (!iob)
2197 		return -ENOMEM;
2198 
2199 	iob->callback = qeth_read_conf_data_cb;
2200 	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
2201 		       iob->data);
2202 
2203 	return qeth_send_control_data(card, iob, NULL, NULL);
2204 }
2205 
2206 static int qeth_idx_check_activate_response(struct qeth_card *card,
2207 					    struct qeth_channel *channel,
2208 					    struct qeth_cmd_buffer *iob)
2209 {
2210 	int rc;
2211 
2212 	rc = qeth_check_idx_response(card, iob->data);
2213 	if (rc)
2214 		return rc;
2215 
2216 	if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
2217 		return 0;
2218 
2219 	/* negative reply: */
2220 	QETH_CARD_TEXT_(card, 2, "idxneg%c",
2221 			QETH_IDX_ACT_CAUSE_CODE(iob->data));
2222 
2223 	switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
2224 	case QETH_IDX_ACT_ERR_EXCL:
2225 		dev_err(&channel->ccwdev->dev,
2226 			"The adapter is used exclusively by another host\n");
2227 		return -EBUSY;
2228 	case QETH_IDX_ACT_ERR_AUTH:
2229 	case QETH_IDX_ACT_ERR_AUTH_USER:
2230 		dev_err(&channel->ccwdev->dev,
2231 			"Setting the device online failed because of insufficient authorization\n");
2232 		return -EPERM;
2233 	default:
2234 		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
2235 				 CCW_DEVID(channel->ccwdev));
2236 		return -EIO;
2237 	}
2238 }
2239 
2240 static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
2241 					      struct qeth_cmd_buffer *iob,
2242 					      unsigned int data_length)
2243 {
2244 	struct qeth_channel *channel = iob->channel;
2245 	u16 peer_level;
2246 	int rc;
2247 
2248 	QETH_CARD_TEXT(card, 2, "idxrdcb");
2249 
2250 	rc = qeth_idx_check_activate_response(card, channel, iob);
2251 	if (rc)
2252 		goto out;
2253 
2254 	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
2255 	if (peer_level != qeth_peer_func_level(card->info.func_level)) {
2256 		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
2257 				 CCW_DEVID(channel->ccwdev),
2258 				 card->info.func_level, peer_level);
2259 		rc = -EINVAL;
2260 		goto out;
2261 	}
2262 
2263 	memcpy(&card->token.issuer_rm_r,
2264 	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
2265 	       QETH_MPC_TOKEN_LENGTH);
2266 	memcpy(&card->info.mcl_level[0],
2267 	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
2268 
2269 out:
2270 	qeth_notify_cmd(iob, rc);
2271 	qeth_put_cmd(iob);
2272 }
2273 
2274 static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
2275 					       struct qeth_cmd_buffer *iob,
2276 					       unsigned int data_length)
2277 {
2278 	struct qeth_channel *channel = iob->channel;
2279 	u16 peer_level;
2280 	int rc;
2281 
2282 	QETH_CARD_TEXT(card, 2, "idxwrcb");
2283 
2284 	rc = qeth_idx_check_activate_response(card, channel, iob);
2285 	if (rc)
2286 		goto out;
2287 
2288 	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
2289 	if ((peer_level & ~0x0100) !=
2290 	    qeth_peer_func_level(card->info.func_level)) {
2291 		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
2292 				 CCW_DEVID(channel->ccwdev),
2293 				 card->info.func_level, peer_level);
2294 		rc = -EINVAL;
2295 	}
2296 
2297 out:
2298 	qeth_notify_cmd(iob, rc);
2299 	qeth_put_cmd(iob);
2300 }
2301 
2302 static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
2303 					struct qeth_cmd_buffer *iob)
2304 {
2305 	u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
2306 	u8 port = ((u8)card->dev->dev_port) | 0x80;
2307 	struct ccw1 *ccw = __ccw_from_cmd(iob);
2308 
2309 	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
2310 		       iob->data);
2311 	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
2312 	iob->finalize = qeth_idx_finalize_cmd;
2313 
2314 	port |= QETH_IDX_ACT_INVAL_FRAME;
2315 	memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
2316 	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
2317 	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
2318 	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
2319 	       &card->info.func_level, 2);
2320 	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
2321 	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
2322 }
2323 
2324 static int qeth_idx_activate_read_channel(struct qeth_card *card)
2325 {
2326 	struct qeth_channel *channel = &card->read;
2327 	struct qeth_cmd_buffer *iob;
2328 	int rc;
2329 
2330 	QETH_CARD_TEXT(card, 2, "idxread");
2331 
2332 	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2333 	if (!iob)
2334 		return -ENOMEM;
2335 
2336 	memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
2337 	qeth_idx_setup_activate_cmd(card, iob);
2338 	iob->callback = qeth_idx_activate_read_channel_cb;
2339 
2340 	rc = qeth_send_control_data(card, iob, NULL, NULL);
2341 	if (rc)
2342 		return rc;
2343 
2344 	channel->state = CH_STATE_UP;
2345 	return 0;
2346 }
2347 
2348 static int qeth_idx_activate_write_channel(struct qeth_card *card)
2349 {
2350 	struct qeth_channel *channel = &card->write;
2351 	struct qeth_cmd_buffer *iob;
2352 	int rc;
2353 
2354 	QETH_CARD_TEXT(card, 2, "idxwrite");
2355 
2356 	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2357 	if (!iob)
2358 		return -ENOMEM;
2359 
2360 	memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
2361 	qeth_idx_setup_activate_cmd(card, iob);
2362 	iob->callback = qeth_idx_activate_write_channel_cb;
2363 
2364 	rc = qeth_send_control_data(card, iob, NULL, NULL);
2365 	if (rc)
2366 		return rc;
2367 
2368 	channel->state = CH_STATE_UP;
2369 	return 0;
2370 }
2371 
2372 static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2373 		unsigned long data)
2374 {
2375 	struct qeth_cmd_buffer *iob;
2376 
2377 	QETH_CARD_TEXT(card, 2, "cmenblcb");
2378 
2379 	iob = (struct qeth_cmd_buffer *) data;
2380 	memcpy(&card->token.cm_filter_r,
2381 	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
2382 	       QETH_MPC_TOKEN_LENGTH);
2383 	return 0;
2384 }
2385 
2386 static int qeth_cm_enable(struct qeth_card *card)
2387 {
2388 	struct qeth_cmd_buffer *iob;
2389 
2390 	QETH_CARD_TEXT(card, 2, "cmenable");
2391 
2392 	iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
2393 	if (!iob)
2394 		return -ENOMEM;
2395 
2396 	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
2397 	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2398 	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
2399 	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
2400 
2401 	return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
2402 }
2403 
2404 static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2405 		unsigned long data)
2406 {
2407 	struct qeth_cmd_buffer *iob;
2408 
2409 	QETH_CARD_TEXT(card, 2, "cmsetpcb");
2410 
2411 	iob = (struct qeth_cmd_buffer *) data;
2412 	memcpy(&card->token.cm_connection_r,
2413 	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
2414 	       QETH_MPC_TOKEN_LENGTH);
2415 	return 0;
2416 }
2417 
2418 static int qeth_cm_setup(struct qeth_card *card)
2419 {
2420 	struct qeth_cmd_buffer *iob;
2421 
2422 	QETH_CARD_TEXT(card, 2, "cmsetup");
2423 
2424 	iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
2425 	if (!iob)
2426 		return -ENOMEM;
2427 
2428 	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
2429 	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2430 	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
2431 	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
2432 	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
2433 	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
2434 	return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
2435 }
2436 
2437 static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type)
2438 {
2439 	if (link_type == QETH_LINK_TYPE_LANE_TR ||
2440 	    link_type == QETH_LINK_TYPE_HSTR) {
2441 		dev_err(&card->gdev->dev, "Unsupported Token Ring device\n");
2442 		return false;
2443 	}
2444 
2445 	return true;
2446 }
2447 
2448 static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
2449 {
2450 	struct net_device *dev = card->dev;
2451 	unsigned int new_mtu;
2452 
2453 	if (!max_mtu) {
2454 		/* IQD needs accurate max MTU to set up its RX buffers: */
2455 		if (IS_IQD(card))
2456 			return -EINVAL;
2457 		/* tolerate quirky HW: */
2458 		max_mtu = ETH_MAX_MTU;
2459 	}
2460 
2461 	rtnl_lock();
2462 	if (IS_IQD(card)) {
2463 		/* move any device with default MTU to new max MTU: */
2464 		new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;
2465 
2466 		/* adjust RX buffer size to new max MTU: */
2467 		card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
2468 		if (dev->max_mtu && dev->max_mtu != max_mtu)
2469 			qeth_free_qdio_queues(card);
2470 	} else {
2471 		if (dev->mtu)
2472 			new_mtu = dev->mtu;
2473 		/* default MTUs for first setup: */
2474 		else if (IS_LAYER2(card))
2475 			new_mtu = ETH_DATA_LEN;
2476 		else
2477 			new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
2478 	}
2479 
2480 	dev->max_mtu = max_mtu;
2481 	dev->mtu = min(new_mtu, max_mtu);
2482 	rtnl_unlock();
2483 	return 0;
2484 }
2485 
2486 static int qeth_get_mtu_outof_framesize(int framesize)
2487 {
2488 	switch (framesize) {
2489 	case 0x4000:
2490 		return 8192;
2491 	case 0x6000:
2492 		return 16384;
2493 	case 0xa000:
2494 		return 32768;
2495 	case 0xffff:
2496 		return 57344;
2497 	default:
2498 		return 0;
2499 	}
2500 }
2501 
2502 static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2503 		unsigned long data)
2504 {
2505 	__u16 mtu, framesize;
2506 	__u16 len;
2507 	struct qeth_cmd_buffer *iob;
2508 	u8 link_type = 0;
2509 
2510 	QETH_CARD_TEXT(card, 2, "ulpenacb");
2511 
2512 	iob = (struct qeth_cmd_buffer *) data;
2513 	memcpy(&card->token.ulp_filter_r,
2514 	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
2515 	       QETH_MPC_TOKEN_LENGTH);
2516 	if (IS_IQD(card)) {
2517 		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
2518 		mtu = qeth_get_mtu_outof_framesize(framesize);
2519 	} else {
2520 		mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
2521 	}
2522 	*(u16 *)reply->param = mtu;
2523 
2524 	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
2525 	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
2526 		memcpy(&link_type,
2527 		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
2528 		if (!qeth_is_supported_link_type(card, link_type))
2529 			return -EPROTONOSUPPORT;
2530 	}
2531 
2532 	card->info.link_type = link_type;
2533 	QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
2534 	return 0;
2535 }
2536 
2537 static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
2538 {
2539 	if (IS_OSN(card))
2540 		return QETH_PROT_OSN2;
2541 	return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
2542 }
2543 
2544 static int qeth_ulp_enable(struct qeth_card *card)
2545 {
2546 	u8 prot_type = qeth_mpc_select_prot_type(card);
2547 	struct qeth_cmd_buffer *iob;
2548 	u16 max_mtu;
2549 	int rc;
2550 
2551 	QETH_CARD_TEXT(card, 2, "ulpenabl");
2552 
2553 	iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
2554 	if (!iob)
2555 		return -ENOMEM;
2556 
2557 	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
2558 	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
2559 	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
2560 	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2561 	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
2562 	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2563 	rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
2564 	if (rc)
2565 		return rc;
2566 	return qeth_update_max_mtu(card, max_mtu);
2567 }
2568 
2569 static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2570 		unsigned long data)
2571 {
2572 	struct qeth_cmd_buffer *iob;
2573 
2574 	QETH_CARD_TEXT(card, 2, "ulpstpcb");
2575 
2576 	iob = (struct qeth_cmd_buffer *) data;
2577 	memcpy(&card->token.ulp_connection_r,
2578 	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2579 	       QETH_MPC_TOKEN_LENGTH);
2580 	if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2581 		     3)) {
2582 		QETH_CARD_TEXT(card, 2, "olmlimit");
2583 		dev_err(&card->gdev->dev, "A connection could not be "
2584 			"established because of an OLM limit\n");
2585 		return -EMLINK;
2586 	}
2587 	return 0;
2588 }
2589 
2590 static int qeth_ulp_setup(struct qeth_card *card)
2591 {
2592 	__u16 temp;
2593 	struct qeth_cmd_buffer *iob;
2594 
2595 	QETH_CARD_TEXT(card, 2, "ulpsetup");
2596 
2597 	iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
2598 	if (!iob)
2599 		return -ENOMEM;
2600 
2601 	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2602 	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2603 	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2604 	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2605 	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2606 	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
2607 
2608 	memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
2609 	temp = (card->info.cula << 8) + card->info.unit_addr2;
2610 	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2611 	return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
2612 }
2613 
2614 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
2615 {
2616 	struct qeth_qdio_out_buffer *newbuf;
2617 
2618 	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
2619 	if (!newbuf)
2620 		return -ENOMEM;
2621 
2622 	newbuf->buffer = q->qdio_bufs[bidx];
2623 	skb_queue_head_init(&newbuf->skb_list);
2624 	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
2625 	newbuf->q = q;
2626 	newbuf->next_pending = q->bufs[bidx];
2627 	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
2628 	q->bufs[bidx] = newbuf;
2629 	return 0;
2630 }
2631 
2632 static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
2633 {
2634 	if (!q)
2635 		return;
2636 
2637 	qeth_drain_output_queue(q, true);
2638 	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2639 	kfree(q);
2640 }
2641 
2642 static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
2643 {
2644 	struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
2645 
2646 	if (!q)
2647 		return NULL;
2648 
2649 	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
2650 		kfree(q);
2651 		return NULL;
2652 	}
2653 	return q;
2654 }
2655 
2656 static void qeth_tx_completion_timer(struct timer_list *timer)
2657 {
2658 	struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);
2659 
2660 	napi_schedule(&queue->napi);
2661 	QETH_TXQ_STAT_INC(queue, completion_timer);
2662 }
2663 
2664 static int qeth_alloc_qdio_queues(struct qeth_card *card)
2665 {
2666 	int i, j;
2667 
2668 	QETH_CARD_TEXT(card, 2, "allcqdbf");
2669 
2670 	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
2671 		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
2672 		return 0;
2673 
2674 	QETH_CARD_TEXT(card, 2, "inq");
2675 	card->qdio.in_q = qeth_alloc_qdio_queue();
2676 	if (!card->qdio.in_q)
2677 		goto out_nomem;
2678 
2679 	/* inbound buffer pool */
2680 	if (qeth_alloc_buffer_pool(card))
2681 		goto out_freeinq;
2682 
2683 	/* outbound */
2684 	for (i = 0; i < card->qdio.no_out_queues; ++i) {
2685 		struct qeth_qdio_out_q *queue;
2686 
2687 		queue = qeth_alloc_output_queue();
2688 		if (!queue)
2689 			goto out_freeoutq;
2690 		QETH_CARD_TEXT_(card, 2, "outq %i", i);
2691 		QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
2692 		card->qdio.out_qs[i] = queue;
2693 		queue->card = card;
2694 		queue->queue_no = i;
2695 		spin_lock_init(&queue->lock);
2696 		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
2697 		queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
2698 		queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
2699 
2700 		/* give outbound qeth_qdio_buffers their qdio_buffers */
2701 		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2702 			WARN_ON(queue->bufs[j]);
2703 			if (qeth_init_qdio_out_buf(queue, j))
2704 				goto out_freeoutqbufs;
2705 		}
2706 	}
2707 
2708 	/* completion */
2709 	if (qeth_alloc_cq(card))
2710 		goto out_freeoutq;
2711 
2712 	return 0;
2713 
2714 out_freeoutqbufs:
2715 	while (j > 0) {
2716 		--j;
2717 		kmem_cache_free(qeth_qdio_outbuf_cache,
2718 				card->qdio.out_qs[i]->bufs[j]);
2719 		card->qdio.out_qs[i]->bufs[j] = NULL;
2720 	}
2721 out_freeoutq:
2722 	while (i > 0) {
2723 		qeth_free_output_queue(card->qdio.out_qs[--i]);
2724 		card->qdio.out_qs[i] = NULL;
2725 	}
2726 	qeth_free_buffer_pool(card);
2727 out_freeinq:
2728 	qeth_free_qdio_queue(card->qdio.in_q);
2729 	card->qdio.in_q = NULL;
2730 out_nomem:
2731 	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
2732 	return -ENOMEM;
2733 }
2734 
2735 static void qeth_free_qdio_queues(struct qeth_card *card)
2736 {
2737 	int i, j;
2738 
2739 	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
2740 		QETH_QDIO_UNINITIALIZED)
2741 		return;
2742 
2743 	qeth_free_cq(card);
2744 	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2745 		if (card->qdio.in_q->bufs[j].rx_skb)
2746 			dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
2747 	}
2748 	qeth_free_qdio_queue(card->qdio.in_q);
2749 	card->qdio.in_q = NULL;
2750 	/* inbound buffer pool */
2751 	qeth_free_buffer_pool(card);
2752 	/* free outbound qdio_qs */
2753 	for (i = 0; i < card->qdio.no_out_queues; i++) {
2754 		qeth_free_output_queue(card->qdio.out_qs[i]);
2755 		card->qdio.out_qs[i] = NULL;
2756 	}
2757 }
2758 
2759 static void qeth_create_qib_param_field(struct qeth_card *card,
2760 		char *param_field)
2761 {
2762 
2763 	param_field[0] = _ascebc['P'];
2764 	param_field[1] = _ascebc['C'];
2765 	param_field[2] = _ascebc['I'];
2766 	param_field[3] = _ascebc['T'];
2767 	*((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
2768 	*((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
2769 	*((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
2770 }
2771 
2772 static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
2773 		char *param_field)
2774 {
2775 	param_field[16] = _ascebc['B'];
2776 	param_field[17] = _ascebc['L'];
2777 	param_field[18] = _ascebc['K'];
2778 	param_field[19] = _ascebc['T'];
2779 	*((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
2780 	*((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
2781 	*((unsigned int *) (&param_field[28])) =
2782 		card->info.blkt.inter_packet_jumbo;
2783 }
2784 
2785 static int qeth_qdio_activate(struct qeth_card *card)
2786 {
2787 	QETH_CARD_TEXT(card, 3, "qdioact");
2788 	return qdio_activate(CARD_DDEV(card));
2789 }
2790 
2791 static int qeth_dm_act(struct qeth_card *card)
2792 {
2793 	struct qeth_cmd_buffer *iob;
2794 
2795 	QETH_CARD_TEXT(card, 2, "dmact");
2796 
2797 	iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
2798 	if (!iob)
2799 		return -ENOMEM;
2800 
2801 	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
2802 	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2803 	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
2804 	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2805 	return qeth_send_control_data(card, iob, NULL, NULL);
2806 }
2807 
2808 static int qeth_mpc_initialize(struct qeth_card *card)
2809 {
2810 	int rc;
2811 
2812 	QETH_CARD_TEXT(card, 2, "mpcinit");
2813 
2814 	rc = qeth_issue_next_read(card);
2815 	if (rc) {
2816 		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
2817 		return rc;
2818 	}
2819 	rc = qeth_cm_enable(card);
2820 	if (rc) {
2821 		QETH_CARD_TEXT_(card, 2, "2err%d", rc);
2822 		return rc;
2823 	}
2824 	rc = qeth_cm_setup(card);
2825 	if (rc) {
2826 		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
2827 		return rc;
2828 	}
2829 	rc = qeth_ulp_enable(card);
2830 	if (rc) {
2831 		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
2832 		return rc;
2833 	}
2834 	rc = qeth_ulp_setup(card);
2835 	if (rc) {
2836 		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2837 		return rc;
2838 	}
2839 	rc = qeth_alloc_qdio_queues(card);
2840 	if (rc) {
2841 		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2842 		return rc;
2843 	}
2844 	rc = qeth_qdio_establish(card);
2845 	if (rc) {
2846 		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
2847 		qeth_free_qdio_queues(card);
2848 		return rc;
2849 	}
2850 	rc = qeth_qdio_activate(card);
2851 	if (rc) {
2852 		QETH_CARD_TEXT_(card, 2, "7err%d", rc);
2853 		return rc;
2854 	}
2855 	rc = qeth_dm_act(card);
2856 	if (rc) {
2857 		QETH_CARD_TEXT_(card, 2, "8err%d", rc);
2858 		return rc;
2859 	}
2860 
2861 	return 0;
2862 }
2863 
2864 static void qeth_print_status_message(struct qeth_card *card)
2865 {
2866 	switch (card->info.type) {
2867 	case QETH_CARD_TYPE_OSD:
2868 	case QETH_CARD_TYPE_OSM:
2869 	case QETH_CARD_TYPE_OSX:
2870 		/* VM will use a non-zero first character
2871 		 * to indicate a HiperSockets like reporting
2872 		 * of the level OSA sets the first character to zero
2873 		 * */
2874 		if (!card->info.mcl_level[0]) {
2875 			sprintf(card->info.mcl_level, "%02x%02x",
2876 				card->info.mcl_level[2],
2877 				card->info.mcl_level[3]);
2878 			break;
2879 		}
2880 		fallthrough;
2881 	case QETH_CARD_TYPE_IQD:
2882 		if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
2883 			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
2884 				card->info.mcl_level[0]];
2885 			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
2886 				card->info.mcl_level[1]];
2887 			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
2888 				card->info.mcl_level[2]];
2889 			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
2890 				card->info.mcl_level[3]];
2891 			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
2892 		}
2893 		break;
2894 	default:
2895 		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
2896 	}
2897 	dev_info(&card->gdev->dev,
2898 		 "Device is a%s card%s%s%s\nwith link type %s.\n",
2899 		 qeth_get_cardname(card),
2900 		 (card->info.mcl_level[0]) ? " (level: " : "",
2901 		 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2902 		 (card->info.mcl_level[0]) ? ")" : "",
2903 		 qeth_get_cardname_short(card));
2904 }
2905 
2906 static void qeth_initialize_working_pool_list(struct qeth_card *card)
2907 {
2908 	struct qeth_buffer_pool_entry *entry;
2909 
2910 	QETH_CARD_TEXT(card, 5, "inwrklst");
2911 
2912 	list_for_each_entry(entry,
2913 			    &card->qdio.init_pool.entry_list, init_list) {
2914 		qeth_put_buffer_pool_entry(card, entry);
2915 	}
2916 }
2917 
2918 static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
2919 					struct qeth_card *card)
2920 {
2921 	struct qeth_buffer_pool_entry *entry;
2922 	int i, free;
2923 
2924 	if (list_empty(&card->qdio.in_buf_pool.entry_list))
2925 		return NULL;
2926 
2927 	list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
2928 		free = 1;
2929 		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2930 			if (page_count(entry->elements[i]) > 1) {
2931 				free = 0;
2932 				break;
2933 			}
2934 		}
2935 		if (free) {
2936 			list_del_init(&entry->list);
2937 			return entry;
2938 		}
2939 	}
2940 
2941 	/* no free buffer in pool so take first one and swap pages */
2942 	entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
2943 				 struct qeth_buffer_pool_entry, list);
2944 	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2945 		if (page_count(entry->elements[i]) > 1) {
2946 			struct page *page = dev_alloc_page();
2947 
2948 			if (!page)
2949 				return NULL;
2950 
2951 			__free_page(entry->elements[i]);
2952 			entry->elements[i] = page;
2953 			QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
2954 		}
2955 	}
2956 	list_del_init(&entry->list);
2957 	return entry;
2958 }
2959 
2960 static int qeth_init_input_buffer(struct qeth_card *card,
2961 		struct qeth_qdio_buffer *buf)
2962 {
2963 	struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
2964 	int i;
2965 
2966 	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
2967 		buf->rx_skb = netdev_alloc_skb(card->dev,
2968 					       ETH_HLEN +
2969 					       sizeof(struct ipv6hdr));
2970 		if (!buf->rx_skb)
2971 			return -ENOMEM;
2972 	}
2973 
2974 	if (!pool_entry) {
2975 		pool_entry = qeth_find_free_buffer_pool_entry(card);
2976 		if (!pool_entry)
2977 			return -ENOBUFS;
2978 
2979 		buf->pool_entry = pool_entry;
2980 	}
2981 
2982 	/*
2983 	 * since the buffer is accessed only from the input_tasklet
2984 	 * there shouldn't be a need to synchronize; also, since we use
2985 	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run  out off
2986 	 * buffers
2987 	 */
2988 	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2989 		buf->buffer->element[i].length = PAGE_SIZE;
2990 		buf->buffer->element[i].addr =
2991 			page_to_phys(pool_entry->elements[i]);
2992 		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2993 			buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
2994 		else
2995 			buf->buffer->element[i].eflags = 0;
2996 		buf->buffer->element[i].sflags = 0;
2997 	}
2998 	return 0;
2999 }
3000 
3001 static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
3002 					    struct qeth_qdio_out_q *queue)
3003 {
3004 	if (!IS_IQD(card) ||
3005 	    qeth_iqd_is_mcast_queue(card, queue) ||
3006 	    card->options.cq == QETH_CQ_ENABLED ||
3007 	    qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
3008 		return 1;
3009 
3010 	return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
3011 }
3012 
3013 static int qeth_init_qdio_queues(struct qeth_card *card)
3014 {
3015 	unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count;
3016 	unsigned int i;
3017 	int rc;
3018 
3019 	QETH_CARD_TEXT(card, 2, "initqdqs");
3020 
3021 	/* inbound queue */
3022 	qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
3023 	memset(&card->rx, 0, sizeof(struct qeth_rx));
3024 
3025 	qeth_initialize_working_pool_list(card);
3026 	/*give only as many buffers to hardware as we have buffer pool entries*/
3027 	for (i = 0; i < rx_bufs; i++) {
3028 		rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
3029 		if (rc)
3030 			return rc;
3031 	}
3032 
3033 	card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
3034 	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs);
3035 	if (rc) {
3036 		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
3037 		return rc;
3038 	}
3039 
3040 	/* completion */
3041 	rc = qeth_cq_init(card);
3042 	if (rc) {
3043 		return rc;
3044 	}
3045 
3046 	/* outbound queue */
3047 	for (i = 0; i < card->qdio.no_out_queues; ++i) {
3048 		struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];
3049 
3050 		qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
3051 		queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
3052 		queue->next_buf_to_fill = 0;
3053 		queue->do_pack = 0;
3054 		queue->prev_hdr = NULL;
3055 		queue->coalesced_frames = 0;
3056 		queue->bulk_start = 0;
3057 		queue->bulk_count = 0;
3058 		queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
3059 		atomic_set(&queue->used_buffers, 0);
3060 		atomic_set(&queue->set_pci_flags_count, 0);
3061 		netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
3062 	}
3063 	return 0;
3064 }
3065 
3066 static void qeth_ipa_finalize_cmd(struct qeth_card *card,
3067 				  struct qeth_cmd_buffer *iob)
3068 {
3069 	qeth_mpc_finalize_cmd(card, iob);
3070 
3071 	/* override with IPA-specific values: */
3072 	__ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
3073 }
3074 
3075 void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
3076 			  u16 cmd_length,
3077 			  bool (*match)(struct qeth_cmd_buffer *iob,
3078 					struct qeth_cmd_buffer *reply))
3079 {
3080 	u8 prot_type = qeth_mpc_select_prot_type(card);
3081 	u16 total_length = iob->length;
3082 
3083 	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
3084 		       iob->data);
3085 	iob->finalize = qeth_ipa_finalize_cmd;
3086 	iob->match = match;
3087 
3088 	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
3089 	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
3090 	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
3091 	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
3092 	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
3093 	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
3094 	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
3095 	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
3096 }
3097 EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
3098 
3099 static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
3100 				 struct qeth_cmd_buffer *reply)
3101 {
3102 	struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);
3103 
3104 	return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
3105 }
3106 
3107 struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
3108 					   enum qeth_ipa_cmds cmd_code,
3109 					   enum qeth_prot_versions prot,
3110 					   unsigned int data_length)
3111 {
3112 	struct qeth_cmd_buffer *iob;
3113 	struct qeth_ipacmd_hdr *hdr;
3114 
3115 	data_length += offsetof(struct qeth_ipa_cmd, data);
3116 	iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
3117 			     QETH_IPA_TIMEOUT);
3118 	if (!iob)
3119 		return NULL;
3120 
3121 	qeth_prepare_ipa_cmd(card, iob, data_length, qeth_ipa_match_reply);
3122 
3123 	hdr = &__ipa_cmd(iob)->hdr;
3124 	hdr->command = cmd_code;
3125 	hdr->initiator = IPA_CMD_INITIATOR_HOST;
3126 	/* hdr->seqno is set by qeth_send_control_data() */
3127 	hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH;
3128 	hdr->rel_adapter_no = (u8) card->dev->dev_port;
3129 	hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
3130 	hdr->param_count = 1;
3131 	hdr->prot_version = prot;
3132 	return iob;
3133 }
3134 EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);
3135 
3136 static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
3137 				struct qeth_reply *reply, unsigned long data)
3138 {
3139 	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3140 
3141 	return (cmd->hdr.return_code) ? -EIO : 0;
3142 }
3143 
3144 /**
3145  * qeth_send_ipa_cmd() - send an IPA command
3146  *
3147  * See qeth_send_control_data() for explanation of the arguments.
3148  */
3149 
3150 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
3151 		int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
3152 			unsigned long),
3153 		void *reply_param)
3154 {
3155 	int rc;
3156 
3157 	QETH_CARD_TEXT(card, 4, "sendipa");
3158 
3159 	if (card->read_or_write_problem) {
3160 		qeth_put_cmd(iob);
3161 		return -EIO;
3162 	}
3163 
3164 	if (reply_cb == NULL)
3165 		reply_cb = qeth_send_ipa_cmd_cb;
3166 	rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
3167 	if (rc == -ETIME) {
3168 		qeth_clear_ipacmd_list(card);
3169 		qeth_schedule_recovery(card);
3170 	}
3171 	return rc;
3172 }
3173 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
3174 
3175 static int qeth_send_startlan_cb(struct qeth_card *card,
3176 				 struct qeth_reply *reply, unsigned long data)
3177 {
3178 	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3179 
3180 	if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
3181 		return -ENETDOWN;
3182 
3183 	return (cmd->hdr.return_code) ? -EIO : 0;
3184 }
3185 
3186 static int qeth_send_startlan(struct qeth_card *card)
3187 {
3188 	struct qeth_cmd_buffer *iob;
3189 
3190 	QETH_CARD_TEXT(card, 2, "strtlan");
3191 
3192 	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
3193 	if (!iob)
3194 		return -ENOMEM;
3195 	return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
3196 }
3197 
3198 static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
3199 {
3200 	if (!cmd->hdr.return_code)
3201 		cmd->hdr.return_code =
3202 			cmd->data.setadapterparms.hdr.return_code;
3203 	return cmd->hdr.return_code;
3204 }
3205 
3206 static int qeth_query_setadapterparms_cb(struct qeth_card *card,
3207 		struct qeth_reply *reply, unsigned long data)
3208 {
3209 	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3210 	struct qeth_query_cmds_supp *query_cmd;
3211 
3212 	QETH_CARD_TEXT(card, 3, "quyadpcb");
3213 	if (qeth_setadpparms_inspect_rc(cmd))
3214 		return -EIO;
3215 
3216 	query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp;
3217 	if (query_cmd->lan_type & 0x7f) {
3218 		if (!qeth_is_supported_link_type(card, query_cmd->lan_type))
3219 			return -EPROTONOSUPPORT;
3220 
3221 		card->info.link_type = query_cmd->lan_type;
3222 		QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
3223 	}
3224 
3225 	card->options.adp.supported = query_cmd->supported_cmds;
3226 	return 0;
3227 }
3228 
3229 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
3230 						    enum qeth_ipa_setadp_cmd adp_cmd,
3231 						    unsigned int data_length)
3232 {
3233 	struct qeth_ipacmd_setadpparms_hdr *hdr;
3234 	struct qeth_cmd_buffer *iob;
3235 
3236 	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
3237 				 data_length +
3238 				 offsetof(struct qeth_ipacmd_setadpparms,
3239 					  data));
3240 	if (!iob)
3241 		return NULL;
3242 
3243 	hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
3244 	hdr->cmdlength = sizeof(*hdr) + data_length;
3245 	hdr->command_code = adp_cmd;
3246 	hdr->used_total = 1;
3247 	hdr->seq_no = 1;
3248 	return iob;
3249 }
3250 
3251 static int qeth_query_setadapterparms(struct qeth_card *card)
3252 {
3253 	int rc;
3254 	struct qeth_cmd_buffer *iob;
3255 
3256 	QETH_CARD_TEXT(card, 3, "queryadp");
3257 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
3258 				   SETADP_DATA_SIZEOF(query_cmds_supp));
3259 	if (!iob)
3260 		return -ENOMEM;
3261 	rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
3262 	return rc;
3263 }
3264 
3265 static int qeth_query_ipassists_cb(struct qeth_card *card,
3266 		struct qeth_reply *reply, unsigned long data)
3267 {
3268 	struct qeth_ipa_cmd *cmd;
3269 
3270 	QETH_CARD_TEXT(card, 2, "qipasscb");
3271 
3272 	cmd = (struct qeth_ipa_cmd *) data;
3273 
3274 	switch (cmd->hdr.return_code) {
3275 	case IPA_RC_SUCCESS:
3276 		break;
3277 	case IPA_RC_NOTSUPP:
3278 	case IPA_RC_L2_UNSUPPORTED_CMD:
3279 		QETH_CARD_TEXT(card, 2, "ipaunsup");
3280 		card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
3281 		card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
3282 		return -EOPNOTSUPP;
3283 	default:
3284 		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
3285 				 CARD_DEVID(card), cmd->hdr.return_code);
3286 		return -EIO;
3287 	}
3288 
3289 	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
3290 		card->options.ipa4 = cmd->hdr.assists;
3291 	else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
3292 		card->options.ipa6 = cmd->hdr.assists;
3293 	else
3294 		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
3295 				 CARD_DEVID(card));
3296 	return 0;
3297 }
3298 
3299 static int qeth_query_ipassists(struct qeth_card *card,
3300 				enum qeth_prot_versions prot)
3301 {
3302 	int rc;
3303 	struct qeth_cmd_buffer *iob;
3304 
3305 	QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
3306 	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
3307 	if (!iob)
3308 		return -ENOMEM;
3309 	rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
3310 	return rc;
3311 }
3312 
3313 static int qeth_query_switch_attributes_cb(struct qeth_card *card,
3314 				struct qeth_reply *reply, unsigned long data)
3315 {
3316 	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3317 	struct qeth_query_switch_attributes *attrs;
3318 	struct qeth_switch_info *sw_info;
3319 
3320 	QETH_CARD_TEXT(card, 2, "qswiatcb");
3321 	if (qeth_setadpparms_inspect_rc(cmd))
3322 		return -EIO;
3323 
3324 	sw_info = (struct qeth_switch_info *)reply->param;
3325 	attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
3326 	sw_info->capabilities = attrs->capabilities;
3327 	sw_info->settings = attrs->settings;
3328 	QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
3329 			sw_info->settings);
3330 	return 0;
3331 }
3332 
3333 int qeth_query_switch_attributes(struct qeth_card *card,
3334 				 struct qeth_switch_info *sw_info)
3335 {
3336 	struct qeth_cmd_buffer *iob;
3337 
3338 	QETH_CARD_TEXT(card, 2, "qswiattr");
3339 	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
3340 		return -EOPNOTSUPP;
3341 	if (!netif_carrier_ok(card->dev))
3342 		return -ENOMEDIUM;
3343 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
3344 	if (!iob)
3345 		return -ENOMEM;
3346 	return qeth_send_ipa_cmd(card, iob,
3347 				qeth_query_switch_attributes_cb, sw_info);
3348 }
3349 
3350 struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
3351 					  enum qeth_diags_cmds sub_cmd,
3352 					  unsigned int data_length)
3353 {
3354 	struct qeth_ipacmd_diagass *cmd;
3355 	struct qeth_cmd_buffer *iob;
3356 
3357 	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
3358 				 DIAG_HDR_LEN + data_length);
3359 	if (!iob)
3360 		return NULL;
3361 
3362 	cmd = &__ipa_cmd(iob)->data.diagass;
3363 	cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
3364 	cmd->subcmd = sub_cmd;
3365 	return iob;
3366 }
3367 EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);
3368 
3369 static int qeth_query_setdiagass_cb(struct qeth_card *card,
3370 		struct qeth_reply *reply, unsigned long data)
3371 {
3372 	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3373 	u16 rc = cmd->hdr.return_code;
3374 
3375 	if (rc) {
3376 		QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
3377 		return -EIO;
3378 	}
3379 
3380 	card->info.diagass_support = cmd->data.diagass.ext;
3381 	return 0;
3382 }
3383 
3384 static int qeth_query_setdiagass(struct qeth_card *card)
3385 {
3386 	struct qeth_cmd_buffer *iob;
3387 
3388 	QETH_CARD_TEXT(card, 2, "qdiagass");
3389 	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
3390 	if (!iob)
3391 		return -ENOMEM;
3392 	return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
3393 }
3394 
3395 static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
3396 {
3397 	unsigned long info = get_zeroed_page(GFP_KERNEL);
3398 	struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
3399 	struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
3400 	struct ccw_dev_id ccwid;
3401 	int level;
3402 
3403 	tid->chpid = card->info.chpid;
3404 	ccw_device_get_id(CARD_RDEV(card), &ccwid);
3405 	tid->ssid = ccwid.ssid;
3406 	tid->devno = ccwid.devno;
3407 	if (!info)
3408 		return;
3409 	level = stsi(NULL, 0, 0, 0);
3410 	if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
3411 		tid->lparnr = info222->lpar_number;
3412 	if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
3413 		EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
3414 		memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
3415 	}
3416 	free_page(info);
3417 	return;
3418 }
3419 
3420 static int qeth_hw_trap_cb(struct qeth_card *card,
3421 		struct qeth_reply *reply, unsigned long data)
3422 {
3423 	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3424 	u16 rc = cmd->hdr.return_code;
3425 
3426 	if (rc) {
3427 		QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
3428 		return -EIO;
3429 	}
3430 	return 0;
3431 }
3432 
3433 int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
3434 {
3435 	struct qeth_cmd_buffer *iob;
3436 	struct qeth_ipa_cmd *cmd;
3437 
3438 	QETH_CARD_TEXT(card, 2, "diagtrap");
3439 	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
3440 	if (!iob)
3441 		return -ENOMEM;
3442 	cmd = __ipa_cmd(iob);
3443 	cmd->data.diagass.type = 1;
3444 	cmd->data.diagass.action = action;
3445 	switch (action) {
3446 	case QETH_DIAGS_TRAP_ARM:
3447 		cmd->data.diagass.options = 0x0003;
3448 		cmd->data.diagass.ext = 0x00010000 +
3449 			sizeof(struct qeth_trap_id);
3450 		qeth_get_trap_id(card,
3451 			(struct qeth_trap_id *)cmd->data.diagass.cdata);
3452 		break;
3453 	case QETH_DIAGS_TRAP_DISARM:
3454 		cmd->data.diagass.options = 0x0001;
3455 		break;
3456 	case QETH_DIAGS_TRAP_CAPTURE:
3457 		break;
3458 	}
3459 	return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
3460 }
3461 
3462 static int qeth_check_qdio_errors(struct qeth_card *card,
3463 				  struct qdio_buffer *buf,
3464 				  unsigned int qdio_error,
3465 				  const char *dbftext)
3466 {
3467 	if (qdio_error) {
3468 		QETH_CARD_TEXT(card, 2, dbftext);
3469 		QETH_CARD_TEXT_(card, 2, " F15=%02X",
3470 			       buf->element[15].sflags);
3471 		QETH_CARD_TEXT_(card, 2, " F14=%02X",
3472 			       buf->element[14].sflags);
3473 		QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
3474 		if ((buf->element[15].sflags) == 0x12) {
3475 			QETH_CARD_STAT_INC(card, rx_fifo_errors);
3476 			return 0;
3477 		} else
3478 			return 1;
3479 	}
3480 	return 0;
3481 }
3482 
3483 static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
3484 					 unsigned int count)
3485 {
3486 	struct qeth_qdio_q *queue = card->qdio.in_q;
3487 	struct list_head *lh;
3488 	int i;
3489 	int rc;
3490 	int newcount = 0;
3491 
3492 	/* only requeue at a certain threshold to avoid SIGAs */
3493 	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
3494 		for (i = queue->next_buf_to_init;
3495 		     i < queue->next_buf_to_init + count; ++i) {
3496 			if (qeth_init_input_buffer(card,
3497 				&queue->bufs[QDIO_BUFNR(i)])) {
3498 				break;
3499 			} else {
3500 				newcount++;
3501 			}
3502 		}
3503 
3504 		if (newcount < count) {
3505 			/* we are in memory shortage so we switch back to
3506 			   traditional skb allocation and drop packages */
3507 			atomic_set(&card->force_alloc_skb, 3);
3508 			count = newcount;
3509 		} else {
3510 			atomic_add_unless(&card->force_alloc_skb, -1, 0);
3511 		}
3512 
3513 		if (!count) {
3514 			i = 0;
3515 			list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
3516 				i++;
3517 			if (i == card->qdio.in_buf_pool.buf_count) {
3518 				QETH_CARD_TEXT(card, 2, "qsarbw");
3519 				schedule_delayed_work(
3520 					&card->buffer_reclaim_work,
3521 					QETH_RECLAIM_WORK_TIME);
3522 			}
3523 			return 0;
3524 		}
3525 
3526 		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
3527 			     queue->next_buf_to_init, count);
3528 		if (rc) {
3529 			QETH_CARD_TEXT(card, 2, "qinberr");
3530 		}
3531 		queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
3532 						     count);
3533 		return count;
3534 	}
3535 
3536 	return 0;
3537 }
3538 
3539 static void qeth_buffer_reclaim_work(struct work_struct *work)
3540 {
3541 	struct qeth_card *card = container_of(to_delayed_work(work),
3542 					      struct qeth_card,
3543 					      buffer_reclaim_work);
3544 
3545 	local_bh_disable();
3546 	napi_schedule(&card->napi);
3547 	/* kick-start the NAPI softirq: */
3548 	local_bh_enable();
3549 }
3550 
3551 static void qeth_handle_send_error(struct qeth_card *card,
3552 		struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
3553 {
3554 	int sbalf15 = buffer->buffer->element[15].sflags;
3555 
3556 	QETH_CARD_TEXT(card, 6, "hdsnderr");
3557 	qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
3558 
3559 	if (!qdio_err)
3560 		return;
3561 
3562 	if ((sbalf15 >= 15) && (sbalf15 <= 31))
3563 		return;
3564 
3565 	QETH_CARD_TEXT(card, 1, "lnkfail");
3566 	QETH_CARD_TEXT_(card, 1, "%04x %02x",
3567 		       (u16)qdio_err, (u8)sbalf15);
3568 }
3569 
3570 /**
3571  * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
3572  * @queue: queue to check for packing buffer
3573  *
3574  * Returns number of buffers that were prepared for flush.
3575  */
3576 static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
3577 {
3578 	struct qeth_qdio_out_buffer *buffer;
3579 
3580 	buffer = queue->bufs[queue->next_buf_to_fill];
3581 	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
3582 	    (buffer->next_element_to_fill > 0)) {
3583 		/* it's a packing buffer */
3584 		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3585 		queue->next_buf_to_fill =
3586 			QDIO_BUFNR(queue->next_buf_to_fill + 1);
3587 		return 1;
3588 	}
3589 	return 0;
3590 }
3591 
3592 /*
3593  * Switched to packing state if the number of used buffers on a queue
3594  * reaches a certain limit.
3595  */
3596 static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
3597 {
3598 	if (!queue->do_pack) {
3599 		if (atomic_read(&queue->used_buffers)
3600 		    >= QETH_HIGH_WATERMARK_PACK){
3601 			/* switch non-PACKING -> PACKING */
3602 			QETH_CARD_TEXT(queue->card, 6, "np->pack");
3603 			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3604 			queue->do_pack = 1;
3605 		}
3606 	}
3607 }
3608 
3609 /*
3610  * Switches from packing to non-packing mode. If there is a packing
3611  * buffer on the queue this buffer will be prepared to be flushed.
3612  * In that case 1 is returned to inform the caller. If no buffer
3613  * has to be flushed, zero is returned.
3614  */
3615 static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
3616 {
3617 	if (queue->do_pack) {
3618 		if (atomic_read(&queue->used_buffers)
3619 		    <= QETH_LOW_WATERMARK_PACK) {
3620 			/* switch PACKING -> non-PACKING */
3621 			QETH_CARD_TEXT(queue->card, 6, "pack->np");
3622 			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3623 			queue->do_pack = 0;
3624 			return qeth_prep_flush_pack_buffer(queue);
3625 		}
3626 	}
3627 	return 0;
3628 }
3629 
3630 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
3631 			       int count)
3632 {
3633 	struct qeth_qdio_out_buffer *buf = queue->bufs[index];
3634 	unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
3635 	struct qeth_card *card = queue->card;
3636 	int rc;
3637 	int i;
3638 
3639 	for (i = index; i < index + count; ++i) {
3640 		unsigned int bidx = QDIO_BUFNR(i);
3641 		struct sk_buff *skb;
3642 
3643 		buf = queue->bufs[bidx];
3644 		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
3645 				SBAL_EFLAGS_LAST_ENTRY;
3646 		queue->coalesced_frames += buf->frames;
3647 
3648 		if (queue->bufstates)
3649 			queue->bufstates[bidx].user = buf;
3650 
3651 		if (IS_IQD(card)) {
3652 			skb_queue_walk(&buf->skb_list, skb)
3653 				skb_tx_timestamp(skb);
3654 		}
3655 	}
3656 
3657 	if (!IS_IQD(card)) {
3658 		if (!queue->do_pack) {
3659 			if ((atomic_read(&queue->used_buffers) >=
3660 				(QETH_HIGH_WATERMARK_PACK -
3661 				 QETH_WATERMARK_PACK_FUZZ)) &&
3662 			    !atomic_read(&queue->set_pci_flags_count)) {
3663 				/* it's likely that we'll go to packing
3664 				 * mode soon */
3665 				atomic_inc(&queue->set_pci_flags_count);
3666 				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3667 			}
3668 		} else {
3669 			if (!atomic_read(&queue->set_pci_flags_count)) {
3670 				/*
3671 				 * there's no outstanding PCI any more, so we
3672 				 * have to request a PCI to be sure the the PCI
3673 				 * will wake at some time in the future then we
3674 				 * can flush packed buffers that might still be
3675 				 * hanging around, which can happen if no
3676 				 * further send was requested by the stack
3677 				 */
3678 				atomic_inc(&queue->set_pci_flags_count);
3679 				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3680 			}
3681 		}
3682 
3683 		if (atomic_read(&queue->set_pci_flags_count))
3684 			qdio_flags |= QDIO_FLAG_PCI_OUT;
3685 	}
3686 
3687 	QETH_TXQ_STAT_INC(queue, doorbell);
3688 	rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
3689 		     queue->queue_no, index, count);
3690 
3691 	/* Fake the TX completion interrupt: */
3692 	if (IS_IQD(card)) {
3693 		unsigned int frames = READ_ONCE(queue->max_coalesced_frames);
3694 		unsigned int usecs = READ_ONCE(queue->coalesce_usecs);
3695 
3696 		if (frames && queue->coalesced_frames >= frames) {
3697 			napi_schedule(&queue->napi);
3698 			queue->coalesced_frames = 0;
3699 			QETH_TXQ_STAT_INC(queue, coal_frames);
3700 		} else if (usecs) {
3701 			qeth_tx_arm_timer(queue, usecs);
3702 		}
3703 	}
3704 
3705 	if (rc) {
3706 		/* ignore temporary SIGA errors without busy condition */
3707 		if (rc == -ENOBUFS)
3708 			return;
3709 		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
3710 		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
3711 		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
3712 		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
3713 		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
3714 
3715 		/* this must not happen under normal circumstances. if it
3716 		 * happens something is really wrong -> recover */
3717 		qeth_schedule_recovery(queue->card);
3718 		return;
3719 	}
3720 }
3721 
3722 static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
3723 {
3724 	qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
3725 
3726 	queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
3727 	queue->prev_hdr = NULL;
3728 	queue->bulk_count = 0;
3729 }
3730 
3731 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
3732 {
3733 	/*
3734 	 * check if weed have to switch to non-packing mode or if
3735 	 * we have to get a pci flag out on the queue
3736 	 */
3737 	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
3738 	    !atomic_read(&queue->set_pci_flags_count)) {
3739 		unsigned int index, flush_cnt;
3740 		bool q_was_packing;
3741 
3742 		spin_lock(&queue->lock);
3743 
3744 		index = queue->next_buf_to_fill;
3745 		q_was_packing = queue->do_pack;
3746 
3747 		flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
3748 		if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
3749 			flush_cnt = qeth_prep_flush_pack_buffer(queue);
3750 
3751 		if (flush_cnt) {
3752 			qeth_flush_buffers(queue, index, flush_cnt);
3753 			if (q_was_packing)
3754 				QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
3755 		}
3756 
3757 		spin_unlock(&queue->lock);
3758 	}
3759 }
3760 
3761 static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
3762 {
3763 	struct qeth_card *card = (struct qeth_card *)card_ptr;
3764 
3765 	napi_schedule_irqoff(&card->napi);
3766 }
3767 
3768 int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
3769 {
3770 	int rc;
3771 
3772 	if (card->options.cq ==  QETH_CQ_NOTAVAILABLE) {
3773 		rc = -1;
3774 		goto out;
3775 	} else {
3776 		if (card->options.cq == cq) {
3777 			rc = 0;
3778 			goto out;
3779 		}
3780 
3781 		qeth_free_qdio_queues(card);
3782 		card->options.cq = cq;
3783 		rc = 0;
3784 	}
3785 out:
3786 	return rc;
3787 
3788 }
3789 EXPORT_SYMBOL_GPL(qeth_configure_cq);
3790 
3791 static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
3792 				 unsigned int queue, int first_element,
3793 				 int count)
3794 {
3795 	struct qeth_qdio_q *cq = card->qdio.c_q;
3796 	int i;
3797 	int rc;
3798 
3799 	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
3800 	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
3801 	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
3802 
3803 	if (qdio_err) {
3804 		netif_tx_stop_all_queues(card->dev);
3805 		qeth_schedule_recovery(card);
3806 		return;
3807 	}
3808 
3809 	for (i = first_element; i < first_element + count; ++i) {
3810 		struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
3811 		int e = 0;
3812 
3813 		while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
3814 		       buffer->element[e].addr) {
3815 			unsigned long phys_aob_addr = buffer->element[e].addr;
3816 
3817 			qeth_qdio_handle_aob(card, phys_aob_addr);
3818 			++e;
3819 		}
3820 		qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
3821 	}
3822 	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
3823 		    card->qdio.c_q->next_buf_to_init,
3824 		    count);
3825 	if (rc) {
3826 		dev_warn(&card->gdev->dev,
3827 			"QDIO reported an error, rc=%i\n", rc);
3828 		QETH_CARD_TEXT(card, 2, "qcqherr");
3829 	}
3830 
3831 	cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
3832 }
3833 
3834 static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
3835 				    unsigned int qdio_err, int queue,
3836 				    int first_elem, int count,
3837 				    unsigned long card_ptr)
3838 {
3839 	struct qeth_card *card = (struct qeth_card *)card_ptr;
3840 
3841 	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
3842 	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
3843 
3844 	if (qdio_err)
3845 		qeth_schedule_recovery(card);
3846 }
3847 
3848 static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3849 				     unsigned int qdio_error, int __queue,
3850 				     int first_element, int count,
3851 				     unsigned long card_ptr)
3852 {
3853 	struct qeth_card *card        = (struct qeth_card *) card_ptr;
3854 	struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
3855 	struct net_device *dev = card->dev;
3856 	struct netdev_queue *txq;
3857 	int i;
3858 
3859 	QETH_CARD_TEXT(card, 6, "qdouhdl");
3860 	if (qdio_error & QDIO_ERROR_FATAL) {
3861 		QETH_CARD_TEXT(card, 2, "achkcond");
3862 		netif_tx_stop_all_queues(dev);
3863 		qeth_schedule_recovery(card);
3864 		return;
3865 	}
3866 
3867 	for (i = first_element; i < (first_element + count); ++i) {
3868 		struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)];
3869 
3870 		qeth_handle_send_error(card, buf, qdio_error);
3871 		qeth_clear_output_buffer(queue, buf, qdio_error, 0);
3872 	}
3873 
3874 	atomic_sub(count, &queue->used_buffers);
3875 	qeth_check_outbound_queue(queue);
3876 
3877 	txq = netdev_get_tx_queue(dev, __queue);
3878 	/* xmit may have observed the full-condition, but not yet stopped the
3879 	 * txq. In which case the code below won't trigger. So before returning,
3880 	 * xmit will re-check the txq's fill level and wake it up if needed.
3881 	 */
3882 	if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
3883 		netif_tx_wake_queue(txq);
3884 }
3885 
3886 /**
3887  * Note: Function assumes that we have 4 outbound queues.
3888  */
3889 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
3890 {
3891 	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
3892 	u8 tos;
3893 
3894 	switch (card->qdio.do_prio_queueing) {
3895 	case QETH_PRIO_Q_ING_TOS:
3896 	case QETH_PRIO_Q_ING_PREC:
3897 		switch (qeth_get_ip_version(skb)) {
3898 		case 4:
3899 			tos = ipv4_get_dsfield(ip_hdr(skb));
3900 			break;
3901 		case 6:
3902 			tos = ipv6_get_dsfield(ipv6_hdr(skb));
3903 			break;
3904 		default:
3905 			return card->qdio.default_out_queue;
3906 		}
3907 		if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
3908 			return ~tos >> 6 & 3;
3909 		if (tos & IPTOS_MINCOST)
3910 			return 3;
3911 		if (tos & IPTOS_RELIABILITY)
3912 			return 2;
3913 		if (tos & IPTOS_THROUGHPUT)
3914 			return 1;
3915 		if (tos & IPTOS_LOWDELAY)
3916 			return 0;
3917 		break;
3918 	case QETH_PRIO_Q_ING_SKB:
3919 		if (skb->priority > 5)
3920 			return 0;
3921 		return ~skb->priority >> 1 & 3;
3922 	case QETH_PRIO_Q_ING_VLAN:
3923 		if (veth->h_vlan_proto == htons(ETH_P_8021Q))
3924 			return ~ntohs(veth->h_vlan_TCI) >>
3925 			       (VLAN_PRIO_SHIFT + 1) & 3;
3926 		break;
3927 	case QETH_PRIO_Q_ING_FIXED:
3928 		return card->qdio.default_out_queue;
3929 	default:
3930 		break;
3931 	}
3932 	return card->qdio.default_out_queue;
3933 }
3934 EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
3935 
3936 /**
3937  * qeth_get_elements_for_frags() -	find number of SBALEs for skb frags.
3938  * @skb:				SKB address
3939  *
3940  * Returns the number of pages, and thus QDIO buffer elements, needed to cover
3941  * fragmented part of the SKB. Returns zero for linear SKB.
3942  */
3943 static int qeth_get_elements_for_frags(struct sk_buff *skb)
3944 {
3945 	int cnt, elements = 0;
3946 
3947 	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3948 		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3949 
3950 		elements += qeth_get_elements_for_range(
3951 			(addr_t)skb_frag_address(frag),
3952 			(addr_t)skb_frag_address(frag) + skb_frag_size(frag));
3953 	}
3954 	return elements;
3955 }
3956 
3957 /**
3958  * qeth_count_elements() -	Counts the number of QDIO buffer elements needed
3959  *				to transmit an skb.
3960  * @skb:			the skb to operate on.
3961  * @data_offset:		skip this part of the skb's linear data
3962  *
3963  * Returns the number of pages, and thus QDIO buffer elements, needed to map the
3964  * skb's data (both its linear part and paged fragments).
3965  */
3966 unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
3967 {
3968 	unsigned int elements = qeth_get_elements_for_frags(skb);
3969 	addr_t end = (addr_t)skb->data + skb_headlen(skb);
3970 	addr_t start = (addr_t)skb->data + data_offset;
3971 
3972 	if (start != end)
3973 		elements += qeth_get_elements_for_range(start, end);
3974 	return elements;
3975 }
3976 EXPORT_SYMBOL_GPL(qeth_count_elements);
3977 
3978 #define QETH_HDR_CACHE_OBJ_SIZE		(sizeof(struct qeth_hdr_tso) + \
3979 					 MAX_TCP_HEADER)
3980 
3981 /**
3982  * qeth_add_hw_header() - add a HW header to an skb.
3983  * @skb: skb that the HW header should be added to.
3984  * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
3985  *	 it contains a valid pointer to a qeth_hdr.
3986  * @hdr_len: length of the HW header.
3987  * @proto_len: length of protocol headers that need to be in same page as the
3988  *	       HW header.
3989  *
3990  * Returns the pushed length. If the header can't be pushed on
3991  * (eg. because it would cross a page boundary), it is allocated from
3992  * the cache instead and 0 is returned.
3993  * The number of needed buffer elements is returned in @elements.
3994  * Error to create the hdr is indicated by returning with < 0.
3995  */
3996 static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
3997 			      struct sk_buff *skb, struct qeth_hdr **hdr,
3998 			      unsigned int hdr_len, unsigned int proto_len,
3999 			      unsigned int *elements)
4000 {
4001 	gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
4002 	const unsigned int contiguous = proto_len ? proto_len : 1;
4003 	const unsigned int max_elements = queue->max_elements;
4004 	unsigned int __elements;
4005 	addr_t start, end;
4006 	bool push_ok;
4007 	int rc;
4008 
4009 check_layout:
4010 	start = (addr_t)skb->data - hdr_len;
4011 	end = (addr_t)skb->data;
4012 
4013 	if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
4014 		/* Push HW header into same page as first protocol header. */
4015 		push_ok = true;
4016 		/* ... but TSO always needs a separate element for headers: */
4017 		if (skb_is_gso(skb))
4018 			__elements = 1 + qeth_count_elements(skb, proto_len);
4019 		else
4020 			__elements = qeth_count_elements(skb, 0);
4021 	} else if (!proto_len && PAGE_ALIGNED(skb->data)) {
4022 		/* Push HW header into preceding page, flush with skb->data. */
4023 		push_ok = true;
4024 		__elements = 1 + qeth_count_elements(skb, 0);
4025 	} else {
4026 		/* Use header cache, copy protocol headers up. */
4027 		push_ok = false;
4028 		__elements = 1 + qeth_count_elements(skb, proto_len);
4029 	}
4030 
4031 	/* Compress skb to fit into one IO buffer: */
4032 	if (__elements > max_elements) {
4033 		if (!skb_is_nonlinear(skb)) {
4034 			/* Drop it, no easy way of shrinking it further. */
4035 			QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
4036 					 max_elements, __elements, skb->len);
4037 			return -E2BIG;
4038 		}
4039 
4040 		rc = skb_linearize(skb);
4041 		if (rc) {
4042 			QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
4043 			return rc;
4044 		}
4045 
4046 		QETH_TXQ_STAT_INC(queue, skbs_linearized);
4047 		/* Linearization changed the layout, re-evaluate: */
4048 		goto check_layout;
4049 	}
4050 
4051 	*elements = __elements;
4052 	/* Add the header: */
4053 	if (push_ok) {
4054 		*hdr = skb_push(skb, hdr_len);
4055 		return hdr_len;
4056 	}
4057 
4058 	/* Fall back to cache element with known-good alignment: */
4059 	if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
4060 		return -E2BIG;
4061 	*hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
4062 	if (!*hdr)
4063 		return -ENOMEM;
4064 	/* Copy protocol headers behind HW header: */
4065 	skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
4066 	return 0;
4067 }
4068 
4069 static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
4070 			      struct sk_buff *curr_skb,
4071 			      struct qeth_hdr *curr_hdr)
4072 {
4073 	struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
4074 	struct qeth_hdr *prev_hdr = queue->prev_hdr;
4075 
4076 	if (!prev_hdr)
4077 		return true;
4078 
4079 	/* All packets must have the same target: */
4080 	if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
4081 		struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);
4082 
4083 		return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
4084 					eth_hdr(curr_skb)->h_dest) &&
4085 		       qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
4086 	}
4087 
4088 	return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
4089 	       qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
4090 }
4091 
4092 /**
4093  * qeth_fill_buffer() - map skb into an output buffer
4094  * @buf:	buffer to transport the skb
4095  * @skb:	skb to map into the buffer
4096  * @hdr:	qeth_hdr for this skb. Either at skb->data, or allocated
4097  *		from qeth_core_header_cache.
4098  * @offset:	when mapping the skb, start at skb->data + offset
4099  * @hd_len:	if > 0, build a dedicated header element of this size
4100  */
4101 static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
4102 				     struct sk_buff *skb, struct qeth_hdr *hdr,
4103 				     unsigned int offset, unsigned int hd_len)
4104 {
4105 	struct qdio_buffer *buffer = buf->buffer;
4106 	int element = buf->next_element_to_fill;
4107 	int length = skb_headlen(skb) - offset;
4108 	char *data = skb->data + offset;
4109 	unsigned int elem_length, cnt;
4110 	bool is_first_elem = true;
4111 
4112 	__skb_queue_tail(&buf->skb_list, skb);
4113 
4114 	/* build dedicated element for HW Header */
4115 	if (hd_len) {
4116 		is_first_elem = false;
4117 
4118 		buffer->element[element].addr = virt_to_phys(hdr);
4119 		buffer->element[element].length = hd_len;
4120 		buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
4121 
4122 		/* HW header is allocated from cache: */
4123 		if ((void *)hdr != skb->data)
4124 			buf->is_header[element] = 1;
4125 		/* HW header was pushed and is contiguous with linear part: */
4126 		else if (length > 0 && !PAGE_ALIGNED(data) &&
4127 			 (data == (char *)hdr + hd_len))
4128 			buffer->element[element].eflags |=
4129 				SBAL_EFLAGS_CONTIGUOUS;
4130 
4131 		element++;
4132 	}
4133 
4134 	/* map linear part into buffer element(s) */
4135 	while (length > 0) {
4136 		elem_length = min_t(unsigned int, length,
4137 				    PAGE_SIZE - offset_in_page(data));
4138 
4139 		buffer->element[element].addr = virt_to_phys(data);
4140 		buffer->element[element].length = elem_length;
4141 		length -= elem_length;
4142 		if (is_first_elem) {
4143 			is_first_elem = false;
4144 			if (length || skb_is_nonlinear(skb))
4145 				/* skb needs additional elements */
4146 				buffer->element[element].eflags =
4147 					SBAL_EFLAGS_FIRST_FRAG;
4148 			else
4149 				buffer->element[element].eflags = 0;
4150 		} else {
4151 			buffer->element[element].eflags =
4152 				SBAL_EFLAGS_MIDDLE_FRAG;
4153 		}
4154 
4155 		data += elem_length;
4156 		element++;
4157 	}
4158 
4159 	/* map page frags into buffer element(s) */
4160 	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
4161 		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
4162 
4163 		data = skb_frag_address(frag);
4164 		length = skb_frag_size(frag);
4165 		while (length > 0) {
4166 			elem_length = min_t(unsigned int, length,
4167 					    PAGE_SIZE - offset_in_page(data));
4168 
4169 			buffer->element[element].addr = virt_to_phys(data);
4170 			buffer->element[element].length = elem_length;
4171 			buffer->element[element].eflags =
4172 				SBAL_EFLAGS_MIDDLE_FRAG;
4173 
4174 			length -= elem_length;
4175 			data += elem_length;
4176 			element++;
4177 		}
4178 	}
4179 
4180 	if (buffer->element[element - 1].eflags)
4181 		buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
4182 	buf->next_element_to_fill = element;
4183 	return element;
4184 }
4185 
4186 static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4187 		       struct sk_buff *skb, unsigned int elements,
4188 		       struct qeth_hdr *hdr, unsigned int offset,
4189 		       unsigned int hd_len)
4190 {
4191 	unsigned int bytes = qdisc_pkt_len(skb);
4192 	struct qeth_qdio_out_buffer *buffer;
4193 	unsigned int next_element;
4194 	struct netdev_queue *txq;
4195 	bool stopped = false;
4196 	bool flush;
4197 
4198 	buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
4199 	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
4200 
4201 	/* Just a sanity check, the wake/stop logic should ensure that we always
4202 	 * get a free buffer.
4203 	 */
4204 	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4205 		return -EBUSY;
4206 
4207 	flush = !qeth_iqd_may_bulk(queue, skb, hdr);
4208 
4209 	if (flush ||
4210 	    (buffer->next_element_to_fill + elements > queue->max_elements)) {
4211 		if (buffer->next_element_to_fill > 0) {
4212 			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4213 			queue->bulk_count++;
4214 		}
4215 
4216 		if (queue->bulk_count >= queue->bulk_max)
4217 			flush = true;
4218 
4219 		if (flush)
4220 			qeth_flush_queue(queue);
4221 
4222 		buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
4223 						queue->bulk_count)];
4224 
4225 		/* Sanity-check again: */
4226 		if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4227 			return -EBUSY;
4228 	}
4229 
4230 	if (buffer->next_element_to_fill == 0 &&
4231 	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
4232 		/* If a TX completion happens right _here_ and misses to wake
4233 		 * the txq, then our re-check below will catch the race.
4234 		 */
4235 		QETH_TXQ_STAT_INC(queue, stopped);
4236 		netif_tx_stop_queue(txq);
4237 		stopped = true;
4238 	}
4239 
4240 	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4241 	buffer->bytes += bytes;
4242 	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4243 	queue->prev_hdr = hdr;
4244 
4245 	flush = __netdev_tx_sent_queue(txq, bytes,
4246 				       !stopped && netdev_xmit_more());
4247 
4248 	if (flush || next_element >= queue->max_elements) {
4249 		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4250 		queue->bulk_count++;
4251 
4252 		if (queue->bulk_count >= queue->bulk_max)
4253 			flush = true;
4254 
4255 		if (flush)
4256 			qeth_flush_queue(queue);
4257 	}
4258 
4259 	if (stopped && !qeth_out_queue_is_full(queue))
4260 		netif_tx_start_queue(txq);
4261 	return 0;
4262 }
4263 
4264 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4265 			struct sk_buff *skb, struct qeth_hdr *hdr,
4266 			unsigned int offset, unsigned int hd_len,
4267 			int elements_needed)
4268 {
4269 	unsigned int start_index = queue->next_buf_to_fill;
4270 	struct qeth_qdio_out_buffer *buffer;
4271 	unsigned int next_element;
4272 	struct netdev_queue *txq;
4273 	bool stopped = false;
4274 	int flush_count = 0;
4275 	int do_pack = 0;
4276 	int rc = 0;
4277 
4278 	buffer = queue->bufs[queue->next_buf_to_fill];
4279 
4280 	/* Just a sanity check, the wake/stop logic should ensure that we always
4281 	 * get a free buffer.
4282 	 */
4283 	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4284 		return -EBUSY;
4285 
4286 	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
4287 
4288 	/* check if we need to switch packing state of this queue */
4289 	qeth_switch_to_packing_if_needed(queue);
4290 	if (queue->do_pack) {
4291 		do_pack = 1;
4292 		/* does packet fit in current buffer? */
4293 		if (buffer->next_element_to_fill + elements_needed >
4294 		    queue->max_elements) {
4295 			/* ... no -> set state PRIMED */
4296 			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4297 			flush_count++;
4298 			queue->next_buf_to_fill =
4299 				QDIO_BUFNR(queue->next_buf_to_fill + 1);
4300 			buffer = queue->bufs[queue->next_buf_to_fill];
4301 
4302 			/* We stepped forward, so sanity-check again: */
4303 			if (atomic_read(&buffer->state) !=
4304 			    QETH_QDIO_BUF_EMPTY) {
4305 				qeth_flush_buffers(queue, start_index,
4306 							   flush_count);
4307 				rc = -EBUSY;
4308 				goto out;
4309 			}
4310 		}
4311 	}
4312 
4313 	if (buffer->next_element_to_fill == 0 &&
4314 	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
4315 		/* If a TX completion happens right _here_ and misses to wake
4316 		 * the txq, then our re-check below will catch the race.
4317 		 */
4318 		QETH_TXQ_STAT_INC(queue, stopped);
4319 		netif_tx_stop_queue(txq);
4320 		stopped = true;
4321 	}
4322 
4323 	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4324 	buffer->bytes += qdisc_pkt_len(skb);
4325 	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4326 
4327 	if (queue->do_pack)
4328 		QETH_TXQ_STAT_INC(queue, skbs_pack);
4329 	if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
4330 		flush_count++;
4331 		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4332 		queue->next_buf_to_fill =
4333 				QDIO_BUFNR(queue->next_buf_to_fill + 1);
4334 	}
4335 
4336 	if (flush_count)
4337 		qeth_flush_buffers(queue, start_index, flush_count);
4338 
4339 out:
4340 	if (do_pack)
4341 		QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
4342 
4343 	if (stopped && !qeth_out_queue_is_full(queue))
4344 		netif_tx_start_queue(txq);
4345 	return rc;
4346 }
4347 EXPORT_SYMBOL_GPL(qeth_do_send_packet);
4348 
4349 static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
4350 			      unsigned int payload_len, struct sk_buff *skb,
4351 			      unsigned int proto_len)
4352 {
4353 	struct qeth_hdr_ext_tso *ext = &hdr->ext;
4354 
4355 	ext->hdr_tot_len = sizeof(*ext);
4356 	ext->imb_hdr_no = 1;
4357 	ext->hdr_type = 1;
4358 	ext->hdr_version = 1;
4359 	ext->hdr_len = 28;
4360 	ext->payload_len = payload_len;
4361 	ext->mss = skb_shinfo(skb)->gso_size;
4362 	ext->dg_hdr_len = proto_len;
4363 }
4364 
4365 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
4366 	      struct qeth_qdio_out_q *queue, int ipv,
4367 	      void (*fill_header)(struct qeth_qdio_out_q *queue,
4368 				  struct qeth_hdr *hdr, struct sk_buff *skb,
4369 				  int ipv, unsigned int data_len))
4370 {
4371 	unsigned int proto_len, hw_hdr_len;
4372 	unsigned int frame_len = skb->len;
4373 	bool is_tso = skb_is_gso(skb);
4374 	unsigned int data_offset = 0;
4375 	struct qeth_hdr *hdr = NULL;
4376 	unsigned int hd_len = 0;
4377 	unsigned int elements;
4378 	int push_len, rc;
4379 
4380 	if (is_tso) {
4381 		hw_hdr_len = sizeof(struct qeth_hdr_tso);
4382 		proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4383 	} else {
4384 		hw_hdr_len = sizeof(struct qeth_hdr);
4385 		proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
4386 	}
4387 
4388 	rc = skb_cow_head(skb, hw_hdr_len);
4389 	if (rc)
4390 		return rc;
4391 
4392 	push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
4393 				      &elements);
4394 	if (push_len < 0)
4395 		return push_len;
4396 	if (is_tso || !push_len) {
4397 		/* HW header needs its own buffer element. */
4398 		hd_len = hw_hdr_len + proto_len;
4399 		data_offset = push_len + proto_len;
4400 	}
4401 	memset(hdr, 0, hw_hdr_len);
4402 	fill_header(queue, hdr, skb, ipv, frame_len);
4403 	if (is_tso)
4404 		qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
4405 				  frame_len - proto_len, skb, proto_len);
4406 
4407 	if (IS_IQD(card)) {
4408 		rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
4409 				 hd_len);
4410 	} else {
4411 		/* TODO: drop skb_orphan() once TX completion is fast enough */
4412 		skb_orphan(skb);
4413 		spin_lock(&queue->lock);
4414 		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
4415 					 hd_len, elements);
4416 		spin_unlock(&queue->lock);
4417 	}
4418 
4419 	if (rc && !push_len)
4420 		kmem_cache_free(qeth_core_header_cache, hdr);
4421 
4422 	return rc;
4423 }
4424 EXPORT_SYMBOL_GPL(qeth_xmit);
4425 
4426 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
4427 		struct qeth_reply *reply, unsigned long data)
4428 {
4429 	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4430 	struct qeth_ipacmd_setadpparms *setparms;
4431 
4432 	QETH_CARD_TEXT(card, 4, "prmadpcb");
4433 
4434 	setparms = &(cmd->data.setadapterparms);
4435 	if (qeth_setadpparms_inspect_rc(cmd)) {
4436 		QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
4437 		setparms->data.mode = SET_PROMISC_MODE_OFF;
4438 	}
4439 	card->info.promisc_mode = setparms->data.mode;
4440 	return (cmd->hdr.return_code) ? -EIO : 0;
4441 }
4442 
4443 void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
4444 {
4445 	enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
4446 						    SET_PROMISC_MODE_OFF;
4447 	struct qeth_cmd_buffer *iob;
4448 	struct qeth_ipa_cmd *cmd;
4449 
4450 	QETH_CARD_TEXT(card, 4, "setprom");
4451 	QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
4452 
4453 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
4454 				   SETADP_DATA_SIZEOF(mode));
4455 	if (!iob)
4456 		return;
4457 	cmd = __ipa_cmd(iob);
4458 	cmd->data.setadapterparms.data.mode = mode;
4459 	qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
4460 }
4461 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
4462 
4463 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
4464 		struct qeth_reply *reply, unsigned long data)
4465 {
4466 	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4467 	struct qeth_ipacmd_setadpparms *adp_cmd;
4468 
4469 	QETH_CARD_TEXT(card, 4, "chgmaccb");
4470 	if (qeth_setadpparms_inspect_rc(cmd))
4471 		return -EIO;
4472 
4473 	adp_cmd = &cmd->data.setadapterparms;
4474 	if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
4475 		return -EADDRNOTAVAIL;
4476 
4477 	if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
4478 	    !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
4479 		return -EADDRNOTAVAIL;
4480 
4481 	ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
4482 	return 0;
4483 }
4484 
4485 int qeth_setadpparms_change_macaddr(struct qeth_card *card)
4486 {
4487 	int rc;
4488 	struct qeth_cmd_buffer *iob;
4489 	struct qeth_ipa_cmd *cmd;
4490 
4491 	QETH_CARD_TEXT(card, 4, "chgmac");
4492 
4493 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4494 				   SETADP_DATA_SIZEOF(change_addr));
4495 	if (!iob)
4496 		return -ENOMEM;
4497 	cmd = __ipa_cmd(iob);
4498 	cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
4499 	cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
4500 	ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
4501 			card->dev->dev_addr);
4502 	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
4503 			       NULL);
4504 	return rc;
4505 }
4506 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
4507 
4508 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4509 		struct qeth_reply *reply, unsigned long data)
4510 {
4511 	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4512 	struct qeth_set_access_ctrl *access_ctrl_req;
4513 
4514 	QETH_CARD_TEXT(card, 4, "setaccb");
4515 
4516 	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4517 	QETH_CARD_TEXT_(card, 2, "rc=%d",
4518 			cmd->data.setadapterparms.hdr.return_code);
4519 	if (cmd->data.setadapterparms.hdr.return_code !=
4520 						SET_ACCESS_CTRL_RC_SUCCESS)
4521 		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
4522 				 access_ctrl_req->subcmd_code, CARD_DEVID(card),
4523 				 cmd->data.setadapterparms.hdr.return_code);
4524 	switch (qeth_setadpparms_inspect_rc(cmd)) {
4525 	case SET_ACCESS_CTRL_RC_SUCCESS:
4526 		if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE)
4527 			dev_info(&card->gdev->dev,
4528 			    "QDIO data connection isolation is deactivated\n");
4529 		else
4530 			dev_info(&card->gdev->dev,
4531 			    "QDIO data connection isolation is activated\n");
4532 		return 0;
4533 	case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4534 		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
4535 				 CARD_DEVID(card));
4536 		return 0;
4537 	case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4538 		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
4539 				 CARD_DEVID(card));
4540 		return 0;
4541 	case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
4542 		dev_err(&card->gdev->dev, "Adapter does not "
4543 			"support QDIO data connection isolation\n");
4544 		return -EOPNOTSUPP;
4545 	case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
4546 		dev_err(&card->gdev->dev,
4547 			"Adapter is dedicated. "
4548 			"QDIO data connection isolation not supported\n");
4549 		return -EOPNOTSUPP;
4550 	case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
4551 		dev_err(&card->gdev->dev,
4552 			"TSO does not permit QDIO data connection isolation\n");
4553 		return -EPERM;
4554 	case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
4555 		dev_err(&card->gdev->dev, "The adjacent switch port does not "
4556 			"support reflective relay mode\n");
4557 		return -EOPNOTSUPP;
4558 	case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
4559 		dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
4560 					"enabled at the adjacent switch port");
4561 		return -EREMOTEIO;
4562 	case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
4563 		dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
4564 					"at the adjacent switch failed\n");
4565 		/* benign error while disabling ISOLATION_MODE_FWD */
4566 		return 0;
4567 	default:
4568 		return -EIO;
4569 	}
4570 }
4571 
4572 int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
4573 				     enum qeth_ipa_isolation_modes mode)
4574 {
4575 	int rc;
4576 	struct qeth_cmd_buffer *iob;
4577 	struct qeth_ipa_cmd *cmd;
4578 	struct qeth_set_access_ctrl *access_ctrl_req;
4579 
4580 	QETH_CARD_TEXT(card, 4, "setacctl");
4581 
4582 	if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
4583 		dev_err(&card->gdev->dev,
4584 			"Adapter does not support QDIO data connection isolation\n");
4585 		return -EOPNOTSUPP;
4586 	}
4587 
4588 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4589 				   SETADP_DATA_SIZEOF(set_access_ctrl));
4590 	if (!iob)
4591 		return -ENOMEM;
4592 	cmd = __ipa_cmd(iob);
4593 	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4594 	access_ctrl_req->subcmd_code = mode;
4595 
4596 	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
4597 			       NULL);
4598 	if (rc) {
4599 		QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
4600 		QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
4601 				 rc, CARD_DEVID(card));
4602 	}
4603 
4604 	return rc;
4605 }
4606 
4607 void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
4608 {
4609 	struct qeth_card *card;
4610 
4611 	card = dev->ml_priv;
4612 	QETH_CARD_TEXT(card, 4, "txtimeo");
4613 	qeth_schedule_recovery(card);
4614 }
4615 EXPORT_SYMBOL_GPL(qeth_tx_timeout);
4616 
4617 static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4618 {
4619 	struct qeth_card *card = dev->ml_priv;
4620 	int rc = 0;
4621 
4622 	switch (regnum) {
4623 	case MII_BMCR: /* Basic mode control register */
4624 		rc = BMCR_FULLDPLX;
4625 		if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
4626 		    (card->info.link_type != QETH_LINK_TYPE_OSN) &&
4627 		    (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
4628 		    (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
4629 			rc |= BMCR_SPEED100;
4630 		break;
4631 	case MII_BMSR: /* Basic mode status register */
4632 		rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4633 		     BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4634 		     BMSR_100BASE4;
4635 		break;
4636 	case MII_PHYSID1: /* PHYS ID 1 */
4637 		rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4638 		     dev->dev_addr[2];
4639 		rc = (rc >> 5) & 0xFFFF;
4640 		break;
4641 	case MII_PHYSID2: /* PHYS ID 2 */
4642 		rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4643 		break;
4644 	case MII_ADVERTISE: /* Advertisement control reg */
4645 		rc = ADVERTISE_ALL;
4646 		break;
4647 	case MII_LPA: /* Link partner ability reg */
4648 		rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4649 		     LPA_100BASE4 | LPA_LPACK;
4650 		break;
4651 	case MII_EXPANSION: /* Expansion register */
4652 		break;
4653 	case MII_DCOUNTER: /* disconnect counter */
4654 		break;
4655 	case MII_FCSCOUNTER: /* false carrier counter */
4656 		break;
4657 	case MII_NWAYTEST: /* N-way auto-neg test register */
4658 		break;
4659 	case MII_RERRCOUNTER: /* rx error counter */
4660 		rc = card->stats.rx_length_errors +
4661 		     card->stats.rx_frame_errors +
4662 		     card->stats.rx_fifo_errors;
4663 		break;
4664 	case MII_SREVISION: /* silicon revision */
4665 		break;
4666 	case MII_RESV1: /* reserved 1 */
4667 		break;
4668 	case MII_LBRERROR: /* loopback, rx, bypass error */
4669 		break;
4670 	case MII_PHYADDR: /* physical address */
4671 		break;
4672 	case MII_RESV2: /* reserved 2 */
4673 		break;
4674 	case MII_TPISTATUS: /* TPI status for 10mbps */
4675 		break;
4676 	case MII_NCONFIG: /* network interface config */
4677 		break;
4678 	default:
4679 		break;
4680 	}
4681 	return rc;
4682 }
4683 
4684 static int qeth_snmp_command_cb(struct qeth_card *card,
4685 				struct qeth_reply *reply, unsigned long data)
4686 {
4687 	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4688 	struct qeth_arp_query_info *qinfo = reply->param;
4689 	struct qeth_ipacmd_setadpparms *adp_cmd;
4690 	unsigned int data_len;
4691 	void *snmp_data;
4692 
4693 	QETH_CARD_TEXT(card, 3, "snpcmdcb");
4694 
4695 	if (cmd->hdr.return_code) {
4696 		QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
4697 		return -EIO;
4698 	}
4699 	if (cmd->data.setadapterparms.hdr.return_code) {
4700 		cmd->hdr.return_code =
4701 			cmd->data.setadapterparms.hdr.return_code;
4702 		QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
4703 		return -EIO;
4704 	}
4705 
4706 	adp_cmd = &cmd->data.setadapterparms;
4707 	data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
4708 	if (adp_cmd->hdr.seq_no == 1) {
4709 		snmp_data = &adp_cmd->data.snmp;
4710 	} else {
4711 		snmp_data = &adp_cmd->data.snmp.request;
4712 		data_len -= offsetof(struct qeth_snmp_cmd, request);
4713 	}
4714 
4715 	/* check if there is enough room in userspace */
4716 	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4717 		QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
4718 		return -ENOSPC;
4719 	}
4720 	QETH_CARD_TEXT_(card, 4, "snore%i",
4721 			cmd->data.setadapterparms.hdr.used_total);
4722 	QETH_CARD_TEXT_(card, 4, "sseqn%i",
4723 			cmd->data.setadapterparms.hdr.seq_no);
4724 	/*copy entries to user buffer*/
4725 	memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
4726 	qinfo->udata_offset += data_len;
4727 
4728 	if (cmd->data.setadapterparms.hdr.seq_no <
4729 	    cmd->data.setadapterparms.hdr.used_total)
4730 		return 1;
4731 	return 0;
4732 }
4733 
4734 static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
4735 {
4736 	struct qeth_snmp_ureq __user *ureq;
4737 	struct qeth_cmd_buffer *iob;
4738 	unsigned int req_len;
4739 	struct qeth_arp_query_info qinfo = {0, };
4740 	int rc = 0;
4741 
4742 	QETH_CARD_TEXT(card, 3, "snmpcmd");
4743 
4744 	if (IS_VM_NIC(card))
4745 		return -EOPNOTSUPP;
4746 
4747 	if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
4748 	    IS_LAYER3(card))
4749 		return -EOPNOTSUPP;
4750 
4751 	ureq = (struct qeth_snmp_ureq __user *) udata;
4752 	if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
4753 	    get_user(req_len, &ureq->hdr.req_len))
4754 		return -EFAULT;
4755 
4756 	/* Sanitize user input, to avoid overflows in iob size calculation: */
4757 	if (req_len > QETH_BUFSIZE)
4758 		return -EINVAL;
4759 
4760 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
4761 	if (!iob)
4762 		return -ENOMEM;
4763 
4764 	if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
4765 			   &ureq->cmd, req_len)) {
4766 		qeth_put_cmd(iob);
4767 		return -EFAULT;
4768 	}
4769 
4770 	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
4771 	if (!qinfo.udata) {
4772 		qeth_put_cmd(iob);
4773 		return -ENOMEM;
4774 	}
4775 	qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
4776 
4777 	rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
4778 	if (rc)
4779 		QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
4780 				 CARD_DEVID(card), rc);
4781 	else {
4782 		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4783 			rc = -EFAULT;
4784 	}
4785 
4786 	kfree(qinfo.udata);
4787 	return rc;
4788 }
4789 
4790 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
4791 					 struct qeth_reply *reply,
4792 					 unsigned long data)
4793 {
4794 	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4795 	struct qeth_qoat_priv *priv = reply->param;
4796 	int resdatalen;
4797 
4798 	QETH_CARD_TEXT(card, 3, "qoatcb");
4799 	if (qeth_setadpparms_inspect_rc(cmd))
4800 		return -EIO;
4801 
4802 	resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
4803 
4804 	if (resdatalen > (priv->buffer_len - priv->response_len))
4805 		return -ENOSPC;
4806 
4807 	memcpy(priv->buffer + priv->response_len,
4808 	       &cmd->data.setadapterparms.hdr, resdatalen);
4809 	priv->response_len += resdatalen;
4810 
4811 	if (cmd->data.setadapterparms.hdr.seq_no <
4812 	    cmd->data.setadapterparms.hdr.used_total)
4813 		return 1;
4814 	return 0;
4815 }
4816 
4817 static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4818 {
4819 	int rc = 0;
4820 	struct qeth_cmd_buffer *iob;
4821 	struct qeth_ipa_cmd *cmd;
4822 	struct qeth_query_oat *oat_req;
4823 	struct qeth_query_oat_data oat_data;
4824 	struct qeth_qoat_priv priv;
4825 	void __user *tmp;
4826 
4827 	QETH_CARD_TEXT(card, 3, "qoatcmd");
4828 
4829 	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT))
4830 		return -EOPNOTSUPP;
4831 
4832 	if (copy_from_user(&oat_data, udata, sizeof(oat_data)))
4833 		return -EFAULT;
4834 
4835 	priv.buffer_len = oat_data.buffer_len;
4836 	priv.response_len = 0;
4837 	priv.buffer = vzalloc(oat_data.buffer_len);
4838 	if (!priv.buffer)
4839 		return -ENOMEM;
4840 
4841 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4842 				   SETADP_DATA_SIZEOF(query_oat));
4843 	if (!iob) {
4844 		rc = -ENOMEM;
4845 		goto out_free;
4846 	}
4847 	cmd = __ipa_cmd(iob);
4848 	oat_req = &cmd->data.setadapterparms.data.query_oat;
4849 	oat_req->subcmd_code = oat_data.command;
4850 
4851 	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv);
4852 	if (!rc) {
4853 		tmp = is_compat_task() ? compat_ptr(oat_data.ptr) :
4854 					 u64_to_user_ptr(oat_data.ptr);
4855 		oat_data.response_len = priv.response_len;
4856 
4857 		if (copy_to_user(tmp, priv.buffer, priv.response_len) ||
4858 		    copy_to_user(udata, &oat_data, sizeof(oat_data)))
4859 			rc = -EFAULT;
4860 	}
4861 
4862 out_free:
4863 	vfree(priv.buffer);
4864 	return rc;
4865 }
4866 
4867 static int qeth_query_card_info_cb(struct qeth_card *card,
4868 				   struct qeth_reply *reply, unsigned long data)
4869 {
4870 	struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
4871 	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4872 	struct qeth_query_card_info *card_info;
4873 
4874 	QETH_CARD_TEXT(card, 2, "qcrdincb");
4875 	if (qeth_setadpparms_inspect_rc(cmd))
4876 		return -EIO;
4877 
4878 	card_info = &cmd->data.setadapterparms.data.card_info;
4879 	carrier_info->card_type = card_info->card_type;
4880 	carrier_info->port_mode = card_info->port_mode;
4881 	carrier_info->port_speed = card_info->port_speed;
4882 	return 0;
4883 }
4884 
4885 int qeth_query_card_info(struct qeth_card *card,
4886 			 struct carrier_info *carrier_info)
4887 {
4888 	struct qeth_cmd_buffer *iob;
4889 
4890 	QETH_CARD_TEXT(card, 2, "qcrdinfo");
4891 	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
4892 		return -EOPNOTSUPP;
4893 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
4894 	if (!iob)
4895 		return -ENOMEM;
4896 	return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
4897 					(void *)carrier_info);
4898 }
4899 
4900 /**
4901  * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
4902  * @card: pointer to a qeth_card
4903  *
4904  * Returns
4905  *	0, if a MAC address has been set for the card's netdevice
4906  *	a return code, for various error conditions
4907  */
4908 int qeth_vm_request_mac(struct qeth_card *card)
4909 {
4910 	struct diag26c_mac_resp *response;
4911 	struct diag26c_mac_req *request;
4912 	int rc;
4913 
4914 	QETH_CARD_TEXT(card, 2, "vmreqmac");
4915 
4916 	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
4917 	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
4918 	if (!request || !response) {
4919 		rc = -ENOMEM;
4920 		goto out;
4921 	}
4922 
4923 	request->resp_buf_len = sizeof(*response);
4924 	request->resp_version = DIAG26C_VERSION2;
4925 	request->op_code = DIAG26C_GET_MAC;
4926 	request->devno = card->info.ddev_devno;
4927 
4928 	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4929 	rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
4930 	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4931 	if (rc)
4932 		goto out;
4933 	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
4934 
4935 	if (request->resp_buf_len < sizeof(*response) ||
4936 	    response->version != request->resp_version) {
4937 		rc = -EIO;
4938 		QETH_CARD_TEXT(card, 2, "badresp");
4939 		QETH_CARD_HEX(card, 2, &request->resp_buf_len,
4940 			      sizeof(request->resp_buf_len));
4941 	} else if (!is_valid_ether_addr(response->mac)) {
4942 		rc = -EINVAL;
4943 		QETH_CARD_TEXT(card, 2, "badmac");
4944 		QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
4945 	} else {
4946 		ether_addr_copy(card->dev->dev_addr, response->mac);
4947 	}
4948 
4949 out:
4950 	kfree(response);
4951 	kfree(request);
4952 	return rc;
4953 }
4954 EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
4955 
4956 static void qeth_determine_capabilities(struct qeth_card *card)
4957 {
4958 	struct qeth_channel *channel = &card->data;
4959 	struct ccw_device *ddev = channel->ccwdev;
4960 	int rc;
4961 	int ddev_offline = 0;
4962 
4963 	QETH_CARD_TEXT(card, 2, "detcapab");
4964 	if (!ddev->online) {
4965 		ddev_offline = 1;
4966 		rc = qeth_start_channel(channel);
4967 		if (rc) {
4968 			QETH_CARD_TEXT_(card, 2, "3err%d", rc);
4969 			goto out;
4970 		}
4971 	}
4972 
4973 	rc = qeth_read_conf_data(card);
4974 	if (rc) {
4975 		QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
4976 				 CARD_DEVID(card), rc);
4977 		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
4978 		goto out_offline;
4979 	}
4980 
4981 	rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
4982 	if (rc)
4983 		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
4984 
4985 	QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
4986 	QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
4987 	QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
4988 	QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
4989 	QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
4990 	if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
4991 	    ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
4992 	    ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
4993 		dev_info(&card->gdev->dev,
4994 			"Completion Queueing supported\n");
4995 	} else {
4996 		card->options.cq = QETH_CQ_NOTAVAILABLE;
4997 	}
4998 
4999 
5000 out_offline:
5001 	if (ddev_offline == 1)
5002 		qeth_stop_channel(channel);
5003 out:
5004 	return;
5005 }
5006 
5007 static void qeth_read_ccw_conf_data(struct qeth_card *card)
5008 {
5009 	struct qeth_card_info *info = &card->info;
5010 	struct ccw_device *cdev = CARD_DDEV(card);
5011 	struct ccw_dev_id dev_id;
5012 
5013 	QETH_CARD_TEXT(card, 2, "ccwconfd");
5014 	ccw_device_get_id(cdev, &dev_id);
5015 
5016 	info->ddev_devno = dev_id.devno;
5017 	info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) &&
5018 			  !ccw_device_get_iid(cdev, &info->iid) &&
5019 			  !ccw_device_get_chid(cdev, 0, &info->chid);
5020 	info->ssid = dev_id.ssid;
5021 
5022 	dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n",
5023 		 info->chid, info->chpid);
5024 
5025 	QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno);
5026 	QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid);
5027 	QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid);
5028 	QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid);
5029 	QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid);
5030 	QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid);
5031 	QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid);
5032 }
5033 
5034 static int qeth_qdio_establish(struct qeth_card *card)
5035 {
5036 	struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
5037 	struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
5038 	struct qdio_initialize init_data;
5039 	char *qib_param_field;
5040 	unsigned int i;
5041 	int rc = 0;
5042 
5043 	QETH_CARD_TEXT(card, 2, "qdioest");
5044 
5045 	qib_param_field = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
5046 	if (!qib_param_field) {
5047 		rc =  -ENOMEM;
5048 		goto out_free_nothing;
5049 	}
5050 
5051 	qeth_create_qib_param_field(card, qib_param_field);
5052 	qeth_create_qib_param_field_blkt(card, qib_param_field);
5053 
5054 	in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
5055 	if (card->options.cq == QETH_CQ_ENABLED)
5056 		in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;
5057 
5058 	for (i = 0; i < card->qdio.no_out_queues; i++)
5059 		out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;
5060 
5061 	memset(&init_data, 0, sizeof(struct qdio_initialize));
5062 	init_data.q_format		 = IS_IQD(card) ? QDIO_IQDIO_QFMT :
5063 							  QDIO_QETH_QFMT;
5064 	init_data.qib_param_field_format = 0;
5065 	init_data.qib_param_field        = qib_param_field;
5066 	init_data.no_input_qs            = card->qdio.no_in_queues;
5067 	init_data.no_output_qs           = card->qdio.no_out_queues;
5068 	init_data.input_handler		 = qeth_qdio_input_handler;
5069 	init_data.output_handler	 = qeth_qdio_output_handler;
5070 	init_data.irq_poll		 = qeth_qdio_poll;
5071 	init_data.int_parm               = (unsigned long) card;
5072 	init_data.input_sbal_addr_array  = in_sbal_ptrs;
5073 	init_data.output_sbal_addr_array = out_sbal_ptrs;
5074 	init_data.output_sbal_state_array = card->qdio.out_bufstates;
5075 	init_data.scan_threshold	 = IS_IQD(card) ? 0 : 32;
5076 
5077 	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
5078 		QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
5079 		rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
5080 				   init_data.no_output_qs);
5081 		if (rc) {
5082 			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
5083 			goto out;
5084 		}
5085 		rc = qdio_establish(CARD_DDEV(card), &init_data);
5086 		if (rc) {
5087 			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
5088 			qdio_free(CARD_DDEV(card));
5089 		}
5090 	}
5091 
5092 	switch (card->options.cq) {
5093 	case QETH_CQ_ENABLED:
5094 		dev_info(&card->gdev->dev, "Completion Queue support enabled");
5095 		break;
5096 	case QETH_CQ_DISABLED:
5097 		dev_info(&card->gdev->dev, "Completion Queue support disabled");
5098 		break;
5099 	default:
5100 		break;
5101 	}
5102 out:
5103 	kfree(qib_param_field);
5104 out_free_nothing:
5105 	return rc;
5106 }
5107 
5108 static void qeth_core_free_card(struct qeth_card *card)
5109 {
5110 	QETH_CARD_TEXT(card, 2, "freecrd");
5111 
5112 	unregister_service_level(&card->qeth_service_level);
5113 	debugfs_remove_recursive(card->debugfs);
5114 	qeth_put_cmd(card->read_cmd);
5115 	destroy_workqueue(card->event_wq);
5116 	dev_set_drvdata(&card->gdev->dev, NULL);
5117 	kfree(card);
5118 }
5119 
5120 static void qeth_trace_features(struct qeth_card *card)
5121 {
5122 	QETH_CARD_TEXT(card, 2, "features");
5123 	QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
5124 	QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
5125 	QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
5126 	QETH_CARD_HEX(card, 2, &card->info.diagass_support,
5127 		      sizeof(card->info.diagass_support));
5128 }
5129 
5130 static struct ccw_device_id qeth_ids[] = {
5131 	{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
5132 					.driver_info = QETH_CARD_TYPE_OSD},
5133 	{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
5134 					.driver_info = QETH_CARD_TYPE_IQD},
5135 #ifdef CONFIG_QETH_OSN
5136 	{CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
5137 					.driver_info = QETH_CARD_TYPE_OSN},
5138 #endif
5139 	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
5140 					.driver_info = QETH_CARD_TYPE_OSM},
5141 #ifdef CONFIG_QETH_OSX
5142 	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
5143 					.driver_info = QETH_CARD_TYPE_OSX},
5144 #endif
5145 	{},
5146 };
5147 MODULE_DEVICE_TABLE(ccw, qeth_ids);
5148 
5149 static struct ccw_driver qeth_ccw_driver = {
5150 	.driver = {
5151 		.owner = THIS_MODULE,
5152 		.name = "qeth",
5153 	},
5154 	.ids = qeth_ids,
5155 	.probe = ccwgroup_probe_ccwdev,
5156 	.remove = ccwgroup_remove_ccwdev,
5157 };
5158 
5159 static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
5160 {
5161 	int retries = 3;
5162 	int rc;
5163 
5164 	QETH_CARD_TEXT(card, 2, "hrdsetup");
5165 	atomic_set(&card->force_alloc_skb, 0);
5166 	rc = qeth_update_from_chp_desc(card);
5167 	if (rc)
5168 		return rc;
5169 retry:
5170 	if (retries < 3)
5171 		QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
5172 				 CARD_DEVID(card));
5173 	rc = qeth_qdio_clear_card(card, !IS_IQD(card));
5174 	qeth_stop_channel(&card->data);
5175 	qeth_stop_channel(&card->write);
5176 	qeth_stop_channel(&card->read);
5177 	qdio_free(CARD_DDEV(card));
5178 
5179 	rc = qeth_start_channel(&card->read);
5180 	if (rc)
5181 		goto retriable;
5182 	rc = qeth_start_channel(&card->write);
5183 	if (rc)
5184 		goto retriable;
5185 	rc = qeth_start_channel(&card->data);
5186 	if (rc)
5187 		goto retriable;
5188 retriable:
5189 	if (rc == -ERESTARTSYS) {
5190 		QETH_CARD_TEXT(card, 2, "break1");
5191 		return rc;
5192 	} else if (rc) {
5193 		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
5194 		if (--retries < 0)
5195 			goto out;
5196 		else
5197 			goto retry;
5198 	}
5199 
5200 	qeth_determine_capabilities(card);
5201 	qeth_read_ccw_conf_data(card);
5202 	qeth_idx_init(card);
5203 
5204 	rc = qeth_idx_activate_read_channel(card);
5205 	if (rc == -EINTR) {
5206 		QETH_CARD_TEXT(card, 2, "break2");
5207 		return rc;
5208 	} else if (rc) {
5209 		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
5210 		if (--retries < 0)
5211 			goto out;
5212 		else
5213 			goto retry;
5214 	}
5215 
5216 	rc = qeth_idx_activate_write_channel(card);
5217 	if (rc == -EINTR) {
5218 		QETH_CARD_TEXT(card, 2, "break3");
5219 		return rc;
5220 	} else if (rc) {
5221 		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
5222 		if (--retries < 0)
5223 			goto out;
5224 		else
5225 			goto retry;
5226 	}
5227 	card->read_or_write_problem = 0;
5228 	rc = qeth_mpc_initialize(card);
5229 	if (rc) {
5230 		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
5231 		goto out;
5232 	}
5233 
5234 	rc = qeth_send_startlan(card);
5235 	if (rc) {
5236 		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
5237 		if (rc == -ENETDOWN) {
5238 			dev_warn(&card->gdev->dev, "The LAN is offline\n");
5239 			*carrier_ok = false;
5240 		} else {
5241 			goto out;
5242 		}
5243 	} else {
5244 		*carrier_ok = true;
5245 	}
5246 
5247 	card->options.ipa4.supported = 0;
5248 	card->options.ipa6.supported = 0;
5249 	card->options.adp.supported = 0;
5250 	card->options.sbp.supported_funcs = 0;
5251 	card->info.diagass_support = 0;
5252 	rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
5253 	if (rc == -ENOMEM)
5254 		goto out;
5255 	if (qeth_is_supported(card, IPA_IPV6)) {
5256 		rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
5257 		if (rc == -ENOMEM)
5258 			goto out;
5259 	}
5260 	if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
5261 		rc = qeth_query_setadapterparms(card);
5262 		if (rc < 0) {
5263 			QETH_CARD_TEXT_(card, 2, "7err%d", rc);
5264 			goto out;
5265 		}
5266 	}
5267 	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
5268 		rc = qeth_query_setdiagass(card);
5269 		if (rc)
5270 			QETH_CARD_TEXT_(card, 2, "8err%d", rc);
5271 	}
5272 
5273 	qeth_trace_features(card);
5274 
5275 	if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
5276 	    (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
5277 		card->info.hwtrap = 0;
5278 
5279 	if (card->options.isolation != ISOLATION_MODE_NONE) {
5280 		rc = qeth_setadpparms_set_access_ctrl(card,
5281 						      card->options.isolation);
5282 		if (rc)
5283 			goto out;
5284 	}
5285 
5286 	rc = qeth_init_qdio_queues(card);
5287 	if (rc) {
5288 		QETH_CARD_TEXT_(card, 2, "9err%d", rc);
5289 		goto out;
5290 	}
5291 
5292 	return 0;
5293 out:
5294 	dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
5295 		"an error on the device\n");
5296 	QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
5297 			 CARD_DEVID(card), rc);
5298 	return rc;
5299 }
5300 
5301 static int qeth_set_online(struct qeth_card *card)
5302 {
5303 	bool carrier_ok;
5304 	int rc;
5305 
5306 	mutex_lock(&card->discipline_mutex);
5307 	mutex_lock(&card->conf_mutex);
5308 	QETH_CARD_TEXT(card, 2, "setonlin");
5309 
5310 	rc = qeth_hardsetup_card(card, &carrier_ok);
5311 	if (rc) {
5312 		QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
5313 		rc = -ENODEV;
5314 		goto err_hardsetup;
5315 	}
5316 
5317 	qeth_print_status_message(card);
5318 
5319 	rc = card->discipline->set_online(card, carrier_ok);
5320 	if (rc)
5321 		goto err_online;
5322 
5323 	/* let user_space know that device is online */
5324 	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
5325 
5326 	mutex_unlock(&card->conf_mutex);
5327 	mutex_unlock(&card->discipline_mutex);
5328 	return 0;
5329 
5330 err_online:
5331 err_hardsetup:
5332 	qeth_qdio_clear_card(card, 0);
5333 	qeth_clear_working_pool_list(card);
5334 	qeth_flush_local_addrs(card);
5335 
5336 	qeth_stop_channel(&card->data);
5337 	qeth_stop_channel(&card->write);
5338 	qeth_stop_channel(&card->read);
5339 	qdio_free(CARD_DDEV(card));
5340 
5341 	mutex_unlock(&card->conf_mutex);
5342 	mutex_unlock(&card->discipline_mutex);
5343 	return rc;
5344 }
5345 
5346 int qeth_set_offline(struct qeth_card *card, bool resetting)
5347 {
5348 	int rc, rc2, rc3;
5349 
5350 	mutex_lock(&card->discipline_mutex);
5351 	mutex_lock(&card->conf_mutex);
5352 	QETH_CARD_TEXT(card, 3, "setoffl");
5353 
5354 	if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
5355 		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
5356 		card->info.hwtrap = 1;
5357 	}
5358 
5359 	/* cancel any stalled cmd that might block the rtnl: */
5360 	qeth_clear_ipacmd_list(card);
5361 
5362 	rtnl_lock();
5363 	card->info.open_when_online = card->dev->flags & IFF_UP;
5364 	dev_close(card->dev);
5365 	netif_device_detach(card->dev);
5366 	netif_carrier_off(card->dev);
5367 	rtnl_unlock();
5368 
5369 	cancel_work_sync(&card->rx_mode_work);
5370 
5371 	card->discipline->set_offline(card);
5372 
5373 	qeth_qdio_clear_card(card, 0);
5374 	qeth_drain_output_queues(card);
5375 	qeth_clear_working_pool_list(card);
5376 	qeth_flush_local_addrs(card);
5377 	card->info.promisc_mode = 0;
5378 
5379 	rc  = qeth_stop_channel(&card->data);
5380 	rc2 = qeth_stop_channel(&card->write);
5381 	rc3 = qeth_stop_channel(&card->read);
5382 	if (!rc)
5383 		rc = (rc2) ? rc2 : rc3;
5384 	if (rc)
5385 		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
5386 	qdio_free(CARD_DDEV(card));
5387 
5388 	/* let user_space know that device is offline */
5389 	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
5390 
5391 	mutex_unlock(&card->conf_mutex);
5392 	mutex_unlock(&card->discipline_mutex);
5393 	return 0;
5394 }
5395 EXPORT_SYMBOL_GPL(qeth_set_offline);
5396 
5397 static int qeth_do_reset(void *data)
5398 {
5399 	struct qeth_card *card = data;
5400 	int rc;
5401 
5402 	QETH_CARD_TEXT(card, 2, "recover1");
5403 	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
5404 		return 0;
5405 	QETH_CARD_TEXT(card, 2, "recover2");
5406 	dev_warn(&card->gdev->dev,
5407 		 "A recovery process has been started for the device\n");
5408 
5409 	qeth_set_offline(card, true);
5410 	rc = qeth_set_online(card);
5411 	if (!rc) {
5412 		dev_info(&card->gdev->dev,
5413 			 "Device successfully recovered!\n");
5414 	} else {
5415 		ccwgroup_set_offline(card->gdev);
5416 		dev_warn(&card->gdev->dev,
5417 			 "The qeth device driver failed to recover an error on the device\n");
5418 	}
5419 	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
5420 	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
5421 	return 0;
5422 }
5423 
5424 #if IS_ENABLED(CONFIG_QETH_L3)
5425 static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
5426 				struct qeth_hdr *hdr)
5427 {
5428 	struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
5429 	struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
5430 	struct net_device *dev = skb->dev;
5431 
5432 	if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
5433 		dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
5434 				"FAKELL", skb->len);
5435 		return;
5436 	}
5437 
5438 	if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
5439 		u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
5440 							     ETH_P_IP;
5441 		unsigned char tg_addr[ETH_ALEN];
5442 
5443 		skb_reset_network_header(skb);
5444 		switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
5445 		case QETH_CAST_MULTICAST:
5446 			if (prot == ETH_P_IP)
5447 				ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
5448 			else
5449 				ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
5450 			QETH_CARD_STAT_INC(card, rx_multicast);
5451 			break;
5452 		case QETH_CAST_BROADCAST:
5453 			ether_addr_copy(tg_addr, dev->broadcast);
5454 			QETH_CARD_STAT_INC(card, rx_multicast);
5455 			break;
5456 		default:
5457 			if (card->options.sniffer)
5458 				skb->pkt_type = PACKET_OTHERHOST;
5459 			ether_addr_copy(tg_addr, dev->dev_addr);
5460 		}
5461 
5462 		if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
5463 			dev_hard_header(skb, dev, prot, tg_addr,
5464 					&l3_hdr->next_hop.rx.src_mac, skb->len);
5465 		else
5466 			dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
5467 					skb->len);
5468 	}
5469 
5470 	/* copy VLAN tag from hdr into skb */
5471 	if (!card->options.sniffer &&
5472 	    (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
5473 				  QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
5474 		u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
5475 				l3_hdr->vlan_id :
5476 				l3_hdr->next_hop.rx.vlan_id;
5477 
5478 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
5479 	}
5480 }
5481 #endif
5482 
5483 static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
5484 			     struct qeth_hdr *hdr, bool uses_frags)
5485 {
5486 	struct napi_struct *napi = &card->napi;
5487 	bool is_cso;
5488 
5489 	switch (hdr->hdr.l2.id) {
5490 	case QETH_HEADER_TYPE_OSN:
5491 		skb_push(skb, sizeof(*hdr));
5492 		skb_copy_to_linear_data(skb, hdr, sizeof(*hdr));
5493 		QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
5494 		QETH_CARD_STAT_INC(card, rx_packets);
5495 
5496 		card->osn_info.data_cb(skb);
5497 		return;
5498 #if IS_ENABLED(CONFIG_QETH_L3)
5499 	case QETH_HEADER_TYPE_LAYER3:
5500 		qeth_l3_rebuild_skb(card, skb, hdr);
5501 		is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
5502 		break;
5503 #endif
5504 	case QETH_HEADER_TYPE_LAYER2:
5505 		is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
5506 		break;
5507 	default:
5508 		/* never happens */
5509 		if (uses_frags)
5510 			napi_free_frags(napi);
5511 		else
5512 			dev_kfree_skb_any(skb);
5513 		return;
5514 	}
5515 
5516 	if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
5517 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5518 		QETH_CARD_STAT_INC(card, rx_skb_csum);
5519 	} else {
5520 		skb->ip_summed = CHECKSUM_NONE;
5521 	}
5522 
5523 	QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
5524 	QETH_CARD_STAT_INC(card, rx_packets);
5525 	if (skb_is_nonlinear(skb)) {
5526 		QETH_CARD_STAT_INC(card, rx_sg_skbs);
5527 		QETH_CARD_STAT_ADD(card, rx_sg_frags,
5528 				   skb_shinfo(skb)->nr_frags);
5529 	}
5530 
5531 	if (uses_frags) {
5532 		napi_gro_frags(napi);
5533 	} else {
5534 		skb->protocol = eth_type_trans(skb, skb->dev);
5535 		napi_gro_receive(napi, skb);
5536 	}
5537 }
5538 
5539 static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
5540 {
5541 	struct page *page = virt_to_page(data);
5542 	unsigned int next_frag;
5543 
5544 	next_frag = skb_shinfo(skb)->nr_frags;
5545 	get_page(page);
5546 	skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
5547 			data_len);
5548 }
5549 
5550 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
5551 {
5552 	return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
5553 }
5554 
5555 static int qeth_extract_skb(struct qeth_card *card,
5556 			    struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
5557 			    int *__offset)
5558 {
5559 	struct qeth_priv *priv = netdev_priv(card->dev);
5560 	struct qdio_buffer *buffer = qethbuffer->buffer;
5561 	struct napi_struct *napi = &card->napi;
5562 	struct qdio_buffer_element *element;
5563 	unsigned int linear_len = 0;
5564 	bool uses_frags = false;
5565 	int offset = *__offset;
5566 	bool use_rx_sg = false;
5567 	unsigned int headroom;
5568 	struct qeth_hdr *hdr;
5569 	struct sk_buff *skb;
5570 	int skb_len = 0;
5571 
5572 	element = &buffer->element[*element_no];
5573 
5574 next_packet:
5575 	/* qeth_hdr must not cross element boundaries */
5576 	while (element->length < offset + sizeof(struct qeth_hdr)) {
5577 		if (qeth_is_last_sbale(element))
5578 			return -ENODATA;
5579 		element++;
5580 		offset = 0;
5581 	}
5582 
5583 	hdr = phys_to_virt(element->addr) + offset;
5584 	offset += sizeof(*hdr);
5585 	skb = NULL;
5586 
5587 	switch (hdr->hdr.l2.id) {
5588 	case QETH_HEADER_TYPE_LAYER2:
5589 		skb_len = hdr->hdr.l2.pkt_length;
5590 		linear_len = ETH_HLEN;
5591 		headroom = 0;
5592 		break;
5593 	case QETH_HEADER_TYPE_LAYER3:
5594 		skb_len = hdr->hdr.l3.length;
5595 		if (!IS_LAYER3(card)) {
5596 			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5597 			goto walk_packet;
5598 		}
5599 
5600 		if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
5601 			linear_len = ETH_HLEN;
5602 			headroom = 0;
5603 			break;
5604 		}
5605 
5606 		if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
5607 			linear_len = sizeof(struct ipv6hdr);
5608 		else
5609 			linear_len = sizeof(struct iphdr);
5610 		headroom = ETH_HLEN;
5611 		break;
5612 	case QETH_HEADER_TYPE_OSN:
5613 		skb_len = hdr->hdr.osn.pdu_length;
5614 		if (!IS_OSN(card)) {
5615 			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5616 			goto walk_packet;
5617 		}
5618 
5619 		linear_len = skb_len;
5620 		headroom = sizeof(struct qeth_hdr);
5621 		break;
5622 	default:
5623 		if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
5624 			QETH_CARD_STAT_INC(card, rx_frame_errors);
5625 		else
5626 			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5627 
5628 		/* Can't determine packet length, drop the whole buffer. */
5629 		return -EPROTONOSUPPORT;
5630 	}
5631 
5632 	if (skb_len < linear_len) {
5633 		QETH_CARD_STAT_INC(card, rx_dropped_runt);
5634 		goto walk_packet;
5635 	}
5636 
5637 	use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
5638 		    (skb_len > READ_ONCE(priv->rx_copybreak) &&
5639 		     !atomic_read(&card->force_alloc_skb) &&
5640 		     !IS_OSN(card));
5641 
5642 	if (use_rx_sg) {
5643 		/* QETH_CQ_ENABLED only: */
5644 		if (qethbuffer->rx_skb &&
5645 		    skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
5646 			skb = qethbuffer->rx_skb;
5647 			qethbuffer->rx_skb = NULL;
5648 			goto use_skb;
5649 		}
5650 
5651 		skb = napi_get_frags(napi);
5652 		if (!skb) {
5653 			/* -ENOMEM, no point in falling back further. */
5654 			QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5655 			goto walk_packet;
5656 		}
5657 
5658 		if (skb_tailroom(skb) >= linear_len + headroom) {
5659 			uses_frags = true;
5660 			goto use_skb;
5661 		}
5662 
5663 		netdev_info_once(card->dev,
5664 				 "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
5665 				 linear_len + headroom, skb_tailroom(skb));
5666 		/* Shouldn't happen. Don't optimize, fall back to linear skb. */
5667 	}
5668 
5669 	linear_len = skb_len;
5670 	skb = napi_alloc_skb(napi, linear_len + headroom);
5671 	if (!skb) {
5672 		QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5673 		goto walk_packet;
5674 	}
5675 
5676 use_skb:
5677 	if (headroom)
5678 		skb_reserve(skb, headroom);
5679 walk_packet:
5680 	while (skb_len) {
5681 		int data_len = min(skb_len, (int)(element->length - offset));
5682 		char *data = phys_to_virt(element->addr) + offset;
5683 
5684 		skb_len -= data_len;
5685 		offset += data_len;
5686 
5687 		/* Extract data from current element: */
5688 		if (skb && data_len) {
5689 			if (linear_len) {
5690 				unsigned int copy_len;
5691 
5692 				copy_len = min_t(unsigned int, linear_len,
5693 						 data_len);
5694 
5695 				skb_put_data(skb, data, copy_len);
5696 				linear_len -= copy_len;
5697 				data_len -= copy_len;
5698 				data += copy_len;
5699 			}
5700 
5701 			if (data_len)
5702 				qeth_create_skb_frag(skb, data, data_len);
5703 		}
5704 
5705 		/* Step forward to next element: */
5706 		if (skb_len) {
5707 			if (qeth_is_last_sbale(element)) {
5708 				QETH_CARD_TEXT(card, 4, "unexeob");
5709 				QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
5710 				if (skb) {
5711 					if (uses_frags)
5712 						napi_free_frags(napi);
5713 					else
5714 						dev_kfree_skb_any(skb);
5715 					QETH_CARD_STAT_INC(card,
5716 							   rx_length_errors);
5717 				}
5718 				return -EMSGSIZE;
5719 			}
5720 			element++;
5721 			offset = 0;
5722 		}
5723 	}
5724 
5725 	/* This packet was skipped, go get another one: */
5726 	if (!skb)
5727 		goto next_packet;
5728 
5729 	*element_no = element - &buffer->element[0];
5730 	*__offset = offset;
5731 
5732 	qeth_receive_skb(card, skb, hdr, uses_frags);
5733 	return 0;
5734 }
5735 
5736 static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
5737 				      struct qeth_qdio_buffer *buf, bool *done)
5738 {
5739 	unsigned int work_done = 0;
5740 
5741 	while (budget) {
5742 		if (qeth_extract_skb(card, buf, &card->rx.buf_element,
5743 				     &card->rx.e_offset)) {
5744 			*done = true;
5745 			break;
5746 		}
5747 
5748 		work_done++;
5749 		budget--;
5750 	}
5751 
5752 	return work_done;
5753 }
5754 
5755 static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
5756 {
5757 	struct qeth_rx *ctx = &card->rx;
5758 	unsigned int work_done = 0;
5759 
5760 	while (budget > 0) {
5761 		struct qeth_qdio_buffer *buffer;
5762 		unsigned int skbs_done = 0;
5763 		bool done = false;
5764 
5765 		/* Fetch completed RX buffers: */
5766 		if (!card->rx.b_count) {
5767 			card->rx.qdio_err = 0;
5768 			card->rx.b_count = qdio_get_next_buffers(
5769 				card->data.ccwdev, 0, &card->rx.b_index,
5770 				&card->rx.qdio_err);
5771 			if (card->rx.b_count <= 0) {
5772 				card->rx.b_count = 0;
5773 				break;
5774 			}
5775 		}
5776 
5777 		/* Process one completed RX buffer: */
5778 		buffer = &card->qdio.in_q->bufs[card->rx.b_index];
5779 		if (!(card->rx.qdio_err &&
5780 		      qeth_check_qdio_errors(card, buffer->buffer,
5781 					     card->rx.qdio_err, "qinerr")))
5782 			skbs_done = qeth_extract_skbs(card, budget, buffer,
5783 						      &done);
5784 		else
5785 			done = true;
5786 
5787 		work_done += skbs_done;
5788 		budget -= skbs_done;
5789 
5790 		if (done) {
5791 			QETH_CARD_STAT_INC(card, rx_bufs);
5792 			qeth_put_buffer_pool_entry(card, buffer->pool_entry);
5793 			buffer->pool_entry = NULL;
5794 			card->rx.b_count--;
5795 			ctx->bufs_refill++;
5796 			ctx->bufs_refill -= qeth_rx_refill_queue(card,
5797 								 ctx->bufs_refill);
5798 
5799 			/* Step forward to next buffer: */
5800 			card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
5801 			card->rx.buf_element = 0;
5802 			card->rx.e_offset = 0;
5803 		}
5804 	}
5805 
5806 	return work_done;
5807 }
5808 
5809 static void qeth_cq_poll(struct qeth_card *card)
5810 {
5811 	unsigned int work_done = 0;
5812 
5813 	while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
5814 		unsigned int start, error;
5815 		int completed;
5816 
5817 		completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start,
5818 					       &error);
5819 		if (completed <= 0)
5820 			return;
5821 
5822 		qeth_qdio_cq_handler(card, error, 1, start, completed);
5823 		work_done += completed;
5824 	}
5825 }
5826 
5827 int qeth_poll(struct napi_struct *napi, int budget)
5828 {
5829 	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
5830 	unsigned int work_done;
5831 
5832 	work_done = qeth_rx_poll(card, budget);
5833 
5834 	if (card->options.cq == QETH_CQ_ENABLED)
5835 		qeth_cq_poll(card);
5836 
5837 	if (budget) {
5838 		struct qeth_rx *ctx = &card->rx;
5839 
5840 		/* Process any substantial refill backlog: */
5841 		ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);
5842 
5843 		/* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
5844 		if (work_done >= budget)
5845 			return work_done;
5846 	}
5847 
5848 	if (napi_complete_done(napi, work_done) &&
5849 	    qdio_start_irq(CARD_DDEV(card)))
5850 		napi_schedule(napi);
5851 
5852 	return work_done;
5853 }
5854 EXPORT_SYMBOL_GPL(qeth_poll);
5855 
5856 static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
5857 				 unsigned int bidx, bool error, int budget)
5858 {
5859 	struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
5860 	u8 sflags = buffer->buffer->element[15].sflags;
5861 	struct qeth_card *card = queue->card;
5862 
5863 	if (queue->bufstates && (queue->bufstates[bidx].flags &
5864 				 QDIO_OUTBUF_STATE_FLAG_PENDING)) {
5865 		WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
5866 
5867 		if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
5868 						   QETH_QDIO_BUF_PENDING) ==
5869 		    QETH_QDIO_BUF_PRIMED)
5870 			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
5871 
5872 		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
5873 
5874 		/* prepare the queue slot for re-use: */
5875 		qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
5876 		if (qeth_init_qdio_out_buf(queue, bidx)) {
5877 			QETH_CARD_TEXT(card, 2, "outofbuf");
5878 			qeth_schedule_recovery(card);
5879 		}
5880 
5881 		return;
5882 	}
5883 
5884 	if (card->options.cq == QETH_CQ_ENABLED)
5885 		qeth_notify_skbs(queue, buffer,
5886 				 qeth_compute_cq_notification(sflags, 0));
5887 	qeth_clear_output_buffer(queue, buffer, error, budget);
5888 }
5889 
5890 static int qeth_tx_poll(struct napi_struct *napi, int budget)
5891 {
5892 	struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
5893 	unsigned int queue_no = queue->queue_no;
5894 	struct qeth_card *card = queue->card;
5895 	struct net_device *dev = card->dev;
5896 	unsigned int work_done = 0;
5897 	struct netdev_queue *txq;
5898 
5899 	txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
5900 
5901 	while (1) {
5902 		unsigned int start, error, i;
5903 		unsigned int packets = 0;
5904 		unsigned int bytes = 0;
5905 		int completed;
5906 
5907 		if (qeth_out_queue_is_empty(queue)) {
5908 			napi_complete(napi);
5909 			return 0;
5910 		}
5911 
5912 		/* Give the CPU a breather: */
5913 		if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
5914 			QETH_TXQ_STAT_INC(queue, completion_yield);
5915 			if (napi_complete_done(napi, 0))
5916 				napi_schedule(napi);
5917 			return 0;
5918 		}
5919 
5920 		completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
5921 					       &start, &error);
5922 		if (completed <= 0) {
5923 			/* Ensure we see TX completion for pending work: */
5924 			if (napi_complete_done(napi, 0))
5925 				qeth_tx_arm_timer(queue, QETH_TX_TIMER_USECS);
5926 			return 0;
5927 		}
5928 
5929 		for (i = start; i < start + completed; i++) {
5930 			struct qeth_qdio_out_buffer *buffer;
5931 			unsigned int bidx = QDIO_BUFNR(i);
5932 
5933 			buffer = queue->bufs[bidx];
5934 			packets += buffer->frames;
5935 			bytes += buffer->bytes;
5936 
5937 			qeth_handle_send_error(card, buffer, error);
5938 			qeth_iqd_tx_complete(queue, bidx, error, budget);
5939 			qeth_cleanup_handled_pending(queue, bidx, false);
5940 		}
5941 
5942 		netdev_tx_completed_queue(txq, packets, bytes);
5943 		atomic_sub(completed, &queue->used_buffers);
5944 		work_done += completed;
5945 
5946 		/* xmit may have observed the full-condition, but not yet
5947 		 * stopped the txq. In which case the code below won't trigger.
5948 		 * So before returning, xmit will re-check the txq's fill level
5949 		 * and wake it up if needed.
5950 		 */
5951 		if (netif_tx_queue_stopped(txq) &&
5952 		    !qeth_out_queue_is_full(queue))
5953 			netif_tx_wake_queue(txq);
5954 	}
5955 }
5956 
5957 static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
5958 {
5959 	if (!cmd->hdr.return_code)
5960 		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5961 	return cmd->hdr.return_code;
5962 }
5963 
5964 static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
5965 					struct qeth_reply *reply,
5966 					unsigned long data)
5967 {
5968 	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5969 	struct qeth_ipa_caps *caps = reply->param;
5970 
5971 	if (qeth_setassparms_inspect_rc(cmd))
5972 		return -EIO;
5973 
5974 	caps->supported = cmd->data.setassparms.data.caps.supported;
5975 	caps->enabled = cmd->data.setassparms.data.caps.enabled;
5976 	return 0;
5977 }
5978 
5979 int qeth_setassparms_cb(struct qeth_card *card,
5980 			struct qeth_reply *reply, unsigned long data)
5981 {
5982 	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5983 
5984 	QETH_CARD_TEXT(card, 4, "defadpcb");
5985 
5986 	if (cmd->hdr.return_code)
5987 		return -EIO;
5988 
5989 	cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5990 	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
5991 		card->options.ipa4.enabled = cmd->hdr.assists.enabled;
5992 	if (cmd->hdr.prot_version == QETH_PROT_IPV6)
5993 		card->options.ipa6.enabled = cmd->hdr.assists.enabled;
5994 	return 0;
5995 }
5996 EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
5997 
5998 struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
5999 						 enum qeth_ipa_funcs ipa_func,
6000 						 u16 cmd_code,
6001 						 unsigned int data_length,
6002 						 enum qeth_prot_versions prot)
6003 {
6004 	struct qeth_ipacmd_setassparms *setassparms;
6005 	struct qeth_ipacmd_setassparms_hdr *hdr;
6006 	struct qeth_cmd_buffer *iob;
6007 
6008 	QETH_CARD_TEXT(card, 4, "getasscm");
6009 	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
6010 				 data_length +
6011 				 offsetof(struct qeth_ipacmd_setassparms,
6012 					  data));
6013 	if (!iob)
6014 		return NULL;
6015 
6016 	setassparms = &__ipa_cmd(iob)->data.setassparms;
6017 	setassparms->assist_no = ipa_func;
6018 
6019 	hdr = &setassparms->hdr;
6020 	hdr->length = sizeof(*hdr) + data_length;
6021 	hdr->command_code = cmd_code;
6022 	return iob;
6023 }
6024 EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
6025 
6026 int qeth_send_simple_setassparms_prot(struct qeth_card *card,
6027 				      enum qeth_ipa_funcs ipa_func,
6028 				      u16 cmd_code, u32 *data,
6029 				      enum qeth_prot_versions prot)
6030 {
6031 	unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
6032 	struct qeth_cmd_buffer *iob;
6033 
6034 	QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
6035 	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
6036 	if (!iob)
6037 		return -ENOMEM;
6038 
6039 	if (data)
6040 		__ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
6041 	return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
6042 }
6043 EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
6044 
6045 static void qeth_unregister_dbf_views(void)
6046 {
6047 	int x;
6048 	for (x = 0; x < QETH_DBF_INFOS; x++) {
6049 		debug_unregister(qeth_dbf[x].id);
6050 		qeth_dbf[x].id = NULL;
6051 	}
6052 }
6053 
6054 void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
6055 {
6056 	char dbf_txt_buf[32];
6057 	va_list args;
6058 
6059 	if (!debug_level_enabled(id, level))
6060 		return;
6061 	va_start(args, fmt);
6062 	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
6063 	va_end(args);
6064 	debug_text_event(id, level, dbf_txt_buf);
6065 }
6066 EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
6067 
6068 static int qeth_register_dbf_views(void)
6069 {
6070 	int ret;
6071 	int x;
6072 
6073 	for (x = 0; x < QETH_DBF_INFOS; x++) {
6074 		/* register the areas */
6075 		qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
6076 						qeth_dbf[x].pages,
6077 						qeth_dbf[x].areas,
6078 						qeth_dbf[x].len);
6079 		if (qeth_dbf[x].id == NULL) {
6080 			qeth_unregister_dbf_views();
6081 			return -ENOMEM;
6082 		}
6083 
6084 		/* register a view */
6085 		ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
6086 		if (ret) {
6087 			qeth_unregister_dbf_views();
6088 			return ret;
6089 		}
6090 
6091 		/* set a passing level */
6092 		debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
6093 	}
6094 
6095 	return 0;
6096 }
6097 
6098 static DEFINE_MUTEX(qeth_mod_mutex);	/* for synchronized module loading */
6099 
6100 int qeth_core_load_discipline(struct qeth_card *card,
6101 		enum qeth_discipline_id discipline)
6102 {
6103 	mutex_lock(&qeth_mod_mutex);
6104 	switch (discipline) {
6105 	case QETH_DISCIPLINE_LAYER3:
6106 		card->discipline = try_then_request_module(
6107 			symbol_get(qeth_l3_discipline), "qeth_l3");
6108 		break;
6109 	case QETH_DISCIPLINE_LAYER2:
6110 		card->discipline = try_then_request_module(
6111 			symbol_get(qeth_l2_discipline), "qeth_l2");
6112 		break;
6113 	default:
6114 		break;
6115 	}
6116 	mutex_unlock(&qeth_mod_mutex);
6117 
6118 	if (!card->discipline) {
6119 		dev_err(&card->gdev->dev, "There is no kernel module to "
6120 			"support discipline %d\n", discipline);
6121 		return -EINVAL;
6122 	}
6123 
6124 	card->options.layer = discipline;
6125 	return 0;
6126 }
6127 
6128 void qeth_core_free_discipline(struct qeth_card *card)
6129 {
6130 	if (IS_LAYER2(card))
6131 		symbol_put(qeth_l2_discipline);
6132 	else
6133 		symbol_put(qeth_l3_discipline);
6134 	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
6135 	card->discipline = NULL;
6136 }
6137 
6138 const struct device_type qeth_generic_devtype = {
6139 	.name = "qeth_generic",
6140 	.groups = qeth_generic_attr_groups,
6141 };
6142 EXPORT_SYMBOL_GPL(qeth_generic_devtype);
6143 
6144 static const struct device_type qeth_osn_devtype = {
6145 	.name = "qeth_osn",
6146 	.groups = qeth_osn_attr_groups,
6147 };
6148 
6149 #define DBF_NAME_LEN	20
6150 
6151 struct qeth_dbf_entry {
6152 	char dbf_name[DBF_NAME_LEN];
6153 	debug_info_t *dbf_info;
6154 	struct list_head dbf_list;
6155 };
6156 
6157 static LIST_HEAD(qeth_dbf_list);
6158 static DEFINE_MUTEX(qeth_dbf_list_mutex);
6159 
6160 static debug_info_t *qeth_get_dbf_entry(char *name)
6161 {
6162 	struct qeth_dbf_entry *entry;
6163 	debug_info_t *rc = NULL;
6164 
6165 	mutex_lock(&qeth_dbf_list_mutex);
6166 	list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
6167 		if (strcmp(entry->dbf_name, name) == 0) {
6168 			rc = entry->dbf_info;
6169 			break;
6170 		}
6171 	}
6172 	mutex_unlock(&qeth_dbf_list_mutex);
6173 	return rc;
6174 }
6175 
6176 static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
6177 {
6178 	struct qeth_dbf_entry *new_entry;
6179 
6180 	card->debug = debug_register(name, 2, 1, 8);
6181 	if (!card->debug) {
6182 		QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
6183 		goto err;
6184 	}
6185 	if (debug_register_view(card->debug, &debug_hex_ascii_view))
6186 		goto err_dbg;
6187 	new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
6188 	if (!new_entry)
6189 		goto err_dbg;
6190 	strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
6191 	new_entry->dbf_info = card->debug;
6192 	mutex_lock(&qeth_dbf_list_mutex);
6193 	list_add(&new_entry->dbf_list, &qeth_dbf_list);
6194 	mutex_unlock(&qeth_dbf_list_mutex);
6195 
6196 	return 0;
6197 
6198 err_dbg:
6199 	debug_unregister(card->debug);
6200 err:
6201 	return -ENOMEM;
6202 }
6203 
6204 static void qeth_clear_dbf_list(void)
6205 {
6206 	struct qeth_dbf_entry *entry, *tmp;
6207 
6208 	mutex_lock(&qeth_dbf_list_mutex);
6209 	list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
6210 		list_del(&entry->dbf_list);
6211 		debug_unregister(entry->dbf_info);
6212 		kfree(entry);
6213 	}
6214 	mutex_unlock(&qeth_dbf_list_mutex);
6215 }
6216 
6217 static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
6218 {
6219 	struct net_device *dev;
6220 	struct qeth_priv *priv;
6221 
6222 	switch (card->info.type) {
6223 	case QETH_CARD_TYPE_IQD:
6224 		dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
6225 				       ether_setup, QETH_MAX_OUT_QUEUES, 1);
6226 		break;
6227 	case QETH_CARD_TYPE_OSM:
6228 		dev = alloc_etherdev(sizeof(*priv));
6229 		break;
6230 	case QETH_CARD_TYPE_OSN:
6231 		dev = alloc_netdev(sizeof(*priv), "osn%d", NET_NAME_UNKNOWN,
6232 				   ether_setup);
6233 		break;
6234 	default:
6235 		dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
6236 	}
6237 
6238 	if (!dev)
6239 		return NULL;
6240 
6241 	priv = netdev_priv(dev);
6242 	priv->rx_copybreak = QETH_RX_COPYBREAK;
6243 
6244 	dev->ml_priv = card;
6245 	dev->watchdog_timeo = QETH_TX_TIMEOUT;
6246 	dev->min_mtu = IS_OSN(card) ? 64 : 576;
6247 	 /* initialized when device first goes online: */
6248 	dev->max_mtu = 0;
6249 	dev->mtu = 0;
6250 	SET_NETDEV_DEV(dev, &card->gdev->dev);
6251 	netif_carrier_off(dev);
6252 
6253 	dev->ethtool_ops = IS_OSN(card) ? &qeth_osn_ethtool_ops :
6254 					  &qeth_ethtool_ops;
6255 
6256 	return dev;
6257 }
6258 
6259 struct net_device *qeth_clone_netdev(struct net_device *orig)
6260 {
6261 	struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);
6262 
6263 	if (!clone)
6264 		return NULL;
6265 
6266 	clone->dev_port = orig->dev_port;
6267 	return clone;
6268 }
6269 
6270 int qeth_setup_netdev(struct qeth_card *card)
6271 {
6272 	struct net_device *dev = card->dev;
6273 	unsigned int num_tx_queues;
6274 
6275 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
6276 	dev->hw_features |= NETIF_F_SG;
6277 	dev->vlan_features |= NETIF_F_SG;
6278 
6279 	if (IS_IQD(card)) {
6280 		dev->features |= NETIF_F_SG;
6281 		num_tx_queues = QETH_IQD_MIN_TXQ;
6282 	} else if (IS_VM_NIC(card)) {
6283 		num_tx_queues = 1;
6284 	} else {
6285 		num_tx_queues = dev->real_num_tx_queues;
6286 	}
6287 
6288 	return qeth_set_real_num_tx_queues(card, num_tx_queues);
6289 }
6290 EXPORT_SYMBOL_GPL(qeth_setup_netdev);
6291 
6292 static int qeth_core_probe_device(struct ccwgroup_device *gdev)
6293 {
6294 	struct qeth_card *card;
6295 	struct device *dev;
6296 	int rc;
6297 	enum qeth_discipline_id enforced_disc;
6298 	char dbf_name[DBF_NAME_LEN];
6299 
6300 	QETH_DBF_TEXT(SETUP, 2, "probedev");
6301 
6302 	dev = &gdev->dev;
6303 	if (!get_device(dev))
6304 		return -ENODEV;
6305 
6306 	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
6307 
6308 	card = qeth_alloc_card(gdev);
6309 	if (!card) {
6310 		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
6311 		rc = -ENOMEM;
6312 		goto err_dev;
6313 	}
6314 
6315 	snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
6316 		dev_name(&gdev->dev));
6317 	card->debug = qeth_get_dbf_entry(dbf_name);
6318 	if (!card->debug) {
6319 		rc = qeth_add_dbf_entry(card, dbf_name);
6320 		if (rc)
6321 			goto err_card;
6322 	}
6323 
6324 	qeth_setup_card(card);
6325 	card->dev = qeth_alloc_netdev(card);
6326 	if (!card->dev) {
6327 		rc = -ENOMEM;
6328 		goto err_card;
6329 	}
6330 
6331 	qeth_determine_capabilities(card);
6332 	qeth_set_blkt_defaults(card);
6333 
6334 	card->qdio.no_out_queues = card->dev->num_tx_queues;
6335 	rc = qeth_update_from_chp_desc(card);
6336 	if (rc)
6337 		goto err_chp_desc;
6338 
6339 	enforced_disc = qeth_enforce_discipline(card);
6340 	switch (enforced_disc) {
6341 	case QETH_DISCIPLINE_UNDETERMINED:
6342 		gdev->dev.type = &qeth_generic_devtype;
6343 		break;
6344 	default:
6345 		card->info.layer_enforced = true;
6346 		rc = qeth_core_load_discipline(card, enforced_disc);
6347 		if (rc)
6348 			goto err_load;
6349 
6350 		gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype :
6351 						card->discipline->devtype;
6352 		rc = card->discipline->setup(card->gdev);
6353 		if (rc)
6354 			goto err_disc;
6355 		break;
6356 	}
6357 
6358 	return 0;
6359 
6360 err_disc:
6361 	qeth_core_free_discipline(card);
6362 err_load:
6363 err_chp_desc:
6364 	free_netdev(card->dev);
6365 err_card:
6366 	qeth_core_free_card(card);
6367 err_dev:
6368 	put_device(dev);
6369 	return rc;
6370 }
6371 
6372 static void qeth_core_remove_device(struct ccwgroup_device *gdev)
6373 {
6374 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6375 
6376 	QETH_CARD_TEXT(card, 2, "removedv");
6377 
6378 	if (card->discipline) {
6379 		card->discipline->remove(gdev);
6380 		qeth_core_free_discipline(card);
6381 	}
6382 
6383 	qeth_free_qdio_queues(card);
6384 
6385 	free_netdev(card->dev);
6386 	qeth_core_free_card(card);
6387 	put_device(&gdev->dev);
6388 }
6389 
6390 static int qeth_core_set_online(struct ccwgroup_device *gdev)
6391 {
6392 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6393 	int rc = 0;
6394 	enum qeth_discipline_id def_discipline;
6395 
6396 	if (!card->discipline) {
6397 		def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
6398 						QETH_DISCIPLINE_LAYER2;
6399 		rc = qeth_core_load_discipline(card, def_discipline);
6400 		if (rc)
6401 			goto err;
6402 		rc = card->discipline->setup(card->gdev);
6403 		if (rc) {
6404 			qeth_core_free_discipline(card);
6405 			goto err;
6406 		}
6407 	}
6408 
6409 	rc = qeth_set_online(card);
6410 err:
6411 	return rc;
6412 }
6413 
6414 static int qeth_core_set_offline(struct ccwgroup_device *gdev)
6415 {
6416 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6417 
6418 	return qeth_set_offline(card, false);
6419 }
6420 
6421 static void qeth_core_shutdown(struct ccwgroup_device *gdev)
6422 {
6423 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6424 	qeth_set_allowed_threads(card, 0, 1);
6425 	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
6426 		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
6427 	qeth_qdio_clear_card(card, 0);
6428 	qeth_drain_output_queues(card);
6429 	qdio_free(CARD_DDEV(card));
6430 }
6431 
6432 static ssize_t group_store(struct device_driver *ddrv, const char *buf,
6433 			   size_t count)
6434 {
6435 	int err;
6436 
6437 	err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
6438 				  buf);
6439 
6440 	return err ? err : count;
6441 }
6442 static DRIVER_ATTR_WO(group);
6443 
6444 static struct attribute *qeth_drv_attrs[] = {
6445 	&driver_attr_group.attr,
6446 	NULL,
6447 };
6448 static struct attribute_group qeth_drv_attr_group = {
6449 	.attrs = qeth_drv_attrs,
6450 };
6451 static const struct attribute_group *qeth_drv_attr_groups[] = {
6452 	&qeth_drv_attr_group,
6453 	NULL,
6454 };
6455 
6456 static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
6457 	.driver = {
6458 		.groups = qeth_drv_attr_groups,
6459 		.owner = THIS_MODULE,
6460 		.name = "qeth",
6461 	},
6462 	.ccw_driver = &qeth_ccw_driver,
6463 	.setup = qeth_core_probe_device,
6464 	.remove = qeth_core_remove_device,
6465 	.set_online = qeth_core_set_online,
6466 	.set_offline = qeth_core_set_offline,
6467 	.shutdown = qeth_core_shutdown,
6468 };
6469 
6470 struct qeth_card *qeth_get_card_by_busid(char *bus_id)
6471 {
6472 	struct ccwgroup_device *gdev;
6473 	struct qeth_card *card;
6474 
6475 	gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
6476 	if (!gdev)
6477 		return NULL;
6478 
6479 	card = dev_get_drvdata(&gdev->dev);
6480 	put_device(&gdev->dev);
6481 	return card;
6482 }
6483 EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);
6484 
6485 int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6486 {
6487 	struct qeth_card *card = dev->ml_priv;
6488 	struct mii_ioctl_data *mii_data;
6489 	int rc = 0;
6490 
6491 	switch (cmd) {
6492 	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
6493 		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
6494 		break;
6495 	case SIOC_QETH_GET_CARD_TYPE:
6496 		if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
6497 		    !IS_VM_NIC(card))
6498 			return 1;
6499 		return 0;
6500 	case SIOCGMIIPHY:
6501 		mii_data = if_mii(rq);
6502 		mii_data->phy_id = 0;
6503 		break;
6504 	case SIOCGMIIREG:
6505 		mii_data = if_mii(rq);
6506 		if (mii_data->phy_id != 0)
6507 			rc = -EINVAL;
6508 		else
6509 			mii_data->val_out = qeth_mdio_read(dev,
6510 				mii_data->phy_id, mii_data->reg_num);
6511 		break;
6512 	case SIOC_QETH_QUERY_OAT:
6513 		rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
6514 		break;
6515 	default:
6516 		if (card->discipline->do_ioctl)
6517 			rc = card->discipline->do_ioctl(dev, rq, cmd);
6518 		else
6519 			rc = -EOPNOTSUPP;
6520 	}
6521 	if (rc)
6522 		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
6523 	return rc;
6524 }
6525 EXPORT_SYMBOL_GPL(qeth_do_ioctl);
6526 
6527 static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
6528 			      unsigned long data)
6529 {
6530 	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6531 	u32 *features = reply->param;
6532 
6533 	if (qeth_setassparms_inspect_rc(cmd))
6534 		return -EIO;
6535 
6536 	*features = cmd->data.setassparms.data.flags_32bit;
6537 	return 0;
6538 }
6539 
6540 static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
6541 			     enum qeth_prot_versions prot)
6542 {
6543 	return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
6544 						 NULL, prot);
6545 }
6546 
6547 static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
6548 			    enum qeth_prot_versions prot, u8 *lp2lp)
6549 {
6550 	u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
6551 	struct qeth_cmd_buffer *iob;
6552 	struct qeth_ipa_caps caps;
6553 	u32 features;
6554 	int rc;
6555 
6556 	/* some L3 HW requires combined L3+L4 csum offload: */
6557 	if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
6558 	    cstype == IPA_OUTBOUND_CHECKSUM)
6559 		required_features |= QETH_IPA_CHECKSUM_IP_HDR;
6560 
6561 	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
6562 				       prot);
6563 	if (!iob)
6564 		return -ENOMEM;
6565 
6566 	rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
6567 	if (rc)
6568 		return rc;
6569 
6570 	if ((required_features & features) != required_features) {
6571 		qeth_set_csum_off(card, cstype, prot);
6572 		return -EOPNOTSUPP;
6573 	}
6574 
6575 	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
6576 				       SETASS_DATA_SIZEOF(flags_32bit),
6577 				       prot);
6578 	if (!iob) {
6579 		qeth_set_csum_off(card, cstype, prot);
6580 		return -ENOMEM;
6581 	}
6582 
6583 	if (features & QETH_IPA_CHECKSUM_LP2LP)
6584 		required_features |= QETH_IPA_CHECKSUM_LP2LP;
6585 	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
6586 	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6587 	if (rc) {
6588 		qeth_set_csum_off(card, cstype, prot);
6589 		return rc;
6590 	}
6591 
6592 	if (!qeth_ipa_caps_supported(&caps, required_features) ||
6593 	    !qeth_ipa_caps_enabled(&caps, required_features)) {
6594 		qeth_set_csum_off(card, cstype, prot);
6595 		return -EOPNOTSUPP;
6596 	}
6597 
6598 	dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
6599 		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
6600 
6601 	if (lp2lp)
6602 		*lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);
6603 
6604 	return 0;
6605 }
6606 
6607 static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
6608 			     enum qeth_prot_versions prot, u8 *lp2lp)
6609 {
6610 	return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
6611 		    qeth_set_csum_off(card, cstype, prot);
6612 }
6613 
6614 static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
6615 			     unsigned long data)
6616 {
6617 	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6618 	struct qeth_tso_start_data *tso_data = reply->param;
6619 
6620 	if (qeth_setassparms_inspect_rc(cmd))
6621 		return -EIO;
6622 
6623 	tso_data->mss = cmd->data.setassparms.data.tso.mss;
6624 	tso_data->supported = cmd->data.setassparms.data.tso.supported;
6625 	return 0;
6626 }
6627 
6628 static int qeth_set_tso_off(struct qeth_card *card,
6629 			    enum qeth_prot_versions prot)
6630 {
6631 	return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
6632 						 IPA_CMD_ASS_STOP, NULL, prot);
6633 }
6634 
6635 static int qeth_set_tso_on(struct qeth_card *card,
6636 			   enum qeth_prot_versions prot)
6637 {
6638 	struct qeth_tso_start_data tso_data;
6639 	struct qeth_cmd_buffer *iob;
6640 	struct qeth_ipa_caps caps;
6641 	int rc;
6642 
6643 	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6644 				       IPA_CMD_ASS_START, 0, prot);
6645 	if (!iob)
6646 		return -ENOMEM;
6647 
6648 	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
6649 	if (rc)
6650 		return rc;
6651 
6652 	if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
6653 		qeth_set_tso_off(card, prot);
6654 		return -EOPNOTSUPP;
6655 	}
6656 
6657 	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6658 				       IPA_CMD_ASS_ENABLE,
6659 				       SETASS_DATA_SIZEOF(caps), prot);
6660 	if (!iob) {
6661 		qeth_set_tso_off(card, prot);
6662 		return -ENOMEM;
6663 	}
6664 
6665 	/* enable TSO capability */
6666 	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
6667 		QETH_IPA_LARGE_SEND_TCP;
6668 	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6669 	if (rc) {
6670 		qeth_set_tso_off(card, prot);
6671 		return rc;
6672 	}
6673 
6674 	if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
6675 	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
6676 		qeth_set_tso_off(card, prot);
6677 		return -EOPNOTSUPP;
6678 	}
6679 
6680 	dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
6681 		 tso_data.mss);
6682 	return 0;
6683 }
6684 
6685 static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
6686 			    enum qeth_prot_versions prot)
6687 {
6688 	return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
6689 }
6690 
6691 static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
6692 {
6693 	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
6694 	int rc_ipv6;
6695 
6696 	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
6697 		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6698 					    QETH_PROT_IPV4, NULL);
6699 	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6700 		/* no/one Offload Assist available, so the rc is trivial */
6701 		return rc_ipv4;
6702 
6703 	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6704 				    QETH_PROT_IPV6, NULL);
6705 
6706 	if (on)
6707 		/* enable: success if any Assist is active */
6708 		return (rc_ipv6) ? rc_ipv4 : 0;
6709 
6710 	/* disable: failure if any Assist is still active */
6711 	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
6712 }
6713 
6714 /**
6715  * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
6716  * @dev:	a net_device
6717  */
6718 void qeth_enable_hw_features(struct net_device *dev)
6719 {
6720 	struct qeth_card *card = dev->ml_priv;
6721 	netdev_features_t features;
6722 
6723 	features = dev->features;
6724 	/* force-off any feature that might need an IPA sequence.
6725 	 * netdev_update_features() will restart them.
6726 	 */
6727 	dev->features &= ~dev->hw_features;
6728 	/* toggle VLAN filter, so that VIDs are re-programmed: */
6729 	if (IS_LAYER2(card) && IS_VM_NIC(card)) {
6730 		dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
6731 		dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6732 	}
6733 	netdev_update_features(dev);
6734 	if (features != dev->features)
6735 		dev_warn(&card->gdev->dev,
6736 			 "Device recovery failed to restore all offload features\n");
6737 }
6738 EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
6739 
6740 static void qeth_check_restricted_features(struct qeth_card *card,
6741 					   netdev_features_t changed,
6742 					   netdev_features_t actual)
6743 {
6744 	netdev_features_t ipv6_features = NETIF_F_TSO6;
6745 	netdev_features_t ipv4_features = NETIF_F_TSO;
6746 
6747 	if (!card->info.has_lp2lp_cso_v6)
6748 		ipv6_features |= NETIF_F_IPV6_CSUM;
6749 	if (!card->info.has_lp2lp_cso_v4)
6750 		ipv4_features |= NETIF_F_IP_CSUM;
6751 
6752 	if ((changed & ipv6_features) && !(actual & ipv6_features))
6753 		qeth_flush_local_addrs6(card);
6754 	if ((changed & ipv4_features) && !(actual & ipv4_features))
6755 		qeth_flush_local_addrs4(card);
6756 }
6757 
6758 int qeth_set_features(struct net_device *dev, netdev_features_t features)
6759 {
6760 	struct qeth_card *card = dev->ml_priv;
6761 	netdev_features_t changed = dev->features ^ features;
6762 	int rc = 0;
6763 
6764 	QETH_CARD_TEXT(card, 2, "setfeat");
6765 	QETH_CARD_HEX(card, 2, &features, sizeof(features));
6766 
6767 	if ((changed & NETIF_F_IP_CSUM)) {
6768 		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
6769 				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
6770 				       &card->info.has_lp2lp_cso_v4);
6771 		if (rc)
6772 			changed ^= NETIF_F_IP_CSUM;
6773 	}
6774 	if (changed & NETIF_F_IPV6_CSUM) {
6775 		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
6776 				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
6777 				       &card->info.has_lp2lp_cso_v6);
6778 		if (rc)
6779 			changed ^= NETIF_F_IPV6_CSUM;
6780 	}
6781 	if (changed & NETIF_F_RXCSUM) {
6782 		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
6783 		if (rc)
6784 			changed ^= NETIF_F_RXCSUM;
6785 	}
6786 	if (changed & NETIF_F_TSO) {
6787 		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
6788 				      QETH_PROT_IPV4);
6789 		if (rc)
6790 			changed ^= NETIF_F_TSO;
6791 	}
6792 	if (changed & NETIF_F_TSO6) {
6793 		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
6794 				      QETH_PROT_IPV6);
6795 		if (rc)
6796 			changed ^= NETIF_F_TSO6;
6797 	}
6798 
6799 	qeth_check_restricted_features(card, dev->features ^ features,
6800 				       dev->features ^ changed);
6801 
6802 	/* everything changed successfully? */
6803 	if ((dev->features ^ features) == changed)
6804 		return 0;
6805 	/* something went wrong. save changed features and return error */
6806 	dev->features ^= changed;
6807 	return -EIO;
6808 }
6809 EXPORT_SYMBOL_GPL(qeth_set_features);
6810 
6811 netdev_features_t qeth_fix_features(struct net_device *dev,
6812 				    netdev_features_t features)
6813 {
6814 	struct qeth_card *card = dev->ml_priv;
6815 
6816 	QETH_CARD_TEXT(card, 2, "fixfeat");
6817 	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
6818 		features &= ~NETIF_F_IP_CSUM;
6819 	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
6820 		features &= ~NETIF_F_IPV6_CSUM;
6821 	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
6822 	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6823 		features &= ~NETIF_F_RXCSUM;
6824 	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
6825 		features &= ~NETIF_F_TSO;
6826 	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
6827 		features &= ~NETIF_F_TSO6;
6828 
6829 	QETH_CARD_HEX(card, 2, &features, sizeof(features));
6830 	return features;
6831 }
6832 EXPORT_SYMBOL_GPL(qeth_fix_features);
6833 
6834 netdev_features_t qeth_features_check(struct sk_buff *skb,
6835 				      struct net_device *dev,
6836 				      netdev_features_t features)
6837 {
6838 	struct qeth_card *card = dev->ml_priv;
6839 
6840 	/* Traffic with local next-hop is not eligible for some offloads: */
6841 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
6842 	    READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
6843 		netdev_features_t restricted = 0;
6844 
6845 		if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
6846 			restricted |= NETIF_F_ALL_TSO;
6847 
6848 		switch (vlan_get_protocol(skb)) {
6849 		case htons(ETH_P_IP):
6850 			if (!card->info.has_lp2lp_cso_v4)
6851 				restricted |= NETIF_F_IP_CSUM;
6852 
6853 			if (restricted && qeth_next_hop_is_local_v4(card, skb))
6854 				features &= ~restricted;
6855 			break;
6856 		case htons(ETH_P_IPV6):
6857 			if (!card->info.has_lp2lp_cso_v6)
6858 				restricted |= NETIF_F_IPV6_CSUM;
6859 
6860 			if (restricted && qeth_next_hop_is_local_v6(card, skb))
6861 				features &= ~restricted;
6862 			break;
6863 		default:
6864 			break;
6865 		}
6866 	}
6867 
6868 	/* GSO segmentation builds skbs with
6869 	 *	a (small) linear part for the headers, and
6870 	 *	page frags for the data.
6871 	 * Compared to a linear skb, the header-only part consumes an
6872 	 * additional buffer element. This reduces buffer utilization, and
6873 	 * hurts throughput. So compress small segments into one element.
6874 	 */
6875 	if (netif_needs_gso(skb, features)) {
6876 		/* match skb_segment(): */
6877 		unsigned int doffset = skb->data - skb_mac_header(skb);
6878 		unsigned int hsize = skb_shinfo(skb)->gso_size;
6879 		unsigned int hroom = skb_headroom(skb);
6880 
6881 		/* linearize only if resulting skb allocations are order-0: */
6882 		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
6883 			features &= ~NETIF_F_SG;
6884 	}
6885 
6886 	return vlan_features_check(skb, features);
6887 }
6888 EXPORT_SYMBOL_GPL(qeth_features_check);
6889 
6890 void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6891 {
6892 	struct qeth_card *card = dev->ml_priv;
6893 	struct qeth_qdio_out_q *queue;
6894 	unsigned int i;
6895 
6896 	QETH_CARD_TEXT(card, 5, "getstat");
6897 
6898 	stats->rx_packets = card->stats.rx_packets;
6899 	stats->rx_bytes = card->stats.rx_bytes;
6900 	stats->rx_errors = card->stats.rx_length_errors +
6901 			   card->stats.rx_frame_errors +
6902 			   card->stats.rx_fifo_errors;
6903 	stats->rx_dropped = card->stats.rx_dropped_nomem +
6904 			    card->stats.rx_dropped_notsupp +
6905 			    card->stats.rx_dropped_runt;
6906 	stats->multicast = card->stats.rx_multicast;
6907 	stats->rx_length_errors = card->stats.rx_length_errors;
6908 	stats->rx_frame_errors = card->stats.rx_frame_errors;
6909 	stats->rx_fifo_errors = card->stats.rx_fifo_errors;
6910 
6911 	for (i = 0; i < card->qdio.no_out_queues; i++) {
6912 		queue = card->qdio.out_qs[i];
6913 
6914 		stats->tx_packets += queue->stats.tx_packets;
6915 		stats->tx_bytes += queue->stats.tx_bytes;
6916 		stats->tx_errors += queue->stats.tx_errors;
6917 		stats->tx_dropped += queue->stats.tx_dropped;
6918 	}
6919 }
6920 EXPORT_SYMBOL_GPL(qeth_get_stats64);
6921 
6922 #define TC_IQD_UCAST   0
6923 static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
6924 				     unsigned int ucast_txqs)
6925 {
6926 	unsigned int prio;
6927 
6928 	/* IQD requires mcast traffic to be placed on a dedicated queue, and
6929 	 * qeth_iqd_select_queue() deals with this.
6930 	 * For unicast traffic, we defer the queue selection to the stack.
6931 	 * By installing a trivial prio map that spans over only the unicast
6932 	 * queues, we can encourage the stack to spread the ucast traffic evenly
6933 	 * without selecting the mcast queue.
6934 	 */
6935 
6936 	/* One traffic class, spanning over all active ucast queues: */
6937 	netdev_set_num_tc(dev, 1);
6938 	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
6939 			    QETH_IQD_MIN_UCAST_TXQ);
6940 
6941 	/* Map all priorities to this traffic class: */
6942 	for (prio = 0; prio <= TC_BITMASK; prio++)
6943 		netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
6944 }
6945 
6946 int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
6947 {
6948 	struct net_device *dev = card->dev;
6949 	int rc;
6950 
6951 	/* Per netif_setup_tc(), adjust the mapping first: */
6952 	if (IS_IQD(card))
6953 		qeth_iqd_set_prio_tc_map(dev, count - 1);
6954 
6955 	rc = netif_set_real_num_tx_queues(dev, count);
6956 
6957 	if (rc && IS_IQD(card))
6958 		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);
6959 
6960 	return rc;
6961 }
6962 
6963 u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
6964 			  u8 cast_type, struct net_device *sb_dev)
6965 {
6966 	u16 txq;
6967 
6968 	if (cast_type != RTN_UNICAST)
6969 		return QETH_IQD_MCAST_TXQ;
6970 	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
6971 		return QETH_IQD_MIN_UCAST_TXQ;
6972 
6973 	txq = netdev_pick_tx(dev, skb, sb_dev);
6974 	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
6975 }
6976 EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
6977 
6978 int qeth_open(struct net_device *dev)
6979 {
6980 	struct qeth_card *card = dev->ml_priv;
6981 
6982 	QETH_CARD_TEXT(card, 4, "qethopen");
6983 
6984 	card->data.state = CH_STATE_UP;
6985 	netif_tx_start_all_queues(dev);
6986 
6987 	napi_enable(&card->napi);
6988 	local_bh_disable();
6989 	napi_schedule(&card->napi);
6990 	if (IS_IQD(card)) {
6991 		struct qeth_qdio_out_q *queue;
6992 		unsigned int i;
6993 
6994 		qeth_for_each_output_queue(card, queue, i) {
6995 			netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
6996 					  QETH_NAPI_WEIGHT);
6997 			napi_enable(&queue->napi);
6998 			napi_schedule(&queue->napi);
6999 		}
7000 	}
7001 	/* kick-start the NAPI softirq: */
7002 	local_bh_enable();
7003 	return 0;
7004 }
7005 EXPORT_SYMBOL_GPL(qeth_open);
7006 
7007 int qeth_stop(struct net_device *dev)
7008 {
7009 	struct qeth_card *card = dev->ml_priv;
7010 
7011 	QETH_CARD_TEXT(card, 4, "qethstop");
7012 	if (IS_IQD(card)) {
7013 		struct qeth_qdio_out_q *queue;
7014 		unsigned int i;
7015 
7016 		/* Quiesce the NAPI instances: */
7017 		qeth_for_each_output_queue(card, queue, i)
7018 			napi_disable(&queue->napi);
7019 
7020 		/* Stop .ndo_start_xmit, might still access queue->napi. */
7021 		netif_tx_disable(dev);
7022 
7023 		qeth_for_each_output_queue(card, queue, i) {
7024 			del_timer_sync(&queue->timer);
7025 			/* Queues may get re-allocated, so remove the NAPIs. */
7026 			netif_napi_del(&queue->napi);
7027 		}
7028 	} else {
7029 		netif_tx_disable(dev);
7030 	}
7031 
7032 	napi_disable(&card->napi);
7033 	cancel_delayed_work_sync(&card->buffer_reclaim_work);
7034 	qdio_stop_irq(CARD_DDEV(card));
7035 
7036 	return 0;
7037 }
7038 EXPORT_SYMBOL_GPL(qeth_stop);
7039 
7040 static int __init qeth_core_init(void)
7041 {
7042 	int rc;
7043 
7044 	pr_info("loading core functions\n");
7045 
7046 	qeth_debugfs_root = debugfs_create_dir("qeth", NULL);
7047 
7048 	rc = qeth_register_dbf_views();
7049 	if (rc)
7050 		goto dbf_err;
7051 	qeth_core_root_dev = root_device_register("qeth");
7052 	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
7053 	if (rc)
7054 		goto register_err;
7055 	qeth_core_header_cache =
7056 		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
7057 				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
7058 				  0, NULL);
7059 	if (!qeth_core_header_cache) {
7060 		rc = -ENOMEM;
7061 		goto slab_err;
7062 	}
7063 	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
7064 			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
7065 	if (!qeth_qdio_outbuf_cache) {
7066 		rc = -ENOMEM;
7067 		goto cqslab_err;
7068 	}
7069 	rc = ccw_driver_register(&qeth_ccw_driver);
7070 	if (rc)
7071 		goto ccw_err;
7072 	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
7073 	if (rc)
7074 		goto ccwgroup_err;
7075 
7076 	return 0;
7077 
7078 ccwgroup_err:
7079 	ccw_driver_unregister(&qeth_ccw_driver);
7080 ccw_err:
7081 	kmem_cache_destroy(qeth_qdio_outbuf_cache);
7082 cqslab_err:
7083 	kmem_cache_destroy(qeth_core_header_cache);
7084 slab_err:
7085 	root_device_unregister(qeth_core_root_dev);
7086 register_err:
7087 	qeth_unregister_dbf_views();
7088 dbf_err:
7089 	debugfs_remove_recursive(qeth_debugfs_root);
7090 	pr_err("Initializing the qeth device driver failed\n");
7091 	return rc;
7092 }
7093 
7094 static void __exit qeth_core_exit(void)
7095 {
7096 	qeth_clear_dbf_list();
7097 	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
7098 	ccw_driver_unregister(&qeth_ccw_driver);
7099 	kmem_cache_destroy(qeth_qdio_outbuf_cache);
7100 	kmem_cache_destroy(qeth_core_header_cache);
7101 	root_device_unregister(qeth_core_root_dev);
7102 	qeth_unregister_dbf_views();
7103 	debugfs_remove_recursive(qeth_debugfs_root);
7104 	pr_info("core functions removed\n");
7105 }
7106 
7107 module_init(qeth_core_init);
7108 module_exit(qeth_core_exit);
7109 MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
7110 MODULE_DESCRIPTION("qeth core functions");
7111 MODULE_LICENSE("GPL");
7112