xref: /openbmc/linux/drivers/s390/net/qeth_core_main.c (revision e0d77d0f38aa60ca61b3ce6e60d64fad2aa0853d)
// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2007, 2009
 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *		 Frank Pavlic <fpavlic@de.ibm.com>,
 *		 Thomas Spatzier <tspat@de.ibm.com>,
 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/rcutree.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>
#include <net/sock.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/sysinfo.h>
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"

struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	/*                   N  P  A    M  L  V                      H  */
	[QETH_DBF_SETUP] = {"qeth_setup",
				8, 1,   8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL]  = {"qeth_control",
		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);

static struct kmem_cache *qeth_core_header_cache;
static struct kmem_cache *qeth_qdio_outbuf_cache;
static struct kmem_cache *qeth_qaob_cache;

static struct device *qeth_core_root_dev;
static struct dentry *qeth_debugfs_root;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);

static const char *qeth_get_cardname(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Virtual NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Virtual NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Virtual NIC QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Virtual NIC QDIO - OSX";
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
		default:
			return " unknown";
		}
	}
	return " n/a";
}

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return "Virt.NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return "Virt.NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return "Virt.NIC OSM";
		case QETH_CARD_TYPE_OSX:
			return "Virt.NIC OSX";
		default:
			return "unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			switch (card->info.link_type) {
			case QETH_LINK_TYPE_FAST_ETH:
				return "OSD_100";
			case QETH_LINK_TYPE_HSTR:
				return "HSTR";
			case QETH_LINK_TYPE_GBIT_ETH:
				return "OSD_1000";
			case QETH_LINK_TYPE_10GBIT_ETH:
				return "OSD_10GIG";
			case QETH_LINK_TYPE_25GBIT_ETH:
				return "OSD_25GIG";
			case QETH_LINK_TYPE_LANE_ETH100:
				return "OSD_FE_LANE";
			case QETH_LINK_TYPE_LANE_TR:
				return "OSD_TR_LANE";
			case QETH_LINK_TYPE_LANE_ETH1000:
				return "OSD_GbE_LANE";
			case QETH_LINK_TYPE_LANE:
				return "OSD_ATM_LANE";
			default:
				return "OSD_Express";
			}
		case QETH_CARD_TYPE_IQD:
			return "HiperSockets";
		case QETH_CARD_TYPE_OSM:
			return "OSM_1000";
		case QETH_CARD_TYPE_OSX:
			return "OSX_10GIG";
		default:
			return "unknown";
		}
	}
	return "n/a";
}

void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			 int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);

static void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
	struct qeth_qdio_q *queue = card->qdio.in_q;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.in_buf_pool.entry_list, list)
		list_del(&pool_entry->list);

	for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
		queue->bufs[i].pool_entry = NULL;
}

static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
		if (entry->elements[i])
			__free_page(entry->elements[i]);
	}

	kfree(entry);
}

static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
				 init_list) {
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);
	}
}

static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
{
	struct qeth_buffer_pool_entry *entry;
	unsigned int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	for (i = 0; i < pages; i++) {
		entry->elements[i] = __dev_alloc_page(GFP_KERNEL);

		if (!entry->elements[i]) {
			qeth_free_pool_entry(entry);
			return NULL;
		}
	}

	return entry;
}

static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		struct qeth_buffer_pool_entry *entry;

		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}

		list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
	}
	return 0;
}

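/* Grow or shrink the pre-allocated buffer pool to @count entries. Shrinking
 * frees surplus entries; growing allocates all new entries up front so that
 * a failed allocation leaves the pool unchanged. Resizing is deferred until
 * the pool has been allocated.
 */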
int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
	struct qeth_buffer_pool_entry *entry, *tmp;
	int delta = count - pool->buf_count;
	LIST_HEAD(entries);

	QETH_CARD_TEXT(card, 2, "realcbp");

	/* Defer until pool is allocated: */
	if (list_empty(&pool->entry_list))
		goto out;

	/* Remove entries from the pool: */
	while (delta < 0) {
		entry = list_first_entry(&pool->entry_list,
					 struct qeth_buffer_pool_entry,
					 init_list);
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);

		delta++;
	}

	/* Allocate additional entries: */
	while (delta > 0) {
		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			list_for_each_entry_safe(entry, tmp, &entries,
						 init_list) {
				list_del(&entry->init_list);
				qeth_free_pool_entry(entry);
			}

			return -ENOMEM;
		}

		list_add(&entry->init_list, &entries);

		delta--;
	}

	list_splice(&entries, &pool->entry_list);

out:
	card->qdio.in_buf_pool.buf_count = count;
	pool->buf_count = count;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);

static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		q->bufs[i].buffer = q->qdio_bufs[i];

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}

static int qeth_cq_init(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqinit");
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
		card->qdio.c_q->next_buf_to_init = 127;

		rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 1, 0, 127);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
			goto out;
		}
	}
	rc = 0;
out:
	return rc;
}

static void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q) {
		qeth_free_qdio_queue(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
}

static int qeth_alloc_cq(struct qeth_card *card)
{
	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqon");
		if (!card->qdio.c_q) {
			card->qdio.c_q = qeth_alloc_qdio_queue();
			if (!card->qdio.c_q) {
				dev_err(&card->gdev->dev,
					"Failed to create completion queue\n");
				return -ENOMEM;
			}
		}
	} else {
		QETH_CARD_TEXT(card, 2, "nocq");
		qeth_free_cq(card);
	}
	return 0;
}

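/* Map the error code reported in SBAL flag 15 (or a QAOB's aorc field) to
 * an AF_IUCV TX notification; @delayed selects the TX_NOTIFY_DELAYED_*
 * variants for completions that arrive asynchronously via the completion
 * queue.
 */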
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
							int delayed)
{
	enum iucv_tx_notify n;

	switch (sbalf15) {
	case 0:
		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		break;
	case 4:
	case 16:
	case 17:
	case 18:
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			TX_NOTIFY_UNREACHABLE;
		break;
	default:
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			TX_NOTIFY_GENERALERROR;
		break;
	}

	return n;
}

static void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
	if (refcount_dec_and_test(&iob->ref_count)) {
		kfree(iob->data);
		kfree(iob);
	}
}
static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
			   void *data)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = flags | CCW_FLAG_SLI;
	ccw->count = len;
	ccw->cda = (__u32)virt_to_phys(data);
}

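/* Post the next READ CCW on the control channel, re-using the card's
 * long-lived read_cmd buffer. Must be called with the ccw device lock
 * held.
 */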
static int __qeth_issue_next_read(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	int rc;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)
		return -EIO;

	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
	iob->callback = qeth_issue_next_read_cb;
	/* keep the cmd alive after completion: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
	if (!rc) {
		channel->active_cmd = iob;
	} else {
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		qeth_unlock_channel(card, channel);
		qeth_put_cmd(iob);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
	}
	return rc;
}

static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}

static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_add_tail(&iob->list_entry, &card->cmd_waiter_list);
	spin_unlock_irq(&card->lock);
}

static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_del(&iob->list_entry);
	spin_unlock_irq(&card->lock);
}

static void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
	iob->rc = reason;
	complete(&iob->done);
}

static void qeth_flush_local_addrs4(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs4_lock);
	hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs4_lock);
}

static void qeth_flush_local_addrs6(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs6_lock);
	hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs6_lock);
}

static void qeth_flush_local_addrs(struct qeth_card *card)
{
	qeth_flush_local_addrs4(card);
	qeth_flush_local_addrs6(card);
}

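/* Cache the IPv4 addresses that the adapter reports as local, so that the
 * TX path can detect next hops sitting on the same adapter (see
 * qeth_next_hop_is_local_v4()). Each address is kept in the low 32 bits
 * of an in6_addr.
 */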
static void qeth_add_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
			if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
		hash_add_rcu(card->local_addrs4, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_add_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
			if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		addr->addr = cmd->addrs[i].addr;
		hash_add_rcu(card->local_addrs6, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs6_lock);
}

static void qeth_del_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
		unsigned int key = ipv4_addr_hash(addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
			if (tmp->addr.s6_addr32[3] == addr->addr) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_del_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
		u32 key = ipv6_addr_hash(&addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
			if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs6_lock);
}

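/* Check whether the skb's IPv4 next hop is one of the addresses that the
 * adapter registered as local. Walks the hash under RCU so the TX path
 * never blocks on the address-list lock.
 */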
static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	bool is_local = false;
	unsigned int key;
	__be32 next_hop;

	if (hash_empty(card->local_addrs4))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v4_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IP)));
	key = ipv4_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
		if (tmp->addr.s6_addr32[3] == next_hop) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	struct in6_addr *next_hop;
	bool is_local = false;
	u32 key;

	if (hash_empty(card->local_addrs6))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v6_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IPV6)));
	key = ipv6_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
		if (ipv6_addr_equal(&tmp->addr, next_hop)) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
{
	struct qeth_card *card = m->private;
	struct qeth_local_addr *tmp;
	unsigned int i;

	rcu_read_lock();
	hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
		seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
	hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
		seq_printf(m, "%pI6c\n", &tmp->addr);
	rcu_read_unlock();

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);

static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
		struct qeth_card *card)
{
	const char *ipa_name;
	int com = cmd->hdr.command;

	ipa_name = qeth_get_ipa_cmd_name(com);

	if (rc)
		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
				 ipa_name, com, CARD_DEVID(card), rc,
				 qeth_get_ipa_msg(rc));
	else
		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
				 ipa_name, com, CARD_DEVID(card));
}

static void qeth_default_link_info(struct qeth_card *card)
{
	struct qeth_link_info *link_info = &card->info.link_info;

	QETH_CARD_TEXT(card, 2, "dftlinfo");
	link_info->duplex = DUPLEX_FULL;

	if (IS_IQD(card) || IS_VM_NIC(card)) {
		link_info->speed = SPEED_10000;
		link_info->port = PORT_FIBRE;
		link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
	} else {
		switch (card->info.link_type) {
		case QETH_LINK_TYPE_FAST_ETH:
		case QETH_LINK_TYPE_LANE_ETH100:
			link_info->speed = SPEED_100;
			link_info->port = PORT_TP;
			break;
		case QETH_LINK_TYPE_GBIT_ETH:
		case QETH_LINK_TYPE_LANE_ETH1000:
			link_info->speed = SPEED_1000;
			link_info->port = PORT_FIBRE;
			break;
		case QETH_LINK_TYPE_10GBIT_ETH:
			link_info->speed = SPEED_10000;
			link_info->port = PORT_FIBRE;
			break;
		case QETH_LINK_TYPE_25GBIT_ETH:
			link_info->speed = SPEED_25000;
			link_info->port = PORT_FIBRE;
			break;
		default:
			dev_info(&card->gdev->dev,
				 "Unknown link type %x\n",
				 card->info.link_type);
			link_info->speed = SPEED_UNKNOWN;
			link_info->port = PORT_OTHER;
		}

		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
	}
}

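/* Inspect an inbound IPA PDU: command replies are handed back to the
 * caller for matching against pending requests, while unsolicited events
 * (STOPLAN/STARTLAN, bridgeport and local-address updates) are handled in
 * place and NULL is returned, unless the discipline wants to see them.
 */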
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
						struct qeth_ipa_cmd *cmd)
{
	QETH_CARD_TEXT(card, 5, "chkipad");

	if (IS_IPA_REPLY(cmd)) {
		if (cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Adjacent port of interface %s is no longer in reflective relay mode, trigger recovery\n",
				netdev_name(card->dev));
			/* Set offline, then probably fail to set online: */
			qeth_schedule_recovery(card);
		} else {
			/* stay online for subsequent STARTLAN */
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
				 netdev_name(card->dev), card->info.chpid);
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
			netif_carrier_off(card->dev);
			qeth_default_link_info(card);
		}
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
			 netdev_name(card->dev), card->info.chpid);
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_add_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_del_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
	}
}

static void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iob, &card->cmd_waiter_list, list_entry)
		qeth_notify_cmd(iob, -ECANCELED);
	spin_unlock_irqrestore(&card->lock, flags);
}

static int qeth_check_idx_response(struct qeth_card *card,
	unsigned char *buffer)
{
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
				 buffer[4]);
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
			dev_err(&card->gdev->dev,
				"The device does not support the configured transport mode\n");
			return -EPROTONOSUPPORT;
		}
		return -EIO;
	}
	return 0;
}

static void qeth_release_buffer_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	qeth_put_cmd(iob);
}

static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

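/* Allocate a command buffer with @length bytes of DMA-capable data and
 * room for @ccws channel command words behind it. The initial reference
 * is dropped via qeth_put_cmd().
 */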
static struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
					      unsigned int length,
					      unsigned int ccws, long timeout)
{
	struct qeth_cmd_buffer *iob;

	if (length > QETH_BUFSIZE)
		return NULL;

	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
	if (!iob)
		return NULL;

	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
			    GFP_KERNEL | GFP_DMA);
	if (!iob->data) {
		kfree(iob);
		return NULL;
	}

	init_completion(&iob->done);
	spin_lock_init(&iob->lock);
	refcount_set(&iob->ref_count, 1);
	iob->channel = channel;
	iob->timeout = timeout;
	iob->length = length;
	return iob;
}

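/* Completion callback for the READ channel: validate the IDX response,
 * filter out unsolicited events, match the reply against the waiter list
 * and run the requestor's callback, then immediately re-arm the next read.
 */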
static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length)
{
	struct qeth_cmd_buffer *request = NULL;
	struct qeth_ipa_cmd *cmd = NULL;
	struct qeth_reply *reply = NULL;
	struct qeth_cmd_buffer *tmp;
	unsigned long flags;
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_schedule_recovery(card);
		fallthrough;
	default:
		qeth_clear_ipacmd_list(card);
		goto err_idx;
	}

	cmd = __ipa_reply(iob);
	if (cmd) {
		cmd = qeth_check_ipa_data(card, cmd);
		if (!cmd)
			goto out;
	}

	/* match against pending cmd requests */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(tmp, &card->cmd_waiter_list, list_entry) {
		if (tmp->match && tmp->match(tmp, iob)) {
			request = tmp;
			/* take the object outside the lock */
			qeth_get_cmd(request);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!request)
		goto out;

	reply = &request->reply;
	if (!reply->callback) {
		rc = 0;
		goto no_callback;
	}

	spin_lock_irqsave(&request->lock, flags);
	if (request->rc)
		/* Bail out when the requestor has already left: */
		rc = request->rc;
	else
		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
							(unsigned long)iob);
	spin_unlock_irqrestore(&request->lock, flags);

no_callback:
	if (rc <= 0)
		qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
		QETH_PDU_HEADER_SEQ_NO(iob->data),
		QETH_SEQ_NO_LENGTH);
	__qeth_issue_next_read(card);
err_idx:
	qeth_put_cmd(iob);
}

static int qeth_set_thread_start_bit(struct qeth_card *card,
		unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread))
		rc = -EPERM;
	else if (card->thread_start_mask & thread)
		rc = -EBUSY;
	else
		card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);

	return rc;
}

static void qeth_clear_thread_start_bit(struct qeth_card *card,
					unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}

static void qeth_clear_thread_running_bit(struct qeth_card *card,
					  unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up_all(&card->wait_q);
}

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}

int qeth_schedule_recovery(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "startrec");

	rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
		schedule_work(&card->kernel_thread_starter);

	return rc;
}

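/* Evaluate the channel/device status and sense data in an IRB, and decide
 * whether the interrupt reported an unrecoverable condition (-EIO) or can
 * be ignored (0).
 */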
static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
			    struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
				16, 1, irb, 64, 1);
		return -EIO;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
			return -EIO;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
			return -EIO;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
			return -EIO;
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
			return 0;
		}
		QETH_CARD_TEXT(card, 2, "DGENCHK");
		return -EIO;
	}
	return 0;
}

static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
				struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
				 CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
		return -EIO;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out"
			" on the device\n");
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
		return -ETIMEDOUT;
	default:
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, "  rc???");
		return PTR_ERR(irb);
	}
}

/**
 * qeth_irq() - qeth interrupt handler
 * @cdev: ccw device
 * @intparm: expected to point to the active iob
 * @irb: Interruption Response Block
 *
 * On the good path, the corresponding qeth channel is locked with the last
 * used iob as active_cmd. But this function is also called for error
 * interrupts.
 *
 * Caller ensures that interrupts are disabled and the ccw device lock is
 * held.
 */
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *iob = NULL;
	struct ccwgroup_device *gdev;
	struct qeth_channel *channel;
	struct qeth_card *card;

	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 5, "irq");

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
	} else {
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");
	}

	if (intparm == 0) {
		QETH_CARD_TEXT(card, 5, "irqunsol");
	} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
		QETH_CARD_TEXT(card, 5, "irqunexp");

		dev_err(&cdev->dev,
			"Received IRQ with intparm %lx, expected %px\n",
			intparm, channel->active_cmd);
		if (channel->active_cmd)
			qeth_cancel_cmd(channel->active_cmd, -EIO);
	} else {
		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
	}

	rc = qeth_check_irb_error(card, cdev, irb);
	if (rc) {
		/* IO was terminated, free its resources. */
		qeth_unlock_channel(card, channel);
		if (iob)
			qeth_cancel_cmd(iob, rc);
		return;
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		channel->state = CH_STATE_STOPPED;
		wake_up(&card->wait_q);
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		channel->state = CH_STATE_HALTED;
		wake_up(&card->wait_q);
	}

	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
					  SCSW_FCTL_HALT_FUNC))) {
		qeth_cancel_cmd(iob, -ECANCELED);
		iob = NULL;
	}

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				"The qeth device driver failed to recover "
				"an error on the device\n");
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}

		rc = qeth_get_problem(card, cdev, irb);
		if (rc) {
			card->read_or_write_problem = 1;
			qeth_unlock_channel(card, channel);
			if (iob)
				qeth_cancel_cmd(iob, rc);
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			return;
		}
	}

	if (scsw_cmd_is_valid_cc(&irb->scsw) && irb->scsw.cmd.cc == 1 && iob) {
		/* channel command hasn't started: retry.
		 * active_cmd is still set to last iob
		 */
		QETH_CARD_TEXT(card, 2, "irqcc1");
		rc = ccw_device_start_timeout(cdev, __ccw_from_cmd(iob),
					      (addr_t)iob, 0, 0, iob->timeout);
		if (rc) {
			QETH_DBF_MESSAGE(2,
					 "ccw retry on %x failed, rc = %i\n",
					 CARD_DEVID(card), rc);
			QETH_CARD_TEXT_(card, 2, " err%d", rc);
			qeth_unlock_channel(card, channel);
			qeth_cancel_cmd(iob, rc);
		}
		return;
	}

	qeth_unlock_channel(card, channel);

	if (iob) {
		/* sanity check: */
		if (irb->scsw.cmd.count > iob->length) {
			qeth_cancel_cmd(iob, -EIO);
			return;
		}
		if (iob->callback)
			iob->callback(card, iob,
				      iob->length - irb->scsw.cmd.count);
	}
}

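/* Propagate a TX completion notification to each AF_IUCV socket that owns
 * an skb in this buffer; other socket families don't subscribe to these
 * notifications.
 */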
static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
		struct qeth_qdio_out_buffer *buf,
		enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb) {
		struct sock *sk = skb->sk;

		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (sk && sk->sk_family == PF_IUCV)
			iucv_sk(sk)->sk_txnotify(sk, notification);
	}
}

static void qeth_tx_complete_buf(struct qeth_qdio_out_q *queue,
				 struct qeth_qdio_out_buffer *buf, bool error,
				 int budget)
{
	struct sk_buff *skb;

	/* Empty buffer? */
	if (buf->next_element_to_fill == 0)
		return;

	QETH_TXQ_STAT_INC(queue, bufs);
	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
	if (error) {
		QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
	} else {
		QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
		QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
	}

	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
		unsigned int bytes = qdisc_pkt_len(skb);
		bool is_tso = skb_is_gso(skb);
		unsigned int packets;

		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
		if (!error) {
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
			if (skb_is_nonlinear(skb))
				QETH_TXQ_STAT_INC(queue, skbs_sg);
			if (is_tso) {
				QETH_TXQ_STAT_INC(queue, skbs_tso);
				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
			}
		}

		napi_consume_skb(skb, budget);
	}
}

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
				     struct qeth_qdio_out_buffer *buf,
				     bool error, int budget)
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) {
		atomic_dec(&queue->set_pci_flags_count);
		QETH_TXQ_STAT_INC(queue, completion_irq);
	}

	qeth_tx_complete_buf(queue, buf, error, budget);

	for (i = 0; i < queue->max_elements; ++i) {
		void *data = phys_to_virt(buf->buffer->element[i].addr);

		if (__test_and_clear_bit(i, buf->from_kmem_cache) && data)
			kmem_cache_free(qeth_core_header_cache, data);
	}

	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
	buf->next_element_to_fill = 0;
	buf->frames = 0;
	buf->bytes = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf)
{
	if (buf->aob)
		kmem_cache_free(qeth_qaob_cache, buf->aob);
	kmem_cache_free(qeth_qdio_outbuf_cache, buf);
}

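/* Finalize TX buffers that were parked until their QAOB completed: notify
 * the attached sockets, release header-cache elements recorded in the
 * QAOB, and free the buffer. With @drain set, pending buffers are
 * completed unconditionally as errors.
 */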
static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
					  struct qeth_qdio_out_q *queue,
					  bool drain, int budget)
{
	struct qeth_qdio_out_buffer *buf, *tmp;

	list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
		struct qeth_qaob_priv1 *priv;
		struct qaob *aob = buf->aob;
		enum iucv_tx_notify notify;
		unsigned int i;

		priv = (struct qeth_qaob_priv1 *)&aob->user1;
		if (drain || READ_ONCE(priv->state) == QETH_QAOB_DONE) {
			QETH_CARD_TEXT(card, 5, "fp");
			QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);

			notify = drain ? TX_NOTIFY_GENERALERROR :
					 qeth_compute_cq_notification(aob->aorc, 1);
			qeth_notify_skbs(queue, buf, notify);
			qeth_tx_complete_buf(queue, buf, drain, budget);

			for (i = 0;
			     i < aob->sb_count && i < queue->max_elements;
			     i++) {
				void *data = phys_to_virt(aob->sba[i]);

				if (test_bit(i, buf->from_kmem_cache) && data)
					kmem_cache_free(qeth_core_header_cache,
							data);
			}

			list_del(&buf->list_entry);
			qeth_free_out_buf(buf);
		}
	}
}

static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
	int j;

	qeth_tx_complete_pending_bufs(q->card, q, true, 0);

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (!q->bufs[j])
			continue;

		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
		if (free) {
			qeth_free_out_buf(q->bufs[j]);
			q->bufs[j] = NULL;
		}
	}
}

static void qeth_drain_output_queues(struct qeth_card *card)
{
	int i;

	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		if (card->qdio.out_qs[i])
			qeth_drain_output_queue(card->qdio.out_qs[i], false);
	}
}

static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
	unsigned int max = single ? 1 : card->dev->num_tx_queues;

	if (card->qdio.no_out_queues == max)
		return;

	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
		qeth_free_qdio_queues(card);

	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.no_out_queues = max;
}

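/* Read the format-0 channel-path descriptor to derive the function level
 * and, for OSD/OSX devices, whether the CHPID restricts the card to a
 * single output queue.
 */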
qeth_update_from_chp_desc(struct qeth_card * card)1503  static int qeth_update_from_chp_desc(struct qeth_card *card)
1504  {
1505  	struct ccw_device *ccwdev;
1506  	struct channel_path_desc_fmt0 *chp_dsc;
1507  
1508  	QETH_CARD_TEXT(card, 2, "chp_desc");
1509  
1510  	ccwdev = card->data.ccwdev;
1511  	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
1512  	if (!chp_dsc)
1513  		return -ENOMEM;
1514  
1515  	card->info.func_level = 0x4100 + chp_dsc->desc;
1516  
1517  	if (IS_OSD(card) || IS_OSX(card))
1518  		/* CHPP field bit 6 == 1 -> single queue */
1519  		qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
1520  
1521  	kfree(chp_dsc);
1522  	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
1523  	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
1524  	return 0;
1525  }
1526  
qeth_init_qdio_info(struct qeth_card * card)1527  static void qeth_init_qdio_info(struct qeth_card *card)
1528  {
1529  	QETH_CARD_TEXT(card, 4, "intqdinf");
1530  	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
1531  	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
1532  	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
1533  
1534  	/* inbound */
1535  	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
1536  	if (IS_IQD(card))
1537  		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
1538  	else
1539  		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
1540  	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
1541  	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
1542  	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
1543  }
1544  
qeth_set_initial_options(struct qeth_card * card)1545  static void qeth_set_initial_options(struct qeth_card *card)
1546  {
1547  	card->options.route4.type = NO_ROUTER;
1548  	card->options.route6.type = NO_ROUTER;
1549  	card->options.isolation = ISOLATION_MODE_NONE;
1550  	card->options.cq = QETH_CQ_DISABLED;
1551  	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
1552  }
1553  
qeth_do_start_thread(struct qeth_card * card,unsigned long thread)1554  static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
1555  {
1556  	unsigned long flags;
1557  	int rc = 0;
1558  
1559  	spin_lock_irqsave(&card->thread_mask_lock, flags);
1560  	QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
1561  			(u8) card->thread_start_mask,
1562  			(u8) card->thread_allowed_mask,
1563  			(u8) card->thread_running_mask);
1564  	rc = (card->thread_start_mask & thread);
1565  	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1566  	return rc;
1567  }
1568  
1569  static int qeth_do_reset(void *data);
qeth_start_kernel_thread(struct work_struct * work)1570  static void qeth_start_kernel_thread(struct work_struct *work)
1571  {
1572  	struct task_struct *ts;
1573  	struct qeth_card *card = container_of(work, struct qeth_card,
1574  					kernel_thread_starter);
1575  	QETH_CARD_TEXT(card, 2, "strthrd");
1576  
1577  	if (card->read.state != CH_STATE_UP &&
1578  	    card->write.state != CH_STATE_UP)
1579  		return;
1580  	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
1581  		ts = kthread_run(qeth_do_reset, card, "qeth_recover");
1582  		if (IS_ERR(ts)) {
1583  			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
1584  			qeth_clear_thread_running_bit(card,
1585  				QETH_RECOVER_THREAD);
1586  		}
1587  	}
1588  }
1589  
1590  static void qeth_buffer_reclaim_work(struct work_struct *);
qeth_setup_card(struct qeth_card * card)1591  static void qeth_setup_card(struct qeth_card *card)
1592  {
1593  	QETH_CARD_TEXT(card, 2, "setupcrd");
1594  
1595  	card->info.type = CARD_RDEV(card)->id.driver_info;
1596  	card->state = CARD_STATE_DOWN;
1597  	spin_lock_init(&card->lock);
1598  	spin_lock_init(&card->thread_mask_lock);
1599  	mutex_init(&card->conf_mutex);
1600  	mutex_init(&card->discipline_mutex);
1601  	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
1602  	INIT_LIST_HEAD(&card->cmd_waiter_list);
1603  	init_waitqueue_head(&card->wait_q);
1604  	qeth_set_initial_options(card);
1605  	/* IP address takeover */
1606  	INIT_LIST_HEAD(&card->ipato.entries);
1607  	qeth_init_qdio_info(card);
1608  	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
1609  	hash_init(card->rx_mode_addrs);
1610  	hash_init(card->local_addrs4);
1611  	hash_init(card->local_addrs6);
1612  	spin_lock_init(&card->local_addrs4_lock);
1613  	spin_lock_init(&card->local_addrs6_lock);
1614  }
1615  
qeth_core_sl_print(struct seq_file * m,struct service_level * slr)1616  static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
1617  {
1618  	struct qeth_card *card = container_of(slr, struct qeth_card,
1619  					qeth_service_level);
1620  	if (card->info.mcl_level[0])
1621  		seq_printf(m, "qeth: %s firmware level %s\n",
1622  			CARD_BUS_ID(card), card->info.mcl_level);
1623  }
1624  
qeth_alloc_card(struct ccwgroup_device * gdev)1625  static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
1626  {
1627  	struct qeth_card *card;
1628  
1629  	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
1630  	card = kzalloc(sizeof(*card), GFP_KERNEL);
1631  	if (!card)
1632  		goto out;
1633  	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
1634  
1635  	card->gdev = gdev;
1636  	dev_set_drvdata(&gdev->dev, card);
1637  	CARD_RDEV(card) = gdev->cdev[0];
1638  	CARD_WDEV(card) = gdev->cdev[1];
1639  	CARD_DDEV(card) = gdev->cdev[2];
1640  
1641  	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
1642  						 dev_name(&gdev->dev));
1643  	if (!card->event_wq)
1644  		goto out_wq;
1645  
1646  	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
1647  	if (!card->read_cmd)
1648  		goto out_read_cmd;
1649  
1650  	card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
1651  					   qeth_debugfs_root);
1652  	debugfs_create_file("local_addrs", 0400, card->debugfs, card,
1653  			    &qeth_debugfs_local_addr_fops);
1654  
1655  	card->qeth_service_level.seq_print = qeth_core_sl_print;
1656  	register_service_level(&card->qeth_service_level);
1657  	return card;
1658  
1659  out_read_cmd:
1660  	destroy_workqueue(card->event_wq);
1661  out_wq:
1662  	dev_set_drvdata(&gdev->dev, NULL);
1663  	kfree(card);
1664  out:
1665  	return NULL;
1666  }
1667  
qeth_clear_channel(struct qeth_card * card,struct qeth_channel * channel)1668  static int qeth_clear_channel(struct qeth_card *card,
1669  			      struct qeth_channel *channel)
1670  {
1671  	int rc;
1672  
1673  	QETH_CARD_TEXT(card, 3, "clearch");
1674  	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1675  	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
1676  	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1677  
1678  	if (rc)
1679  		return rc;
1680  	rc = wait_event_interruptible_timeout(card->wait_q,
1681  			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
1682  	if (rc == -ERESTARTSYS)
1683  		return rc;
1684  	if (channel->state != CH_STATE_STOPPED)
1685  		return -ETIME;
1686  	channel->state = CH_STATE_DOWN;
1687  	return 0;
1688  }
1689  
qeth_halt_channel(struct qeth_card * card,struct qeth_channel * channel)1690  static int qeth_halt_channel(struct qeth_card *card,
1691  			     struct qeth_channel *channel)
1692  {
1693  	int rc;
1694  
1695  	QETH_CARD_TEXT(card, 3, "haltch");
1696  	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1697  	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
1698  	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1699  
1700  	if (rc)
1701  		return rc;
1702  	rc = wait_event_interruptible_timeout(card->wait_q,
1703  			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
1704  	if (rc == -ERESTARTSYS)
1705  		return rc;
1706  	if (channel->state != CH_STATE_HALTED)
1707  		return -ETIME;
1708  	return 0;
1709  }
1710  
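/* Take the CCW device offline and detach the IRQ handler. A command that
 * is still active at this point indicates a bug and is logged via
 * dev_err().
 */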
1711  static int qeth_stop_channel(struct qeth_channel *channel)
1712  {
1713  	struct ccw_device *cdev = channel->ccwdev;
1714  	int rc;
1715  
1716  	rc = ccw_device_set_offline(cdev);
1717  
1718  	spin_lock_irq(get_ccwdev_lock(cdev));
1719  	if (channel->active_cmd)
1720  		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
1721  			channel->active_cmd);
1722  
1723  	cdev->handler = NULL;
1724  	spin_unlock_irq(get_ccwdev_lock(cdev));
1725  
1726  	return rc;
1727  }
1728  
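/* Counterpart to qeth_stop_channel(): reset the channel state, attach the
 * qeth IRQ handler and set the CCW device online; the handler is detached
 * again if going online fails.
 */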
1729  static int qeth_start_channel(struct qeth_channel *channel)
1730  {
1731  	struct ccw_device *cdev = channel->ccwdev;
1732  	int rc;
1733  
1734  	channel->state = CH_STATE_DOWN;
1735  	xchg(&channel->active_cmd, NULL);
1736  
1737  	spin_lock_irq(get_ccwdev_lock(cdev));
1738  	cdev->handler = qeth_irq;
1739  	spin_unlock_irq(get_ccwdev_lock(cdev));
1740  
1741  	rc = ccw_device_set_online(cdev);
1742  	if (rc)
1743  		goto err;
1744  
1745  	return 0;
1746  
1747  err:
1748  	spin_lock_irq(get_ccwdev_lock(cdev));
1749  	cdev->handler = NULL;
1750  	spin_unlock_irq(get_ccwdev_lock(cdev));
1751  	return rc;
1752  }
1753  
1754  static int qeth_halt_channels(struct qeth_card *card)
1755  {
1756  	int rc1 = 0, rc2 = 0, rc3 = 0;
1757  
1758  	QETH_CARD_TEXT(card, 3, "haltchs");
1759  	rc1 = qeth_halt_channel(card, &card->read);
1760  	rc2 = qeth_halt_channel(card, &card->write);
1761  	rc3 = qeth_halt_channel(card, &card->data);
1762  	if (rc1)
1763  		return rc1;
1764  	if (rc2)
1765  		return rc2;
1766  	return rc3;
1767  }
1768  
1769  static int qeth_clear_channels(struct qeth_card *card)
1770  {
1771  	int rc1 = 0, rc2 = 0, rc3 = 0;
1772  
1773  	QETH_CARD_TEXT(card, 3, "clearchs");
1774  	rc1 = qeth_clear_channel(card, &card->read);
1775  	rc2 = qeth_clear_channel(card, &card->write);
1776  	rc3 = qeth_clear_channel(card, &card->data);
1777  	if (rc1)
1778  		return rc1;
1779  	if (rc2)
1780  		return rc2;
1781  	return rc3;
1782  }
1783  
1784  static int qeth_clear_halt_card(struct qeth_card *card, int halt)
1785  {
1786  	int rc = 0;
1787  
1788  	QETH_CARD_TEXT(card, 3, "clhacrd");
1789  
1790  	if (halt)
1791  		rc = qeth_halt_channels(card);
1792  	if (rc)
1793  		return rc;
1794  	return qeth_clear_channels(card);
1795  }
1796  
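/* Tear down the QDIO state. If the queues are ESTABLISHED they are shut
 * down first (using HALT for HiperSockets, CLEAR otherwise) and the state
 * drops back to ALLOCATED; if another caller is already CLEANING, return
 * immediately. Finally the CCW channels are cleared (and optionally
 * halted).
 */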
1797  static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
1798  {
1799  	int rc = 0;
1800  
1801  	QETH_CARD_TEXT(card, 3, "qdioclr");
1802  	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
1803  		QETH_QDIO_CLEANING)) {
1804  	case QETH_QDIO_ESTABLISHED:
1805  		if (IS_IQD(card))
1806  			rc = qdio_shutdown(CARD_DDEV(card),
1807  				QDIO_FLAG_CLEANUP_USING_HALT);
1808  		else
1809  			rc = qdio_shutdown(CARD_DDEV(card),
1810  				QDIO_FLAG_CLEANUP_USING_CLEAR);
1811  		if (rc)
1812  			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
1813  		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
1814  		break;
1815  	case QETH_QDIO_CLEANING:
1816  		return rc;
1817  	default:
1818  		break;
1819  	}
1820  	rc = qeth_clear_halt_card(card, use_halt);
1821  	if (rc)
1822  		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
1823  	return rc;
1824  }
1825  
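/* Under z/VM, query CP via DIAG 0x26C (VNIC info) to learn whether the
 * virtual NIC is defined as layer 2 or layer 3, so that the matching
 * discipline can be enforced automatically.
 */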
1826  static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
1827  {
1828  	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
1829  	struct diag26c_vnic_resp *response = NULL;
1830  	struct diag26c_vnic_req *request = NULL;
1831  	struct ccw_dev_id id;
1832  	char userid[80];
1833  	int rc = 0;
1834  
1835  	QETH_CARD_TEXT(card, 2, "vmlayer");
1836  
1837  	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
1838  	if (rc)
1839  		goto out;
1840  
1841  	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
1842  	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
1843  	if (!request || !response) {
1844  		rc = -ENOMEM;
1845  		goto out;
1846  	}
1847  
1848  	ccw_device_get_id(CARD_RDEV(card), &id);
1849  	request->resp_buf_len = sizeof(*response);
1850  	request->resp_version = DIAG26C_VERSION6_VM65918;
1851  	request->req_format = DIAG26C_VNIC_INFO;
1852  	ASCEBC(userid, 8);
1853  	memcpy(&request->sys_name, userid, 8);
1854  	request->devno = id.devno;
1855  
1856  	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
1857  	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
1858  	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
1859  	if (rc)
1860  		goto out;
1861  	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
1862  
1863  	if (request->resp_buf_len < sizeof(*response) ||
1864  	    response->version != request->resp_version) {
1865  		rc = -EIO;
1866  		goto out;
1867  	}
1868  
1869  	if (response->protocol == VNIC_INFO_PROT_L2)
1870  		disc = QETH_DISCIPLINE_LAYER2;
1871  	else if (response->protocol == VNIC_INFO_PROT_L3)
1872  		disc = QETH_DISCIPLINE_LAYER3;
1873  
1874  out:
1875  	kfree(response);
1876  	kfree(request);
1877  	if (rc)
1878  		QETH_CARD_TEXT_(card, 2, "err%x", rc);
1879  	return disc;
1880  }
1881  
1882  /* Determine whether the device requires a specific layer discipline */
1883  static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
1884  {
1885  	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
1886  
1887  	if (IS_OSM(card))
1888  		disc = QETH_DISCIPLINE_LAYER2;
1889  	else if (IS_VM_NIC(card))
1890  		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
1891  				      qeth_vm_detect_layer(card);
1892  
1893  	switch (disc) {
1894  	case QETH_DISCIPLINE_LAYER2:
1895  		QETH_CARD_TEXT(card, 3, "force l2");
1896  		break;
1897  	case QETH_DISCIPLINE_LAYER3:
1898  		QETH_CARD_TEXT(card, 3, "force l3");
1899  		break;
1900  	default:
1901  		QETH_CARD_TEXT(card, 3, "force no");
1902  	}
1903  
1904  	return disc;
1905  }
1906  
1907  static void qeth_set_blkt_defaults(struct qeth_card *card)
1908  {
1909  	QETH_CARD_TEXT(card, 2, "cfgblkt");
1910  
1911  	if (card->info.use_v1_blkt) {
1912  		card->info.blkt.time_total = 0;
1913  		card->info.blkt.inter_packet = 0;
1914  		card->info.blkt.inter_packet_jumbo = 0;
1915  	} else {
1916  		card->info.blkt.time_total = 250;
1917  		card->info.blkt.inter_packet = 5;
1918  		card->info.blkt.inter_packet_jumbo = 15;
1919  	}
1920  }
1921  
1922  static void qeth_idx_init(struct qeth_card *card)
1923  {
1924  	memset(&card->seqno, 0, sizeof(card->seqno));
1925  
1926  	card->token.issuer_rm_w = 0x00010103UL;
1927  	card->token.cm_filter_w = 0x00010108UL;
1928  	card->token.cm_connection_w = 0x0001010aUL;
1929  	card->token.ulp_filter_w = 0x0001010bUL;
1930  	card->token.ulp_connection_w = 0x0001010dUL;
1931  
1932  	switch (card->info.type) {
1933  	case QETH_CARD_TYPE_IQD:
1934  		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
1935  		break;
1936  	case QETH_CARD_TYPE_OSD:
1937  		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
1938  		break;
1939  	default:
1940  		break;
1941  	}
1942  }
1943  
1944  static void qeth_idx_finalize_cmd(struct qeth_card *card,
1945  				  struct qeth_cmd_buffer *iob)
1946  {
1947  	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
1948  	       QETH_SEQ_NO_LENGTH);
1949  	if (iob->channel == &card->write)
1950  		card->seqno.trans_hdr++;
1951  }
1952  
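/* Map the local function level onto the value that the peer is expected
 * to report in its IDX ACTIVATE reply (cf. the comparisons in the IDX
 * activate callbacks below).
 */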
1953  static int qeth_peer_func_level(int level)
1954  {
1955  	if ((level & 0xff) == 8)
1956  		return (level & 0xff) + 0x400;
1957  	if (((level >> 8) & 3) == 1)
1958  		return (level & 0xff) + 0x200;
1959  	return level;
1960  }
1961  
1962  static void qeth_mpc_finalize_cmd(struct qeth_card *card,
1963  				  struct qeth_cmd_buffer *iob)
1964  {
1965  	qeth_idx_finalize_cmd(card, iob);
1966  
1967  	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
1968  	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
1969  	card->seqno.pdu_hdr++;
1970  	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
1971  	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
1972  
1973  	iob->callback = qeth_release_buffer_cb;
1974  }
1975  
1976  static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
1977  				 struct qeth_cmd_buffer *reply)
1978  {
1979  	/* MPC cmds are issued strictly in sequence. */
1980  	return !IS_IPA(reply->data);
1981  }
1982  
1983  static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
1984  						  const void *data,
1985  						  unsigned int data_length)
1986  {
1987  	struct qeth_cmd_buffer *iob;
1988  
1989  	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
1990  	if (!iob)
1991  		return NULL;
1992  
1993  	memcpy(iob->data, data, data_length);
1994  	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
1995  		       iob->data);
1996  	iob->finalize = qeth_mpc_finalize_cmd;
1997  	iob->match = qeth_mpc_match_reply;
1998  	return iob;
1999  }
2000  
2001  /**
2002   * qeth_send_control_data() -	send control command to the card
2003   * @card:			qeth_card structure pointer
2004   * @iob:			qeth_cmd_buffer pointer
2005   * @reply_cb:			callback function pointer
2006   *  cb_card:			pointer to the qeth_card structure
2007   *  cb_reply:			pointer to the qeth_reply structure
2008   *  cb_cmd:			pointer to the original iob for non-IPA
2009   *				commands, or to the qeth_ipa_cmd structure
2010   *				for the IPA commands.
2011   * @reply_param:		private pointer passed to the callback
2012   *
2013   * Callback function gets called one or more times, with cb_cmd
2014   * pointing to the response returned by the hardware. Callback
2015   * function must return
2016   *   > 0 if more reply blocks are expected,
2017   *     0 if the last or only reply block is received, and
2018   *   < 0 on error.
2019   * Callback function can get the value of the reply_param pointer from the
2020   * field 'param' of the structure qeth_reply.
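 *
 * A minimal callback sketch (modelled on qeth_send_ipa_cmd_cb() further
 * down; the name my_reply_cb is just a placeholder):
 *
 *	static int my_reply_cb(struct qeth_card *cb_card,
 *			       struct qeth_reply *cb_reply,
 *			       unsigned long cb_cmd)
 *	{
 *		struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)cb_cmd;
 *
 *		return cmd->hdr.return_code ? -EIO : 0;
 *	}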
2021   */
2022  
2023  static int qeth_send_control_data(struct qeth_card *card,
2024  				  struct qeth_cmd_buffer *iob,
2025  				  int (*reply_cb)(struct qeth_card *cb_card,
2026  						  struct qeth_reply *cb_reply,
2027  						  unsigned long cb_cmd),
2028  				  void *reply_param)
2029  {
2030  	struct qeth_channel *channel = iob->channel;
2031  	struct qeth_reply *reply = &iob->reply;
2032  	long timeout = iob->timeout;
2033  	int rc;
2034  
2035  	QETH_CARD_TEXT(card, 2, "sendctl");
2036  
2037  	reply->callback = reply_cb;
2038  	reply->param = reply_param;
2039  
2040  	timeout = wait_event_interruptible_timeout(card->wait_q,
2041  						   qeth_trylock_channel(channel, iob),
2042  						   timeout);
2043  	if (timeout <= 0) {
2044  		qeth_put_cmd(iob);
2045  		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
2046  	}
2047  
2048  	if (iob->finalize)
2049  		iob->finalize(card, iob);
2050  	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));
2051  
2052  	qeth_enqueue_cmd(card, iob);
2053  
2054  	/* This pairs with iob->callback, and keeps the iob alive after IO: */
2055  	qeth_get_cmd(iob);
2056  
2057  	QETH_CARD_TEXT(card, 6, "noirqpnd");
2058  	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
2059  	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
2060  				      (addr_t) iob, 0, 0, timeout);
2061  	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
2062  	if (rc) {
2063  		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
2064  				 CARD_DEVID(card), rc);
2065  		QETH_CARD_TEXT_(card, 2, " err%d", rc);
2066  		qeth_dequeue_cmd(card, iob);
2067  		qeth_put_cmd(iob);
2068  		qeth_unlock_channel(card, channel);
2069  		goto out;
2070  	}
2071  
2072  	timeout = wait_for_completion_interruptible_timeout(&iob->done,
2073  							    timeout);
2074  	if (timeout <= 0)
2075  		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
2076  
2077  	qeth_dequeue_cmd(card, iob);
2078  
2079  	if (reply_cb) {
2080  		/* Wait until the callback for a late reply has completed: */
2081  		spin_lock_irq(&iob->lock);
2082  		if (rc)
2083  			/* Zap any callback that's still pending: */
2084  			iob->rc = rc;
2085  		spin_unlock_irq(&iob->lock);
2086  	}
2087  
2088  	if (!rc)
2089  		rc = iob->rc;
2090  
2091  out:
2092  	qeth_put_cmd(iob);
2093  	return rc;
2094  }
2095  
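/* Layout of the RCD (read configuration data) response: three consecutive
 * node descriptors, which qeth_read_conf_data_cb() evaluates below.
 */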
2096  struct qeth_node_desc {
2097  	struct node_descriptor nd1;
2098  	struct node_descriptor nd2;
2099  	struct node_descriptor nd3;
2100  };
2101  
2102  static void qeth_read_conf_data_cb(struct qeth_card *card,
2103  				   struct qeth_cmd_buffer *iob,
2104  				   unsigned int data_length)
2105  {
2106  	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
2107  	int rc = 0;
2108  	u8 *tag;
2109  
2110  	QETH_CARD_TEXT(card, 2, "cfgunit");
2111  
2112  	if (data_length < sizeof(*nd)) {
2113  		rc = -EINVAL;
2114  		goto out;
2115  	}
2116  
2117  	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
2118  			       nd->nd1.plant[1] == _ascebc['M'];
2119  	tag = (u8 *)&nd->nd1.tag;
2120  	card->info.chpid = tag[0];
2121  	card->info.unit_addr2 = tag[1];
2122  
2123  	tag = (u8 *)&nd->nd2.tag;
2124  	card->info.cula = tag[1];
2125  
2126  	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
2127  				 nd->nd3.model[1] == 0xF0 &&
2128  				 nd->nd3.model[2] >= 0xF1 &&
2129  				 nd->nd3.model[2] <= 0xF4;
2130  
2131  out:
2132  	qeth_notify_cmd(iob, rc);
2133  	qeth_put_cmd(iob);
2134  }
2135  
2136  static int qeth_read_conf_data(struct qeth_card *card)
2137  {
2138  	struct qeth_channel *channel = &card->data;
2139  	struct qeth_cmd_buffer *iob;
2140  	struct ciw *ciw;
2141  
2142  	/* scan for RCD command in extended SenseID data */
2143  	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
2144  	if (!ciw || ciw->cmd == 0)
2145  		return -EOPNOTSUPP;
2146  	if (ciw->count < sizeof(struct qeth_node_desc))
2147  		return -EINVAL;
2148  
2149  	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
2150  	if (!iob)
2151  		return -ENOMEM;
2152  
2153  	iob->callback = qeth_read_conf_data_cb;
2154  	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
2155  		       iob->data);
2156  
2157  	return qeth_send_control_data(card, iob, NULL, NULL);
2158  }
2159  
2160  static int qeth_idx_check_activate_response(struct qeth_card *card,
2161  					    struct qeth_channel *channel,
2162  					    struct qeth_cmd_buffer *iob)
2163  {
2164  	int rc;
2165  
2166  	rc = qeth_check_idx_response(card, iob->data);
2167  	if (rc)
2168  		return rc;
2169  
2170  	if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
2171  		return 0;
2172  
2173  	/* negative reply: */
2174  	QETH_CARD_TEXT_(card, 2, "idxneg%c",
2175  			QETH_IDX_ACT_CAUSE_CODE(iob->data));
2176  
2177  	switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
2178  	case QETH_IDX_ACT_ERR_EXCL:
2179  		dev_err(&channel->ccwdev->dev,
2180  			"The adapter is used exclusively by another host\n");
2181  		return -EBUSY;
2182  	case QETH_IDX_ACT_ERR_AUTH:
2183  	case QETH_IDX_ACT_ERR_AUTH_USER:
2184  		dev_err(&channel->ccwdev->dev,
2185  			"Setting the device online failed because of insufficient authorization\n");
2186  		return -EPERM;
2187  	default:
2188  		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
2189  				 CCW_DEVID(channel->ccwdev));
2190  		return -EIO;
2191  	}
2192  }
2193  
2194  static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
2195  					      struct qeth_cmd_buffer *iob,
2196  					      unsigned int data_length)
2197  {
2198  	struct qeth_channel *channel = iob->channel;
2199  	u16 peer_level;
2200  	int rc;
2201  
2202  	QETH_CARD_TEXT(card, 2, "idxrdcb");
2203  
2204  	rc = qeth_idx_check_activate_response(card, channel, iob);
2205  	if (rc)
2206  		goto out;
2207  
2208  	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
2209  	if (peer_level != qeth_peer_func_level(card->info.func_level)) {
2210  		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
2211  				 CCW_DEVID(channel->ccwdev),
2212  				 card->info.func_level, peer_level);
2213  		rc = -EINVAL;
2214  		goto out;
2215  	}
2216  
2217  	memcpy(&card->token.issuer_rm_r,
2218  	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
2219  	       QETH_MPC_TOKEN_LENGTH);
2220  	memcpy(&card->info.mcl_level[0],
2221  	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
2222  
2223  out:
2224  	qeth_notify_cmd(iob, rc);
2225  	qeth_put_cmd(iob);
2226  }
2227  
2228  static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
2229  					       struct qeth_cmd_buffer *iob,
2230  					       unsigned int data_length)
2231  {
2232  	struct qeth_channel *channel = iob->channel;
2233  	u16 peer_level;
2234  	int rc;
2235  
2236  	QETH_CARD_TEXT(card, 2, "idxwrcb");
2237  
2238  	rc = qeth_idx_check_activate_response(card, channel, iob);
2239  	if (rc)
2240  		goto out;
2241  
2242  	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
2243  	if ((peer_level & ~0x0100) !=
2244  	    qeth_peer_func_level(card->info.func_level)) {
2245  		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
2246  				 CCW_DEVID(channel->ccwdev),
2247  				 card->info.func_level, peer_level);
2248  		rc = -EINVAL;
2249  	}
2250  
2251  out:
2252  	qeth_notify_cmd(iob, rc);
2253  	qeth_put_cmd(iob);
2254  }
2255  
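/* Fill in the parts common to both IDX ACTIVATE variants: a WRITE CCW
 * chained to a READ CCW for the reply, plus port number, issuer token,
 * function level and the address of the QDIO data device.
 */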
2256  static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
2257  					struct qeth_cmd_buffer *iob)
2258  {
2259  	u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
2260  	u8 port = ((u8)card->dev->dev_port) | 0x80;
2261  	struct ccw1 *ccw = __ccw_from_cmd(iob);
2262  
2263  	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
2264  		       iob->data);
2265  	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
2266  	iob->finalize = qeth_idx_finalize_cmd;
2267  
2268  	port |= QETH_IDX_ACT_INVAL_FRAME;
2269  	memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
2270  	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
2271  	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
2272  	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
2273  	       &card->info.func_level, 2);
2274  	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
2275  	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
2276  }
2277  
2278  static int qeth_idx_activate_read_channel(struct qeth_card *card)
2279  {
2280  	struct qeth_channel *channel = &card->read;
2281  	struct qeth_cmd_buffer *iob;
2282  	int rc;
2283  
2284  	QETH_CARD_TEXT(card, 2, "idxread");
2285  
2286  	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2287  	if (!iob)
2288  		return -ENOMEM;
2289  
2290  	memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
2291  	qeth_idx_setup_activate_cmd(card, iob);
2292  	iob->callback = qeth_idx_activate_read_channel_cb;
2293  
2294  	rc = qeth_send_control_data(card, iob, NULL, NULL);
2295  	if (rc)
2296  		return rc;
2297  
2298  	channel->state = CH_STATE_UP;
2299  	return 0;
2300  }
2301  
2302  static int qeth_idx_activate_write_channel(struct qeth_card *card)
2303  {
2304  	struct qeth_channel *channel = &card->write;
2305  	struct qeth_cmd_buffer *iob;
2306  	int rc;
2307  
2308  	QETH_CARD_TEXT(card, 2, "idxwrite");
2309  
2310  	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2311  	if (!iob)
2312  		return -ENOMEM;
2313  
2314  	memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
2315  	qeth_idx_setup_activate_cmd(card, iob);
2316  	iob->callback = qeth_idx_activate_write_channel_cb;
2317  
2318  	rc = qeth_send_control_data(card, iob, NULL, NULL);
2319  	if (rc)
2320  		return rc;
2321  
2322  	channel->state = CH_STATE_UP;
2323  	return 0;
2324  }
2325  
2326  static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2327  		unsigned long data)
2328  {
2329  	struct qeth_cmd_buffer *iob;
2330  
2331  	QETH_CARD_TEXT(card, 2, "cmenblcb");
2332  
2333  	iob = (struct qeth_cmd_buffer *) data;
2334  	memcpy(&card->token.cm_filter_r,
2335  	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
2336  	       QETH_MPC_TOKEN_LENGTH);
2337  	return 0;
2338  }
2339  
2340  static int qeth_cm_enable(struct qeth_card *card)
2341  {
2342  	struct qeth_cmd_buffer *iob;
2343  
2344  	QETH_CARD_TEXT(card, 2, "cmenable");
2345  
2346  	iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
2347  	if (!iob)
2348  		return -ENOMEM;
2349  
2350  	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
2351  	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2352  	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
2353  	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
2354  
2355  	return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
2356  }
2357  
2358  static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2359  		unsigned long data)
2360  {
2361  	struct qeth_cmd_buffer *iob;
2362  
2363  	QETH_CARD_TEXT(card, 2, "cmsetpcb");
2364  
2365  	iob = (struct qeth_cmd_buffer *) data;
2366  	memcpy(&card->token.cm_connection_r,
2367  	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
2368  	       QETH_MPC_TOKEN_LENGTH);
2369  	return 0;
2370  }
2371  
2372  static int qeth_cm_setup(struct qeth_card *card)
2373  {
2374  	struct qeth_cmd_buffer *iob;
2375  
2376  	QETH_CARD_TEXT(card, 2, "cmsetup");
2377  
2378  	iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
2379  	if (!iob)
2380  		return -ENOMEM;
2381  
2382  	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
2383  	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2384  	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
2385  	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
2386  	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
2387  	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
2388  	return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
2389  }
2390  
2391  static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type)
2392  {
2393  	if (link_type == QETH_LINK_TYPE_LANE_TR ||
2394  	    link_type == QETH_LINK_TYPE_HSTR) {
2395  		dev_err(&card->gdev->dev, "Unsupported Token Ring device\n");
2396  		return false;
2397  	}
2398  
2399  	return true;
2400  }
2401  
2402  static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
2403  {
2404  	struct net_device *dev = card->dev;
2405  	unsigned int new_mtu;
2406  
2407  	if (!max_mtu) {
2408  		/* IQD needs accurate max MTU to set up its RX buffers: */
2409  		if (IS_IQD(card))
2410  			return -EINVAL;
2411  		/* tolerate quirky HW: */
2412  		max_mtu = ETH_MAX_MTU;
2413  	}
2414  
2415  	rtnl_lock();
2416  	if (IS_IQD(card)) {
2417  		/* move any device with default MTU to new max MTU: */
2418  		new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;
2419  
2420  		/* adjust RX buffer size to new max MTU: */
2421  		card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
2422  		if (dev->max_mtu && dev->max_mtu != max_mtu)
2423  			qeth_free_qdio_queues(card);
2424  	} else {
2425  		if (dev->mtu)
2426  			new_mtu = dev->mtu;
2427  		/* default MTUs for first setup: */
2428  		else if (IS_LAYER2(card))
2429  			new_mtu = ETH_DATA_LEN;
2430  		else
2431  			new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
2432  	}
2433  
2434  	dev->max_mtu = max_mtu;
2435  	dev->mtu = min(new_mtu, max_mtu);
2436  	rtnl_unlock();
2437  	return 0;
2438  }
2439  
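/* Translate the HiperSockets frame-size code returned by ULP ENABLE into
 * an MTU value; 0 signals an unknown code.
 */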
2440  static int qeth_get_mtu_outof_framesize(int framesize)
2441  {
2442  	switch (framesize) {
2443  	case 0x4000:
2444  		return 8192;
2445  	case 0x6000:
2446  		return 16384;
2447  	case 0xa000:
2448  		return 32768;
2449  	case 0xffff:
2450  		return 57344;
2451  	default:
2452  		return 0;
2453  	}
2454  }
2455  
2456  static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2457  		unsigned long data)
2458  {
2459  	__u16 mtu, framesize;
2460  	__u16 len;
2461  	struct qeth_cmd_buffer *iob;
2462  	u8 link_type = 0;
2463  
2464  	QETH_CARD_TEXT(card, 2, "ulpenacb");
2465  
2466  	iob = (struct qeth_cmd_buffer *) data;
2467  	memcpy(&card->token.ulp_filter_r,
2468  	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
2469  	       QETH_MPC_TOKEN_LENGTH);
2470  	if (IS_IQD(card)) {
2471  		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
2472  		mtu = qeth_get_mtu_outof_framesize(framesize);
2473  	} else {
2474  		mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
2475  	}
2476  	*(u16 *)reply->param = mtu;
2477  
2478  	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
2479  	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
2480  		memcpy(&link_type,
2481  		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
2482  		if (!qeth_is_supported_link_type(card, link_type))
2483  			return -EPROTONOSUPPORT;
2484  	}
2485  
2486  	card->info.link_type = link_type;
2487  	QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
2488  	return 0;
2489  }
2490  
2491  static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
2492  {
2493  	return IS_LAYER2(card) ? QETH_MPC_PROT_L2 : QETH_MPC_PROT_L3;
2494  }
2495  
2496  static int qeth_ulp_enable(struct qeth_card *card)
2497  {
2498  	u8 prot_type = qeth_mpc_select_prot_type(card);
2499  	struct qeth_cmd_buffer *iob;
2500  	u16 max_mtu;
2501  	int rc;
2502  
2503  	QETH_CARD_TEXT(card, 2, "ulpenabl");
2504  
2505  	iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
2506  	if (!iob)
2507  		return -ENOMEM;
2508  
2509  	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
2510  	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
2511  	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
2512  	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2513  	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
2514  	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2515  	rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
2516  	if (rc)
2517  		return rc;
2518  	return qeth_update_max_mtu(card, max_mtu);
2519  }
2520  
2521  static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2522  		unsigned long data)
2523  {
2524  	struct qeth_cmd_buffer *iob;
2525  
2526  	QETH_CARD_TEXT(card, 2, "ulpstpcb");
2527  
2528  	iob = (struct qeth_cmd_buffer *) data;
2529  	memcpy(&card->token.ulp_connection_r,
2530  	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2531  	       QETH_MPC_TOKEN_LENGTH);
2532  	if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2533  		     3)) {
2534  		QETH_CARD_TEXT(card, 2, "olmlimit");
2535  		dev_err(&card->gdev->dev, "A connection could not be established because of an OLM limit\n");
2537  		return -EMLINK;
2538  	}
2539  	return 0;
2540  }
2541  
2542  static int qeth_ulp_setup(struct qeth_card *card)
2543  {
2544  	__u16 temp;
2545  	struct qeth_cmd_buffer *iob;
2546  
2547  	QETH_CARD_TEXT(card, 2, "ulpsetup");
2548  
2549  	iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
2550  	if (!iob)
2551  		return -ENOMEM;
2552  
2553  	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2554  	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2555  	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2556  	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2557  	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2558  	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
2559  
2560  	memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
2561  	temp = (card->info.cula << 8) + card->info.unit_addr2;
2562  	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2563  	return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
2564  }
2565  
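/* Allocate the driver-side descriptor for one TX buffer slot and attach
 * it to the QDIO buffer at index @bidx, starting out in the EMPTY state.
 */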
2566  static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx,
2567  			      gfp_t gfp)
2568  {
2569  	struct qeth_qdio_out_buffer *newbuf;
2570  
2571  	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, gfp);
2572  	if (!newbuf)
2573  		return -ENOMEM;
2574  
2575  	newbuf->buffer = q->qdio_bufs[bidx];
2576  	skb_queue_head_init(&newbuf->skb_list);
2577  	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
2578  	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
2579  	q->bufs[bidx] = newbuf;
2580  	return 0;
2581  }
2582  
2583  static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
2584  {
2585  	if (!q)
2586  		return;
2587  
2588  	qeth_drain_output_queue(q, true);
2589  	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2590  	kfree(q);
2591  }
2592  
2593  static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
2594  {
2595  	struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
2596  	unsigned int i;
2597  
2598  	if (!q)
2599  		return NULL;
2600  
2601  	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q))
2602  		goto err_qdio_bufs;
2603  
2604  	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
2605  		if (qeth_alloc_out_buf(q, i, GFP_KERNEL))
2606  			goto err_out_bufs;
2607  	}
2608  
2609  	return q;
2610  
2611  err_out_bufs:
2612  	while (i > 0)
2613  		qeth_free_out_buf(q->bufs[--i]);
2614  	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2615  err_qdio_bufs:
2616  	kfree(q);
2617  	return NULL;
2618  }
2619  
2620  static void qeth_tx_completion_timer(struct timer_list *timer)
2621  {
2622  	struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);
2623  
2624  	napi_schedule(&queue->napi);
2625  	QETH_TXQ_STAT_INC(queue, completion_timer);
2626  }
2627  
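/* Allocate the completion queue, the RX buffer pool and all TX queues,
 * moving the QDIO state from UNINITIALIZED to ALLOCATED. Everything that
 * was already set up is unwound again if a later step fails.
 */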
2628  static int qeth_alloc_qdio_queues(struct qeth_card *card)
2629  {
2630  	unsigned int i;
2631  
2632  	QETH_CARD_TEXT(card, 2, "allcqdbf");
2633  
2634  	/* completion */
2635  	if (qeth_alloc_cq(card))
2636  		goto out_err;
2637  
2638  	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
2639  		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
2640  		return 0;
2641  
2642  	/* inbound buffer pool */
2643  	if (qeth_alloc_buffer_pool(card))
2644  		goto out_buffer_pool;
2645  
2646  	/* outbound */
2647  	for (i = 0; i < card->qdio.no_out_queues; ++i) {
2648  		struct qeth_qdio_out_q *queue;
2649  
2650  		queue = qeth_alloc_output_queue();
2651  		if (!queue)
2652  			goto out_freeoutq;
2653  		QETH_CARD_TEXT_(card, 2, "outq %i", i);
2654  		QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
2655  		card->qdio.out_qs[i] = queue;
2656  		queue->card = card;
2657  		queue->queue_no = i;
2658  		INIT_LIST_HEAD(&queue->pending_bufs);
2659  		spin_lock_init(&queue->lock);
2660  		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
2661  		if (IS_IQD(card)) {
2662  			queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
2663  			queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
2664  			queue->rescan_usecs = QETH_TX_TIMER_USECS;
2665  		} else {
2666  			queue->coalesce_usecs = USEC_PER_SEC;
2667  			queue->max_coalesced_frames = 0;
2668  			queue->rescan_usecs = 10 * USEC_PER_SEC;
2669  		}
2670  		queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
2671  	}
2672  
2673  	return 0;
2674  
2675  out_freeoutq:
2676  	while (i > 0) {
2677  		qeth_free_output_queue(card->qdio.out_qs[--i]);
2678  		card->qdio.out_qs[i] = NULL;
2679  	}
2680  	qeth_free_buffer_pool(card);
2681  out_buffer_pool:
2682  	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
2683  	qeth_free_cq(card);
2684  out_err:
2685  	return -ENOMEM;
2686  }
2687  
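/* Release all QDIO resources again: the completion queue, any RX skbs,
 * the RX buffer pool and the TX queues; the state drops back to
 * UNINITIALIZED.
 */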
2688  static void qeth_free_qdio_queues(struct qeth_card *card)
2689  {
2690  	int i, j;
2691  
2692  	qeth_free_cq(card);
2693  
2694  	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
2695  		QETH_QDIO_UNINITIALIZED)
2696  		return;
2697  
2698  	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2699  		if (card->qdio.in_q->bufs[j].rx_skb) {
2700  			consume_skb(card->qdio.in_q->bufs[j].rx_skb);
2701  			card->qdio.in_q->bufs[j].rx_skb = NULL;
2702  		}
2703  	}
2704  
2705  	/* inbound buffer pool */
2706  	qeth_free_buffer_pool(card);
2707  	/* free outbound qdio_qs */
2708  	for (i = 0; i < card->qdio.no_out_queues; i++) {
2709  		qeth_free_output_queue(card->qdio.out_qs[i]);
2710  		card->qdio.out_qs[i] = NULL;
2711  	}
2712  }
2713  
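/* Populate the QIB parameter area with the EBCDIC-tagged PCIT and BLKT
 * blocks. A PQUE block with the per-queue priorities is added as well,
 * unless prio-queueing is active (which implies the default priorities)
 * or there is only a single TX queue.
 */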
2714  static void qeth_fill_qib_parms(struct qeth_card *card,
2715  				struct qeth_qib_parms *parms)
2716  {
2717  	struct qeth_qdio_out_q *queue;
2718  	unsigned int i;
2719  
2720  	parms->pcit_magic[0] = 'P';
2721  	parms->pcit_magic[1] = 'C';
2722  	parms->pcit_magic[2] = 'I';
2723  	parms->pcit_magic[3] = 'T';
2724  	ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic));
2725  	parms->pcit_a = QETH_PCI_THRESHOLD_A(card);
2726  	parms->pcit_b = QETH_PCI_THRESHOLD_B(card);
2727  	parms->pcit_c = QETH_PCI_TIMER_VALUE(card);
2728  
2729  	parms->blkt_magic[0] = 'B';
2730  	parms->blkt_magic[1] = 'L';
2731  	parms->blkt_magic[2] = 'K';
2732  	parms->blkt_magic[3] = 'T';
2733  	ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic));
2734  	parms->blkt_total = card->info.blkt.time_total;
2735  	parms->blkt_inter_packet = card->info.blkt.inter_packet;
2736  	parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;
2737  
2738  	/* Prio-queueing implicitly uses the default priorities: */
2739  	if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1)
2740  		return;
2741  
2742  	parms->pque_magic[0] = 'P';
2743  	parms->pque_magic[1] = 'Q';
2744  	parms->pque_magic[2] = 'U';
2745  	parms->pque_magic[3] = 'E';
2746  	ASCEBC(parms->pque_magic, sizeof(parms->pque_magic));
2747  	parms->pque_order = QETH_QIB_PQUE_ORDER_RR;
2748  	parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL;
2749  
2750  	qeth_for_each_output_queue(card, queue, i)
2751  		parms->pque_priority[i] = queue->priority;
2752  }
2753  
2754  static int qeth_qdio_activate(struct qeth_card *card)
2755  {
2756  	QETH_CARD_TEXT(card, 3, "qdioact");
2757  	return qdio_activate(CARD_DDEV(card));
2758  }
2759  
2760  static int qeth_dm_act(struct qeth_card *card)
2761  {
2762  	struct qeth_cmd_buffer *iob;
2763  
2764  	QETH_CARD_TEXT(card, 2, "dmact");
2765  
2766  	iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
2767  	if (!iob)
2768  		return -ENOMEM;
2769  
2770  	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
2771  	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2772  	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
2773  	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2774  	return qeth_send_control_data(card, iob, NULL, NULL);
2775  }
2776  
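/* Run the MPC bring-up sequence; each step must succeed before the next:
 * start the read channel, CM ENABLE/SETUP, ULP ENABLE/SETUP, then
 * allocate, establish and activate the QDIO queues, and finally DM ACT.
 */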
2777  static int qeth_mpc_initialize(struct qeth_card *card)
2778  {
2779  	int rc;
2780  
2781  	QETH_CARD_TEXT(card, 2, "mpcinit");
2782  
2783  	rc = qeth_issue_next_read(card);
2784  	if (rc) {
2785  		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
2786  		return rc;
2787  	}
2788  	rc = qeth_cm_enable(card);
2789  	if (rc) {
2790  		QETH_CARD_TEXT_(card, 2, "2err%d", rc);
2791  		return rc;
2792  	}
2793  	rc = qeth_cm_setup(card);
2794  	if (rc) {
2795  		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
2796  		return rc;
2797  	}
2798  	rc = qeth_ulp_enable(card);
2799  	if (rc) {
2800  		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
2801  		return rc;
2802  	}
2803  	rc = qeth_ulp_setup(card);
2804  	if (rc) {
2805  		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2806  		return rc;
2807  	}
2808  	rc = qeth_alloc_qdio_queues(card);
2809  	if (rc) {
2810  		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2811  		return rc;
2812  	}
2813  	rc = qeth_qdio_establish(card);
2814  	if (rc) {
2815  		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
2816  		qeth_free_qdio_queues(card);
2817  		return rc;
2818  	}
2819  	rc = qeth_qdio_activate(card);
2820  	if (rc) {
2821  		QETH_CARD_TEXT_(card, 2, "7err%d", rc);
2822  		return rc;
2823  	}
2824  	rc = qeth_dm_act(card);
2825  	if (rc) {
2826  		QETH_CARD_TEXT_(card, 2, "8err%d", rc);
2827  		return rc;
2828  	}
2829  
2830  	return 0;
2831  }
2832  
2833  static void qeth_print_status_message(struct qeth_card *card)
2834  {
2835  	switch (card->info.type) {
2836  	case QETH_CARD_TYPE_OSD:
2837  	case QETH_CARD_TYPE_OSM:
2838  	case QETH_CARD_TYPE_OSX:
2839  		/* VM will use a non-zero first character
2840  		 * to indicate a HiperSockets like reporting
2841  		 * of the level; OSA sets the first character to zero.
2842  		 */
2843  		if (!card->info.mcl_level[0]) {
2844  			scnprintf(card->info.mcl_level,
2845  				  sizeof(card->info.mcl_level),
2846  				  "%02x%02x",
2847  				  card->info.mcl_level[2],
2848  				  card->info.mcl_level[3]);
2849  			break;
2850  		}
2851  		fallthrough;
2852  	case QETH_CARD_TYPE_IQD:
2853  		if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
2854  			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
2855  				card->info.mcl_level[0]];
2856  			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
2857  				card->info.mcl_level[1]];
2858  			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
2859  				card->info.mcl_level[2]];
2860  			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
2861  				card->info.mcl_level[3]];
2862  			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
2863  		}
2864  		break;
2865  	default:
2866  		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
2867  	}
2868  	dev_info(&card->gdev->dev,
2869  		 "Device is a%s card%s%s%s\nwith link type %s.\n",
2870  		 qeth_get_cardname(card),
2871  		 (card->info.mcl_level[0]) ? " (level: " : "",
2872  		 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2873  		 (card->info.mcl_level[0]) ? ")" : "",
2874  		 qeth_get_cardname_short(card));
2875  }
2876  
2877  static void qeth_initialize_working_pool_list(struct qeth_card *card)
2878  {
2879  	struct qeth_buffer_pool_entry *entry;
2880  
2881  	QETH_CARD_TEXT(card, 5, "inwrklst");
2882  
2883  	list_for_each_entry(entry,
2884  			    &card->qdio.init_pool.entry_list, init_list) {
2885  		qeth_put_buffer_pool_entry(card, entry);
2886  	}
2887  }
2888  
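/* Pick an RX pool entry whose pages are no longer referenced elsewhere
 * (page_count() == 1). If no entry is fully free, take the first one and
 * replace its still-referenced pages with freshly allocated ones.
 */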
2889  static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
2890  					struct qeth_card *card)
2891  {
2892  	struct qeth_buffer_pool_entry *entry;
2893  	int i, free;
2894  
2895  	if (list_empty(&card->qdio.in_buf_pool.entry_list))
2896  		return NULL;
2897  
2898  	list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
2899  		free = 1;
2900  		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2901  			if (page_count(entry->elements[i]) > 1) {
2902  				free = 0;
2903  				break;
2904  			}
2905  		}
2906  		if (free) {
2907  			list_del_init(&entry->list);
2908  			return entry;
2909  		}
2910  	}
2911  
2912  	/* no free buffer in pool, so take the first one and swap in fresh pages */
2913  	entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
2914  				 struct qeth_buffer_pool_entry, list);
2915  	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2916  		if (page_count(entry->elements[i]) > 1) {
2917  			struct page *page = dev_alloc_page();
2918  
2919  			if (!page)
2920  				return NULL;
2921  
2922  			__free_page(entry->elements[i]);
2923  			entry->elements[i] = page;
2924  			QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
2925  		}
2926  	}
2927  	list_del_init(&entry->list);
2928  	return entry;
2929  }
2930  
2931  static int qeth_init_input_buffer(struct qeth_card *card,
2932  		struct qeth_qdio_buffer *buf)
2933  {
2934  	struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
2935  	int i;
2936  
2937  	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
2938  		buf->rx_skb = netdev_alloc_skb(card->dev,
2939  					       ETH_HLEN +
2940  					       sizeof(struct ipv6hdr));
2941  		if (!buf->rx_skb)
2942  			return -ENOMEM;
2943  	}
2944  
2945  	if (!pool_entry) {
2946  		pool_entry = qeth_find_free_buffer_pool_entry(card);
2947  		if (!pool_entry)
2948  			return -ENOBUFS;
2949  
2950  		buf->pool_entry = pool_entry;
2951  	}
2952  
2953  	/*
2954  	 * since the buffer is accessed only from the input_tasklet
2955  	 * there shouldn't be a need to synchronize; also, since we use
2956  	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
2957  	 * buffers
2958  	 */
2959  	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2960  		buf->buffer->element[i].length = PAGE_SIZE;
2961  		buf->buffer->element[i].addr =
2962  			page_to_phys(pool_entry->elements[i]);
2963  		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2964  			buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
2965  		else
2966  			buf->buffer->element[i].eflags = 0;
2967  		buf->buffer->element[i].sflags = 0;
2968  	}
2969  	return 0;
2970  }
2971  
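/* TX bulking: a HiperSockets unicast queue without completion queue may
 * submit up to the mmwc limit from the QDIO SSQD in one go; everything
 * else submits one buffer at a time.
 */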
2972  static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
2973  					    struct qeth_qdio_out_q *queue)
2974  {
2975  	if (!IS_IQD(card) ||
2976  	    qeth_iqd_is_mcast_queue(card, queue) ||
2977  	    card->options.cq == QETH_CQ_ENABLED ||
2978  	    qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
2979  		return 1;
2980  
2981  	return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
2982  }
2983  
2984  static int qeth_init_qdio_queues(struct qeth_card *card)
2985  {
2986  	unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count;
2987  	unsigned int i;
2988  	int rc;
2989  
2990  	QETH_CARD_TEXT(card, 2, "initqdqs");
2991  
2992  	/* inbound queue */
2993  	qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2994  	memset(&card->rx, 0, sizeof(struct qeth_rx));
2995  
2996  	qeth_initialize_working_pool_list(card);
2997  	/* give only as many buffers to hardware as we have buffer pool entries */
2998  	for (i = 0; i < rx_bufs; i++) {
2999  		rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
3000  		if (rc)
3001  			return rc;
3002  	}
3003  
3004  	card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
3005  	rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 0, 0, rx_bufs);
3006  	if (rc) {
3007  		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
3008  		return rc;
3009  	}
3010  
3011  	/* completion */
3012  	rc = qeth_cq_init(card);
3013  	if (rc)
3014  		return rc;
3016  
3017  	/* outbound queue */
3018  	for (i = 0; i < card->qdio.no_out_queues; ++i) {
3019  		struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];
3020  
3021  		qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
3022  		queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
3023  		queue->next_buf_to_fill = 0;
3024  		queue->do_pack = 0;
3025  		queue->prev_hdr = NULL;
3026  		queue->coalesced_frames = 0;
3027  		queue->bulk_start = 0;
3028  		queue->bulk_count = 0;
3029  		queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
3030  		atomic_set(&queue->used_buffers, 0);
3031  		atomic_set(&queue->set_pci_flags_count, 0);
3032  		netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
3033  	}
3034  	return 0;
3035  }
3036  
3037  static void qeth_ipa_finalize_cmd(struct qeth_card *card,
3038  				  struct qeth_cmd_buffer *iob)
3039  {
3040  	qeth_mpc_finalize_cmd(card, iob);
3041  
3042  	/* override with IPA-specific values: */
3043  	__ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
3044  }
3045  
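/* Wrap an IPA command into the MPC transport PDU: copy in the static PDU
 * header template, then patch total length, protocol type, the three PDU
 * sub-lengths and the ULP connection token.
 */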
3046  static void qeth_prepare_ipa_cmd(struct qeth_card *card,
3047  				 struct qeth_cmd_buffer *iob, u16 cmd_length)
3048  {
3049  	u8 prot_type = qeth_mpc_select_prot_type(card);
3050  	u16 total_length = iob->length;
3051  
3052  	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
3053  		       iob->data);
3054  	iob->finalize = qeth_ipa_finalize_cmd;
3055  
3056  	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
3057  	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
3058  	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
3059  	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
3060  	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
3061  	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
3062  	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
3063  	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
3064  }
3065  
3066  static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
3067  				 struct qeth_cmd_buffer *reply)
3068  {
3069  	struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);
3070  
3071  	return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
3072  }
3073  
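/* Allocate and pre-format an IPA command. A typical caller looks like
 * this (sketch only; my_cb stands for any reply callback, cf. the users
 * further down):
 *
 *	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
 *	if (!iob)
 *		return -ENOMEM;
 *	return qeth_send_ipa_cmd(card, iob, my_cb, NULL);
 */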
3074  struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
3075  					   enum qeth_ipa_cmds cmd_code,
3076  					   enum qeth_prot_versions prot,
3077  					   unsigned int data_length)
3078  {
3079  	struct qeth_cmd_buffer *iob;
3080  	struct qeth_ipacmd_hdr *hdr;
3081  
3082  	data_length += offsetof(struct qeth_ipa_cmd, data);
3083  	iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
3084  			     QETH_IPA_TIMEOUT);
3085  	if (!iob)
3086  		return NULL;
3087  
3088  	qeth_prepare_ipa_cmd(card, iob, data_length);
3089  	iob->match = qeth_ipa_match_reply;
3090  
3091  	hdr = &__ipa_cmd(iob)->hdr;
3092  	hdr->command = cmd_code;
3093  	hdr->initiator = IPA_CMD_INITIATOR_HOST;
3094  	/* hdr->seqno is set by qeth_send_control_data() */
3095  	hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH;
3096  	hdr->rel_adapter_no = (u8) card->dev->dev_port;
3097  	hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
3098  	hdr->param_count = 1;
3099  	hdr->prot_version = prot;
3100  	return iob;
3101  }
3102  EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);
3103  
3104  static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
3105  				struct qeth_reply *reply, unsigned long data)
3106  {
3107  	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3108  
3109  	return (cmd->hdr.return_code) ? -EIO : 0;
3110  }
3111  
3112  /*
3113   * qeth_send_ipa_cmd() - send an IPA command
3114   *
3115   * See qeth_send_control_data() for explanation of the arguments.
3116   */
3117  
3118  int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
3119  		int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
3120  			unsigned long),
3121  		void *reply_param)
3122  {
3123  	int rc;
3124  
3125  	QETH_CARD_TEXT(card, 4, "sendipa");
3126  
3127  	if (card->read_or_write_problem) {
3128  		qeth_put_cmd(iob);
3129  		return -EIO;
3130  	}
3131  
3132  	if (reply_cb == NULL)
3133  		reply_cb = qeth_send_ipa_cmd_cb;
3134  	rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
3135  	if (rc == -ETIME) {
3136  		qeth_clear_ipacmd_list(card);
3137  		qeth_schedule_recovery(card);
3138  	}
3139  	return rc;
3140  }
3141  EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
3142  
3143  static int qeth_send_startlan_cb(struct qeth_card *card,
3144  				 struct qeth_reply *reply, unsigned long data)
3145  {
3146  	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3147  
3148  	if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
3149  		return -ENETDOWN;
3150  
3151  	return (cmd->hdr.return_code) ? -EIO : 0;
3152  }
3153  
3154  static int qeth_send_startlan(struct qeth_card *card)
3155  {
3156  	struct qeth_cmd_buffer *iob;
3157  
3158  	QETH_CARD_TEXT(card, 2, "strtlan");
3159  
3160  	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
3161  	if (!iob)
3162  		return -ENOMEM;
3163  	return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
3164  }
3165  
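/* SETADAPTERPARMS replies carry a second return code in their sub-command
 * header; fold it into the main IPA return code so that callers only have
 * to check one value.
 */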
3166  static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
3167  {
3168  	if (!cmd->hdr.return_code)
3169  		cmd->hdr.return_code =
3170  			cmd->data.setadapterparms.hdr.return_code;
3171  	return cmd->hdr.return_code;
3172  }
3173  
3174  static int qeth_query_setadapterparms_cb(struct qeth_card *card,
3175  		struct qeth_reply *reply, unsigned long data)
3176  {
3177  	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3178  	struct qeth_query_cmds_supp *query_cmd;
3179  
3180  	QETH_CARD_TEXT(card, 3, "quyadpcb");
3181  	if (qeth_setadpparms_inspect_rc(cmd))
3182  		return -EIO;
3183  
3184  	query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp;
3185  	if (query_cmd->lan_type & 0x7f) {
3186  		if (!qeth_is_supported_link_type(card, query_cmd->lan_type))
3187  			return -EPROTONOSUPPORT;
3188  
3189  		card->info.link_type = query_cmd->lan_type;
3190  		QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
3191  	}
3192  
3193  	card->options.adp.supported = query_cmd->supported_cmds;
3194  	return 0;
3195  }
3196  
3197  static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
3198  						    enum qeth_ipa_setadp_cmd adp_cmd,
3199  						    unsigned int data_length)
3200  {
3201  	struct qeth_ipacmd_setadpparms_hdr *hdr;
3202  	struct qeth_cmd_buffer *iob;
3203  
3204  	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
3205  				 data_length +
3206  				 offsetof(struct qeth_ipacmd_setadpparms,
3207  					  data));
3208  	if (!iob)
3209  		return NULL;
3210  
3211  	hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
3212  	hdr->cmdlength = sizeof(*hdr) + data_length;
3213  	hdr->command_code = adp_cmd;
3214  	hdr->used_total = 1;
3215  	hdr->seq_no = 1;
3216  	return iob;
3217  }
3218  
3219  static int qeth_query_setadapterparms(struct qeth_card *card)
3220  {
3221  	int rc;
3222  	struct qeth_cmd_buffer *iob;
3223  
3224  	QETH_CARD_TEXT(card, 3, "queryadp");
3225  	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
3226  				   SETADP_DATA_SIZEOF(query_cmds_supp));
3227  	if (!iob)
3228  		return -ENOMEM;
3229  	rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
3230  	return rc;
3231  }
3232  
3233  static int qeth_query_ipassists_cb(struct qeth_card *card,
3234  		struct qeth_reply *reply, unsigned long data)
3235  {
3236  	struct qeth_ipa_cmd *cmd;
3237  
3238  	QETH_CARD_TEXT(card, 2, "qipasscb");
3239  
3240  	cmd = (struct qeth_ipa_cmd *) data;
3241  
3242  	switch (cmd->hdr.return_code) {
3243  	case IPA_RC_SUCCESS:
3244  		break;
3245  	case IPA_RC_NOTSUPP:
3246  	case IPA_RC_L2_UNSUPPORTED_CMD:
3247  		QETH_CARD_TEXT(card, 2, "ipaunsup");
3248  		card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
3249  		card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
3250  		return -EOPNOTSUPP;
3251  	default:
3252  		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
3253  				 CARD_DEVID(card), cmd->hdr.return_code);
3254  		return -EIO;
3255  	}
3256  
3257  	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
3258  		card->options.ipa4 = cmd->hdr.assists;
3259  	else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
3260  		card->options.ipa6 = cmd->hdr.assists;
3261  	else
3262  		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
3263  				 CARD_DEVID(card));
3264  	return 0;
3265  }
3266  
3267  static int qeth_query_ipassists(struct qeth_card *card,
3268  				enum qeth_prot_versions prot)
3269  {
3270  	int rc;
3271  	struct qeth_cmd_buffer *iob;
3272  
3273  	QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
3274  	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
3275  	if (!iob)
3276  		return -ENOMEM;
3277  	rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
3278  	return rc;
3279  }
3280  
3281  static int qeth_query_switch_attributes_cb(struct qeth_card *card,
3282  				struct qeth_reply *reply, unsigned long data)
3283  {
3284  	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3285  	struct qeth_query_switch_attributes *attrs;
3286  	struct qeth_switch_info *sw_info;
3287  
3288  	QETH_CARD_TEXT(card, 2, "qswiatcb");
3289  	if (qeth_setadpparms_inspect_rc(cmd))
3290  		return -EIO;
3291  
3292  	sw_info = (struct qeth_switch_info *)reply->param;
3293  	attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
3294  	sw_info->capabilities = attrs->capabilities;
3295  	sw_info->settings = attrs->settings;
3296  	QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
3297  			sw_info->settings);
3298  	return 0;
3299  }
3300  
3301  int qeth_query_switch_attributes(struct qeth_card *card,
3302  				 struct qeth_switch_info *sw_info)
3303  {
3304  	struct qeth_cmd_buffer *iob;
3305  
3306  	QETH_CARD_TEXT(card, 2, "qswiattr");
3307  	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
3308  		return -EOPNOTSUPP;
3309  	if (!netif_carrier_ok(card->dev))
3310  		return -ENOMEDIUM;
3311  	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
3312  	if (!iob)
3313  		return -ENOMEM;
3314  	return qeth_send_ipa_cmd(card, iob,
3315  				qeth_query_switch_attributes_cb, sw_info);
3316  }
3317  
3318  struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
3319  					  enum qeth_diags_cmds sub_cmd,
3320  					  unsigned int data_length)
3321  {
3322  	struct qeth_ipacmd_diagass *cmd;
3323  	struct qeth_cmd_buffer *iob;
3324  
3325  	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
3326  				 DIAG_HDR_LEN + data_length);
3327  	if (!iob)
3328  		return NULL;
3329  
3330  	cmd = &__ipa_cmd(iob)->data.diagass;
3331  	cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
3332  	cmd->subcmd = sub_cmd;
3333  	return iob;
3334  }
3335  EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);
3336  
3337  static int qeth_query_setdiagass_cb(struct qeth_card *card,
3338  		struct qeth_reply *reply, unsigned long data)
3339  {
3340  	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3341  	u16 rc = cmd->hdr.return_code;
3342  
3343  	if (rc) {
3344  		QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
3345  		return -EIO;
3346  	}
3347  
3348  	card->info.diagass_support = cmd->data.diagass.ext;
3349  	return 0;
3350  }
3351  
3352  static int qeth_query_setdiagass(struct qeth_card *card)
3353  {
3354  	struct qeth_cmd_buffer *iob;
3355  
3356  	QETH_CARD_TEXT(card, 2, "qdiagass");
3357  	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
3358  	if (!iob)
3359  		return -ENOMEM;
3360  	return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
3361  }
3362  
3363  static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
3364  {
3365  	unsigned long info = get_zeroed_page(GFP_KERNEL);
3366  	struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
3367  	struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
3368  	struct ccw_dev_id ccwid;
3369  	int level;
3370  
3371  	tid->chpid = card->info.chpid;
3372  	ccw_device_get_id(CARD_RDEV(card), &ccwid);
3373  	tid->ssid = ccwid.ssid;
3374  	tid->devno = ccwid.devno;
3375  	if (!info)
3376  		return;
3377  	level = stsi(NULL, 0, 0, 0);
3378  	if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
3379  		tid->lparnr = info222->lpar_number;
3380  	if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
3381  		EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
3382  		memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
3383  	}
3384  	free_page(info);
3385  }
3386  
3387  static int qeth_hw_trap_cb(struct qeth_card *card,
3388  		struct qeth_reply *reply, unsigned long data)
3389  {
3390  	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3391  	u16 rc = cmd->hdr.return_code;
3392  
3393  	if (rc) {
3394  		QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
3395  		return -EIO;
3396  	}
3397  	return 0;
3398  }
3399  
3400  int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
3401  {
3402  	struct qeth_cmd_buffer *iob;
3403  	struct qeth_ipa_cmd *cmd;
3404  
3405  	QETH_CARD_TEXT(card, 2, "diagtrap");
3406  	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
3407  	if (!iob)
3408  		return -ENOMEM;
3409  	cmd = __ipa_cmd(iob);
3410  	cmd->data.diagass.type = 1;
3411  	cmd->data.diagass.action = action;
3412  	switch (action) {
3413  	case QETH_DIAGS_TRAP_ARM:
3414  		cmd->data.diagass.options = 0x0003;
3415  		cmd->data.diagass.ext = 0x00010000 +
3416  			sizeof(struct qeth_trap_id);
3417  		qeth_get_trap_id(card,
3418  			(struct qeth_trap_id *)cmd->data.diagass.cdata);
3419  		break;
3420  	case QETH_DIAGS_TRAP_DISARM:
3421  		cmd->data.diagass.options = 0x0001;
3422  		break;
3423  	case QETH_DIAGS_TRAP_CAPTURE:
3424  		break;
3425  	}
3426  	return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
3427  }
3428  
3429  static int qeth_check_qdio_errors(struct qeth_card *card,
3430  				  struct qdio_buffer *buf,
3431  				  unsigned int qdio_error,
3432  				  const char *dbftext)
3433  {
3434  	if (qdio_error) {
3435  		QETH_CARD_TEXT(card, 2, dbftext);
3436  		QETH_CARD_TEXT_(card, 2, " F15=%02X",
3437  			       buf->element[15].sflags);
3438  		QETH_CARD_TEXT_(card, 2, " F14=%02X",
3439  			       buf->element[14].sflags);
3440  		QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
3441  		if ((buf->element[15].sflags) == 0x12) {
3442  			QETH_CARD_STAT_INC(card, rx_fifo_errors);
3443  			return 0;
3444  		} else
3445  			return 1;
3446  	}
3447  	return 0;
3448  }
3449  
3450  static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
3451  					 unsigned int count)
3452  {
3453  	struct qeth_qdio_q *queue = card->qdio.in_q;
3454  	struct list_head *lh;
3455  	int i;
3456  	int rc;
3457  	int newcount = 0;
3458  
3459  	/* only requeue at a certain threshold to avoid SIGAs */
3460  	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
3461  		for (i = queue->next_buf_to_init;
3462  		     i < queue->next_buf_to_init + count; ++i) {
3463  			if (qeth_init_input_buffer(card,
3464  				&queue->bufs[QDIO_BUFNR(i)])) {
3465  				break;
3466  			} else {
3467  				newcount++;
3468  			}
3469  		}
3470  
3471  		if (newcount < count) {
3472  			/* we are in a memory shortage, so we switch back to
3473  			   traditional skb allocation and drop packets */
3474  			atomic_set(&card->force_alloc_skb, 3);
3475  			count = newcount;
3476  		} else {
3477  			atomic_add_unless(&card->force_alloc_skb, -1, 0);
3478  		}
3479  
3480  		if (!count) {
3481  			i = 0;
3482  			list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
3483  				i++;
3484  			if (i == card->qdio.in_buf_pool.buf_count) {
3485  				QETH_CARD_TEXT(card, 2, "qsarbw");
3486  				schedule_delayed_work(
3487  					&card->buffer_reclaim_work,
3488  					QETH_RECLAIM_WORK_TIME);
3489  			}
3490  			return 0;
3491  		}
3492  
3493  		rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 0,
3494  						  queue->next_buf_to_init,
3495  						  count);
3496  		if (rc) {
3497  			QETH_CARD_TEXT(card, 2, "qinberr");
3498  		}
3499  		queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
3500  						     count);
3501  		return count;
3502  	}
3503  
3504  	return 0;
3505  }
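
/*
 * Editorial sketch (not part of the original source): the refill
 * arithmetic above, assuming the usual 128-entry QDIO ring
 * (QDIO_MAX_BUFFERS_PER_Q). With next_buf_to_init == 120 and
 * count == 16, the loop visits indices 120..135, which QDIO_BUFNR()
 * wraps to 120..127 and 0..7. All buffers are then handed to the
 * device in one qdio_add_bufs_to_input_queue() call, so waiting for
 * QETH_IN_BUF_REQUEUE_THRESHOLD batches several buffers into a
 * single SIGA.
 */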
3506  
3507  static void qeth_buffer_reclaim_work(struct work_struct *work)
3508  {
3509  	struct qeth_card *card = container_of(to_delayed_work(work),
3510  					      struct qeth_card,
3511  					      buffer_reclaim_work);
3512  
3513  	local_bh_disable();
3514  	napi_schedule(&card->napi);
3515  	/* kick-start the NAPI softirq: */
3516  	local_bh_enable();
3517  }
3518  
3519  static void qeth_handle_send_error(struct qeth_card *card,
3520  		struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
3521  {
3522  	int sbalf15 = buffer->buffer->element[15].sflags;
3523  
3524  	QETH_CARD_TEXT(card, 6, "hdsnderr");
3525  	qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
3526  
3527  	if (!qdio_err)
3528  		return;
3529  
3530  	if ((sbalf15 >= 15) && (sbalf15 <= 31))
3531  		return;
3532  
3533  	QETH_CARD_TEXT(card, 1, "lnkfail");
3534  	QETH_CARD_TEXT_(card, 1, "%04x %02x",
3535  		       (u16)qdio_err, (u8)sbalf15);
3536  }
3537  
3538  /**
3539   * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
3540   * @queue: queue to check for packing buffer
3541   *
3542   * Returns the number of buffers that were prepared for flushing.
3543   */
3544  static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
3545  {
3546  	struct qeth_qdio_out_buffer *buffer;
3547  
3548  	buffer = queue->bufs[queue->next_buf_to_fill];
3549  	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
3550  	    (buffer->next_element_to_fill > 0)) {
3551  		/* it's a packing buffer */
3552  		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3553  		queue->next_buf_to_fill =
3554  			QDIO_BUFNR(queue->next_buf_to_fill + 1);
3555  		return 1;
3556  	}
3557  	return 0;
3558  }
3559  
3560  /*
3561   * Switches to packing state if the number of used buffers on a queue
3562   * reaches a certain limit.
3563   */
3564  static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
3565  {
3566  	if (!queue->do_pack) {
3567  		if (atomic_read(&queue->used_buffers)
3568  		    >= QETH_HIGH_WATERMARK_PACK){
3569  		    >= QETH_HIGH_WATERMARK_PACK) {
3570  			QETH_CARD_TEXT(queue->card, 6, "np->pack");
3571  			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3572  			queue->do_pack = 1;
3573  		}
3574  	}
3575  }
3576  
3577  /*
3578   * Switches from packing to non-packing mode. If there is a packing
3579   * buffer on the queue, this buffer will be prepared to be flushed.
3580   * In that case 1 is returned to inform the caller. If no buffer
3581   * has to be flushed, zero is returned.
3582   */
3583  static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
3584  {
3585  	if (queue->do_pack) {
3586  		if (atomic_read(&queue->used_buffers)
3587  		    <= QETH_LOW_WATERMARK_PACK) {
3588  			/* switch PACKING -> non-PACKING */
3589  			QETH_CARD_TEXT(queue->card, 6, "pack->np");
3590  			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3591  			queue->do_pack = 0;
3592  			return qeth_prep_flush_pack_buffer(queue);
3593  		}
3594  	}
3595  	return 0;
3596  }
3597  
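
/*
 * Editorial note: the two helpers above form a hysteresis. Packing
 * mode is entered once QETH_HIGH_WATERMARK_PACK buffers are in use
 * and only left again at QETH_LOW_WATERMARK_PACK. Assuming the high
 * watermark exceeds the low one, a queue whose fill level hovers
 * around a single threshold cannot flap between the two modes on
 * every packet.
 */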
3598  static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
3599  			       int count)
3600  {
3601  	struct qeth_qdio_out_buffer *buf = queue->bufs[index];
3602  	struct qeth_card *card = queue->card;
3603  	unsigned int frames, usecs;
3604  	struct qaob *aob = NULL;
3605  	int rc;
3606  	int i;
3607  
3608  	for (i = index; i < index + count; ++i) {
3609  		unsigned int bidx = QDIO_BUFNR(i);
3610  		struct sk_buff *skb;
3611  
3612  		buf = queue->bufs[bidx];
3613  		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
3614  				SBAL_EFLAGS_LAST_ENTRY;
3615  		queue->coalesced_frames += buf->frames;
3616  
3617  		if (IS_IQD(card)) {
3618  			skb_queue_walk(&buf->skb_list, skb)
3619  				skb_tx_timestamp(skb);
3620  		}
3621  	}
3622  
3623  	if (IS_IQD(card)) {
3624  		if (card->options.cq == QETH_CQ_ENABLED &&
3625  		    !qeth_iqd_is_mcast_queue(card, queue) &&
3626  		    count == 1) {
3627  			if (!buf->aob)
3628  				buf->aob = kmem_cache_zalloc(qeth_qaob_cache,
3629  							     GFP_ATOMIC);
3630  			if (buf->aob) {
3631  				struct qeth_qaob_priv1 *priv;
3632  
3633  				aob = buf->aob;
3634  				priv = (struct qeth_qaob_priv1 *)&aob->user1;
3635  				priv->state = QETH_QAOB_ISSUED;
3636  				priv->queue_no = queue->queue_no;
3637  			}
3638  		}
3639  	} else {
3640  		if (!queue->do_pack) {
3641  			if ((atomic_read(&queue->used_buffers) >=
3642  				(QETH_HIGH_WATERMARK_PACK -
3643  				 QETH_WATERMARK_PACK_FUZZ)) &&
3644  			    !atomic_read(&queue->set_pci_flags_count)) {
3645  				/* it's likely that we'll go to packing
3646  				 * mode soon */
3647  				atomic_inc(&queue->set_pci_flags_count);
3648  				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3649  			}
3650  		} else {
3651  			if (!atomic_read(&queue->set_pci_flags_count)) {
3652  				/*
3653  			 * there's no outstanding PCI any more, so we
3654  			 * have to request a PCI to be sure that the PCI
3655  			 * will wake us at some time in the future. Then
3656  			 * we can flush packed buffers that might still
3657  			 * be hanging around, which can happen if no
3658  			 * further send was requested by the stack
3659  				 */
3660  				atomic_inc(&queue->set_pci_flags_count);
3661  				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3662  			}
3663  		}
3664  	}
3665  
3666  	QETH_TXQ_STAT_INC(queue, doorbell);
3667  	rc = qdio_add_bufs_to_output_queue(CARD_DDEV(card), queue->queue_no,
3668  					   index, count, aob);
3669  
3670  	switch (rc) {
3671  	case 0:
3672  	case -ENOBUFS:
3673  		/* ignore temporary SIGA errors without busy condition */
3674  
3675  		/* Fake the TX completion interrupt: */
3676  		frames = READ_ONCE(queue->max_coalesced_frames);
3677  		usecs = READ_ONCE(queue->coalesce_usecs);
3678  
3679  		if (frames && queue->coalesced_frames >= frames) {
3680  			napi_schedule(&queue->napi);
3681  			queue->coalesced_frames = 0;
3682  			QETH_TXQ_STAT_INC(queue, coal_frames);
3683  		} else if (qeth_use_tx_irqs(card) &&
3684  			   atomic_read(&queue->used_buffers) >= 32) {
3685  			/* Old behaviour carried over from the qdio layer: */
3686  			napi_schedule(&queue->napi);
3687  			QETH_TXQ_STAT_INC(queue, coal_frames);
3688  		} else if (usecs) {
3689  			qeth_tx_arm_timer(queue, usecs);
3690  		}
3691  
3692  		break;
3693  	default:
3694  		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
3695  		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
3696  		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
3697  		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
3698  		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
3699  
3700  		/* this must not happen under normal circumstances. if it
3701  		 * happens, something is really wrong -> recover */
3702  		qeth_schedule_recovery(queue->card);
3703  	}
3704  }
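
/*
 * Editorial note: on a successful doorbell above, TX completion is
 * driven from this path rather than from a per-buffer interrupt. The
 * else-if chain checks, in order: a frame-count threshold
 * (max_coalesced_frames), the legacy "32 used buffers" rule when TX
 * IRQs are enabled, and finally a coalescing timer of coalesce_usecs;
 * only the first matching trigger takes effect.
 */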
3705  
3706  static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
3707  {
3708  	qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
3709  
3710  	queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
3711  	queue->prev_hdr = NULL;
3712  	queue->bulk_count = 0;
3713  }
3714  
3715  static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
3716  {
3717  	/*
3718  	 * check if we have to switch to non-packing mode or if
3719  	 * we have to get a pci flag out on the queue
3720  	 */
3721  	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
3722  	    !atomic_read(&queue->set_pci_flags_count)) {
3723  		unsigned int index, flush_cnt;
3724  
3725  		spin_lock(&queue->lock);
3726  
3727  		index = queue->next_buf_to_fill;
3728  
3729  		flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
3730  		if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
3731  			flush_cnt = qeth_prep_flush_pack_buffer(queue);
3732  
3733  		if (flush_cnt) {
3734  			qeth_flush_buffers(queue, index, flush_cnt);
3735  			QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
3736  		}
3737  
3738  		spin_unlock(&queue->lock);
3739  	}
3740  }
3741  
3742  static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
3743  {
3744  	struct qeth_card *card = (struct qeth_card *)card_ptr;
3745  
3746  	napi_schedule_irqoff(&card->napi);
3747  }
3748  
3749  int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
3750  {
3751  	if (card->options.cq == QETH_CQ_NOTAVAILABLE)
3752  		return -1;
3753  
3754  	card->options.cq = cq;
3755  	return 0;
3756  }
3757  EXPORT_SYMBOL_GPL(qeth_configure_cq);
3758  
3759  static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob)
3760  {
3761  	struct qeth_qaob_priv1 *priv = (struct qeth_qaob_priv1 *)&aob->user1;
3762  	unsigned int queue_no = priv->queue_no;
3763  
3764  	BUILD_BUG_ON(sizeof(*priv) > ARRAY_SIZE(aob->user1));
3765  
3766  	if (xchg(&priv->state, QETH_QAOB_DONE) == QETH_QAOB_PENDING &&
3767  	    queue_no < card->qdio.no_out_queues)
3768  		napi_schedule(&card->qdio.out_qs[queue_no]->napi);
3769  }
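
/*
 * Editorial note (an assumption about the wider driver, hedged): the
 * xchg() above appears to close a race between the TX path and this
 * CQ completion. If the old state is still QETH_QAOB_ISSUED, the TX
 * path has not parked the buffer yet and will later observe
 * QETH_QAOB_DONE itself; only a buffer already parked as
 * QETH_QAOB_PENDING needs the NAPI kick to get cleaned up.
 */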
3770  
3771  static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
3772  				 unsigned int queue, int first_element,
3773  				 int count)
3774  {
3775  	struct qeth_qdio_q *cq = card->qdio.c_q;
3776  	int i;
3777  	int rc;
3778  
3779  	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
3780  	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
3781  	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
3782  
3783  	if (qdio_err) {
3784  		netif_tx_stop_all_queues(card->dev);
3785  		qeth_schedule_recovery(card);
3786  		return;
3787  	}
3788  
3789  	for (i = first_element; i < first_element + count; ++i) {
3790  		struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
3791  		int e = 0;
3792  
3793  		while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
3794  		       buffer->element[e].addr) {
3795  			unsigned long phys_aob_addr = buffer->element[e].addr;
3796  
3797  			qeth_qdio_handle_aob(card, phys_to_virt(phys_aob_addr));
3798  			++e;
3799  		}
3800  		qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
3801  	}
3802  	rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), queue,
3803  					  cq->next_buf_to_init, count);
3804  	if (rc) {
3805  		dev_warn(&card->gdev->dev,
3806  			"QDIO reported an error, rc=%i\n", rc);
3807  		QETH_CARD_TEXT(card, 2, "qcqherr");
3808  	}
3809  
3810  	cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
3811  }
3812  
3813  static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
3814  				    unsigned int qdio_err, int queue,
3815  				    int first_elem, int count,
3816  				    unsigned long card_ptr)
3817  {
3818  	struct qeth_card *card = (struct qeth_card *)card_ptr;
3819  
3820  	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
3821  	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
3822  
3823  	if (qdio_err)
3824  		qeth_schedule_recovery(card);
3825  }
3826  
3827  static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3828  				     unsigned int qdio_error, int __queue,
3829  				     int first_element, int count,
3830  				     unsigned long card_ptr)
3831  {
3832  	struct qeth_card *card        = (struct qeth_card *) card_ptr;
3833  
3834  	QETH_CARD_TEXT(card, 2, "achkcond");
3835  	netif_tx_stop_all_queues(card->dev);
3836  	qeth_schedule_recovery(card);
3837  }
3838  
3839  /*
3840   * Note: Function assumes that we have 4 outbound queues.
3841   */
3842  static int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
3843  {
3844  	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
3845  	u8 tos;
3846  
3847  	switch (card->qdio.do_prio_queueing) {
3848  	case QETH_PRIO_Q_ING_TOS:
3849  	case QETH_PRIO_Q_ING_PREC:
3850  		switch (vlan_get_protocol(skb)) {
3851  		case htons(ETH_P_IP):
3852  			tos = ipv4_get_dsfield(ip_hdr(skb));
3853  			break;
3854  		case htons(ETH_P_IPV6):
3855  			tos = ipv6_get_dsfield(ipv6_hdr(skb));
3856  			break;
3857  		default:
3858  			return card->qdio.default_out_queue;
3859  		}
3860  		if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
3861  			return ~tos >> 6 & 3;
3862  		if (tos & IPTOS_MINCOST)
3863  			return 3;
3864  		if (tos & IPTOS_RELIABILITY)
3865  			return 2;
3866  		if (tos & IPTOS_THROUGHPUT)
3867  			return 1;
3868  		if (tos & IPTOS_LOWDELAY)
3869  			return 0;
3870  		break;
3871  	case QETH_PRIO_Q_ING_SKB:
3872  		if (skb->priority > 5)
3873  			return 0;
3874  		return ~skb->priority >> 1 & 3;
3875  	case QETH_PRIO_Q_ING_VLAN:
3876  		if (veth->h_vlan_proto == htons(ETH_P_8021Q))
3877  			return ~ntohs(veth->h_vlan_TCI) >>
3878  			       (VLAN_PRIO_SHIFT + 1) & 3;
3879  		break;
3880  	case QETH_PRIO_Q_ING_FIXED:
3881  		return card->qdio.default_out_queue;
3882  	default:
3883  		break;
3884  	}
3885  	return card->qdio.default_out_queue;
3886  }
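
/*
 * Editorial sketch (worked example, not part of the original source):
 * for QETH_PRIO_Q_ING_PREC, "~tos >> 6 & 3" inverts the two top-most
 * IP-precedence bits, so higher precedence selects a lower (more
 * preferred) queue number:
 *
 *	tos 0xE0 (precedence 7) -> queue 0
 *	tos 0xB8 (precedence 5) -> queue 1
 *	tos 0x40 (precedence 2) -> queue 2
 *	tos 0x00 (precedence 0) -> queue 3
 */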
3887  
3888  /**
3889   * qeth_get_elements_for_frags() -	find number of SBALEs for skb frags.
3890   * @skb:				SKB address
3891   *
3892   * Returns the number of pages, and thus QDIO buffer elements, needed to cover
3893   * the fragmented part of the SKB. Returns zero for a linear SKB.
3894   */
3895  static int qeth_get_elements_for_frags(struct sk_buff *skb)
3896  {
3897  	int cnt, elements = 0;
3898  
3899  	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3900  		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3901  
3902  		elements += qeth_get_elements_for_range(
3903  			(addr_t)skb_frag_address(frag),
3904  			(addr_t)skb_frag_address(frag) + skb_frag_size(frag));
3905  	}
3906  	return elements;
3907  }
3908  
3909  /**
3910   * qeth_count_elements() -	Counts the number of QDIO buffer elements needed
3911   *				to transmit an skb.
3912   * @skb:			the skb to operate on.
3913   * @data_offset:		skip this part of the skb's linear data
3914   *
3915   * Returns the number of pages, and thus QDIO buffer elements, needed to map the
3916   * skb's data (both its linear part and paged fragments).
3917   */
3918  static unsigned int qeth_count_elements(struct sk_buff *skb,
3919  					unsigned int data_offset)
3920  {
3921  	unsigned int elements = qeth_get_elements_for_frags(skb);
3922  	addr_t end = (addr_t)skb->data + skb_headlen(skb);
3923  	addr_t start = (addr_t)skb->data + data_offset;
3924  
3925  	if (start != end)
3926  		elements += qeth_get_elements_for_range(start, end);
3927  	return elements;
3928  }
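
/*
 * Editorial sketch (worked example, not part of the original source),
 * assuming PAGE_SIZE == 4096: 2000 bytes of linear data that start
 * 100 bytes before a page boundary touch two pages, so
 * qeth_get_elements_for_range() yields 2 and the skb needs two buffer
 * elements; the same 2000 bytes contained in a single page need only
 * one element.
 */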
3929  
3930  #define QETH_HDR_CACHE_OBJ_SIZE		(sizeof(struct qeth_hdr_tso) + \
3931  					 MAX_TCP_HEADER)
3932  
3933  /**
3934   * qeth_add_hw_header() - add a HW header to an skb.
3935   * @queue: TX queue that the skb will be placed on.
3936   * @skb: skb that the HW header should be added to.
3937   * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
3938   *	 it contains a valid pointer to a qeth_hdr.
3939   * @hdr_len: length of the HW header.
3940   * @proto_len: length of protocol headers that need to be in same page as the
3941   *	       HW header.
3942   * @elements: returns the required number of buffer elements for this skb.
3943   *
3944   * Returns the pushed length. If the header can't be pushed onto the skb
3945   * (e.g. because it would cross a page boundary), it is allocated from
3946   * the cache instead and 0 is returned.
3947   * The number of needed buffer elements is returned in @elements.
3948   * An error while creating the hdr is indicated by a return value < 0.
3949   */
3950  static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
3951  			      struct sk_buff *skb, struct qeth_hdr **hdr,
3952  			      unsigned int hdr_len, unsigned int proto_len,
3953  			      unsigned int *elements)
3954  {
3955  	gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
3956  	const unsigned int contiguous = proto_len ? proto_len : 1;
3957  	const unsigned int max_elements = queue->max_elements;
3958  	unsigned int __elements;
3959  	addr_t start, end;
3960  	bool push_ok;
3961  	int rc;
3962  
3963  check_layout:
3964  	start = (addr_t)skb->data - hdr_len;
3965  	end = (addr_t)skb->data;
3966  
3967  	if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
3968  		/* Push HW header into same page as first protocol header. */
3969  		push_ok = true;
3970  		/* ... but TSO always needs a separate element for headers: */
3971  		if (skb_is_gso(skb))
3972  			__elements = 1 + qeth_count_elements(skb, proto_len);
3973  		else
3974  			__elements = qeth_count_elements(skb, 0);
3975  	} else if (!proto_len && PAGE_ALIGNED(skb->data)) {
3976  		/* Push HW header into preceding page, flush with skb->data. */
3977  		push_ok = true;
3978  		__elements = 1 + qeth_count_elements(skb, 0);
3979  	} else {
3980  		/* Use header cache, copy protocol headers up. */
3981  		push_ok = false;
3982  		__elements = 1 + qeth_count_elements(skb, proto_len);
3983  	}
3984  
3985  	/* Compress skb to fit into one IO buffer: */
3986  	if (__elements > max_elements) {
3987  		if (!skb_is_nonlinear(skb)) {
3988  			/* Drop it, no easy way of shrinking it further. */
3989  			QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
3990  					 max_elements, __elements, skb->len);
3991  			return -E2BIG;
3992  		}
3993  
3994  		rc = skb_linearize(skb);
3995  		if (rc) {
3996  			QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
3997  			return rc;
3998  		}
3999  
4000  		QETH_TXQ_STAT_INC(queue, skbs_linearized);
4001  		/* Linearization changed the layout, re-evaluate: */
4002  		goto check_layout;
4003  	}
4004  
4005  	*elements = __elements;
4006  	/* Add the header: */
4007  	if (push_ok) {
4008  		*hdr = skb_push(skb, hdr_len);
4009  		return hdr_len;
4010  	}
4011  
4012  	/* Fall back to cache element with known-good alignment: */
4013  	if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
4014  		return -E2BIG;
4015  	*hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
4016  	if (!*hdr)
4017  		return -ENOMEM;
4018  	/* Copy protocol headers behind HW header: */
4019  	skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
4020  	return 0;
4021  }
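
/*
 * Editorial sketch (worked example, not part of the original source),
 * assuming PAGE_SIZE == 4096, hdr_len == 32 and proto_len == 0: with
 * skb->data at page offset 0x40, start = skb->data - 32 stays in the
 * same page, the range check covers a single element and the header
 * is simply pushed. With skb->data at offset 0x10 (not page-aligned),
 * start falls into the preceding page, push_ok stays false and the
 * header comes from qeth_core_header_cache instead.
 */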
4022  
4023  static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
4024  			      struct sk_buff *curr_skb,
4025  			      struct qeth_hdr *curr_hdr)
4026  {
4027  	struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
4028  	struct qeth_hdr *prev_hdr = queue->prev_hdr;
4029  
4030  	if (!prev_hdr)
4031  		return true;
4032  
4033  	/* All packets must have the same target: */
4034  	if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
4035  		struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);
4036  
4037  		return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
4038  					eth_hdr(curr_skb)->h_dest) &&
4039  		       qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
4040  	}
4041  
4042  	return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
4043  	       qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
4044  }
4045  
4046  /**
4047   * qeth_fill_buffer() - map skb into an output buffer
4048   * @buf:	buffer to transport the skb
4049   * @skb:	skb to map into the buffer
4050   * @hdr:	qeth_hdr for this skb. Either at skb->data, or allocated
4051   *		from qeth_core_header_cache.
4052   * @offset:	when mapping the skb, start at skb->data + offset
4053   * @hd_len:	if > 0, build a dedicated header element of this size
4054   */
4055  static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
4056  				     struct sk_buff *skb, struct qeth_hdr *hdr,
4057  				     unsigned int offset, unsigned int hd_len)
4058  {
4059  	struct qdio_buffer *buffer = buf->buffer;
4060  	int element = buf->next_element_to_fill;
4061  	int length = skb_headlen(skb) - offset;
4062  	char *data = skb->data + offset;
4063  	unsigned int elem_length, cnt;
4064  	bool is_first_elem = true;
4065  
4066  	__skb_queue_tail(&buf->skb_list, skb);
4067  
4068  	/* build dedicated element for HW Header */
4069  	if (hd_len) {
4070  		is_first_elem = false;
4071  
4072  		buffer->element[element].addr = virt_to_phys(hdr);
4073  		buffer->element[element].length = hd_len;
4074  		buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
4075  
4076  		/* HW header is allocated from cache: */
4077  		if ((void *)hdr != skb->data)
4078  			__set_bit(element, buf->from_kmem_cache);
4079  		/* HW header was pushed and is contiguous with linear part: */
4080  		else if (length > 0 && !PAGE_ALIGNED(data) &&
4081  			 (data == (char *)hdr + hd_len))
4082  			buffer->element[element].eflags |=
4083  				SBAL_EFLAGS_CONTIGUOUS;
4084  
4085  		element++;
4086  	}
4087  
4088  	/* map linear part into buffer element(s) */
4089  	while (length > 0) {
4090  		elem_length = min_t(unsigned int, length,
4091  				    PAGE_SIZE - offset_in_page(data));
4092  
4093  		buffer->element[element].addr = virt_to_phys(data);
4094  		buffer->element[element].length = elem_length;
4095  		length -= elem_length;
4096  		if (is_first_elem) {
4097  			is_first_elem = false;
4098  			if (length || skb_is_nonlinear(skb))
4099  				/* skb needs additional elements */
4100  				buffer->element[element].eflags =
4101  					SBAL_EFLAGS_FIRST_FRAG;
4102  			else
4103  				buffer->element[element].eflags = 0;
4104  		} else {
4105  			buffer->element[element].eflags =
4106  				SBAL_EFLAGS_MIDDLE_FRAG;
4107  		}
4108  
4109  		data += elem_length;
4110  		element++;
4111  	}
4112  
4113  	/* map page frags into buffer element(s) */
4114  	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
4115  		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
4116  
4117  		data = skb_frag_address(frag);
4118  		length = skb_frag_size(frag);
4119  		while (length > 0) {
4120  			elem_length = min_t(unsigned int, length,
4121  					    PAGE_SIZE - offset_in_page(data));
4122  
4123  			buffer->element[element].addr = virt_to_phys(data);
4124  			buffer->element[element].length = elem_length;
4125  			buffer->element[element].eflags =
4126  				SBAL_EFLAGS_MIDDLE_FRAG;
4127  
4128  			length -= elem_length;
4129  			data += elem_length;
4130  			element++;
4131  		}
4132  	}
4133  
4134  	if (buffer->element[element - 1].eflags)
4135  		buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
4136  	buf->next_element_to_fill = element;
4137  	return element;
4138  }
4139  
4140  static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4141  		       struct sk_buff *skb, unsigned int elements,
4142  		       struct qeth_hdr *hdr, unsigned int offset,
4143  		       unsigned int hd_len)
4144  {
4145  	unsigned int bytes = qdisc_pkt_len(skb);
4146  	struct qeth_qdio_out_buffer *buffer;
4147  	unsigned int next_element;
4148  	struct netdev_queue *txq;
4149  	bool stopped = false;
4150  	bool flush;
4151  
4152  	buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
4153  	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
4154  
4155  	/* Just a sanity check, the wake/stop logic should ensure that we always
4156  	 * get a free buffer.
4157  	 */
4158  	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4159  		return -EBUSY;
4160  
4161  	flush = !qeth_iqd_may_bulk(queue, skb, hdr);
4162  
4163  	if (flush ||
4164  	    (buffer->next_element_to_fill + elements > queue->max_elements)) {
4165  		if (buffer->next_element_to_fill > 0) {
4166  			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4167  			queue->bulk_count++;
4168  		}
4169  
4170  		if (queue->bulk_count >= queue->bulk_max)
4171  			flush = true;
4172  
4173  		if (flush)
4174  			qeth_flush_queue(queue);
4175  
4176  		buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
4177  						queue->bulk_count)];
4178  
4179  		/* Sanity-check again: */
4180  		if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4181  			return -EBUSY;
4182  	}
4183  
4184  	if (buffer->next_element_to_fill == 0 &&
4185  	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
4186  		/* If a TX completion happens right _here_ and fails to wake
4187  		 * the txq, then our re-check below will catch the race.
4188  		 */
4189  		QETH_TXQ_STAT_INC(queue, stopped);
4190  		netif_tx_stop_queue(txq);
4191  		stopped = true;
4192  	}
4193  
4194  	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4195  	buffer->bytes += bytes;
4196  	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4197  	queue->prev_hdr = hdr;
4198  
4199  	flush = __netdev_tx_sent_queue(txq, bytes,
4200  				       !stopped && netdev_xmit_more());
4201  
4202  	if (flush || next_element >= queue->max_elements) {
4203  		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4204  		queue->bulk_count++;
4205  
4206  		if (queue->bulk_count >= queue->bulk_max)
4207  			flush = true;
4208  
4209  		if (flush)
4210  			qeth_flush_queue(queue);
4211  	}
4212  
4213  	if (stopped && !qeth_out_queue_is_full(queue))
4214  		netif_tx_start_queue(txq);
4215  	return 0;
4216  }
4217  
4218  static int qeth_do_send_packet(struct qeth_card *card,
4219  			       struct qeth_qdio_out_q *queue,
4220  			       struct sk_buff *skb, struct qeth_hdr *hdr,
4221  			       unsigned int offset, unsigned int hd_len,
4222  			       unsigned int elements_needed)
4223  {
4224  	unsigned int start_index = queue->next_buf_to_fill;
4225  	struct qeth_qdio_out_buffer *buffer;
4226  	unsigned int next_element;
4227  	struct netdev_queue *txq;
4228  	bool stopped = false;
4229  	int flush_count = 0;
4230  	int do_pack = 0;
4231  	int rc = 0;
4232  
4233  	buffer = queue->bufs[queue->next_buf_to_fill];
4234  
4235  	/* Just a sanity check, the wake/stop logic should ensure that we always
4236  	 * get a free buffer.
4237  	 */
4238  	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4239  		return -EBUSY;
4240  
4241  	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
4242  
4243  	/* check if we need to switch packing state of this queue */
4244  	qeth_switch_to_packing_if_needed(queue);
4245  	if (queue->do_pack) {
4246  		do_pack = 1;
4247  		/* does packet fit in current buffer? */
4248  		if (buffer->next_element_to_fill + elements_needed >
4249  		    queue->max_elements) {
4250  			/* ... no -> set state PRIMED */
4251  			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4252  			flush_count++;
4253  			queue->next_buf_to_fill =
4254  				QDIO_BUFNR(queue->next_buf_to_fill + 1);
4255  			buffer = queue->bufs[queue->next_buf_to_fill];
4256  
4257  			/* We stepped forward, so sanity-check again: */
4258  			if (atomic_read(&buffer->state) !=
4259  			    QETH_QDIO_BUF_EMPTY) {
4260  				qeth_flush_buffers(queue, start_index,
4261  							   flush_count);
4262  				rc = -EBUSY;
4263  				goto out;
4264  			}
4265  		}
4266  	}
4267  
4268  	if (buffer->next_element_to_fill == 0 &&
4269  	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
4270  		/* If a TX completion happens right _here_ and fails to wake
4271  		 * the txq, then our re-check below will catch the race.
4272  		 */
4273  		QETH_TXQ_STAT_INC(queue, stopped);
4274  		netif_tx_stop_queue(txq);
4275  		stopped = true;
4276  	}
4277  
4278  	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4279  	buffer->bytes += qdisc_pkt_len(skb);
4280  	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4281  
4282  	if (queue->do_pack)
4283  		QETH_TXQ_STAT_INC(queue, skbs_pack);
4284  	if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
4285  		flush_count++;
4286  		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4287  		queue->next_buf_to_fill =
4288  				QDIO_BUFNR(queue->next_buf_to_fill + 1);
4289  	}
4290  
4291  	if (flush_count)
4292  		qeth_flush_buffers(queue, start_index, flush_count);
4293  
4294  out:
4295  	if (do_pack)
4296  		QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
4297  
4298  	if (stopped && !qeth_out_queue_is_full(queue))
4299  		netif_tx_start_queue(txq);
4300  	return rc;
4301  }
4302  
4303  static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
4304  			      unsigned int payload_len, struct sk_buff *skb,
4305  			      unsigned int proto_len)
4306  {
4307  	struct qeth_hdr_ext_tso *ext = &hdr->ext;
4308  
4309  	ext->hdr_tot_len = sizeof(*ext);
4310  	ext->imb_hdr_no = 1;
4311  	ext->hdr_type = 1;
4312  	ext->hdr_version = 1;
4313  	ext->hdr_len = 28;
4314  	ext->payload_len = payload_len;
4315  	ext->mss = skb_shinfo(skb)->gso_size;
4316  	ext->dg_hdr_len = proto_len;
4317  }
4318  
4319  int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
4320  	      struct qeth_qdio_out_q *queue, __be16 proto,
4321  	      void (*fill_header)(struct qeth_qdio_out_q *queue,
4322  				  struct qeth_hdr *hdr, struct sk_buff *skb,
4323  				  __be16 proto, unsigned int data_len))
4324  {
4325  	unsigned int proto_len, hw_hdr_len;
4326  	unsigned int frame_len = skb->len;
4327  	bool is_tso = skb_is_gso(skb);
4328  	unsigned int data_offset = 0;
4329  	struct qeth_hdr *hdr = NULL;
4330  	unsigned int hd_len = 0;
4331  	unsigned int elements;
4332  	int push_len, rc;
4333  
4334  	if (is_tso) {
4335  		hw_hdr_len = sizeof(struct qeth_hdr_tso);
4336  		proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4337  	} else {
4338  		hw_hdr_len = sizeof(struct qeth_hdr);
4339  		proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
4340  	}
4341  
4342  	rc = skb_cow_head(skb, hw_hdr_len);
4343  	if (rc)
4344  		return rc;
4345  
4346  	push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
4347  				      &elements);
4348  	if (push_len < 0)
4349  		return push_len;
4350  	if (is_tso || !push_len) {
4351  		/* HW header needs its own buffer element. */
4352  		hd_len = hw_hdr_len + proto_len;
4353  		data_offset = push_len + proto_len;
4354  	}
4355  	memset(hdr, 0, hw_hdr_len);
4356  	fill_header(queue, hdr, skb, proto, frame_len);
4357  	if (is_tso)
4358  		qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
4359  				  frame_len - proto_len, skb, proto_len);
4360  
4361  	if (IS_IQD(card)) {
4362  		rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
4363  				 hd_len);
4364  	} else {
4365  		/* TODO: drop skb_orphan() once TX completion is fast enough */
4366  		skb_orphan(skb);
4367  		spin_lock(&queue->lock);
4368  		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
4369  					 hd_len, elements);
4370  		spin_unlock(&queue->lock);
4371  	}
4372  
4373  	if (rc && !push_len)
4374  		kmem_cache_free(qeth_core_header_cache, hdr);
4375  
4376  	return rc;
4377  }
4378  EXPORT_SYMBOL_GPL(qeth_xmit);
4379  
4380  static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
4381  		struct qeth_reply *reply, unsigned long data)
4382  {
4383  	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4384  	struct qeth_ipacmd_setadpparms *setparms;
4385  
4386  	QETH_CARD_TEXT(card, 4, "prmadpcb");
4387  
4388  	setparms = &(cmd->data.setadapterparms);
4389  	if (qeth_setadpparms_inspect_rc(cmd)) {
4390  		QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
4391  		setparms->data.mode = SET_PROMISC_MODE_OFF;
4392  	}
4393  	card->info.promisc_mode = setparms->data.mode;
4394  	return (cmd->hdr.return_code) ? -EIO : 0;
4395  }
4396  
4397  void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
4398  {
4399  	enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
4400  						    SET_PROMISC_MODE_OFF;
4401  	struct qeth_cmd_buffer *iob;
4402  	struct qeth_ipa_cmd *cmd;
4403  
4404  	QETH_CARD_TEXT(card, 4, "setprom");
4405  	QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
4406  
4407  	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
4408  				   SETADP_DATA_SIZEOF(mode));
4409  	if (!iob)
4410  		return;
4411  	cmd = __ipa_cmd(iob);
4412  	cmd->data.setadapterparms.data.mode = mode;
4413  	qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
4414  }
4415  EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
4416  
4417  static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
4418  		struct qeth_reply *reply, unsigned long data)
4419  {
4420  	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4421  	struct qeth_ipacmd_setadpparms *adp_cmd;
4422  
4423  	QETH_CARD_TEXT(card, 4, "chgmaccb");
4424  	if (qeth_setadpparms_inspect_rc(cmd))
4425  		return -EIO;
4426  
4427  	adp_cmd = &cmd->data.setadapterparms;
4428  	if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
4429  		return -EADDRNOTAVAIL;
4430  
4431  	if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
4432  	    !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
4433  		return -EADDRNOTAVAIL;
4434  
4435  	eth_hw_addr_set(card->dev, adp_cmd->data.change_addr.addr);
4436  	return 0;
4437  }
4438  
4439  int qeth_setadpparms_change_macaddr(struct qeth_card *card)
4440  {
4441  	int rc;
4442  	struct qeth_cmd_buffer *iob;
4443  	struct qeth_ipa_cmd *cmd;
4444  
4445  	QETH_CARD_TEXT(card, 4, "chgmac");
4446  
4447  	iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4448  				   SETADP_DATA_SIZEOF(change_addr));
4449  	if (!iob)
4450  		return -ENOMEM;
4451  	cmd = __ipa_cmd(iob);
4452  	cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
4453  	cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
4454  	ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
4455  			card->dev->dev_addr);
4456  	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
4457  			       NULL);
4458  	return rc;
4459  }
4460  EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
4461  
4462  static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4463  		struct qeth_reply *reply, unsigned long data)
4464  {
4465  	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4466  	struct qeth_set_access_ctrl *access_ctrl_req;
4467  
4468  	QETH_CARD_TEXT(card, 4, "setaccb");
4469  
4470  	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4471  	QETH_CARD_TEXT_(card, 2, "rc=%d",
4472  			cmd->data.setadapterparms.hdr.return_code);
4473  	if (cmd->data.setadapterparms.hdr.return_code !=
4474  						SET_ACCESS_CTRL_RC_SUCCESS)
4475  		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
4476  				 access_ctrl_req->subcmd_code, CARD_DEVID(card),
4477  				 cmd->data.setadapterparms.hdr.return_code);
4478  	switch (qeth_setadpparms_inspect_rc(cmd)) {
4479  	case SET_ACCESS_CTRL_RC_SUCCESS:
4480  		if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE)
4481  			dev_info(&card->gdev->dev,
4482  			    "QDIO data connection isolation is deactivated\n");
4483  		else
4484  			dev_info(&card->gdev->dev,
4485  			    "QDIO data connection isolation is activated\n");
4486  		return 0;
4487  	case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4488  		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
4489  				 CARD_DEVID(card));
4490  		return 0;
4491  	case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4492  		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
4493  				 CARD_DEVID(card));
4494  		return 0;
4495  	case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
4496  		dev_err(&card->gdev->dev, "Adapter does not "
4497  			"support QDIO data connection isolation\n");
4498  		return -EOPNOTSUPP;
4499  	case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
4500  		dev_err(&card->gdev->dev,
4501  			"Adapter is dedicated. "
4502  			"QDIO data connection isolation not supported\n");
4503  		return -EOPNOTSUPP;
4504  	case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
4505  		dev_err(&card->gdev->dev,
4506  			"TSO does not permit QDIO data connection isolation\n");
4507  		return -EPERM;
4508  	case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
4509  		dev_err(&card->gdev->dev, "The adjacent switch port does not "
4510  			"support reflective relay mode\n");
4511  		return -EOPNOTSUPP;
4512  	case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
4513  		dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
4514  					"enabled at the adjacent switch port\n");
4515  		return -EREMOTEIO;
4516  	case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
4517  		dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
4518  					"at the adjacent switch failed\n");
4519  		/* benign error while disabling ISOLATION_MODE_FWD */
4520  		return 0;
4521  	default:
4522  		return -EIO;
4523  	}
4524  }
4525  
4526  int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
4527  				     enum qeth_ipa_isolation_modes mode)
4528  {
4529  	int rc;
4530  	struct qeth_cmd_buffer *iob;
4531  	struct qeth_ipa_cmd *cmd;
4532  	struct qeth_set_access_ctrl *access_ctrl_req;
4533  
4534  	QETH_CARD_TEXT(card, 4, "setacctl");
4535  
4536  	if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
4537  		dev_err(&card->gdev->dev,
4538  			"Adapter does not support QDIO data connection isolation\n");
4539  		return -EOPNOTSUPP;
4540  	}
4541  
4542  	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4543  				   SETADP_DATA_SIZEOF(set_access_ctrl));
4544  	if (!iob)
4545  		return -ENOMEM;
4546  	cmd = __ipa_cmd(iob);
4547  	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4548  	access_ctrl_req->subcmd_code = mode;
4549  
4550  	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
4551  			       NULL);
4552  	if (rc) {
4553  		QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
4554  		QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: send failed\n",
4555  				 rc, CARD_DEVID(card));
4556  	}
4557  
4558  	return rc;
4559  }
4560  
4561  void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
4562  {
4563  	struct qeth_card *card;
4564  
4565  	card = dev->ml_priv;
4566  	QETH_CARD_TEXT(card, 4, "txtimeo");
4567  	qeth_schedule_recovery(card);
4568  }
4569  EXPORT_SYMBOL_GPL(qeth_tx_timeout);
4570  
4571  static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4572  {
4573  	struct qeth_card *card = dev->ml_priv;
4574  	int rc = 0;
4575  
4576  	switch (regnum) {
4577  	case MII_BMCR: /* Basic mode control register */
4578  		rc = BMCR_FULLDPLX;
4579  		if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
4580  		    (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
4581  		    (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
4582  			rc |= BMCR_SPEED100;
4583  		break;
4584  	case MII_BMSR: /* Basic mode status register */
4585  		rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4586  		     BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4587  		     BMSR_100BASE4;
4588  		break;
4589  	case MII_PHYSID1: /* PHYS ID 1 */
4590  		rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4591  		     dev->dev_addr[2];
4592  		rc = (rc >> 5) & 0xFFFF;
4593  		break;
4594  	case MII_PHYSID2: /* PHYS ID 2 */
4595  		rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4596  		break;
4597  	case MII_ADVERTISE: /* Advertisement control reg */
4598  		rc = ADVERTISE_ALL;
4599  		break;
4600  	case MII_LPA: /* Link partner ability reg */
4601  		rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4602  		     LPA_100BASE4 | LPA_LPACK;
4603  		break;
4604  	case MII_EXPANSION: /* Expansion register */
4605  		break;
4606  	case MII_DCOUNTER: /* disconnect counter */
4607  		break;
4608  	case MII_FCSCOUNTER: /* false carrier counter */
4609  		break;
4610  	case MII_NWAYTEST: /* N-way auto-neg test register */
4611  		break;
4612  	case MII_RERRCOUNTER: /* rx error counter */
4613  		rc = card->stats.rx_length_errors +
4614  		     card->stats.rx_frame_errors +
4615  		     card->stats.rx_fifo_errors;
4616  		break;
4617  	case MII_SREVISION: /* silicon revision */
4618  		break;
4619  	case MII_RESV1: /* reserved 1 */
4620  		break;
4621  	case MII_LBRERROR: /* loopback, rx, bypass error */
4622  		break;
4623  	case MII_PHYADDR: /* physical address */
4624  		break;
4625  	case MII_RESV2: /* reserved 2 */
4626  		break;
4627  	case MII_TPISTATUS: /* TPI status for 10mbps */
4628  		break;
4629  	case MII_NCONFIG: /* network interface config */
4630  		break;
4631  	default:
4632  		break;
4633  	}
4634  	return rc;
4635  }
4636  
4637  static int qeth_snmp_command_cb(struct qeth_card *card,
4638  				struct qeth_reply *reply, unsigned long data)
4639  {
4640  	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4641  	struct qeth_arp_query_info *qinfo = reply->param;
4642  	struct qeth_ipacmd_setadpparms *adp_cmd;
4643  	unsigned int data_len;
4644  	void *snmp_data;
4645  
4646  	QETH_CARD_TEXT(card, 3, "snpcmdcb");
4647  
4648  	if (cmd->hdr.return_code) {
4649  		QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
4650  		return -EIO;
4651  	}
4652  	if (cmd->data.setadapterparms.hdr.return_code) {
4653  		cmd->hdr.return_code =
4654  			cmd->data.setadapterparms.hdr.return_code;
4655  		QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
4656  		return -EIO;
4657  	}
4658  
4659  	adp_cmd = &cmd->data.setadapterparms;
4660  	data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
4661  	if (adp_cmd->hdr.seq_no == 1) {
4662  		snmp_data = &adp_cmd->data.snmp;
4663  	} else {
4664  		snmp_data = &adp_cmd->data.snmp.request;
4665  		data_len -= offsetof(struct qeth_snmp_cmd, request);
4666  	}
4667  
4668  	/* check if there is enough room in userspace */
4669  	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4670  		QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
4671  		return -ENOSPC;
4672  	}
4673  	QETH_CARD_TEXT_(card, 4, "snore%i",
4674  			cmd->data.setadapterparms.hdr.used_total);
4675  	QETH_CARD_TEXT_(card, 4, "sseqn%i",
4676  			cmd->data.setadapterparms.hdr.seq_no);
4677  	/* copy entries to user buffer */
4678  	memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
4679  	qinfo->udata_offset += data_len;
4680  
4681  	if (cmd->data.setadapterparms.hdr.seq_no <
4682  	    cmd->data.setadapterparms.hdr.used_total)
4683  		return 1;
4684  	return 0;
4685  }
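
/*
 * Editorial note: returning 1 from the callback above keeps the IPA
 * command pending so that further reply parts can be collected, while
 * returning 0 completes it. Whether more parts are outstanding is
 * visible from the seq_no/used_total pair in the setadapterparms
 * header.
 */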
4686  
4687  static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
4688  {
4689  	struct qeth_snmp_ureq __user *ureq;
4690  	struct qeth_cmd_buffer *iob;
4691  	unsigned int req_len;
4692  	struct qeth_arp_query_info qinfo = {0, };
4693  	int rc = 0;
4694  
4695  	QETH_CARD_TEXT(card, 3, "snmpcmd");
4696  
4697  	if (IS_VM_NIC(card))
4698  		return -EOPNOTSUPP;
4699  
4700  	if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
4701  	    IS_LAYER3(card))
4702  		return -EOPNOTSUPP;
4703  
4704  	ureq = (struct qeth_snmp_ureq __user *) udata;
4705  	if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
4706  	    get_user(req_len, &ureq->hdr.req_len))
4707  		return -EFAULT;
4708  
4709  	/* Sanitize user input, to avoid overflows in iob size calculation: */
4710  	if (req_len > QETH_BUFSIZE)
4711  		return -EINVAL;
4712  
4713  	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
4714  	if (!iob)
4715  		return -ENOMEM;
4716  
4717  	if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
4718  			   &ureq->cmd, req_len)) {
4719  		qeth_put_cmd(iob);
4720  		return -EFAULT;
4721  	}
4722  
4723  	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
4724  	if (!qinfo.udata) {
4725  		qeth_put_cmd(iob);
4726  		return -ENOMEM;
4727  	}
4728  	qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
4729  
4730  	rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
4731  	if (rc)
4732  		QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
4733  				 CARD_DEVID(card), rc);
4734  	else {
4735  		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4736  			rc = -EFAULT;
4737  	}
4738  
4739  	kfree(qinfo.udata);
4740  	return rc;
4741  }
4742  
4743  static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
4744  					 struct qeth_reply *reply,
4745  					 unsigned long data)
4746  {
4747  	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4748  	struct qeth_qoat_priv *priv = reply->param;
4749  	int resdatalen;
4750  
4751  	QETH_CARD_TEXT(card, 3, "qoatcb");
4752  	if (qeth_setadpparms_inspect_rc(cmd))
4753  		return -EIO;
4754  
4755  	resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
4756  
4757  	if (resdatalen > (priv->buffer_len - priv->response_len))
4758  		return -ENOSPC;
4759  
4760  	memcpy(priv->buffer + priv->response_len,
4761  	       &cmd->data.setadapterparms.hdr, resdatalen);
4762  	priv->response_len += resdatalen;
4763  
4764  	if (cmd->data.setadapterparms.hdr.seq_no <
4765  	    cmd->data.setadapterparms.hdr.used_total)
4766  		return 1;
4767  	return 0;
4768  }
4769  
4770  static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4771  {
4772  	int rc = 0;
4773  	struct qeth_cmd_buffer *iob;
4774  	struct qeth_ipa_cmd *cmd;
4775  	struct qeth_query_oat *oat_req;
4776  	struct qeth_query_oat_data oat_data;
4777  	struct qeth_qoat_priv priv;
4778  	void __user *tmp;
4779  
4780  	QETH_CARD_TEXT(card, 3, "qoatcmd");
4781  
4782  	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT))
4783  		return -EOPNOTSUPP;
4784  
4785  	if (copy_from_user(&oat_data, udata, sizeof(oat_data)))
4786  		return -EFAULT;
4787  
4788  	priv.buffer_len = oat_data.buffer_len;
4789  	priv.response_len = 0;
4790  	priv.buffer = vzalloc(oat_data.buffer_len);
4791  	if (!priv.buffer)
4792  		return -ENOMEM;
4793  
4794  	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4795  				   SETADP_DATA_SIZEOF(query_oat));
4796  	if (!iob) {
4797  		rc = -ENOMEM;
4798  		goto out_free;
4799  	}
4800  	cmd = __ipa_cmd(iob);
4801  	oat_req = &cmd->data.setadapterparms.data.query_oat;
4802  	oat_req->subcmd_code = oat_data.command;
4803  
4804  	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv);
4805  	if (!rc) {
4806  		tmp = is_compat_task() ? compat_ptr(oat_data.ptr) :
4807  					 u64_to_user_ptr(oat_data.ptr);
4808  		oat_data.response_len = priv.response_len;
4809  
4810  		if (copy_to_user(tmp, priv.buffer, priv.response_len) ||
4811  		    copy_to_user(udata, &oat_data, sizeof(oat_data)))
4812  			rc = -EFAULT;
4813  	}
4814  
4815  out_free:
4816  	vfree(priv.buffer);
4817  	return rc;
4818  }
4819  
4820  static int qeth_init_link_info_oat_cb(struct qeth_card *card,
4821  				      struct qeth_reply *reply_priv,
4822  				      unsigned long data)
4823  {
4824  	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4825  	struct qeth_link_info *link_info = reply_priv->param;
4826  	struct qeth_query_oat_physical_if *phys_if;
4827  	struct qeth_query_oat_reply *reply;
4828  
4829  	QETH_CARD_TEXT(card, 2, "qoatincb");
4830  	if (qeth_setadpparms_inspect_rc(cmd))
4831  		return -EIO;
4832  
4833  	/* Multi-part reply is unexpected, don't bother: */
4834  	if (cmd->data.setadapterparms.hdr.used_total > 1)
4835  		return -EINVAL;
4836  
4837  	/* Expect the reply to start with phys_if data: */
4838  	reply = &cmd->data.setadapterparms.data.query_oat.reply[0];
4839  	if (reply->type != QETH_QOAT_REPLY_TYPE_PHYS_IF ||
4840  	    reply->length < sizeof(*reply))
4841  		return -EINVAL;
4842  
4843  	phys_if = &reply->phys_if;
4844  
4845  	switch (phys_if->speed_duplex) {
4846  	case QETH_QOAT_PHYS_SPEED_10M_HALF:
4847  		link_info->speed = SPEED_10;
4848  		link_info->duplex = DUPLEX_HALF;
4849  		break;
4850  	case QETH_QOAT_PHYS_SPEED_10M_FULL:
4851  		link_info->speed = SPEED_10;
4852  		link_info->duplex = DUPLEX_FULL;
4853  		break;
4854  	case QETH_QOAT_PHYS_SPEED_100M_HALF:
4855  		link_info->speed = SPEED_100;
4856  		link_info->duplex = DUPLEX_HALF;
4857  		break;
4858  	case QETH_QOAT_PHYS_SPEED_100M_FULL:
4859  		link_info->speed = SPEED_100;
4860  		link_info->duplex = DUPLEX_FULL;
4861  		break;
4862  	case QETH_QOAT_PHYS_SPEED_1000M_HALF:
4863  		link_info->speed = SPEED_1000;
4864  		link_info->duplex = DUPLEX_HALF;
4865  		break;
4866  	case QETH_QOAT_PHYS_SPEED_1000M_FULL:
4867  		link_info->speed = SPEED_1000;
4868  		link_info->duplex = DUPLEX_FULL;
4869  		break;
4870  	case QETH_QOAT_PHYS_SPEED_10G_FULL:
4871  		link_info->speed = SPEED_10000;
4872  		link_info->duplex = DUPLEX_FULL;
4873  		break;
4874  	case QETH_QOAT_PHYS_SPEED_25G_FULL:
4875  		link_info->speed = SPEED_25000;
4876  		link_info->duplex = DUPLEX_FULL;
4877  		break;
4878  	case QETH_QOAT_PHYS_SPEED_UNKNOWN:
4879  	default:
4880  		link_info->speed = SPEED_UNKNOWN;
4881  		link_info->duplex = DUPLEX_UNKNOWN;
4882  		break;
4883  	}
4884  
4885  	switch (phys_if->media_type) {
4886  	case QETH_QOAT_PHYS_MEDIA_COPPER:
4887  		link_info->port = PORT_TP;
4888  		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
4889  		break;
4890  	case QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT:
4891  		link_info->port = PORT_FIBRE;
4892  		link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
4893  		break;
4894  	case QETH_QOAT_PHYS_MEDIA_FIBRE_LONG:
4895  		link_info->port = PORT_FIBRE;
4896  		link_info->link_mode = QETH_LINK_MODE_FIBRE_LONG;
4897  		break;
4898  	default:
4899  		link_info->port = PORT_OTHER;
4900  		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
4901  		break;
4902  	}
4903  
4904  	return 0;
4905  }
4906  
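/*
 * Start from the card-type defaults, then overwrite each field for
 * which QUERY OAT reported a definite value. A failed or unsupported
 * OAT query simply leaves the defaults in place.
 */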
4907  static void qeth_init_link_info(struct qeth_card *card)
4908  {
4909  	qeth_default_link_info(card);
4910  
4911  	/* Get more accurate data via QUERY OAT: */
4912  	if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
4913  		struct qeth_link_info link_info;
4914  		struct qeth_cmd_buffer *iob;
4915  
4916  		iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4917  					   SETADP_DATA_SIZEOF(query_oat));
4918  		if (iob) {
4919  			struct qeth_ipa_cmd *cmd = __ipa_cmd(iob);
4920  			struct qeth_query_oat *oat_req;
4921  
4922  			oat_req = &cmd->data.setadapterparms.data.query_oat;
4923  			oat_req->subcmd_code = QETH_QOAT_SCOPE_INTERFACE;
4924  
4925  			if (!qeth_send_ipa_cmd(card, iob,
4926  					       qeth_init_link_info_oat_cb,
4927  					       &link_info)) {
4928  				if (link_info.speed != SPEED_UNKNOWN)
4929  					card->info.link_info.speed = link_info.speed;
4930  				if (link_info.duplex != DUPLEX_UNKNOWN)
4931  					card->info.link_info.duplex = link_info.duplex;
4932  				if (link_info.port != PORT_OTHER)
4933  					card->info.link_info.port = link_info.port;
4934  				if (link_info.link_mode != QETH_LINK_MODE_UNKNOWN)
4935  					card->info.link_info.link_mode = link_info.link_mode;
4936  			}
4937  		}
4938  	}
4939  }
4940  
4941  /**
4942   * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
4943   * @card: pointer to a qeth_card
4944   *
4945   * Return:
4946   *	0 if a MAC address has been set for the card's netdevice,
4947   *	an error code otherwise
4948   */
4949  int qeth_vm_request_mac(struct qeth_card *card)
4950  {
4951  	struct diag26c_mac_resp *response;
4952  	struct diag26c_mac_req *request;
4953  	int rc;
4954  
4955  	QETH_CARD_TEXT(card, 2, "vmreqmac");
4956  
4957  	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
4958  	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
4959  	if (!request || !response) {
4960  		rc = -ENOMEM;
4961  		goto out;
4962  	}
4963  
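	/*
	 * DIAG 0x26C (MAC services, GET_MAC) asks the z/VM hypervisor
	 * for the MAC address it assigned to this device. The buffers
	 * live in GFP_DMA memory, as the diagnose expects 31-bit
	 * addressable storage.
	 */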
4964  	request->resp_buf_len = sizeof(*response);
4965  	request->resp_version = DIAG26C_VERSION2;
4966  	request->op_code = DIAG26C_GET_MAC;
4967  	request->devno = card->info.ddev_devno;
4968  
4969  	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4970  	rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
4971  	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4972  	if (rc)
4973  		goto out;
4974  	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
4975  
4976  	if (request->resp_buf_len < sizeof(*response) ||
4977  	    response->version != request->resp_version) {
4978  		rc = -EIO;
4979  		QETH_CARD_TEXT(card, 2, "badresp");
4980  		QETH_CARD_HEX(card, 2, &request->resp_buf_len,
4981  			      sizeof(request->resp_buf_len));
4982  	} else if (!is_valid_ether_addr(response->mac)) {
4983  		rc = -EINVAL;
4984  		QETH_CARD_TEXT(card, 2, "badmac");
4985  		QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
4986  	} else {
4987  		eth_hw_addr_set(card->dev, response->mac);
4988  	}
4989  
4990  out:
4991  	kfree(response);
4992  	kfree(request);
4993  	return rc;
4994  }
4995  EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
4996  
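/*
 * Probe the device capabilities: temporarily start the data channel if
 * it is offline, read the configuration data and the QDIO SSQD block,
 * and decide whether a completion queue (CQ) is available.
 */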
4997  static void qeth_determine_capabilities(struct qeth_card *card)
4998  {
4999  	struct qeth_channel *channel = &card->data;
5000  	struct ccw_device *ddev = channel->ccwdev;
5001  	int rc;
5002  	int ddev_offline = 0;
5003  
5004  	QETH_CARD_TEXT(card, 2, "detcapab");
5005  	if (!ddev->online) {
5006  		ddev_offline = 1;
5007  		rc = qeth_start_channel(channel);
5008  		if (rc) {
5009  			QETH_CARD_TEXT_(card, 2, "3err%d", rc);
5010  			goto out;
5011  		}
5012  	}
5013  
5014  	rc = qeth_read_conf_data(card);
5015  	if (rc) {
5016  		QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
5017  				 CARD_DEVID(card), rc);
5018  		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
5019  		goto out_offline;
5020  	}
5021  
5022  	rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
5023  	if (rc)
5024  		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
5025  
5026  	QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
5027  	QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
5028  	QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
5029  	QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
5030  	QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
5031  	if (card->ssqd.qfmt == QDIO_IQDIO_QFMT &&
5032  	    (card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) &&
5033  	    (card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE)) {
5034  		dev_info(&card->gdev->dev,
5035  			"Completion Queueing supported\n");
5036  	} else {
5037  		card->options.cq = QETH_CQ_NOTAVAILABLE;
5038  	}
5039  
5040  out_offline:
5041  	if (ddev_offline == 1)
5042  		qeth_stop_channel(channel);
5043  out:
5044  	return;
5045  }
5046  
5047  static void qeth_read_ccw_conf_data(struct qeth_card *card)
5048  {
5049  	struct qeth_card_info *info = &card->info;
5050  	struct ccw_device *cdev = CARD_DDEV(card);
5051  	struct ccw_dev_id dev_id;
5052  
5053  	QETH_CARD_TEXT(card, 2, "ccwconfd");
5054  	ccw_device_get_id(cdev, &dev_id);
5055  
5056  	info->ddev_devno = dev_id.devno;
5057  	info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) &&
5058  			  !ccw_device_get_iid(cdev, &info->iid) &&
5059  			  !ccw_device_get_chid(cdev, 0, &info->chid);
5060  	info->ssid = dev_id.ssid;
5061  
5062  	dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n",
5063  		 info->chid, info->chpid);
5064  
5065  	QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno);
5066  	QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid);
5067  	QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid);
5068  	QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid);
5069  	QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid);
5070  	QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid);
5071  	QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid);
5072  }
5073  
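/*
 * Hand the input/output buffer arrays over to the QDIO layer. The
 * atomic cmpxchg ensures that allocate/establish runs only once per
 * ALLOCATED -> ESTABLISHED transition; on failure the state is rolled
 * back so that a later attempt can retry.
 */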
5074  static int qeth_qdio_establish(struct qeth_card *card)
5075  {
5076  	struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
5077  	struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
5078  	struct qeth_qib_parms *qib_parms = NULL;
5079  	struct qdio_initialize init_data;
5080  	unsigned int no_input_qs = 1;
5081  	unsigned int i;
5082  	int rc = 0;
5083  
5084  	QETH_CARD_TEXT(card, 2, "qdioest");
5085  
5086  	if (!IS_IQD(card) && !IS_VM_NIC(card)) {
5087  		qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
5088  		if (!qib_parms)
5089  			return -ENOMEM;
5090  
5091  		qeth_fill_qib_parms(card, qib_parms);
5092  	}
5093  
5094  	in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
5095  	if (card->options.cq == QETH_CQ_ENABLED) {
5096  		in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;
5097  		no_input_qs++;
5098  	}
5099  
5100  	for (i = 0; i < card->qdio.no_out_queues; i++)
5101  		out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;
5102  
5103  	memset(&init_data, 0, sizeof(struct qdio_initialize));
5104  	init_data.q_format		 = IS_IQD(card) ? QDIO_IQDIO_QFMT :
5105  							  QDIO_QETH_QFMT;
5106  	init_data.qib_param_field_format = 0;
5107  	init_data.qib_param_field	 = (void *)qib_parms;
5108  	init_data.no_input_qs		 = no_input_qs;
5109  	init_data.no_output_qs           = card->qdio.no_out_queues;
5110  	init_data.input_handler		 = qeth_qdio_input_handler;
5111  	init_data.output_handler	 = qeth_qdio_output_handler;
5112  	init_data.irq_poll		 = qeth_qdio_poll;
5113  	init_data.int_parm               = (unsigned long) card;
5114  	init_data.input_sbal_addr_array  = in_sbal_ptrs;
5115  	init_data.output_sbal_addr_array = out_sbal_ptrs;
5116  
5117  	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
5118  		QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
5119  		rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
5120  				   init_data.no_output_qs);
5121  		if (rc) {
5122  			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
5123  			goto out;
5124  		}
5125  		rc = qdio_establish(CARD_DDEV(card), &init_data);
5126  		if (rc) {
5127  			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
5128  			qdio_free(CARD_DDEV(card));
5129  		}
5130  	}
5131  
5132  	switch (card->options.cq) {
5133  	case QETH_CQ_ENABLED:
5134  		dev_info(&card->gdev->dev, "Completion Queue support enabled");
5135  		break;
5136  	case QETH_CQ_DISABLED:
5137  		dev_info(&card->gdev->dev, "Completion Queue support disabled");
5138  		break;
5139  	default:
5140  		break;
5141  	}
5142  
5143  out:
5144  	kfree(qib_parms);
5145  	return rc;
5146  }
5147  
5148  static void qeth_core_free_card(struct qeth_card *card)
5149  {
5150  	QETH_CARD_TEXT(card, 2, "freecrd");
5151  
5152  	unregister_service_level(&card->qeth_service_level);
5153  	debugfs_remove_recursive(card->debugfs);
5154  	qeth_put_cmd(card->read_cmd);
5155  	destroy_workqueue(card->event_wq);
5156  	dev_set_drvdata(&card->gdev->dev, NULL);
5157  	kfree(card);
5158  }
5159  
5160  static void qeth_trace_features(struct qeth_card *card)
5161  {
5162  	QETH_CARD_TEXT(card, 2, "features");
5163  	QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
5164  	QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
5165  	QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
5166  	QETH_CARD_HEX(card, 2, &card->info.diagass_support,
5167  		      sizeof(card->info.diagass_support));
5168  }
5169  
5170  static struct ccw_device_id qeth_ids[] = {
5171  	{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
5172  					.driver_info = QETH_CARD_TYPE_OSD},
5173  	{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
5174  					.driver_info = QETH_CARD_TYPE_IQD},
5175  	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
5176  					.driver_info = QETH_CARD_TYPE_OSM},
5177  #ifdef CONFIG_QETH_OSX
5178  	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
5179  					.driver_info = QETH_CARD_TYPE_OSX},
5180  #endif
5181  	{},
5182  };
5183  MODULE_DEVICE_TABLE(ccw, qeth_ids);
5184  
5185  static struct ccw_driver qeth_ccw_driver = {
5186  	.driver = {
5187  		.owner = THIS_MODULE,
5188  		.name = "qeth",
5189  	},
5190  	.ids = qeth_ids,
5191  	.probe = ccwgroup_probe_ccwdev,
5192  	.remove = ccwgroup_remove_ccwdev,
5193  };
5194  
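/*
 * Bring the CCW channels up and run the IDX handshake, retrying the
 * whole sequence up to three times. -ERESTARTSYS and -EINTR abort the
 * retry loop, as they indicate that the setup was interrupted.
 */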
5195  static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
5196  {
5197  	int retries = 3;
5198  	int rc;
5199  
5200  	QETH_CARD_TEXT(card, 2, "hrdsetup");
5201  	atomic_set(&card->force_alloc_skb, 0);
5202  	rc = qeth_update_from_chp_desc(card);
5203  	if (rc)
5204  		return rc;
5205  retry:
5206  	if (retries < 3)
5207  		QETH_DBF_MESSAGE(2, "Retrying IDX activation on device %x.\n",
5208  				 CARD_DEVID(card));
5209  	rc = qeth_qdio_clear_card(card, !IS_IQD(card));
5210  	qeth_stop_channel(&card->data);
5211  	qeth_stop_channel(&card->write);
5212  	qeth_stop_channel(&card->read);
5213  	qdio_free(CARD_DDEV(card));
5214  
5215  	rc = qeth_start_channel(&card->read);
5216  	if (rc)
5217  		goto retriable;
5218  	rc = qeth_start_channel(&card->write);
5219  	if (rc)
5220  		goto retriable;
5221  	rc = qeth_start_channel(&card->data);
5222  	if (rc)
5223  		goto retriable;
5224  retriable:
5225  	if (rc == -ERESTARTSYS) {
5226  		QETH_CARD_TEXT(card, 2, "break1");
5227  		return rc;
5228  	} else if (rc) {
5229  		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
5230  		if (--retries < 0)
5231  			goto out;
5232  		else
5233  			goto retry;
5234  	}
5235  
5236  	qeth_determine_capabilities(card);
5237  	qeth_read_ccw_conf_data(card);
5238  	qeth_idx_init(card);
5239  
5240  	rc = qeth_idx_activate_read_channel(card);
5241  	if (rc == -EINTR) {
5242  		QETH_CARD_TEXT(card, 2, "break2");
5243  		return rc;
5244  	} else if (rc) {
5245  		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
5246  		if (--retries < 0)
5247  			goto out;
5248  		else
5249  			goto retry;
5250  	}
5251  
5252  	rc = qeth_idx_activate_write_channel(card);
5253  	if (rc == -EINTR) {
5254  		QETH_CARD_TEXT(card, 2, "break3");
5255  		return rc;
5256  	} else if (rc) {
5257  		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
5258  		if (--retries < 0)
5259  			goto out;
5260  		else
5261  			goto retry;
5262  	}
5263  	card->read_or_write_problem = 0;
5264  	rc = qeth_mpc_initialize(card);
5265  	if (rc) {
5266  		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
5267  		goto out;
5268  	}
5269  
5270  	rc = qeth_send_startlan(card);
5271  	if (rc) {
5272  		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
5273  		if (rc == -ENETDOWN) {
5274  			dev_warn(&card->gdev->dev, "The LAN is offline\n");
5275  			*carrier_ok = false;
5276  		} else {
5277  			goto out;
5278  		}
5279  	} else {
5280  		*carrier_ok = true;
5281  	}
5282  
5283  	card->options.ipa4.supported = 0;
5284  	card->options.ipa6.supported = 0;
5285  	card->options.adp.supported = 0;
5286  	card->options.sbp.supported_funcs = 0;
5287  	card->info.diagass_support = 0;
5288  	rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
5289  	if (rc == -ENOMEM)
5290  		goto out;
5291  	if (qeth_is_supported(card, IPA_IPV6)) {
5292  		rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
5293  		if (rc == -ENOMEM)
5294  			goto out;
5295  	}
5296  	if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
5297  		rc = qeth_query_setadapterparms(card);
5298  		if (rc < 0) {
5299  			QETH_CARD_TEXT_(card, 2, "7err%d", rc);
5300  			goto out;
5301  		}
5302  	}
5303  	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
5304  		rc = qeth_query_setdiagass(card);
5305  		if (rc)
5306  			QETH_CARD_TEXT_(card, 2, "8err%d", rc);
5307  	}
5308  
5309  	qeth_trace_features(card);
5310  
5311  	if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
5312  	    (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
5313  		card->info.hwtrap = 0;
5314  
5315  	if (card->options.isolation != ISOLATION_MODE_NONE) {
5316  		rc = qeth_setadpparms_set_access_ctrl(card,
5317  						      card->options.isolation);
5318  		if (rc)
5319  			goto out;
5320  	}
5321  
5322  	qeth_init_link_info(card);
5323  
5324  	rc = qeth_init_qdio_queues(card);
5325  	if (rc) {
5326  		QETH_CARD_TEXT_(card, 2, "9err%d", rc);
5327  		goto out;
5328  	}
5329  
5330  	return 0;
5331  out:
5332  	dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
5333  		"from an error on the device\n");
5334  	QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
5335  			 CARD_DEVID(card), rc);
5336  	return rc;
5337  }
5338  
5339  static int qeth_set_online(struct qeth_card *card,
5340  			   const struct qeth_discipline *disc)
5341  {
5342  	bool carrier_ok;
5343  	int rc;
5344  
5345  	mutex_lock(&card->conf_mutex);
5346  	QETH_CARD_TEXT(card, 2, "setonlin");
5347  
5348  	rc = qeth_hardsetup_card(card, &carrier_ok);
5349  	if (rc) {
5350  		QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
5351  		rc = -ENODEV;
5352  		goto err_hardsetup;
5353  	}
5354  
5355  	qeth_print_status_message(card);
5356  
5357  	if (card->dev->reg_state != NETREG_REGISTERED)
5358  		/* no need for locking / error handling at this early stage: */
5359  		qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));
5360  
5361  	rc = disc->set_online(card, carrier_ok);
5362  	if (rc)
5363  		goto err_online;
5364  
5365  	/* let userspace know that the device is online */
5366  	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
5367  
5368  	mutex_unlock(&card->conf_mutex);
5369  	return 0;
5370  
5371  err_online:
5372  err_hardsetup:
5373  	qeth_qdio_clear_card(card, 0);
5374  	qeth_clear_working_pool_list(card);
5375  	qeth_flush_local_addrs(card);
5376  
5377  	qeth_stop_channel(&card->data);
5378  	qeth_stop_channel(&card->write);
5379  	qeth_stop_channel(&card->read);
5380  	qdio_free(CARD_DDEV(card));
5381  
5382  	mutex_unlock(&card->conf_mutex);
5383  	return rc;
5384  }
5385  
5386  int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
5387  		     bool resetting)
5388  {
5389  	int rc, rc2, rc3;
5390  
5391  	mutex_lock(&card->conf_mutex);
5392  	QETH_CARD_TEXT(card, 3, "setoffl");
5393  
5394  	if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
5395  		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
5396  		card->info.hwtrap = 1;
5397  	}
5398  
5399  	/* cancel any stalled cmd that might block the rtnl: */
5400  	qeth_clear_ipacmd_list(card);
5401  
5402  	rtnl_lock();
5403  	netif_device_detach(card->dev);
5404  	netif_carrier_off(card->dev);
5405  	rtnl_unlock();
5406  
5407  	cancel_work_sync(&card->rx_mode_work);
5408  
5409  	disc->set_offline(card);
5410  
5411  	qeth_qdio_clear_card(card, 0);
5412  	qeth_drain_output_queues(card);
5413  	qeth_clear_working_pool_list(card);
5414  	qeth_flush_local_addrs(card);
5415  	card->info.promisc_mode = 0;
5416  	qeth_default_link_info(card);
5417  
5418  	rc  = qeth_stop_channel(&card->data);
5419  	rc2 = qeth_stop_channel(&card->write);
5420  	rc3 = qeth_stop_channel(&card->read);
5421  	if (!rc)
5422  		rc = (rc2) ? rc2 : rc3;
5423  	if (rc)
5424  		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
5425  	qdio_free(CARD_DDEV(card));
5426  
5427  	/* let userspace know that the device is offline */
5428  	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
5429  
5430  	mutex_unlock(&card->conf_mutex);
5431  	return 0;
5432  }
5433  EXPORT_SYMBOL_GPL(qeth_set_offline);
5434  
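/*
 * Recovery worker: takes the card offline and back online with its
 * current discipline. If the restart fails, the device is left offline
 * and the ccwgroup state is adjusted to match.
 */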
5435  static int qeth_do_reset(void *data)
5436  {
5437  	const struct qeth_discipline *disc;
5438  	struct qeth_card *card = data;
5439  	int rc;
5440  
5441  	/* Lock-free, other users will block until we are done. */
5442  	disc = card->discipline;
5443  
5444  	QETH_CARD_TEXT(card, 2, "recover1");
5445  	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
5446  		return 0;
5447  	QETH_CARD_TEXT(card, 2, "recover2");
5448  	dev_warn(&card->gdev->dev,
5449  		 "A recovery process has been started for the device\n");
5450  
5451  	qeth_set_offline(card, disc, true);
5452  	rc = qeth_set_online(card, disc);
5453  	if (!rc) {
5454  		dev_info(&card->gdev->dev,
5455  			 "Device successfully recovered!\n");
5456  	} else {
5457  		qeth_set_offline(card, disc, true);
5458  		ccwgroup_set_offline(card->gdev, false);
5459  		dev_warn(&card->gdev->dev,
5460  			 "The qeth device driver failed to recover from an error on the device\n");
5461  	}
5462  	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
5463  	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
5464  	return 0;
5465  }
5466  
5467  #if IS_ENABLED(CONFIG_QETH_L3)
5468  static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
5469  				struct qeth_hdr *hdr)
5470  {
5471  	struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
5472  	struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
5473  	struct net_device *dev = skb->dev;
5474  
5475  	if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
5476  		dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
5477  				"FAKELL", skb->len);
5478  		return;
5479  	}
5480  
5481  	if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
5482  		u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
5483  							     ETH_P_IP;
5484  		unsigned char tg_addr[ETH_ALEN];
5485  
5486  		skb_reset_network_header(skb);
5487  		switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
5488  		case QETH_CAST_MULTICAST:
5489  			if (prot == ETH_P_IP)
5490  				ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
5491  			else
5492  				ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
5493  			QETH_CARD_STAT_INC(card, rx_multicast);
5494  			break;
5495  		case QETH_CAST_BROADCAST:
5496  			ether_addr_copy(tg_addr, dev->broadcast);
5497  			QETH_CARD_STAT_INC(card, rx_multicast);
5498  			break;
5499  		default:
5500  			if (card->options.sniffer)
5501  				skb->pkt_type = PACKET_OTHERHOST;
5502  			ether_addr_copy(tg_addr, dev->dev_addr);
5503  		}
5504  
5505  		if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
5506  			dev_hard_header(skb, dev, prot, tg_addr,
5507  					&l3_hdr->next_hop.rx.src_mac, skb->len);
5508  		else
5509  			dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
5510  					skb->len);
5511  	}
5512  
5513  	/* copy VLAN tag from hdr into skb */
5514  	if (!card->options.sniffer &&
5515  	    (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
5516  				  QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
5517  		u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
5518  				l3_hdr->vlan_id :
5519  				l3_hdr->next_hop.rx.vlan_id;
5520  
5521  		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
5522  	}
5523  }
5524  #endif
5525  
5526  static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
5527  			     bool uses_frags, bool is_cso)
5528  {
5529  	struct napi_struct *napi = &card->napi;
5530  
5531  	if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
5532  		skb->ip_summed = CHECKSUM_UNNECESSARY;
5533  		QETH_CARD_STAT_INC(card, rx_skb_csum);
5534  	} else {
5535  		skb->ip_summed = CHECKSUM_NONE;
5536  	}
5537  
5538  	QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
5539  	QETH_CARD_STAT_INC(card, rx_packets);
5540  	if (skb_is_nonlinear(skb)) {
5541  		QETH_CARD_STAT_INC(card, rx_sg_skbs);
5542  		QETH_CARD_STAT_ADD(card, rx_sg_frags,
5543  				   skb_shinfo(skb)->nr_frags);
5544  	}
5545  
5546  	if (uses_frags) {
5547  		napi_gro_frags(napi);
5548  	} else {
5549  		skb->protocol = eth_type_trans(skb, skb->dev);
5550  		napi_gro_receive(napi, skb);
5551  	}
5552  }
5553  
5554  static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
5555  {
5556  	struct page *page = virt_to_page(data);
5557  	unsigned int next_frag;
5558  
5559  	next_frag = skb_shinfo(skb)->nr_frags;
5560  	get_page(page);
5561  	skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
5562  			data_len);
5563  }
5564  
5565  static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
5566  {
5567  	return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
5568  }
5569  
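/*
 * Parse one packet out of the current RX buffer. Small packets are
 * copied into a freshly allocated linear skb; larger ones (or all
 * packets while a CQ is active) keep their payload in page fragments
 * attached to a NAPI frags skb, with only the headers copied linearly.
 */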
5570  static int qeth_extract_skb(struct qeth_card *card,
5571  			    struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
5572  			    int *__offset)
5573  {
5574  	struct qeth_priv *priv = netdev_priv(card->dev);
5575  	struct qdio_buffer *buffer = qethbuffer->buffer;
5576  	struct napi_struct *napi = &card->napi;
5577  	struct qdio_buffer_element *element;
5578  	unsigned int linear_len = 0;
5579  	bool uses_frags = false;
5580  	int offset = *__offset;
5581  	bool use_rx_sg = false;
5582  	unsigned int headroom;
5583  	struct qeth_hdr *hdr;
5584  	struct sk_buff *skb;
5585  	int skb_len = 0;
5586  	bool is_cso;
5587  
5588  	element = &buffer->element[*element_no];
5589  
5590  next_packet:
5591  	/* qeth_hdr must not cross element boundaries */
5592  	while (element->length < offset + sizeof(struct qeth_hdr)) {
5593  		if (qeth_is_last_sbale(element))
5594  			return -ENODATA;
5595  		element++;
5596  		offset = 0;
5597  	}
5598  
5599  	hdr = phys_to_virt(element->addr) + offset;
5600  	offset += sizeof(*hdr);
5601  	skb = NULL;
5602  
5603  	switch (hdr->hdr.l2.id) {
5604  	case QETH_HEADER_TYPE_LAYER2:
5605  		skb_len = hdr->hdr.l2.pkt_length;
5606  		is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
5607  
5608  		linear_len = ETH_HLEN;
5609  		headroom = 0;
5610  		break;
5611  	case QETH_HEADER_TYPE_LAYER3:
5612  		skb_len = hdr->hdr.l3.length;
5613  		is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
5614  
5615  		if (!IS_LAYER3(card)) {
5616  			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5617  			goto walk_packet;
5618  		}
5619  
5620  		if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
5621  			linear_len = ETH_HLEN;
5622  			headroom = 0;
5623  			break;
5624  		}
5625  
5626  		if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
5627  			linear_len = sizeof(struct ipv6hdr);
5628  		else
5629  			linear_len = sizeof(struct iphdr);
5630  		headroom = ETH_HLEN;
5631  		break;
5632  	default:
5633  		if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
5634  			QETH_CARD_STAT_INC(card, rx_frame_errors);
5635  		else
5636  			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5637  
5638  		/* Can't determine packet length, drop the whole buffer. */
5639  		return -EPROTONOSUPPORT;
5640  	}
5641  
5642  	if (skb_len < linear_len) {
5643  		QETH_CARD_STAT_INC(card, rx_dropped_runt);
5644  		goto walk_packet;
5645  	}
5646  
5647  	use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
5648  		    (skb_len > READ_ONCE(priv->rx_copybreak) &&
5649  		     !atomic_read(&card->force_alloc_skb));
5650  
5651  	if (use_rx_sg) {
5652  		/* QETH_CQ_ENABLED only: */
5653  		if (qethbuffer->rx_skb &&
5654  		    skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
5655  			skb = qethbuffer->rx_skb;
5656  			qethbuffer->rx_skb = NULL;
5657  			goto use_skb;
5658  		}
5659  
5660  		skb = napi_get_frags(napi);
5661  		if (!skb) {
5662  			/* -ENOMEM, no point in falling back further. */
5663  			QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5664  			goto walk_packet;
5665  		}
5666  
5667  		if (skb_tailroom(skb) >= linear_len + headroom) {
5668  			uses_frags = true;
5669  			goto use_skb;
5670  		}
5671  
5672  		netdev_info_once(card->dev,
5673  				 "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
5674  				 linear_len + headroom, skb_tailroom(skb));
5675  		/* Shouldn't happen. Don't optimize, fall back to linear skb. */
5676  	}
5677  
5678  	linear_len = skb_len;
5679  	skb = napi_alloc_skb(napi, linear_len + headroom);
5680  	if (!skb) {
5681  		QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5682  		goto walk_packet;
5683  	}
5684  
5685  use_skb:
5686  	if (headroom)
5687  		skb_reserve(skb, headroom);
5688  walk_packet:
5689  	while (skb_len) {
5690  		int data_len = min(skb_len, (int)(element->length - offset));
5691  		char *data = phys_to_virt(element->addr) + offset;
5692  
5693  		skb_len -= data_len;
5694  		offset += data_len;
5695  
5696  		/* Extract data from current element: */
5697  		if (skb && data_len) {
5698  			if (linear_len) {
5699  				unsigned int copy_len;
5700  
5701  				copy_len = min_t(unsigned int, linear_len,
5702  						 data_len);
5703  
5704  				skb_put_data(skb, data, copy_len);
5705  				linear_len -= copy_len;
5706  				data_len -= copy_len;
5707  				data += copy_len;
5708  			}
5709  
5710  			if (data_len)
5711  				qeth_create_skb_frag(skb, data, data_len);
5712  		}
5713  
5714  		/* Step forward to next element: */
5715  		if (skb_len) {
5716  			if (qeth_is_last_sbale(element)) {
5717  				QETH_CARD_TEXT(card, 4, "unexeob");
5718  				QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
5719  				if (skb) {
5720  					if (uses_frags)
5721  						napi_free_frags(napi);
5722  					else
5723  						kfree_skb(skb);
5724  					QETH_CARD_STAT_INC(card,
5725  							   rx_length_errors);
5726  				}
5727  				return -EMSGSIZE;
5728  			}
5729  			element++;
5730  			offset = 0;
5731  		}
5732  	}
5733  
5734  	/* This packet was skipped, go get another one: */
5735  	if (!skb)
5736  		goto next_packet;
5737  
5738  	*element_no = element - &buffer->element[0];
5739  	*__offset = offset;
5740  
5741  #if IS_ENABLED(CONFIG_QETH_L3)
5742  	if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER3)
5743  		qeth_l3_rebuild_skb(card, skb, hdr);
5744  #endif
5745  
5746  	qeth_receive_skb(card, skb, uses_frags, is_cso);
5747  	return 0;
5748  }
5749  
5750  static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
5751  				      struct qeth_qdio_buffer *buf, bool *done)
5752  {
5753  	unsigned int work_done = 0;
5754  
5755  	while (budget) {
5756  		if (qeth_extract_skb(card, buf, &card->rx.buf_element,
5757  				     &card->rx.e_offset)) {
5758  			*done = true;
5759  			break;
5760  		}
5761  
5762  		work_done++;
5763  		budget--;
5764  	}
5765  
5766  	return work_done;
5767  }
5768  
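/*
 * RX NAPI core: fetch completed input buffers from QDIO and extract
 * packets until the budget is spent. Fully processed buffers are
 * returned to the pool and refilled in batches via bufs_refill.
 */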
5769  static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
5770  {
5771  	struct qeth_rx *ctx = &card->rx;
5772  	unsigned int work_done = 0;
5773  
5774  	while (budget > 0) {
5775  		struct qeth_qdio_buffer *buffer;
5776  		unsigned int skbs_done = 0;
5777  		bool done = false;
5778  
5779  		/* Fetch completed RX buffers: */
5780  		if (!card->rx.b_count) {
5781  			card->rx.qdio_err = 0;
5782  			card->rx.b_count =
5783  				qdio_inspect_input_queue(CARD_DDEV(card), 0,
5784  							 &card->rx.b_index,
5785  							 &card->rx.qdio_err);
5786  			if (card->rx.b_count <= 0) {
5787  				card->rx.b_count = 0;
5788  				break;
5789  			}
5790  		}
5791  
5792  		/* Process one completed RX buffer: */
5793  		buffer = &card->qdio.in_q->bufs[card->rx.b_index];
5794  		if (!(card->rx.qdio_err &&
5795  		      qeth_check_qdio_errors(card, buffer->buffer,
5796  					     card->rx.qdio_err, "qinerr")))
5797  			skbs_done = qeth_extract_skbs(card, budget, buffer,
5798  						      &done);
5799  		else
5800  			done = true;
5801  
5802  		work_done += skbs_done;
5803  		budget -= skbs_done;
5804  
5805  		if (done) {
5806  			QETH_CARD_STAT_INC(card, rx_bufs);
5807  			qeth_put_buffer_pool_entry(card, buffer->pool_entry);
5808  			buffer->pool_entry = NULL;
5809  			card->rx.b_count--;
5810  			ctx->bufs_refill++;
5811  			ctx->bufs_refill -= qeth_rx_refill_queue(card,
5812  								 ctx->bufs_refill);
5813  
5814  			/* Step forward to next buffer: */
5815  			card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
5816  			card->rx.buf_element = 0;
5817  			card->rx.e_offset = 0;
5818  		}
5819  	}
5820  
5821  	return work_done;
5822  }
5823  
5824  static void qeth_cq_poll(struct qeth_card *card)
5825  {
5826  	unsigned int work_done = 0;
5827  
5828  	while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
5829  		unsigned int start, error;
5830  		int completed;
5831  
5832  		completed = qdio_inspect_input_queue(CARD_DDEV(card), 1, &start,
5833  						     &error);
5834  		if (completed <= 0)
5835  			return;
5836  
5837  		qeth_qdio_cq_handler(card, error, 1, start, completed);
5838  		work_done += completed;
5839  	}
5840  }
5841  
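/*
 * Main RX NAPI handler: polls RX, kicks the per-queue TX NAPI
 * instances when TX IRQs are in use, services the CQ, and after
 * napi_complete_done() re-enables the QDIO interrupt, re-scheduling
 * itself if more work arrived in the meantime.
 */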
5842  int qeth_poll(struct napi_struct *napi, int budget)
5843  {
5844  	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
5845  	unsigned int work_done;
5846  
5847  	work_done = qeth_rx_poll(card, budget);
5848  
5849  	if (qeth_use_tx_irqs(card)) {
5850  		struct qeth_qdio_out_q *queue;
5851  		unsigned int i;
5852  
5853  		qeth_for_each_output_queue(card, queue, i) {
5854  			if (!qeth_out_queue_is_empty(queue))
5855  				napi_schedule(&queue->napi);
5856  		}
5857  	}
5858  
5859  	if (card->options.cq == QETH_CQ_ENABLED)
5860  		qeth_cq_poll(card);
5861  
5862  	if (budget) {
5863  		struct qeth_rx *ctx = &card->rx;
5864  
5865  		/* Process any substantial refill backlog: */
5866  		ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);
5867  
5868  		/* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
5869  		if (work_done >= budget)
5870  			return work_done;
5871  	}
5872  
5873  	if (napi_complete_done(napi, work_done) &&
5874  	    qdio_start_irq(CARD_DDEV(card)))
5875  		napi_schedule(napi);
5876  
5877  	return work_done;
5878  }
5879  EXPORT_SYMBOL_GPL(qeth_poll);
5880  
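/*
 * TX completion for IQD devices. A buffer flagged with
 * QDIO_ERROR_SLSB_PENDING stays in flight until its QAOB completes:
 * its skbs get a TX_NOTIFY_PENDING notification, the queue slot is
 * re-armed with a fresh buffer, and the old one is parked on
 * pending_bufs. If the QAOB has already completed, the notification is
 * derived from its return code instead.
 */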
5881  static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
5882  				 unsigned int bidx, unsigned int qdio_error,
5883  				 int budget)
5884  {
5885  	struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
5886  	u8 sflags = buffer->buffer->element[15].sflags;
5887  	struct qeth_card *card = queue->card;
5888  	bool error = !!qdio_error;
5889  
5890  	if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
5891  		struct qaob *aob = buffer->aob;
5892  		struct qeth_qaob_priv1 *priv;
5893  		enum iucv_tx_notify notify;
5894  
5895  		if (!aob) {
5896  			netdev_WARN_ONCE(card->dev,
5897  					 "Pending TX buffer %#x without QAOB on TX queue %u\n",
5898  					 bidx, queue->queue_no);
5899  			qeth_schedule_recovery(card);
5900  			return;
5901  		}
5902  
5903  		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
5904  
5905  		priv = (struct qeth_qaob_priv1 *)&aob->user1;
5906  		/* QAOB hasn't completed yet: */
5907  		if (xchg(&priv->state, QETH_QAOB_PENDING) != QETH_QAOB_DONE) {
5908  			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
5909  
5910  			/* Prepare the queue slot for immediate re-use: */
5911  			qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
5912  			if (qeth_alloc_out_buf(queue, bidx, GFP_ATOMIC)) {
5913  				QETH_CARD_TEXT(card, 2, "outofbuf");
5914  				qeth_schedule_recovery(card);
5915  			}
5916  
5917  			list_add(&buffer->list_entry, &queue->pending_bufs);
5918  			/* Skip clearing the buffer: */
5919  			return;
5920  		}
5921  
5922  		/* QAOB already completed: */
5923  		notify = qeth_compute_cq_notification(aob->aorc, 0);
5924  		qeth_notify_skbs(queue, buffer, notify);
5925  		error = !!aob->aorc;
5926  		memset(aob, 0, sizeof(*aob));
5927  	} else if (card->options.cq == QETH_CQ_ENABLED) {
5928  		qeth_notify_skbs(queue, buffer,
5929  				 qeth_compute_cq_notification(sflags, 0));
5930  	}
5931  
5932  	qeth_clear_output_buffer(queue, buffer, error, budget);
5933  }
5934  
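/*
 * TX NAPI poll: reap completed output buffers, update BQL accounting
 * for IQD devices, and wake the txq once it has drained below the
 * stop threshold. The loop yields after a full ring's worth of work to
 * avoid starving other softirq users.
 */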
5935  static int qeth_tx_poll(struct napi_struct *napi, int budget)
5936  {
5937  	struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
5938  	unsigned int queue_no = queue->queue_no;
5939  	struct qeth_card *card = queue->card;
5940  	struct net_device *dev = card->dev;
5941  	unsigned int work_done = 0;
5942  	struct netdev_queue *txq;
5943  
5944  	if (IS_IQD(card))
5945  		txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
5946  	else
5947  		txq = netdev_get_tx_queue(dev, queue_no);
5948  
5949  	while (1) {
5950  		unsigned int start, error, i;
5951  		unsigned int packets = 0;
5952  		unsigned int bytes = 0;
5953  		int completed;
5954  
5955  		qeth_tx_complete_pending_bufs(card, queue, false, budget);
5956  
5957  		if (qeth_out_queue_is_empty(queue)) {
5958  			napi_complete(napi);
5959  			return 0;
5960  		}
5961  
5962  		/* Give the CPU a breather: */
5963  		if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
5964  			QETH_TXQ_STAT_INC(queue, completion_yield);
5965  			if (napi_complete_done(napi, 0))
5966  				napi_schedule(napi);
5967  			return 0;
5968  		}
5969  
5970  		completed = qdio_inspect_output_queue(CARD_DDEV(card), queue_no,
5971  						      &start, &error);
5972  		if (completed <= 0) {
5973  			/* Ensure we see TX completion for pending work: */
5974  			if (napi_complete_done(napi, 0) &&
5975  			    !atomic_read(&queue->set_pci_flags_count))
5976  				qeth_tx_arm_timer(queue, queue->rescan_usecs);
5977  			return 0;
5978  		}
5979  
5980  		for (i = start; i < start + completed; i++) {
5981  			struct qeth_qdio_out_buffer *buffer;
5982  			unsigned int bidx = QDIO_BUFNR(i);
5983  
5984  			buffer = queue->bufs[bidx];
5985  			packets += buffer->frames;
5986  			bytes += buffer->bytes;
5987  
5988  			qeth_handle_send_error(card, buffer, error);
5989  			if (IS_IQD(card))
5990  				qeth_iqd_tx_complete(queue, bidx, error, budget);
5991  			else
5992  				qeth_clear_output_buffer(queue, buffer, error,
5993  							 budget);
5994  		}
5995  
5996  		atomic_sub(completed, &queue->used_buffers);
5997  		work_done += completed;
5998  		if (IS_IQD(card))
5999  			netdev_tx_completed_queue(txq, packets, bytes);
6000  		else
6001  			qeth_check_outbound_queue(queue);
6002  
6003  	/* xmit may have observed the full condition, but not yet
6004  	 * stopped the txq. In that case the wake-up below won't trigger;
6005  	 * before returning, xmit therefore re-checks the txq's fill
6006  	 * level itself and wakes the queue if needed.
6007  	 */
6008  		if (netif_tx_queue_stopped(txq) &&
6009  		    !qeth_out_queue_is_full(queue))
6010  			netif_tx_wake_queue(txq);
6011  	}
6012  }
6013  
6014  static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
6015  {
6016  	if (!cmd->hdr.return_code)
6017  		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
6018  	return cmd->hdr.return_code;
6019  }
6020  
6021  static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
6022  					struct qeth_reply *reply,
6023  					unsigned long data)
6024  {
6025  	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6026  	struct qeth_ipa_caps *caps = reply->param;
6027  
6028  	if (qeth_setassparms_inspect_rc(cmd))
6029  		return -EIO;
6030  
6031  	caps->supported = cmd->data.setassparms.data.caps.supported;
6032  	caps->enabled = cmd->data.setassparms.data.caps.enabled;
6033  	return 0;
6034  }
6035  
6036  int qeth_setassparms_cb(struct qeth_card *card,
6037  			struct qeth_reply *reply, unsigned long data)
6038  {
6039  	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6040  
6041  	QETH_CARD_TEXT(card, 4, "defadpcb");
6042  
6043  	if (cmd->hdr.return_code)
6044  		return -EIO;
6045  
6046  	cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
6047  	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
6048  		card->options.ipa4.enabled = cmd->hdr.assists.enabled;
6049  	if (cmd->hdr.prot_version == QETH_PROT_IPV6)
6050  		card->options.ipa6.enabled = cmd->hdr.assists.enabled;
6051  	return 0;
6052  }
6053  EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
6054  
6055  struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
6056  						 enum qeth_ipa_funcs ipa_func,
6057  						 u16 cmd_code,
6058  						 unsigned int data_length,
6059  						 enum qeth_prot_versions prot)
6060  {
6061  	struct qeth_ipacmd_setassparms *setassparms;
6062  	struct qeth_ipacmd_setassparms_hdr *hdr;
6063  	struct qeth_cmd_buffer *iob;
6064  
6065  	QETH_CARD_TEXT(card, 4, "getasscm");
6066  	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
6067  				 data_length +
6068  				 offsetof(struct qeth_ipacmd_setassparms,
6069  					  data));
6070  	if (!iob)
6071  		return NULL;
6072  
6073  	setassparms = &__ipa_cmd(iob)->data.setassparms;
6074  	setassparms->assist_no = ipa_func;
6075  
6076  	hdr = &setassparms->hdr;
6077  	hdr->length = sizeof(*hdr) + data_length;
6078  	hdr->command_code = cmd_code;
6079  	return iob;
6080  }
6081  EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
6082  
6083  int qeth_send_simple_setassparms_prot(struct qeth_card *card,
6084  				      enum qeth_ipa_funcs ipa_func,
6085  				      u16 cmd_code, u32 *data,
6086  				      enum qeth_prot_versions prot)
6087  {
6088  	unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
6089  	struct qeth_cmd_buffer *iob;
6090  
6091  	QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
6092  	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
6093  	if (!iob)
6094  		return -ENOMEM;
6095  
6096  	if (data)
6097  		__ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
6098  	return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
6099  }
6100  EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
6101  
6102  static void qeth_unregister_dbf_views(void)
6103  {
6104  	int x;
6105  
6106  	for (x = 0; x < QETH_DBF_INFOS; x++) {
6107  		debug_unregister(qeth_dbf[x].id);
6108  		qeth_dbf[x].id = NULL;
6109  	}
6110  }
6111  
6112  void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
6113  {
6114  	char dbf_txt_buf[32];
6115  	va_list args;
6116  
6117  	if (!debug_level_enabled(id, level))
6118  		return;
6119  	va_start(args, fmt);
6120  	vscnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
6121  	va_end(args);
6122  	debug_text_event(id, level, dbf_txt_buf);
6123  }
6124  EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
6125  
6126  static int qeth_register_dbf_views(void)
6127  {
6128  	int ret;
6129  	int x;
6130  
6131  	for (x = 0; x < QETH_DBF_INFOS; x++) {
6132  		/* register the areas */
6133  		qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
6134  						qeth_dbf[x].pages,
6135  						qeth_dbf[x].areas,
6136  						qeth_dbf[x].len);
6137  		if (qeth_dbf[x].id == NULL) {
6138  			qeth_unregister_dbf_views();
6139  			return -ENOMEM;
6140  		}
6141  
6142  		/* register a view */
6143  		ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
6144  		if (ret) {
6145  			qeth_unregister_dbf_views();
6146  			return ret;
6147  		}
6148  
6149  		/* set a passing level */
6150  		debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
6151  	}
6152  
6153  	return 0;
6154  }
6155  
6156  static DEFINE_MUTEX(qeth_mod_mutex);	/* for synchronized module loading */
6157  
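/*
 * Bind the card to the layer-2 or layer-3 discipline, loading the
 * qeth_l2/qeth_l3 module on demand. symbol_get() pins the module until
 * qeth_remove_discipline() drops the reference again.
 */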
6158  int qeth_setup_discipline(struct qeth_card *card,
6159  			  enum qeth_discipline_id discipline)
6160  {
6161  	int rc;
6162  
6163  	mutex_lock(&qeth_mod_mutex);
6164  	switch (discipline) {
6165  	case QETH_DISCIPLINE_LAYER3:
6166  		card->discipline = try_then_request_module(
6167  			symbol_get(qeth_l3_discipline), "qeth_l3");
6168  		break;
6169  	case QETH_DISCIPLINE_LAYER2:
6170  		card->discipline = try_then_request_module(
6171  			symbol_get(qeth_l2_discipline), "qeth_l2");
6172  		break;
6173  	default:
6174  		break;
6175  	}
6176  	mutex_unlock(&qeth_mod_mutex);
6177  
6178  	if (!card->discipline) {
6179  		dev_err(&card->gdev->dev, "There is no kernel module to "
6180  			"support discipline %d\n", discipline);
6181  		return -EINVAL;
6182  	}
6183  
6184  	rc = card->discipline->setup(card->gdev);
6185  	if (rc) {
6186  		if (discipline == QETH_DISCIPLINE_LAYER2)
6187  			symbol_put(qeth_l2_discipline);
6188  		else
6189  			symbol_put(qeth_l3_discipline);
6190  		card->discipline = NULL;
6191  
6192  		return rc;
6193  	}
6194  
6195  	card->options.layer = discipline;
6196  	return 0;
6197  }
6198  
6199  void qeth_remove_discipline(struct qeth_card *card)
6200  {
6201  	card->discipline->remove(card->gdev);
6202  
6203  	if (IS_LAYER2(card))
6204  		symbol_put(qeth_l2_discipline);
6205  	else
6206  		symbol_put(qeth_l3_discipline);
6207  	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
6208  	card->discipline = NULL;
6209  }
6210  
6211  static const struct device_type qeth_generic_devtype = {
6212  	.name = "qeth_generic",
6213  };
6214  
6215  #define DBF_NAME_LEN	20
6216  
6217  struct qeth_dbf_entry {
6218  	char dbf_name[DBF_NAME_LEN];
6219  	debug_info_t *dbf_info;
6220  	struct list_head dbf_list;
6221  };
6222  
6223  static LIST_HEAD(qeth_dbf_list);
6224  static DEFINE_MUTEX(qeth_dbf_list_mutex);
6225  
6226  static debug_info_t *qeth_get_dbf_entry(char *name)
6227  {
6228  	struct qeth_dbf_entry *entry;
6229  	debug_info_t *rc = NULL;
6230  
6231  	mutex_lock(&qeth_dbf_list_mutex);
6232  	list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
6233  		if (strcmp(entry->dbf_name, name) == 0) {
6234  			rc = entry->dbf_info;
6235  			break;
6236  		}
6237  	}
6238  	mutex_unlock(&qeth_dbf_list_mutex);
6239  	return rc;
6240  }
6241  
6242  static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
6243  {
6244  	struct qeth_dbf_entry *new_entry;
6245  
6246  	card->debug = debug_register(name, 2, 1, 8);
6247  	if (!card->debug) {
6248  		QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
6249  		goto err;
6250  	}
6251  	if (debug_register_view(card->debug, &debug_hex_ascii_view))
6252  		goto err_dbg;
6253  	new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
6254  	if (!new_entry)
6255  		goto err_dbg;
6256  	strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
6257  	new_entry->dbf_info = card->debug;
6258  	mutex_lock(&qeth_dbf_list_mutex);
6259  	list_add(&new_entry->dbf_list, &qeth_dbf_list);
6260  	mutex_unlock(&qeth_dbf_list_mutex);
6261  
6262  	return 0;
6263  
6264  err_dbg:
6265  	debug_unregister(card->debug);
6266  err:
6267  	return -ENOMEM;
6268  }
6269  
6270  static void qeth_clear_dbf_list(void)
6271  {
6272  	struct qeth_dbf_entry *entry, *tmp;
6273  
6274  	mutex_lock(&qeth_dbf_list_mutex);
6275  	list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
6276  		list_del(&entry->dbf_list);
6277  		debug_unregister(entry->dbf_info);
6278  		kfree(entry);
6279  	}
6280  	mutex_unlock(&qeth_dbf_list_mutex);
6281  }
6282  
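/*
 * Allocate the net_device matching the card type: HiperSockets
 * ("hsi%d") and OSA devices are multi-queue capable, while OSM is
 * fixed to a single TX queue. The MTU limits stay at 0 here and are
 * initialized when the device first goes online.
 */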
6283  static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
6284  {
6285  	struct net_device *dev;
6286  	struct qeth_priv *priv;
6287  
6288  	switch (card->info.type) {
6289  	case QETH_CARD_TYPE_IQD:
6290  		dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
6291  				       ether_setup, QETH_MAX_OUT_QUEUES, 1);
6292  		break;
6293  	case QETH_CARD_TYPE_OSM:
6294  		dev = alloc_etherdev(sizeof(*priv));
6295  		break;
6296  	default:
6297  		dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
6298  	}
6299  
6300  	if (!dev)
6301  		return NULL;
6302  
6303  	priv = netdev_priv(dev);
6304  	priv->rx_copybreak = QETH_RX_COPYBREAK;
6305  	priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1;
6306  
6307  	dev->ml_priv = card;
6308  	dev->watchdog_timeo = QETH_TX_TIMEOUT;
6309  	dev->min_mtu = 576;
6310  	 /* initialized when device first goes online: */
6311  	dev->max_mtu = 0;
6312  	dev->mtu = 0;
6313  	SET_NETDEV_DEV(dev, &card->gdev->dev);
6314  	netif_carrier_off(dev);
6315  
6316  	dev->ethtool_ops = &qeth_ethtool_ops;
6317  	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
6318  	dev->hw_features |= NETIF_F_SG;
6319  	dev->vlan_features |= NETIF_F_SG;
6320  	if (IS_IQD(card))
6321  		dev->features |= NETIF_F_SG;
6322  
6323  	return dev;
6324  }
6325  
6326  struct net_device *qeth_clone_netdev(struct net_device *orig)
6327  {
6328  	struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);
6329  
6330  	if (!clone)
6331  		return NULL;
6332  
6333  	clone->dev_port = orig->dev_port;
6334  	return clone;
6335  }
6336  
6337  static int qeth_core_probe_device(struct ccwgroup_device *gdev)
6338  {
6339  	struct qeth_card *card;
6340  	struct device *dev;
6341  	int rc;
6342  	enum qeth_discipline_id enforced_disc;
6343  	char dbf_name[DBF_NAME_LEN];
6344  
6345  	QETH_DBF_TEXT(SETUP, 2, "probedev");
6346  
6347  	dev = &gdev->dev;
6348  	if (!get_device(dev))
6349  		return -ENODEV;
6350  
6351  	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
6352  
6353  	card = qeth_alloc_card(gdev);
6354  	if (!card) {
6355  		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
6356  		rc = -ENOMEM;
6357  		goto err_dev;
6358  	}
6359  
6360  	scnprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
6361  		  dev_name(&gdev->dev));
6362  	card->debug = qeth_get_dbf_entry(dbf_name);
6363  	if (!card->debug) {
6364  		rc = qeth_add_dbf_entry(card, dbf_name);
6365  		if (rc)
6366  			goto err_card;
6367  	}
6368  
6369  	qeth_setup_card(card);
6370  	card->dev = qeth_alloc_netdev(card);
6371  	if (!card->dev) {
6372  		rc = -ENOMEM;
6373  		goto err_card;
6374  	}
6375  
6376  	qeth_determine_capabilities(card);
6377  	qeth_set_blkt_defaults(card);
6378  
6379  	card->qdio.in_q = qeth_alloc_qdio_queue();
6380  	if (!card->qdio.in_q) {
6381  		rc = -ENOMEM;
6382  		goto err_rx_queue;
6383  	}
6384  
6385  	card->qdio.no_out_queues = card->dev->num_tx_queues;
6386  	rc = qeth_update_from_chp_desc(card);
6387  	if (rc)
6388  		goto err_chp_desc;
6389  
6390  	gdev->dev.groups = qeth_dev_groups;
6391  
6392  	enforced_disc = qeth_enforce_discipline(card);
6393  	switch (enforced_disc) {
6394  	case QETH_DISCIPLINE_UNDETERMINED:
6395  		gdev->dev.type = &qeth_generic_devtype;
6396  		break;
6397  	default:
6398  		card->info.layer_enforced = true;
6399  		/* It's so early that we don't need the discipline_mutex yet. */
6400  		rc = qeth_setup_discipline(card, enforced_disc);
6401  		if (rc)
6402  			goto err_setup_disc;
6403  
6404  		break;
6405  	}
6406  
6407  	return 0;
6408  
6409  err_setup_disc:
6410  err_chp_desc:
6411  	qeth_free_qdio_queue(card->qdio.in_q);
6412  err_rx_queue:
6413  	free_netdev(card->dev);
6414  err_card:
6415  	qeth_core_free_card(card);
6416  err_dev:
6417  	put_device(dev);
6418  	return rc;
6419  }
6420  
6421  static void qeth_core_remove_device(struct ccwgroup_device *gdev)
6422  {
6423  	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6424  
6425  	QETH_CARD_TEXT(card, 2, "removedv");
6426  
6427  	mutex_lock(&card->discipline_mutex);
6428  	if (card->discipline)
6429  		qeth_remove_discipline(card);
6430  	mutex_unlock(&card->discipline_mutex);
6431  
6432  	qeth_free_qdio_queues(card);
6433  
6434  	qeth_free_qdio_queue(card->qdio.in_q);
6435  	free_netdev(card->dev);
6436  	qeth_core_free_card(card);
6437  	put_device(&gdev->dev);
6438  }
6439  
6440  static int qeth_core_set_online(struct ccwgroup_device *gdev)
6441  {
6442  	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6443  	int rc = 0;
6444  	enum qeth_discipline_id def_discipline;
6445  
6446  	mutex_lock(&card->discipline_mutex);
6447  	if (!card->discipline) {
6448  		def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
6449  						QETH_DISCIPLINE_LAYER2;
6450  		rc = qeth_setup_discipline(card, def_discipline);
6451  		if (rc)
6452  			goto err;
6453  	}
6454  
6455  	rc = qeth_set_online(card, card->discipline);
6456  
6457  err:
6458  	mutex_unlock(&card->discipline_mutex);
6459  	return rc;
6460  }
6461  
6462  static int qeth_core_set_offline(struct ccwgroup_device *gdev)
6463  {
6464  	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6465  	int rc;
6466  
6467  	mutex_lock(&card->discipline_mutex);
6468  	rc = qeth_set_offline(card, card->discipline, false);
6469  	mutex_unlock(&card->discipline_mutex);
6470  
6471  	return rc;
6472  }
6473  
6474  static void qeth_core_shutdown(struct ccwgroup_device *gdev)
6475  {
6476  	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6477  
6478  	qeth_set_allowed_threads(card, 0, 1);
6479  	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
6480  		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
6481  	qeth_qdio_clear_card(card, 0);
6482  	qeth_drain_output_queues(card);
6483  	qdio_free(CARD_DDEV(card));
6484  }
6485  
6486  static ssize_t group_store(struct device_driver *ddrv, const char *buf,
6487  			   size_t count)
6488  {
6489  	int err;
6490  
6491  	err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
6492  				  buf);
6493  
6494  	return err ? err : count;
6495  }
6496  static DRIVER_ATTR_WO(group);
6497  
6498  static struct attribute *qeth_drv_attrs[] = {
6499  	&driver_attr_group.attr,
6500  	NULL,
6501  };
6502  static struct attribute_group qeth_drv_attr_group = {
6503  	.attrs = qeth_drv_attrs,
6504  };
6505  static const struct attribute_group *qeth_drv_attr_groups[] = {
6506  	&qeth_drv_attr_group,
6507  	NULL,
6508  };
6509  
6510  static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
6511  	.driver = {
6512  		.groups = qeth_drv_attr_groups,
6513  		.owner = THIS_MODULE,
6514  		.name = "qeth",
6515  	},
6516  	.ccw_driver = &qeth_ccw_driver,
6517  	.setup = qeth_core_probe_device,
6518  	.remove = qeth_core_remove_device,
6519  	.set_online = qeth_core_set_online,
6520  	.set_offline = qeth_core_set_offline,
6521  	.shutdown = qeth_core_shutdown,
6522  };
6523  
6524  int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
6525  {
6526  	struct qeth_card *card = dev->ml_priv;
6527  	int rc = 0;
6528  
6529  	switch (cmd) {
6530  	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
6531  		rc = qeth_snmp_command(card, data);
6532  		break;
6533  	case SIOC_QETH_GET_CARD_TYPE:
6534  		if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
6535  		    !IS_VM_NIC(card))
6536  			return 1;
6537  		return 0;
6538  	case SIOC_QETH_QUERY_OAT:
6539  		rc = qeth_query_oat_command(card, data);
6540  		break;
6541  	default:
6542  		rc = -EOPNOTSUPP;
6543  	}
6544  	if (rc)
6545  		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
6546  	return rc;
6547  }
6548  EXPORT_SYMBOL_GPL(qeth_siocdevprivate);
6549  
6550  int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6551  {
6552  	struct qeth_card *card = dev->ml_priv;
6553  	struct mii_ioctl_data *mii_data;
6554  	int rc = 0;
6555  
6556  	switch (cmd) {
6557  	case SIOCGMIIPHY:
6558  		mii_data = if_mii(rq);
6559  		mii_data->phy_id = 0;
6560  		break;
6561  	case SIOCGMIIREG:
6562  		mii_data = if_mii(rq);
6563  		if (mii_data->phy_id != 0)
6564  			rc = -EINVAL;
6565  		else
6566  			mii_data->val_out = qeth_mdio_read(dev,
6567  				mii_data->phy_id, mii_data->reg_num);
6568  		break;
6569  	default:
6570  		return -EOPNOTSUPP;
6571  	}
6572  	if (rc)
6573  		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
6574  	return rc;
6575  }
6576  EXPORT_SYMBOL_GPL(qeth_do_ioctl);
6577  
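/* IPA callback for IPA_CMD_ASS_START: extract the checksum-offload
 * features that the card reports as available.
 */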
static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
			      unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u32 *features = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	*features = cmd->data.setassparms.data.flags_32bit;
	return 0;
}

static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			     enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
						 NULL, prot);
}

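/* Enable checksum offload via a three-step IPA handshake:
 * 1. ASS_START to learn which checksum features the card offers,
 * 2. ASS_ENABLE with the feature bits we require,
 * 3. verify from the returned caps that they are supported and enabled.
 * On any failure the assist is stopped again, so HW and stack stay in sync.
 */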
static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			    enum qeth_prot_versions prot, u8 *lp2lp)
{
	u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	u32 features;
	int rc;

	/* some L3 HW requires combined L3+L4 csum offload: */
	if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
	    cstype == IPA_OUTBOUND_CHECKSUM)
		required_features |= QETH_IPA_CHECKSUM_IP_HDR;

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
				       prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
	if (rc)
		return rc;

	if ((required_features & features) != required_features) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(flags_32bit),
				       prot);
	if (!iob) {
		qeth_set_csum_off(card, cstype, prot);
		return -ENOMEM;
	}

	if (features & QETH_IPA_CHECKSUM_LP2LP)
		required_features |= QETH_IPA_CHECKSUM_LP2LP;
	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_csum_off(card, cstype, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, required_features) ||
	    !qeth_ipa_caps_enabled(&caps, required_features)) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);

	if (lp2lp)
		*lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);

	return 0;
}

static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
			     enum qeth_prot_versions prot, u8 *lp2lp)
{
	return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
		    qeth_set_csum_off(card, cstype, prot);
}

static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
			     unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_tso_start_data *tso_data = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	tso_data->mss = cmd->data.setassparms.data.tso.mss;
	tso_data->supported = cmd->data.setassparms.data.tso.supported;
	return 0;
}

static int qeth_set_tso_off(struct qeth_card *card,
			    enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
						 IPA_CMD_ASS_STOP, NULL, prot);
}

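/* Enable TSO with the same START/ENABLE/verify handshake as checksum
 * offload; additionally requires the card to report a non-zero MSS.
 */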
static int qeth_set_tso_on(struct qeth_card *card,
			   enum qeth_prot_versions prot)
{
	struct qeth_tso_start_data tso_data;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	int rc;

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_START, 0, prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
	if (rc)
		return rc;

	if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(caps), prot);
	if (!iob) {
		qeth_set_tso_off(card, prot);
		return -ENOMEM;
	}

	/* enable TSO capability */
	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
		QETH_IPA_LARGE_SEND_TCP;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_tso_off(card, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
		 tso_data.mss);
	return 0;
}

static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
			    enum qeth_prot_versions prot)
{
	return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
}

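/* RX checksum offload covers both IP versions, but the card may implement
 * only one of the two assists. Combine the per-version results so that
 * enabling succeeds if at least one assist is active, while disabling
 * fails if either assist could not be stopped.
 */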
static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
{
	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
	int rc_ipv6;

	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
					    QETH_PROT_IPV4, NULL);
	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		/* no IPv6 Offload Assist available, so rc_ipv4 alone decides: */
		return rc_ipv4;

	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
				    QETH_PROT_IPV6, NULL);

	if (on)
		/* enable: success if any Assist is active */
		return (rc_ipv6) ? rc_ipv4 : 0;

	/* disable: failure if any Assist is still active */
	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}

/**
 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
 * @dev:	a net_device
 */
void qeth_enable_hw_features(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t features;

	features = dev->features;
	/* force-off any feature that might need an IPA sequence.
	 * netdev_update_features() will restart them.
	 */
	dev->features &= ~dev->hw_features;
	/* toggle VLAN filter, so that VIDs are re-programmed: */
	if (IS_LAYER2(card) && IS_VM_NIC(card)) {
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
		dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}
	netdev_update_features(dev);
	if (features != dev->features)
		dev_warn(&card->gdev->dev,
			 "Device recovery failed to restore all offload features\n");
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);

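/* Offloads that cannot reach a local (looped-back) next hop depend on the
 * cached list of local addresses. When the last such offload for an IP
 * version gets switched off, that version's cached addresses can be
 * flushed.
 */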
static void qeth_check_restricted_features(struct qeth_card *card,
					   netdev_features_t changed,
					   netdev_features_t actual)
{
	netdev_features_t ipv6_features = NETIF_F_TSO6;
	netdev_features_t ipv4_features = NETIF_F_TSO;

	if (!card->info.has_lp2lp_cso_v6)
		ipv6_features |= NETIF_F_IPV6_CSUM;
	if (!card->info.has_lp2lp_cso_v4)
		ipv4_features |= NETIF_F_IP_CSUM;

	if ((changed & ipv6_features) && !(actual & ipv6_features))
		qeth_flush_local_addrs6(card);
	if ((changed & ipv4_features) && !(actual & ipv4_features))
		qeth_flush_local_addrs4(card);
}

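/* .ndo_set_features: program each toggled offload into the card via IPA.
 * 'changed' starts out as the requested delta; each feature whose IPA
 * command fails is XOR-ed back out, so at the end 'changed' holds the
 * delta that was actually applied to dev->features.
 */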
int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t changed = dev->features ^ features;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "setfeat");
	QETH_CARD_HEX(card, 2, &features, sizeof(features));

	if ((changed & NETIF_F_IP_CSUM)) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
				       &card->info.has_lp2lp_cso_v4);
		if (rc)
			changed ^= NETIF_F_IP_CSUM;
	}
	if (changed & NETIF_F_IPV6_CSUM) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
				       &card->info.has_lp2lp_cso_v6);
		if (rc)
			changed ^= NETIF_F_IPV6_CSUM;
	}
	if (changed & NETIF_F_RXCSUM) {
		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
		if (rc)
			changed ^= NETIF_F_RXCSUM;
	}
	if (changed & NETIF_F_TSO) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
				      QETH_PROT_IPV4);
		if (rc)
			changed ^= NETIF_F_TSO;
	}
	if (changed & NETIF_F_TSO6) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
				      QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_TSO6;
	}

	qeth_check_restricted_features(card, dev->features ^ features,
				       dev->features ^ changed);

	/* everything changed successfully? */
	if ((dev->features ^ features) == changed)
		return 0;
	/* something went wrong. save changed features and return error */
	dev->features ^= changed;
	return -EIO;
}
EXPORT_SYMBOL_GPL(qeth_set_features);

netdev_features_t qeth_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 2, "fixfeat");
	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
		features &= ~NETIF_F_IP_CSUM;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_IPV6_CSUM;
	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_RXCSUM;
	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO6;

	QETH_CARD_HEX(card, 2, &features, sizeof(features));
	return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);

netdev_features_t qeth_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	/* Traffic with local next-hop is not eligible for some offloads: */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
		netdev_features_t restricted = 0;

		if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
			restricted |= NETIF_F_ALL_TSO;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			if (!card->info.has_lp2lp_cso_v4)
				restricted |= NETIF_F_IP_CSUM;

			if (restricted && qeth_next_hop_is_local_v4(card, skb))
				features &= ~restricted;
			break;
		case htons(ETH_P_IPV6):
			if (!card->info.has_lp2lp_cso_v6)
				restricted |= NETIF_F_IPV6_CSUM;

			if (restricted && qeth_next_hop_is_local_v6(card, skb))
				features &= ~restricted;
			break;
		default:
			break;
		}
	}

	/* GSO segmentation builds skbs with
	 *	a (small) linear part for the headers, and
	 *	page frags for the data.
	 * Compared to a linear skb, the header-only part consumes an
	 * additional buffer element. This reduces buffer utilization, and
	 * hurts throughput. So compress small segments into one element.
	 */
	if (netif_needs_gso(skb, features)) {
		/* match skb_segment(): */
		unsigned int doffset = skb->data - skb_mac_header(skb);
		unsigned int hsize = skb_shinfo(skb)->gso_size;
		unsigned int hroom = skb_headroom(skb);

		/* linearize only if resulting skb allocations are order-0: */
		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
			features &= ~NETIF_F_SG;
	}

	return vlan_features_check(skb, features);
}
EXPORT_SYMBOL_GPL(qeth_features_check);

void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "getstat");

	stats->rx_packets = card->stats.rx_packets;
	stats->rx_bytes = card->stats.rx_bytes;
	stats->rx_errors = card->stats.rx_length_errors +
			   card->stats.rx_frame_errors +
			   card->stats.rx_fifo_errors;
	stats->rx_dropped = card->stats.rx_dropped_nomem +
			    card->stats.rx_dropped_notsupp +
			    card->stats.rx_dropped_runt;
	stats->multicast = card->stats.rx_multicast;
	stats->rx_length_errors = card->stats.rx_length_errors;
	stats->rx_frame_errors = card->stats.rx_frame_errors;
	stats->rx_fifo_errors = card->stats.rx_fifo_errors;

	for (i = 0; i < card->qdio.no_out_queues; i++) {
		queue = card->qdio.out_qs[i];

		stats->tx_packets += queue->stats.tx_packets;
		stats->tx_bytes += queue->stats.tx_bytes;
		stats->tx_errors += queue->stats.tx_errors;
		stats->tx_dropped += queue->stats.tx_dropped;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);

#define TC_IQD_UCAST   0
static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
				     unsigned int ucast_txqs)
{
	unsigned int prio;

	/* IQD requires mcast traffic to be placed on a dedicated queue, and
	 * qeth_iqd_select_queue() deals with this.
	 * For unicast traffic, we defer the queue selection to the stack.
	 * By installing a trivial prio map that spans over only the unicast
	 * queues, we can encourage the stack to spread the ucast traffic evenly
	 * without selecting the mcast queue.
	 */

	/* One traffic class, spanning over all active ucast queues: */
	netdev_set_num_tc(dev, 1);
	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
			    QETH_IQD_MIN_UCAST_TXQ);

	/* Map all priorities to this traffic class: */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
}

int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
{
	struct net_device *dev = card->dev;
	int rc;

	/* Per netif_setup_tc(), adjust the mapping first: */
	if (IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, count - 1);

	rc = netif_set_real_num_tx_queues(dev, count);

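	/* on failure, roll the TC mapping back to the active queue count: */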
	if (rc && IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues);

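/* IQD TX queue selection: all non-unicast traffic goes to the dedicated
 * mcast queue; unicast traffic is spread by the stack across the remaining
 * queues, remapping any pick of the mcast queue to the first ucast queue.
 */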
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
			  u8 cast_type, struct net_device *sb_dev)
{
	u16 txq;

	if (cast_type != RTN_UNICAST)
		return QETH_IQD_MCAST_TXQ;
	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
		return QETH_IQD_MIN_UCAST_TXQ;

	txq = netdev_pick_tx(dev, skb, sb_dev);
	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);

u16 qeth_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
			  struct net_device *sb_dev)
{
	struct qeth_card *card = dev->ml_priv;

	if (qeth_uses_tx_prio_queueing(card))
		return qeth_get_priority_queue(card, skb);

	return netdev_pick_tx(dev, skb, sb_dev);
}
EXPORT_SYMBOL_GPL(qeth_osa_select_queue);

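/* .ndo_open: mark the data channel as up, start all TX queues and bring
 * the RX and per-queue TX NAPI instances online.
 */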
int qeth_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 4, "qethopen");

	card->data.state = CH_STATE_UP;
	netif_tx_start_all_queues(dev);

	local_bh_disable();
	qeth_for_each_output_queue(card, queue, i) {
		netif_napi_add_tx(dev, &queue->napi, qeth_tx_poll);
		napi_enable(&queue->napi);
		napi_schedule(&queue->napi);
	}

	napi_enable(&card->napi);
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);

int qeth_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 4, "qethstop");

	napi_disable(&card->napi);
	cancel_delayed_work_sync(&card->buffer_reclaim_work);
	qdio_stop_irq(CARD_DDEV(card));

	/* Quiesce the NAPI instances: */
	qeth_for_each_output_queue(card, queue, i)
		napi_disable(&queue->napi);

	/* Stop .ndo_start_xmit, might still access queue->napi. */
	netif_tx_disable(dev);

	qeth_for_each_output_queue(card, queue, i) {
		del_timer_sync(&queue->timer);
		/* Queues may get re-allocated, so remove the NAPIs. */
		netif_napi_del(&queue->napi);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);

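/* Module init: register debug views, the root device, the slab caches and
 * the CCW/ccwgroup drivers. The error labels unwind in exact reverse order
 * of the setup steps above them.
 */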
static int __init qeth_core_init(void)
{
	int rc;

	pr_info("loading core functions\n");

	qeth_debugfs_root = debugfs_create_dir("qeth", NULL);

	rc = qeth_register_dbf_views();
	if (rc)
		goto dbf_err;
	qeth_core_root_dev = root_device_register("qeth");
	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
	if (rc)
		goto register_err;
	qeth_core_header_cache =
		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
				  0, NULL);
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}
	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
	if (!qeth_qdio_outbuf_cache) {
		rc = -ENOMEM;
		goto cqslab_err;
	}

	qeth_qaob_cache = kmem_cache_create("qeth_qaob",
					    sizeof(struct qaob),
					    sizeof(struct qaob),
					    0, NULL);
	if (!qeth_qaob_cache) {
		rc = -ENOMEM;
		goto qaob_err;
	}

	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;

	return 0;

ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	kmem_cache_destroy(qeth_qaob_cache);
qaob_err:
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
cqslab_err:
	kmem_cache_destroy(qeth_core_header_cache);
slab_err:
	root_device_unregister(qeth_core_root_dev);
register_err:
	qeth_unregister_dbf_views();
dbf_err:
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_err("Initializing the qeth device driver failed\n");
	return rc;
}

static void __exit qeth_core_exit(void)
{
	qeth_clear_dbf_list();
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	kmem_cache_destroy(qeth_qaob_cache);
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
	kmem_cache_destroy(qeth_core_header_cache);
	root_device_unregister(qeth_core_root_dev);
	qeth_unregister_dbf_views();
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_info("core functions removed\n");
}

module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");