1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * rio_cm - RapidIO Channelized Messaging Driver
4 *
5 * Copyright 2013-2016 Integrated Device Technology, Inc.
6 * Copyright (c) 2015, Prodrive Technologies
7 * Copyright (c) 2015, RapidIO Trade Association
8 */
9
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/delay.h>
14 #include <linux/sched.h>
15 #include <linux/rio.h>
16 #include <linux/rio_drv.h>
17 #include <linux/slab.h>
18 #include <linux/idr.h>
19 #include <linux/interrupt.h>
20 #include <linux/cdev.h>
21 #include <linux/fs.h>
22 #include <linux/poll.h>
23 #include <linux/reboot.h>
24 #include <linux/bitops.h>
25 #include <linux/printk.h>
26 #include <linux/rio_cm_cdev.h>
27
28 #define DRV_NAME "rio_cm"
29 #define DRV_VERSION "1.0.0"
30 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>"
31 #define DRV_DESC "RapidIO Channelized Messaging Driver"
32 #define DEV_NAME "rio_cm"
33
34 /* Debug output filtering masks */
35 enum {
36 DBG_NONE = 0,
37 DBG_INIT = BIT(0), /* driver init */
38 DBG_EXIT = BIT(1), /* driver exit */
39 DBG_MPORT = BIT(2), /* mport add/remove */
40 DBG_RDEV = BIT(3), /* RapidIO device add/remove */
41 DBG_CHOP = BIT(4), /* channel operations */
42 DBG_WAIT = BIT(5), /* waiting for events */
43 DBG_TX = BIT(6), /* message TX */
44 DBG_TX_EVENT = BIT(7), /* message TX event */
45 DBG_RX_DATA = BIT(8), /* inbound data messages */
46 DBG_RX_CMD = BIT(9), /* inbound REQ/ACK/NACK messages */
47 DBG_ALL = ~0,
48 };
49
50 #ifdef DEBUG
51 #define riocm_debug(level, fmt, arg...) \
52 do { \
53 if (DBG_##level & dbg_level) \
54 pr_debug(DRV_NAME ": %s " fmt "\n", \
55 __func__, ##arg); \
56 } while (0)
57 #else
58 #define riocm_debug(level, fmt, arg...) \
59 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg)
60 #endif
61
62 #define riocm_warn(fmt, arg...) \
63 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg)
64
65 #define riocm_error(fmt, arg...) \
66 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg)
67
68
69 static int cmbox = 1;
70 module_param(cmbox, int, S_IRUGO);
71 MODULE_PARM_DESC(cmbox, "RapidIO Mailbox number (default 1)");
72
73 static int chstart = 256;
74 module_param(chstart, int, S_IRUGO);
75 MODULE_PARM_DESC(chstart,
76 "Start channel number for dynamic allocation (default 256)");
77
78 #ifdef DEBUG
79 static u32 dbg_level = DBG_NONE;
80 module_param(dbg_level, uint, S_IWUSR | S_IRUGO);
81 MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
82 #endif
83
84 MODULE_AUTHOR(DRV_AUTHOR);
85 MODULE_DESCRIPTION(DRV_DESC);
86 MODULE_LICENSE("GPL");
87 MODULE_VERSION(DRV_VERSION);
88
89 #define RIOCM_TX_RING_SIZE 128
90 #define RIOCM_RX_RING_SIZE 128
91 #define RIOCM_CONNECT_TO 3 /* connect response timeout (in seconds) */
92
93 #define RIOCM_MAX_CHNUM 0xffff /* Use full range of u16 field */
94 #define RIOCM_CHNUM_AUTO 0
95 #define RIOCM_MAX_EP_COUNT 0x10000 /* Max number of endpoints */
96
97 enum rio_cm_state {
98 RIO_CM_IDLE,
99 RIO_CM_CONNECT,
100 RIO_CM_CONNECTED,
101 RIO_CM_DISCONNECT,
102 RIO_CM_CHAN_BOUND,
103 RIO_CM_LISTEN,
104 RIO_CM_DESTROYING,
105 };
106
107 enum rio_cm_pkt_type {
108 RIO_CM_SYS = 0xaa,
109 RIO_CM_CHAN = 0x55,
110 };
111
112 enum rio_cm_chop {
113 CM_CONN_REQ,
114 CM_CONN_ACK,
115 CM_CONN_CLOSE,
116 CM_DATA_MSG,
117 };
118
119 struct rio_ch_base_bhdr {
120 u32 src_id;
121 u32 dst_id;
122 #define RIO_HDR_LETTER_MASK 0xffff0000
123 #define RIO_HDR_MBOX_MASK 0x0000ffff
124 u8 src_mbox;
125 u8 dst_mbox;
126 u8 type;
127 } __attribute__((__packed__));
128
129 struct rio_ch_chan_hdr {
130 struct rio_ch_base_bhdr bhdr;
131 u8 ch_op;
132 u16 dst_ch;
133 u16 src_ch;
134 u16 msg_len;
135 u16 rsrvd;
136 } __attribute__((__packed__));
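/*
 * Note: multi-byte fields in the packet headers above are carried in
 * big-endian (network) byte order; senders fill them with htonl()/htons()
 * and receivers decode them with ntohl()/ntohs().
 */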
137
138 struct tx_req {
139 struct list_head node;
140 struct rio_dev *rdev;
141 void *buffer;
142 size_t len;
143 };
144
145 struct cm_dev {
146 struct list_head list;
147 struct rio_mport *mport;
148 void *rx_buf[RIOCM_RX_RING_SIZE];
149 int rx_slots;
150 struct mutex rx_lock;
151
152 void *tx_buf[RIOCM_TX_RING_SIZE];
153 int tx_slot;
154 int tx_cnt;
155 int tx_ack_slot;
156 struct list_head tx_reqs;
157 spinlock_t tx_lock;
158
159 struct list_head peers;
160 u32 npeers;
161 struct workqueue_struct *rx_wq;
162 struct work_struct rx_work;
163 };
164
165 struct chan_rx_ring {
166 void *buf[RIOCM_RX_RING_SIZE];
167 int head;
168 int tail;
169 int count;
170
171 /* Tracking RX buffers reported to upper level */
172 void *inuse[RIOCM_RX_RING_SIZE];
173 int inuse_cnt;
174 };
175
176 struct rio_channel {
177 u16 id; /* local channel ID */
178 struct kref ref; /* channel refcount */
179 struct file *filp;
180 struct cm_dev *cmdev; /* associated CM device object */
181 struct rio_dev *rdev; /* remote RapidIO device */
182 enum rio_cm_state state;
183 int error;
184 spinlock_t lock;
185 void *context;
186 u32 loc_destid; /* local destID */
187 u32 rem_destid; /* remote destID */
188 u16 rem_channel; /* remote channel ID */
189 struct list_head accept_queue;
190 struct list_head ch_node;
191 struct completion comp;
192 struct completion comp_close;
193 struct chan_rx_ring rx_ring;
194 };
195
196 struct cm_peer {
197 struct list_head node;
198 struct rio_dev *rdev;
199 };
200
201 struct rio_cm_work {
202 struct work_struct work;
203 struct cm_dev *cm;
204 void *data;
205 };
206
207 struct conn_req {
208 struct list_head node;
209 u32 destid; /* requester destID */
210 u16 chan; /* requester channel ID */
211 struct cm_dev *cmdev;
212 };
213
214 /*
215 * A channel_dev structure represents a CM_CDEV
216 * @cdev Character device
217 * @dev Associated device object
218 */
219 struct channel_dev {
220 struct cdev cdev;
221 struct device *dev;
222 };
223
224 static struct rio_channel *riocm_ch_alloc(u16 ch_num);
225 static void riocm_ch_free(struct kref *ref);
226 static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev,
227 void *buffer, size_t len);
228 static int riocm_ch_close(struct rio_channel *ch);
229
230 static DEFINE_SPINLOCK(idr_lock);
231 static DEFINE_IDR(ch_idr);
232
233 static LIST_HEAD(cm_dev_list);
234 static DECLARE_RWSEM(rdev_sem);
235
236 static struct class *dev_class;
237 static unsigned int dev_major;
238 static unsigned int dev_minor_base;
239 static dev_t dev_number;
240 static struct channel_dev riocm_cdev;
241
242 #define is_msg_capable(src_ops, dst_ops) \
243 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
244 (dst_ops & RIO_DST_OPS_DATA_MSG))
245 #define dev_cm_capable(dev) \
246 is_msg_capable(dev->src_ops, dev->dst_ops)
247
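/*
 * riocm_cmp - atomically test the current state of a channel
 * @ch: channel object
 * @cmp: state to compare against
 *
 * Returns: non-zero if the channel is in the given state, 0 otherwise.
 */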
248 static int riocm_cmp(struct rio_channel *ch, enum rio_cm_state cmp)
249 {
250 int ret;
251
252 spin_lock_bh(&ch->lock);
253 ret = (ch->state == cmp);
254 spin_unlock_bh(&ch->lock);
255 return ret;
256 }
257
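/*
 * riocm_cmp_exch - atomically switch channel state if it matches the expected one
 * @ch: channel object
 * @cmp: expected current state
 * @exch: new state to set when the current state matches @cmp
 *
 * Returns: non-zero if the exchange was performed, 0 otherwise.
 */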
258 static int riocm_cmp_exch(struct rio_channel *ch,
259 enum rio_cm_state cmp, enum rio_cm_state exch)
260 {
261 int ret;
262
263 spin_lock_bh(&ch->lock);
264 ret = (ch->state == cmp);
265 if (ret)
266 ch->state = exch;
267 spin_unlock_bh(&ch->lock);
268 return ret;
269 }
270
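/*
 * riocm_exch - unconditionally set a new channel state
 * @ch: channel object
 * @exch: new state
 *
 * Returns: the previous channel state.
 */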
271 static enum rio_cm_state riocm_exch(struct rio_channel *ch,
272 enum rio_cm_state exch)
273 {
274 enum rio_cm_state old;
275
276 spin_lock_bh(&ch->lock);
277 old = ch->state;
278 ch->state = exch;
279 spin_unlock_bh(&ch->lock);
280 return old;
281 }
282
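/*
 * riocm_get_channel - find a channel by ID and take a reference to it
 * @nr: channel ID
 *
 * Returns: pointer to the channel with its refcount incremented, or NULL if
 * no channel with the given ID exists. The caller must release the reference
 * with riocm_put_channel().
 */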
283 static struct rio_channel *riocm_get_channel(u16 nr)
284 {
285 struct rio_channel *ch;
286
287 spin_lock_bh(&idr_lock);
288 ch = idr_find(&ch_idr, nr);
289 if (ch)
290 kref_get(&ch->ref);
291 spin_unlock_bh(&idr_lock);
292 return ch;
293 }
294
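/*
 * riocm_put_channel - release a channel reference taken by riocm_get_channel
 * @ch: channel object
 *
 * The channel is released via riocm_ch_free() when the last reference is dropped.
 */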
295 static void riocm_put_channel(struct rio_channel *ch)
296 {
297 kref_put(&ch->ref, riocm_ch_free);
298 }
299
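/*
 * riocm_rx_get_msg - fetch the next inbound message from the CM mailbox
 * @cm: cm_dev object
 *
 * Clears the matching entry in the RX ring bookkeeping so that the freed
 * slot can later be refilled by riocm_rx_fill().
 *
 * Returns: pointer to the message buffer, or NULL if no message is pending.
 */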
300 static void *riocm_rx_get_msg(struct cm_dev *cm)
301 {
302 void *msg;
303 int i;
304
305 msg = rio_get_inb_message(cm->mport, cmbox);
306 if (msg) {
307 for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
308 if (cm->rx_buf[i] == msg) {
309 cm->rx_buf[i] = NULL;
310 cm->rx_slots++;
311 break;
312 }
313 }
314
315 if (i == RIOCM_RX_RING_SIZE)
316 riocm_warn("no record for buffer 0x%p", msg);
317 }
318
319 return msg;
320 }
321
322 /*
323 * riocm_rx_fill - fills a ring of receive buffers for given cm device
324 * @cm: cm_dev object
325 * @nent: max number of entries to fill
326 *
327 * Returns: none
328 */
329 static void riocm_rx_fill(struct cm_dev *cm, int nent)
330 {
331 int i;
332
333 if (cm->rx_slots == 0)
334 return;
335
336 for (i = 0; i < RIOCM_RX_RING_SIZE && cm->rx_slots && nent; i++) {
337 if (cm->rx_buf[i] == NULL) {
338 cm->rx_buf[i] = kmalloc(RIO_MAX_MSG_SIZE, GFP_KERNEL);
339 if (cm->rx_buf[i] == NULL)
340 break;
341 rio_add_inb_buffer(cm->mport, cmbox, cm->rx_buf[i]);
342 cm->rx_slots--;
343 nent--;
344 }
345 }
346 }
347
348 /*
349 * riocm_rx_free - frees all receive buffers associated with given cm device
350 * @cm: cm_dev object
351 *
352 * Returns: none
353 */
354 static void riocm_rx_free(struct cm_dev *cm)
355 {
356 int i;
357
358 for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
359 if (cm->rx_buf[i] != NULL) {
360 kfree(cm->rx_buf[i]);
361 cm->rx_buf[i] = NULL;
362 }
363 }
364 }
365
366 /*
367 * riocm_req_handler - connection request handler
368 * @cm: cm_dev object
369 * @req_data: pointer to the request packet
370 *
371 * Returns: 0 if success, or
372 * -EINVAL if channel is not in correct state,
373 * -ENODEV if cannot find a channel with specified ID,
374 * -ENOMEM if unable to allocate memory to store the request
375 */
376 static int riocm_req_handler(struct cm_dev *cm, void *req_data)
377 {
378 struct rio_channel *ch;
379 struct conn_req *req;
380 struct rio_ch_chan_hdr *hh = req_data;
381 u16 chnum;
382
383 chnum = ntohs(hh->dst_ch);
384
385 ch = riocm_get_channel(chnum);
386
387 if (!ch)
388 return -ENODEV;
389
390 if (ch->state != RIO_CM_LISTEN) {
391 riocm_debug(RX_CMD, "channel %d is not in listen state", chnum);
392 riocm_put_channel(ch);
393 return -EINVAL;
394 }
395
396 req = kzalloc(sizeof(*req), GFP_KERNEL);
397 if (!req) {
398 riocm_put_channel(ch);
399 return -ENOMEM;
400 }
401
402 req->destid = ntohl(hh->bhdr.src_id);
403 req->chan = ntohs(hh->src_ch);
404 req->cmdev = cm;
405
406 spin_lock_bh(&ch->lock);
407 list_add_tail(&req->node, &ch->accept_queue);
408 spin_unlock_bh(&ch->lock);
409 complete(&ch->comp);
410 riocm_put_channel(ch);
411
412 return 0;
413 }
414
415 /*
416 * riocm_resp_handler - response to connection request handler
417 * @resp_data: pointer to the response packet
418 *
419 * Returns: 0 if success, or
420 * -EINVAL if channel is not in correct state,
421 * -ENODEV if cannot find a channel with specified ID,
422 */
423 static int riocm_resp_handler(void *resp_data)
424 {
425 struct rio_channel *ch;
426 struct rio_ch_chan_hdr *hh = resp_data;
427 u16 chnum;
428
429 chnum = ntohs(hh->dst_ch);
430 ch = riocm_get_channel(chnum);
431 if (!ch)
432 return -ENODEV;
433
434 if (ch->state != RIO_CM_CONNECT) {
435 riocm_put_channel(ch);
436 return -EINVAL;
437 }
438
439 riocm_exch(ch, RIO_CM_CONNECTED);
440 ch->rem_channel = ntohs(hh->src_ch);
441 complete(&ch->comp);
442 riocm_put_channel(ch);
443
444 return 0;
445 }
446
447 /*
448 * riocm_close_handler - channel close request handler
449 * @req_data: pointer to the request packet
450 *
451 * Returns: 0 if success, or
452 * -ENODEV if cannot find a channel with specified ID.
453 * Errors returned by riocm_ch_close() are logged but not propagated.
454 */
455 static int riocm_close_handler(void *data)
456 {
457 struct rio_channel *ch;
458 struct rio_ch_chan_hdr *hh = data;
459 int ret;
460
461 riocm_debug(RX_CMD, "for ch=%d", ntohs(hh->dst_ch));
462
463 spin_lock_bh(&idr_lock);
464 ch = idr_find(&ch_idr, ntohs(hh->dst_ch));
465 if (!ch) {
466 spin_unlock_bh(&idr_lock);
467 return -ENODEV;
468 }
469 idr_remove(&ch_idr, ch->id);
470 spin_unlock_bh(&idr_lock);
471
472 riocm_exch(ch, RIO_CM_DISCONNECT);
473
474 ret = riocm_ch_close(ch);
475 if (ret)
476 riocm_debug(RX_CMD, "riocm_ch_close() returned %d", ret);
477
478 return 0;
479 }
480
481 /*
482 * rio_cm_handler - function that services request (non-data) packets
483 * @cm: cm_dev object
484 * @data: pointer to the packet
485 */
486 static void rio_cm_handler(struct cm_dev *cm, void *data)
487 {
488 struct rio_ch_chan_hdr *hdr;
489
490 if (!rio_mport_is_running(cm->mport))
491 goto out;
492
493 hdr = data;
494
495 riocm_debug(RX_CMD, "OP=%x for ch=%d from %d",
496 hdr->ch_op, ntohs(hdr->dst_ch), ntohs(hdr->src_ch));
497
498 switch (hdr->ch_op) {
499 case CM_CONN_REQ:
500 riocm_req_handler(cm, data);
501 break;
502 case CM_CONN_ACK:
503 riocm_resp_handler(data);
504 break;
505 case CM_CONN_CLOSE:
506 riocm_close_handler(data);
507 break;
508 default:
509 riocm_error("Invalid packet header");
510 break;
511 }
512 out:
513 kfree(data);
514 }
515
516 /*
517 * rio_rx_data_handler - received data packet handler
518 * @cm: cm_dev object
519 * @buf: data packet
520 *
521 * Returns: 0 if success, or
522 * -ENODEV if cannot find a channel with specified ID,
523 * -EIO if channel is not in CONNECTED state,
524 * -ENOMEM if channel RX queue is full (packet discarded)
525 */
526 static int rio_rx_data_handler(struct cm_dev *cm, void *buf)
527 {
528 struct rio_ch_chan_hdr *hdr;
529 struct rio_channel *ch;
530
531 hdr = buf;
532
533 riocm_debug(RX_DATA, "for ch=%d", ntohs(hdr->dst_ch));
534
535 ch = riocm_get_channel(ntohs(hdr->dst_ch));
536 if (!ch) {
537 /* Discard data message for non-existing channel */
538 kfree(buf);
539 return -ENODEV;
540 }
541
542 /* Place pointer to the buffer into channel's RX queue */
543 spin_lock(&ch->lock);
544
545 if (ch->state != RIO_CM_CONNECTED) {
546 /* Channel is not ready to receive data, discard a packet */
547 riocm_debug(RX_DATA, "ch=%d is in wrong state=%d",
548 ch->id, ch->state);
549 spin_unlock(&ch->lock);
550 kfree(buf);
551 riocm_put_channel(ch);
552 return -EIO;
553 }
554
555 if (ch->rx_ring.count == RIOCM_RX_RING_SIZE) {
556 /* If RX ring is full, discard a packet */
557 riocm_debug(RX_DATA, "ch=%d is full", ch->id);
558 spin_unlock(&ch->lock);
559 kfree(buf);
560 riocm_put_channel(ch);
561 return -ENOMEM;
562 }
563
564 ch->rx_ring.buf[ch->rx_ring.head] = buf;
565 ch->rx_ring.head++;
566 ch->rx_ring.count++;
567 ch->rx_ring.head %= RIOCM_RX_RING_SIZE;
568
569 complete(&ch->comp);
570
571 spin_unlock(&ch->lock);
572 riocm_put_channel(ch);
573
574 return 0;
575 }
576
577 /*
578 * rio_ibmsg_handler - inbound message packet handler
579 */
580 static void rio_ibmsg_handler(struct work_struct *work)
581 {
582 struct cm_dev *cm = container_of(work, struct cm_dev, rx_work);
583 void *data;
584 struct rio_ch_chan_hdr *hdr;
585
586 if (!rio_mport_is_running(cm->mport))
587 return;
588
589 while (1) {
590 mutex_lock(&cm->rx_lock);
591 data = riocm_rx_get_msg(cm);
592 if (data)
593 riocm_rx_fill(cm, 1);
594 mutex_unlock(&cm->rx_lock);
595
596 if (data == NULL)
597 break;
598
599 hdr = data;
600
601 if (hdr->bhdr.type != RIO_CM_CHAN) {
602 /* For now simply discard packets other than channel */
603 riocm_error("Unsupported TYPE code (0x%x). Msg dropped",
604 hdr->bhdr.type);
605 kfree(data);
606 continue;
607 }
608
609 /* Process a channel message */
610 if (hdr->ch_op == CM_DATA_MSG)
611 rio_rx_data_handler(cm, data);
612 else
613 rio_cm_handler(cm, data);
614 }
615 }
616
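/*
 * riocm_inb_msg_event - inbound message arrival notification callback
 * @mport: mport device that received the message
 * @dev_id: pointer to the registered cm_dev object
 * @mbox: inbound mailbox number
 * @slot: mailbox slot (unused here)
 *
 * Schedules the RX worker (rio_ibmsg_handler) unless it is already pending.
 */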
617 static void riocm_inb_msg_event(struct rio_mport *mport, void *dev_id,
618 int mbox, int slot)
619 {
620 struct cm_dev *cm = dev_id;
621
622 if (rio_mport_is_running(cm->mport) && !work_pending(&cm->rx_work))
623 queue_work(cm->rx_wq, &cm->rx_work);
624 }
625
626 /*
627 * rio_txcq_handler - TX completion handler
628 * @cm: cm_dev object
629 * @slot: TX queue slot
630 *
631 * TX completion handler also ensures that pending request packets are placed
632 * into transmit queue as soon as a free slot becomes available. This is done
633 * to give higher priority to request packets during high intensity data flow.
634 */
635 static void rio_txcq_handler(struct cm_dev *cm, int slot)
636 {
637 int ack_slot;
638
639 /* ATTN: Add TX completion notification if/when direct buffer
640 * transfer is implemented. At this moment only correct tracking
641 * of tx_count is important.
642 */
643 riocm_debug(TX_EVENT, "for mport_%d slot %d tx_cnt %d",
644 cm->mport->id, slot, cm->tx_cnt);
645
646 spin_lock(&cm->tx_lock);
647 ack_slot = cm->tx_ack_slot;
648
649 if (ack_slot == slot)
650 riocm_debug(TX_EVENT, "slot == ack_slot");
651
652 while (cm->tx_cnt && ((ack_slot != slot) ||
653 (cm->tx_cnt == RIOCM_TX_RING_SIZE))) {
654
655 cm->tx_buf[ack_slot] = NULL;
656 ++ack_slot;
657 ack_slot &= (RIOCM_TX_RING_SIZE - 1);
658 cm->tx_cnt--;
659 }
660
661 if (cm->tx_cnt < 0 || cm->tx_cnt > RIOCM_TX_RING_SIZE)
662 riocm_error("tx_cnt %d out of sync", cm->tx_cnt);
663
664 WARN_ON((cm->tx_cnt < 0) || (cm->tx_cnt > RIOCM_TX_RING_SIZE));
665
666 cm->tx_ack_slot = ack_slot;
667
668 /*
669 * If there are pending requests, insert them into transmit queue
670 */
671 if (!list_empty(&cm->tx_reqs) && (cm->tx_cnt < RIOCM_TX_RING_SIZE)) {
672 struct tx_req *req, *_req;
673 int rc;
674
675 list_for_each_entry_safe(req, _req, &cm->tx_reqs, node) {
676 list_del(&req->node);
677 cm->tx_buf[cm->tx_slot] = req->buffer;
678 rc = rio_add_outb_message(cm->mport, req->rdev, cmbox,
679 req->buffer, req->len);
680 kfree(req->buffer);
681 kfree(req);
682
683 ++cm->tx_cnt;
684 ++cm->tx_slot;
685 cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1);
686 if (cm->tx_cnt == RIOCM_TX_RING_SIZE)
687 break;
688 }
689 }
690
691 spin_unlock(&cm->tx_lock);
692 }
693
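/*
 * riocm_outb_msg_event - outbound message completion notification callback
 * @mport: mport device that sent the message
 * @dev_id: pointer to the registered cm_dev object
 * @mbox: outbound mailbox number
 * @slot: completed TX queue slot
 *
 * Forwards the completion to rio_txcq_handler() if the mport is still running.
 */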
694 static void riocm_outb_msg_event(struct rio_mport *mport, void *dev_id,
695 int mbox, int slot)
696 {
697 struct cm_dev *cm = dev_id;
698
699 if (cm && rio_mport_is_running(cm->mport))
700 rio_txcq_handler(cm, slot);
701 }
702
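/*
 * riocm_queue_req - queue a request packet for deferred transmission
 * @cm: cm_dev object
 * @rdev: target RapidIO device
 * @buffer: packet buffer (ownership passes to the TX request list)
 * @len: packet length
 *
 * Used when the TX ring is full; queued requests are sent from
 * rio_txcq_handler() when free slots become available.
 *
 * Returns: 0 on success, or -ENOMEM if the request cannot be allocated.
 */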
703 static int riocm_queue_req(struct cm_dev *cm, struct rio_dev *rdev,
704 void *buffer, size_t len)
705 {
706 unsigned long flags;
707 struct tx_req *treq;
708
709 treq = kzalloc(sizeof(*treq), GFP_KERNEL);
710 if (treq == NULL)
711 return -ENOMEM;
712
713 treq->rdev = rdev;
714 treq->buffer = buffer;
715 treq->len = len;
716
717 spin_lock_irqsave(&cm->tx_lock, flags);
718 list_add_tail(&treq->node, &cm->tx_reqs);
719 spin_unlock_irqrestore(&cm->tx_lock, flags);
720 return 0;
721 }
722
723 /*
724 * riocm_post_send - helper function that places packet into msg TX queue
725 * @cm: cm_dev object
726 * @rdev: target RapidIO device object (required by outbound msg interface)
727 * @buffer: pointer to a packet buffer to send
728 * @len: length of data to transfer
729 * @req: request priority flag
730 *
731 * Returns: 0 if success, or error code otherwise.
732 */
733 static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev,
734 void *buffer, size_t len)
735 {
736 int rc;
737 unsigned long flags;
738
739 spin_lock_irqsave(&cm->tx_lock, flags);
740
741 if (cm->mport == NULL) {
742 rc = -ENODEV;
743 goto err_out;
744 }
745
746 if (cm->tx_cnt == RIOCM_TX_RING_SIZE) {
747 riocm_debug(TX, "Tx Queue is full");
748 rc = -EBUSY;
749 goto err_out;
750 }
751
752 cm->tx_buf[cm->tx_slot] = buffer;
753 rc = rio_add_outb_message(cm->mport, rdev, cmbox, buffer, len);
754
755 riocm_debug(TX, "Add buf@%p destid=%x tx_slot=%d tx_cnt=%d",
756 buffer, rdev->destid, cm->tx_slot, cm->tx_cnt);
757
758 ++cm->tx_cnt;
759 ++cm->tx_slot;
760 cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1);
761
762 err_out:
763 spin_unlock_irqrestore(&cm->tx_lock, flags);
764 return rc;
765 }
766
767 /*
768 * riocm_ch_send - sends a data packet to a remote device
769 * @ch_id: local channel ID
770 * @buf: pointer to a data buffer to send (including CM header)
771 * @len: length of data to transfer (including CM header)
772 *
773 * ATTN: ASSUMES THAT THE HEADER SPACE IS RESERVED PART OF THE DATA PACKET
774 *
775 * Returns: 0 if success, or
776 * -EINVAL if one or more input parameters is/are not valid,
777 * -ENODEV if cannot find a channel with specified ID,
778 * -EAGAIN if a channel is not in CONNECTED state,
779 * + error codes returned by HW send routine.
780 */
781 static int riocm_ch_send(u16 ch_id, void *buf, int len)
782 {
783 struct rio_channel *ch;
784 struct rio_ch_chan_hdr *hdr;
785 int ret;
786
787 if (buf == NULL || ch_id == 0 || len == 0 || len > RIO_MAX_MSG_SIZE)
788 return -EINVAL;
789
790 ch = riocm_get_channel(ch_id);
791 if (!ch) {
792 riocm_error("%s(%d) ch_%d not found", current->comm,
793 task_pid_nr(current), ch_id);
794 return -ENODEV;
795 }
796
797 if (!riocm_cmp(ch, RIO_CM_CONNECTED)) {
798 ret = -EAGAIN;
799 goto err_out;
800 }
801
802 /*
803 * Fill buffer header section with corresponding channel data
804 */
805 hdr = buf;
806
807 hdr->bhdr.src_id = htonl(ch->loc_destid);
808 hdr->bhdr.dst_id = htonl(ch->rem_destid);
809 hdr->bhdr.src_mbox = cmbox;
810 hdr->bhdr.dst_mbox = cmbox;
811 hdr->bhdr.type = RIO_CM_CHAN;
812 hdr->ch_op = CM_DATA_MSG;
813 hdr->dst_ch = htons(ch->rem_channel);
814 hdr->src_ch = htons(ch->id);
815 hdr->msg_len = htons((u16)len);
816
817 /* ATTN: the function call below relies on the fact that underlying
818 * HW-specific add_outb_message() routine copies TX data into its own
819 * internal transfer buffer (true for all RIONET compatible mport
820 * drivers). Must be reviewed if mport driver uses the buffer directly.
821 */
822
823 ret = riocm_post_send(ch->cmdev, ch->rdev, buf, len);
824 if (ret)
825 riocm_debug(TX, "ch %d send_err=%d", ch->id, ret);
826 err_out:
827 riocm_put_channel(ch);
828 return ret;
829 }
830
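/*
 * riocm_ch_free_rxbuf - return a receive buffer previously handed to a user
 * @ch: channel that owns the buffer
 * @buf: buffer obtained from riocm_ch_receive()
 *
 * Returns: 0 if the buffer was found in the in-use table and freed,
 * or -EINVAL if it does not belong to this channel.
 */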
831 static int riocm_ch_free_rxbuf(struct rio_channel *ch, void *buf)
832 {
833 int i, ret = -EINVAL;
834
835 spin_lock_bh(&ch->lock);
836
837 for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
838 if (ch->rx_ring.inuse[i] == buf) {
839 ch->rx_ring.inuse[i] = NULL;
840 ch->rx_ring.inuse_cnt--;
841 ret = 0;
842 break;
843 }
844 }
845
846 spin_unlock_bh(&ch->lock);
847
848 if (!ret)
849 kfree(buf);
850
851 return ret;
852 }
853
854 /*
855 * riocm_ch_receive - fetch a data packet received for the specified channel
856 * @ch: local channel ID
857 * @buf: pointer to a packet buffer
858 * @timeout: timeout to wait for incoming packet (in jiffies)
859 *
860 * Returns: 0 and valid buffer pointer if success, or NULL pointer and one of:
861 * -EAGAIN if a channel is not in CONNECTED state,
862 * -ENOMEM if in-use tracking queue is full,
863 * -ETIME if wait timeout expired,
864 * -EINTR if wait was interrupted, or -ECONNRESET if the channel was closed while waiting.
865 */
866 static int riocm_ch_receive(struct rio_channel *ch, void **buf, long timeout)
867 {
868 void *rxmsg = NULL;
869 int i, ret = 0;
870 long wret;
871
872 if (!riocm_cmp(ch, RIO_CM_CONNECTED)) {
873 ret = -EAGAIN;
874 goto out;
875 }
876
877 if (ch->rx_ring.inuse_cnt == RIOCM_RX_RING_SIZE) {
878 /* If we do not have entries to track buffers given to upper
879 * layer, reject request.
880 */
881 ret = -ENOMEM;
882 goto out;
883 }
884
885 wret = wait_for_completion_interruptible_timeout(&ch->comp, timeout);
886
887 riocm_debug(WAIT, "wait on %d returned %ld", ch->id, wret);
888
889 if (!wret)
890 ret = -ETIME;
891 else if (wret == -ERESTARTSYS)
892 ret = -EINTR;
893 else
894 ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -ECONNRESET;
895
896 if (ret)
897 goto out;
898
899 spin_lock_bh(&ch->lock);
900
901 rxmsg = ch->rx_ring.buf[ch->rx_ring.tail];
902 ch->rx_ring.buf[ch->rx_ring.tail] = NULL;
903 ch->rx_ring.count--;
904 ch->rx_ring.tail++;
905 ch->rx_ring.tail %= RIOCM_RX_RING_SIZE;
906 ret = -ENOMEM;
907
908 for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
909 if (ch->rx_ring.inuse[i] == NULL) {
910 ch->rx_ring.inuse[i] = rxmsg;
911 ch->rx_ring.inuse_cnt++;
912 ret = 0;
913 break;
914 }
915 }
916
917 if (ret) {
918 /* We have no entry to store pending message: drop it */
919 kfree(rxmsg);
920 rxmsg = NULL;
921 }
922
923 spin_unlock_bh(&ch->lock);
924 out:
925 *buf = rxmsg;
926 return ret;
927 }
928
929 /*
930 * riocm_ch_connect - sends a connect request to a remote device
931 * @loc_ch: local channel ID
932 * @cm: CM device to send connect request
933 * @peer: target RapidIO device
934 * @rem_ch: remote channel ID
935 *
936 * Returns: 0 if success, or
937 * -ENODEV if cannot find a channel with specified ID,
938 * -EINVAL if the channel is not in IDLE state,
939 * -ETIME if ACK response timeout expired,
940 * -EINTR if wait for response was interrupted.
941 */
942 static int riocm_ch_connect(u16 loc_ch, struct cm_dev *cm,
943 struct cm_peer *peer, u16 rem_ch)
944 {
945 struct rio_channel *ch = NULL;
946 struct rio_ch_chan_hdr *hdr;
947 int ret;
948 long wret;
949
950 ch = riocm_get_channel(loc_ch);
951 if (!ch)
952 return -ENODEV;
953
954 if (!riocm_cmp_exch(ch, RIO_CM_IDLE, RIO_CM_CONNECT)) {
955 ret = -EINVAL;
956 goto conn_done;
957 }
958
959 ch->cmdev = cm;
960 ch->rdev = peer->rdev;
961 ch->context = NULL;
962 ch->loc_destid = cm->mport->host_deviceid;
963 ch->rem_channel = rem_ch;
964
965 /*
966 * Send connect request to the remote RapidIO device
967 */
968
969 hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
970 if (hdr == NULL) {
971 ret = -ENOMEM;
972 goto conn_done;
973 }
974
975 hdr->bhdr.src_id = htonl(ch->loc_destid);
976 hdr->bhdr.dst_id = htonl(peer->rdev->destid);
977 hdr->bhdr.src_mbox = cmbox;
978 hdr->bhdr.dst_mbox = cmbox;
979 hdr->bhdr.type = RIO_CM_CHAN;
980 hdr->ch_op = CM_CONN_REQ;
981 hdr->dst_ch = htons(rem_ch);
982 hdr->src_ch = htons(loc_ch);
983
984 /* ATTN: the function call below relies on the fact that underlying
985 * HW-specific add_outb_message() routine copies TX data into its
986 * internal transfer buffer. Must be reviewed if mport driver uses
987 * this buffer directly.
988 */
989 ret = riocm_post_send(cm, peer->rdev, hdr, sizeof(*hdr));
990
991 if (ret != -EBUSY) {
992 kfree(hdr);
993 } else {
994 ret = riocm_queue_req(cm, peer->rdev, hdr, sizeof(*hdr));
995 if (ret)
996 kfree(hdr);
997 }
998
999 if (ret) {
1000 riocm_cmp_exch(ch, RIO_CM_CONNECT, RIO_CM_IDLE);
1001 goto conn_done;
1002 }
1003
1004 /* Wait for connect response from the remote device */
1005 wret = wait_for_completion_interruptible_timeout(&ch->comp,
1006 RIOCM_CONNECT_TO * HZ);
1007 riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret);
1008
1009 if (!wret)
1010 ret = -ETIME;
1011 else if (wret == -ERESTARTSYS)
1012 ret = -EINTR;
1013 else
1014 ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -1;
1015
1016 conn_done:
1017 riocm_put_channel(ch);
1018 return ret;
1019 }
1020
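/*
 * riocm_send_ack - send a connection request acknowledgment (CM_CONN_ACK)
 * @ch: channel accepting the connection
 *
 * If the TX ring is full the ACK packet is queued for deferred transmission.
 *
 * Returns: 0 if the packet was sent or queued, or an error code otherwise.
 */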
1021 static int riocm_send_ack(struct rio_channel *ch)
1022 {
1023 struct rio_ch_chan_hdr *hdr;
1024 int ret;
1025
1026 hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
1027 if (hdr == NULL)
1028 return -ENOMEM;
1029
1030 hdr->bhdr.src_id = htonl(ch->loc_destid);
1031 hdr->bhdr.dst_id = htonl(ch->rem_destid);
1032 hdr->dst_ch = htons(ch->rem_channel);
1033 hdr->src_ch = htons(ch->id);
1034 hdr->bhdr.src_mbox = cmbox;
1035 hdr->bhdr.dst_mbox = cmbox;
1036 hdr->bhdr.type = RIO_CM_CHAN;
1037 hdr->ch_op = CM_CONN_ACK;
1038
1039 /* ATTN: the function call below relies on the fact that underlying
1040 * add_outb_message() routine copies TX data into its internal transfer
1041 * buffer. Review if switching to direct buffer version.
1042 */
1043 ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr));
1044
1045 if (ret == -EBUSY && !riocm_queue_req(ch->cmdev,
1046 ch->rdev, hdr, sizeof(*hdr)))
1047 return 0;
1048 kfree(hdr);
1049
1050 if (ret)
1051 riocm_error("send ACK to ch_%d on %s failed (ret=%d)",
1052 ch->id, rio_name(ch->rdev), ret);
1053 return ret;
1054 }
1055
1056 /*
1057 * riocm_ch_accept - accept incoming connection request
1058 * @ch_id: channel ID
1059 * @new_ch_id: pointer to storage for the ID of the newly created channel
1060 * @timeout: wait timeout in jiffies (if 0, the call is non-blocking and does not
1061 * wait for a connection request to become available).
1062 *
1063 * Returns: pointer to new channel struct if success, or error-valued pointer:
1064 * -ENODEV - cannot find specified channel or mport,
1065 * -EINVAL - the channel is not in LISTEN state,
1066 * -EAGAIN - no connection request available immediately (timeout=0),
1067 * -ENOMEM - unable to allocate new channel,
1068 * -ETIME - wait timeout expired,
1069 * -EINTR - wait was interrupted.
1070 */
1071 static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
1072 long timeout)
1073 {
1074 struct rio_channel *ch;
1075 struct rio_channel *new_ch;
1076 struct conn_req *req;
1077 struct cm_peer *peer;
1078 int found = 0;
1079 int err = 0;
1080 long wret;
1081
1082 ch = riocm_get_channel(ch_id);
1083 if (!ch)
1084 return ERR_PTR(-EINVAL);
1085
1086 if (!riocm_cmp(ch, RIO_CM_LISTEN)) {
1087 err = -EINVAL;
1088 goto err_put;
1089 }
1090
1091 /* Don't sleep if this is a non blocking call */
1092 if (!timeout) {
1093 if (!try_wait_for_completion(&ch->comp)) {
1094 err = -EAGAIN;
1095 goto err_put;
1096 }
1097 } else {
1098 riocm_debug(WAIT, "on %d", ch->id);
1099
1100 wret = wait_for_completion_interruptible_timeout(&ch->comp,
1101 timeout);
1102 if (!wret) {
1103 err = -ETIME;
1104 goto err_put;
1105 } else if (wret == -ERESTARTSYS) {
1106 err = -EINTR;
1107 goto err_put;
1108 }
1109 }
1110
1111 spin_lock_bh(&ch->lock);
1112
1113 if (ch->state != RIO_CM_LISTEN) {
1114 err = -ECANCELED;
1115 } else if (list_empty(&ch->accept_queue)) {
1116 riocm_debug(WAIT, "on %d accept_queue is empty on completion",
1117 ch->id);
1118 err = -EIO;
1119 }
1120
1121 spin_unlock_bh(&ch->lock);
1122
1123 if (err) {
1124 riocm_debug(WAIT, "on %d returns %d", ch->id, err);
1125 goto err_put;
1126 }
1127
1128 /* Create new channel for this connection */
1129 new_ch = riocm_ch_alloc(RIOCM_CHNUM_AUTO);
1130
1131 if (IS_ERR(new_ch)) {
1132 riocm_error("failed to get channel for new req (%ld)",
1133 PTR_ERR(new_ch));
1134 err = -ENOMEM;
1135 goto err_put;
1136 }
1137
1138 spin_lock_bh(&ch->lock);
1139
1140 req = list_first_entry(&ch->accept_queue, struct conn_req, node);
1141 list_del(&req->node);
1142 new_ch->cmdev = ch->cmdev;
1143 new_ch->loc_destid = ch->loc_destid;
1144 new_ch->rem_destid = req->destid;
1145 new_ch->rem_channel = req->chan;
1146
1147 spin_unlock_bh(&ch->lock);
1148 riocm_put_channel(ch);
1149 ch = NULL;
1150 kfree(req);
1151
1152 down_read(&rdev_sem);
1153 /* Find requester's device object */
1154 list_for_each_entry(peer, &new_ch->cmdev->peers, node) {
1155 if (peer->rdev->destid == new_ch->rem_destid) {
1156 riocm_debug(RX_CMD, "found matching device(%s)",
1157 rio_name(peer->rdev));
1158 found = 1;
1159 break;
1160 }
1161 }
1162 up_read(&rdev_sem);
1163
1164 if (!found) {
1165 /* If peer device object not found, simply ignore the request */
1166 err = -ENODEV;
1167 goto err_put_new_ch;
1168 }
1169
1170 new_ch->rdev = peer->rdev;
1171 new_ch->state = RIO_CM_CONNECTED;
1172 spin_lock_init(&new_ch->lock);
1173
1174 /* Acknowledge the connection request. */
1175 riocm_send_ack(new_ch);
1176
1177 *new_ch_id = new_ch->id;
1178 return new_ch;
1179
1180 err_put_new_ch:
1181 spin_lock_bh(&idr_lock);
1182 idr_remove(&ch_idr, new_ch->id);
1183 spin_unlock_bh(&idr_lock);
1184 riocm_put_channel(new_ch);
1185
1186 err_put:
1187 if (ch)
1188 riocm_put_channel(ch);
1189 *new_ch_id = 0;
1190 return ERR_PTR(err);
1191 }
1192
1193 /*
1194 * riocm_ch_listen - puts a channel into LISTEN state
1195 * @ch_id: channel ID
1196 *
1197 * Returns: 0 if success, or
1198 * -EINVAL if the specified channel does not exist or
1199 * is not in CHAN_BOUND state.
1200 */
1201 static int riocm_ch_listen(u16 ch_id)
1202 {
1203 struct rio_channel *ch = NULL;
1204 int ret = 0;
1205
1206 riocm_debug(CHOP, "(ch_%d)", ch_id);
1207
1208 ch = riocm_get_channel(ch_id);
1209 if (!ch)
1210 return -EINVAL;
1211 if (!riocm_cmp_exch(ch, RIO_CM_CHAN_BOUND, RIO_CM_LISTEN))
1212 ret = -EINVAL;
1213 riocm_put_channel(ch);
1214 return ret;
1215 }
1216
1217 /*
1218 * riocm_ch_bind - associate a channel object and an mport device
1219 * @ch_id: channel ID
1220 * @mport_id: local mport device ID
1221 * @context: pointer to the additional caller's context
1222 *
1223 * Returns: 0 if success, or
1224 * -ENODEV if cannot find specified mport,
1225 * -EINVAL if the specified channel does not exist or
1226 * is not in IDLE state.
1227 */
1228 static int riocm_ch_bind(u16 ch_id, u8 mport_id, void *context)
1229 {
1230 struct rio_channel *ch = NULL;
1231 struct cm_dev *cm;
1232 int rc = -ENODEV;
1233
1234 riocm_debug(CHOP, "ch_%d to mport_%d", ch_id, mport_id);
1235
1236 /* Find matching cm_dev object */
1237 down_read(&rdev_sem);
1238 list_for_each_entry(cm, &cm_dev_list, list) {
1239 if ((cm->mport->id == mport_id) &&
1240 rio_mport_is_running(cm->mport)) {
1241 rc = 0;
1242 break;
1243 }
1244 }
1245
1246 if (rc)
1247 goto exit;
1248
1249 ch = riocm_get_channel(ch_id);
1250 if (!ch) {
1251 rc = -EINVAL;
1252 goto exit;
1253 }
1254
1255 spin_lock_bh(&ch->lock);
1256 if (ch->state != RIO_CM_IDLE) {
1257 spin_unlock_bh(&ch->lock);
1258 rc = -EINVAL;
1259 goto err_put;
1260 }
1261
1262 ch->cmdev = cm;
1263 ch->loc_destid = cm->mport->host_deviceid;
1264 ch->context = context;
1265 ch->state = RIO_CM_CHAN_BOUND;
1266 spin_unlock_bh(&ch->lock);
1267 err_put:
1268 riocm_put_channel(ch);
1269 exit:
1270 up_read(&rdev_sem);
1271 return rc;
1272 }
1273
1274 /*
1275 * riocm_ch_alloc - channel object allocation helper routine
1276 * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic)
1277 *
1278 * Return value: pointer to newly created channel object,
1279 * or error-valued pointer
1280 */
1281 static struct rio_channel *riocm_ch_alloc(u16 ch_num)
1282 {
1283 int id;
1284 int start, end;
1285 struct rio_channel *ch;
1286
1287 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
1288 if (!ch)
1289 return ERR_PTR(-ENOMEM);
1290
1291 if (ch_num) {
1292 /* If requested, try to obtain the specified channel ID */
1293 start = ch_num;
1294 end = ch_num + 1;
1295 } else {
1296 /* Obtain channel ID from the dynamic allocation range */
1297 start = chstart;
1298 end = RIOCM_MAX_CHNUM + 1;
1299 }
1300
1301 idr_preload(GFP_KERNEL);
1302 spin_lock_bh(&idr_lock);
1303 id = idr_alloc_cyclic(&ch_idr, ch, start, end, GFP_NOWAIT);
1304 spin_unlock_bh(&idr_lock);
1305 idr_preload_end();
1306
1307 if (id < 0) {
1308 kfree(ch);
1309 return ERR_PTR(id == -ENOSPC ? -EBUSY : id);
1310 }
1311
1312 ch->id = (u16)id;
1313 ch->state = RIO_CM_IDLE;
1314 spin_lock_init(&ch->lock);
1315 INIT_LIST_HEAD(&ch->accept_queue);
1316 INIT_LIST_HEAD(&ch->ch_node);
1317 init_completion(&ch->comp);
1318 init_completion(&ch->comp_close);
1319 kref_init(&ch->ref);
1320 ch->rx_ring.head = 0;
1321 ch->rx_ring.tail = 0;
1322 ch->rx_ring.count = 0;
1323 ch->rx_ring.inuse_cnt = 0;
1324
1325 return ch;
1326 }
1327
1328 /*
1329 * riocm_ch_create - creates a new channel object and allocates ID for it
1330 * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic)
1331 *
1332 * Allocates and initializes a new channel object. If the parameter ch_num > 0
1333 * and is within the valid range, riocm_ch_create tries to allocate the
1334 * specified ID for the new channel. If ch_num = 0, channel ID will be assigned
1335 * automatically from the range (chstart ... RIOCM_MAX_CHNUM).
1336 * Module parameter 'chstart' defines start of an ID range available for dynamic
1337 * allocation. Range below 'chstart' is reserved for pre-defined ID numbers.
1338 * Available channel numbers are limited by 16-bit size of channel numbers used
1339 * in the packet header.
1340 *
1341 * Return value: pointer to the new rio_channel structure if successful (with the
1342 * assigned channel number written back through @ch_num), or an error-valued pointer.
1343 */
1344 static struct rio_channel *riocm_ch_create(u16 *ch_num)
1345 {
1346 struct rio_channel *ch = NULL;
1347
1348 ch = riocm_ch_alloc(*ch_num);
1349
1350 if (IS_ERR(ch))
1351 riocm_debug(CHOP, "Failed to allocate channel %d (err=%ld)",
1352 *ch_num, PTR_ERR(ch));
1353 else
1354 *ch_num = ch->id;
1355
1356 return ch;
1357 }
1358
1359 /*
1360 * riocm_ch_free - channel object release routine
1361 * @ref: pointer to a channel's kref structure
1362 */
1363 static void riocm_ch_free(struct kref *ref)
1364 {
1365 struct rio_channel *ch = container_of(ref, struct rio_channel, ref);
1366 int i;
1367
1368 riocm_debug(CHOP, "(ch_%d)", ch->id);
1369
1370 if (ch->rx_ring.inuse_cnt) {
1371 for (i = 0;
1372 i < RIOCM_RX_RING_SIZE && ch->rx_ring.inuse_cnt; i++) {
1373 if (ch->rx_ring.inuse[i] != NULL) {
1374 kfree(ch->rx_ring.inuse[i]);
1375 ch->rx_ring.inuse_cnt--;
1376 }
1377 }
1378 }
1379
1380 if (ch->rx_ring.count)
1381 for (i = 0; i < RIOCM_RX_RING_SIZE && ch->rx_ring.count; i++) {
1382 if (ch->rx_ring.buf[i] != NULL) {
1383 kfree(ch->rx_ring.buf[i]);
1384 ch->rx_ring.count--;
1385 }
1386 }
1387
1388 complete(&ch->comp_close);
1389 }
1390
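/*
 * riocm_send_close - notify the remote side that a channel is being closed
 * @ch: channel being closed
 *
 * Sends a CM_CONN_CLOSE packet to the remote end of the connection; if the
 * TX ring is full the packet is queued for deferred transmission.
 *
 * Returns: 0 if the packet was sent or queued, or an error code otherwise.
 */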
1391 static int riocm_send_close(struct rio_channel *ch)
1392 {
1393 struct rio_ch_chan_hdr *hdr;
1394 int ret;
1395
1396 /*
1397 * Send CH_CLOSE notification to the remote RapidIO device
1398 */
1399
1400 hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
1401 if (hdr == NULL)
1402 return -ENOMEM;
1403
1404 hdr->bhdr.src_id = htonl(ch->loc_destid);
1405 hdr->bhdr.dst_id = htonl(ch->rem_destid);
1406 hdr->bhdr.src_mbox = cmbox;
1407 hdr->bhdr.dst_mbox = cmbox;
1408 hdr->bhdr.type = RIO_CM_CHAN;
1409 hdr->ch_op = CM_CONN_CLOSE;
1410 hdr->dst_ch = htons(ch->rem_channel);
1411 hdr->src_ch = htons(ch->id);
1412
1413 /* ATTN: the function call below relies on the fact that underlying
1414 * add_outb_message() routine copies TX data into its internal transfer
1415 * buffer. Needs to be reviewed if switched to direct buffer mode.
1416 */
1417 ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr));
1418
1419 if (ret == -EBUSY && !riocm_queue_req(ch->cmdev, ch->rdev,
1420 hdr, sizeof(*hdr)))
1421 return 0;
1422 kfree(hdr);
1423
1424 if (ret)
1425 riocm_error("ch(%d) send CLOSE failed (ret=%d)", ch->id, ret);
1426
1427 return ret;
1428 }
1429
1430 /*
1431 * riocm_ch_close - closes a channel object with specified ID (by local request)
1432 * @ch: channel to be closed
1433 */
1434 static int riocm_ch_close(struct rio_channel *ch)
1435 {
1436 unsigned long tmo = msecs_to_jiffies(3000);
1437 enum rio_cm_state state;
1438 long wret;
1439 int ret = 0;
1440
1441 riocm_debug(CHOP, "ch_%d by %s(%d)",
1442 ch->id, current->comm, task_pid_nr(current));
1443
1444 state = riocm_exch(ch, RIO_CM_DESTROYING);
1445 if (state == RIO_CM_CONNECTED)
1446 riocm_send_close(ch);
1447
1448 complete_all(&ch->comp);
1449
1450 riocm_put_channel(ch);
1451 wret = wait_for_completion_interruptible_timeout(&ch->comp_close, tmo);
1452
1453 riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret);
1454
1455 if (wret == 0) {
1456 /* Timeout on wait occurred */
1457 riocm_debug(CHOP, "%s(%d) timed out waiting for ch %d",
1458 current->comm, task_pid_nr(current), ch->id);
1459 ret = -ETIMEDOUT;
1460 } else if (wret == -ERESTARTSYS) {
1461 /* Wait_for_completion was interrupted by a signal */
1462 riocm_debug(CHOP, "%s(%d) wait for ch %d was interrupted",
1463 current->comm, task_pid_nr(current), ch->id);
1464 ret = -EINTR;
1465 }
1466
1467 if (!ret) {
1468 riocm_debug(CHOP, "ch_%d resources released", ch->id);
1469 kfree(ch);
1470 } else {
1471 riocm_debug(CHOP, "failed to release ch_%d resources", ch->id);
1472 }
1473
1474 return ret;
1475 }
1476
1477 /*
1478 * riocm_cdev_open() - Open character device
1479 */
1480 static int riocm_cdev_open(struct inode *inode, struct file *filp)
1481 {
1482 riocm_debug(INIT, "by %s(%d) filp=%p ",
1483 current->comm, task_pid_nr(current), filp);
1484
1485 if (list_empty(&cm_dev_list))
1486 return -ENODEV;
1487
1488 return 0;
1489 }
1490
1491 /*
1492 * riocm_cdev_release() - Release character device
1493 */
1494 static int riocm_cdev_release(struct inode *inode, struct file *filp)
1495 {
1496 struct rio_channel *ch, *_c;
1497 unsigned int i;
1498 LIST_HEAD(list);
1499
1500 riocm_debug(EXIT, "by %s(%d) filp=%p",
1501 current->comm, task_pid_nr(current), filp);
1502
1503 /* Check if there are channels associated with this file descriptor */
1504 spin_lock_bh(&idr_lock);
1505 idr_for_each_entry(&ch_idr, ch, i) {
1506 if (ch && ch->filp == filp) {
1507 riocm_debug(EXIT, "ch_%d not released by %s(%d)",
1508 ch->id, current->comm,
1509 task_pid_nr(current));
1510 idr_remove(&ch_idr, ch->id);
1511 list_add(&ch->ch_node, &list);
1512 }
1513 }
1514 spin_unlock_bh(&idr_lock);
1515
1516 if (!list_empty(&list)) {
1517 list_for_each_entry_safe(ch, _c, &list, ch_node) {
1518 list_del(&ch->ch_node);
1519 riocm_ch_close(ch);
1520 }
1521 }
1522
1523 return 0;
1524 }
1525
1526 /*
1527 * cm_ep_get_list_size() - Reports number of endpoints in the network
1528 */
1529 static int cm_ep_get_list_size(void __user *arg)
1530 {
1531 u32 __user *p = arg;
1532 u32 mport_id;
1533 u32 count = 0;
1534 struct cm_dev *cm;
1535
1536 if (get_user(mport_id, p))
1537 return -EFAULT;
1538 if (mport_id >= RIO_MAX_MPORTS)
1539 return -EINVAL;
1540
1541 /* Find a matching cm_dev object */
1542 down_read(&rdev_sem);
1543 list_for_each_entry(cm, &cm_dev_list, list) {
1544 if (cm->mport->id == mport_id) {
1545 count = cm->npeers;
1546 up_read(&rdev_sem);
1547 if (copy_to_user(arg, &count, sizeof(u32)))
1548 return -EFAULT;
1549 return 0;
1550 }
1551 }
1552 up_read(&rdev_sem);
1553
1554 return -ENODEV;
1555 }
1556
1557 /*
1558 * cm_ep_get_list() - Returns list of attached endpoints
1559 */
1560 static int cm_ep_get_list(void __user *arg)
1561 {
1562 struct cm_dev *cm;
1563 struct cm_peer *peer;
1564 u32 info[2];
1565 void *buf;
1566 u32 nent;
1567 u32 *entry_ptr;
1568 u32 i = 0;
1569 int ret = 0;
1570
1571 if (copy_from_user(&info, arg, sizeof(info)))
1572 return -EFAULT;
1573
1574 if (info[1] >= RIO_MAX_MPORTS || info[0] > RIOCM_MAX_EP_COUNT)
1575 return -EINVAL;
1576
1577 /* Find a matching cm_dev object */
1578 down_read(&rdev_sem);
1579 list_for_each_entry(cm, &cm_dev_list, list)
1580 if (cm->mport->id == (u8)info[1])
1581 goto found;
1582
1583 up_read(&rdev_sem);
1584 return -ENODEV;
1585
1586 found:
1587 nent = min(info[0], cm->npeers);
1588 buf = kcalloc(nent + 2, sizeof(u32), GFP_KERNEL);
1589 if (!buf) {
1590 up_read(&rdev_sem);
1591 return -ENOMEM;
1592 }
1593
1594 entry_ptr = (u32 *)((uintptr_t)buf + 2*sizeof(u32));
1595
1596 list_for_each_entry(peer, &cm->peers, node) {
1597 *entry_ptr = (u32)peer->rdev->destid;
1598 entry_ptr++;
1599 if (++i == nent)
1600 break;
1601 }
1602 up_read(&rdev_sem);
1603
1604 ((u32 *)buf)[0] = i; /* report an updated number of entries */
1605 ((u32 *)buf)[1] = info[1]; /* put back an mport ID */
1606 if (copy_to_user(arg, buf, sizeof(u32) * (info[0] + 2)))
1607 ret = -EFAULT;
1608
1609 kfree(buf);
1610 return ret;
1611 }
1612
1613 /*
1614 * cm_mport_get_list() - Returns list of available local mport devices
1615 */
1616 static int cm_mport_get_list(void __user *arg)
1617 {
1618 int ret = 0;
1619 u32 entries;
1620 void *buf;
1621 struct cm_dev *cm;
1622 u32 *entry_ptr;
1623 int count = 0;
1624
1625 if (copy_from_user(&entries, arg, sizeof(entries)))
1626 return -EFAULT;
1627 if (entries == 0 || entries > RIO_MAX_MPORTS)
1628 return -EINVAL;
1629 buf = kcalloc(entries + 1, sizeof(u32), GFP_KERNEL);
1630 if (!buf)
1631 return -ENOMEM;
1632
1633 /* Scan all registered cm_dev objects */
1634 entry_ptr = (u32 *)((uintptr_t)buf + sizeof(u32));
1635 down_read(&rdev_sem);
1636 list_for_each_entry(cm, &cm_dev_list, list) {
1637 if (count++ < entries) {
1638 *entry_ptr = (cm->mport->id << 16) |
1639 cm->mport->host_deviceid;
1640 entry_ptr++;
1641 }
1642 }
1643 up_read(&rdev_sem);
1644
1645 *((u32 *)buf) = count; /* report a real number of entries */
1646 if (copy_to_user(arg, buf, sizeof(u32) * (count + 1)))
1647 ret = -EFAULT;
1648
1649 kfree(buf);
1650 return ret;
1651 }
1652
1653 /*
1654 * cm_chan_create() - Create a message exchange channel
1655 */
1656 static int cm_chan_create(struct file *filp, void __user *arg)
1657 {
1658 u16 __user *p = arg;
1659 u16 ch_num;
1660 struct rio_channel *ch;
1661
1662 if (get_user(ch_num, p))
1663 return -EFAULT;
1664
1665 riocm_debug(CHOP, "ch_%d requested by %s(%d)",
1666 ch_num, current->comm, task_pid_nr(current));
1667 ch = riocm_ch_create(&ch_num);
1668 if (IS_ERR(ch))
1669 return PTR_ERR(ch);
1670
1671 ch->filp = filp;
1672 riocm_debug(CHOP, "ch_%d created by %s(%d)",
1673 ch_num, current->comm, task_pid_nr(current));
1674 return put_user(ch_num, p);
1675 }
1676
1677 /*
1678 * cm_chan_close() - Close channel
1679 * @filp: Pointer to file object
1680 * @arg: Channel to close
1681 */
1682 static int cm_chan_close(struct file *filp, void __user *arg)
1683 {
1684 u16 __user *p = arg;
1685 u16 ch_num;
1686 struct rio_channel *ch;
1687
1688 if (get_user(ch_num, p))
1689 return -EFAULT;
1690
1691 riocm_debug(CHOP, "ch_%d by %s(%d)",
1692 ch_num, current->comm, task_pid_nr(current));
1693
1694 spin_lock_bh(&idr_lock);
1695 ch = idr_find(&ch_idr, ch_num);
1696 if (!ch) {
1697 spin_unlock_bh(&idr_lock);
1698 return 0;
1699 }
1700 if (ch->filp != filp) {
1701 spin_unlock_bh(&idr_lock);
1702 return -EINVAL;
1703 }
1704 idr_remove(&ch_idr, ch->id);
1705 spin_unlock_bh(&idr_lock);
1706
1707 return riocm_ch_close(ch);
1708 }
1709
1710 /*
1711 * cm_chan_bind() - Bind channel
1712 * @arg: Channel number
1713 */
1714 static int cm_chan_bind(void __user *arg)
1715 {
1716 struct rio_cm_channel chan;
1717
1718 if (copy_from_user(&chan, arg, sizeof(chan)))
1719 return -EFAULT;
1720 if (chan.mport_id >= RIO_MAX_MPORTS)
1721 return -EINVAL;
1722
1723 return riocm_ch_bind(chan.id, chan.mport_id, NULL);
1724 }
1725
1726 /*
1727 * cm_chan_listen() - Listen on channel
1728 * @arg: Channel number
1729 */
1730 static int cm_chan_listen(void __user *arg)
1731 {
1732 u16 __user *p = arg;
1733 u16 ch_num;
1734
1735 if (get_user(ch_num, p))
1736 return -EFAULT;
1737
1738 return riocm_ch_listen(ch_num);
1739 }
1740
1741 /*
1742 * cm_chan_accept() - Accept incoming connection
1743 * @filp: Pointer to file object
1744 * @arg: Channel number
1745 */
1746 static int cm_chan_accept(struct file *filp, void __user *arg)
1747 {
1748 struct rio_cm_accept param;
1749 long accept_to;
1750 struct rio_channel *ch;
1751
1752 if (copy_from_user(&param, arg, sizeof(param)))
1753 return -EFAULT;
1754
1755 riocm_debug(CHOP, "on ch_%d by %s(%d)",
1756 param.ch_num, current->comm, task_pid_nr(current));
1757
1758 accept_to = param.wait_to ?
1759 msecs_to_jiffies(param.wait_to) : 0;
1760
1761 ch = riocm_ch_accept(param.ch_num, &param.ch_num, accept_to);
1762 if (IS_ERR(ch))
1763 return PTR_ERR(ch);
1764 ch->filp = filp;
1765
1766 riocm_debug(CHOP, "new ch_%d for %s(%d)",
1767 ch->id, current->comm, task_pid_nr(current));
1768
1769 if (copy_to_user(arg, &param, sizeof(param)))
1770 return -EFAULT;
1771 return 0;
1772 }
1773
1774 /*
1775 * cm_chan_connect() - Connect on channel
1776 * @arg: Channel information
1777 */
1778 static int cm_chan_connect(void __user *arg)
1779 {
1780 struct rio_cm_channel chan;
1781 struct cm_dev *cm;
1782 struct cm_peer *peer;
1783 int ret = -ENODEV;
1784
1785 if (copy_from_user(&chan, arg, sizeof(chan)))
1786 return -EFAULT;
1787 if (chan.mport_id >= RIO_MAX_MPORTS)
1788 return -EINVAL;
1789
1790 down_read(&rdev_sem);
1791
1792 /* Find matching cm_dev object */
1793 list_for_each_entry(cm, &cm_dev_list, list) {
1794 if (cm->mport->id == chan.mport_id) {
1795 ret = 0;
1796 break;
1797 }
1798 }
1799
1800 if (ret)
1801 goto err_out;
1802
1803 if (chan.remote_destid >= RIO_ANY_DESTID(cm->mport->sys_size)) {
1804 ret = -EINVAL;
1805 goto err_out;
1806 }
1807
1808 /* Find corresponding RapidIO endpoint device object */
1809 ret = -ENODEV;
1810
1811 list_for_each_entry(peer, &cm->peers, node) {
1812 if (peer->rdev->destid == chan.remote_destid) {
1813 ret = 0;
1814 break;
1815 }
1816 }
1817
1818 if (ret)
1819 goto err_out;
1820
1821 up_read(&rdev_sem);
1822
1823 return riocm_ch_connect(chan.id, cm, peer, chan.remote_channel);
1824 err_out:
1825 up_read(&rdev_sem);
1826 return ret;
1827 }
1828
1829 /*
1830 * cm_chan_msg_send() - Send a message through channel
1831 * @arg: Outbound message information
1832 */
1833 static int cm_chan_msg_send(void __user *arg)
1834 {
1835 struct rio_cm_msg msg;
1836 void *buf;
1837 int ret;
1838
1839 if (copy_from_user(&msg, arg, sizeof(msg)))
1840 return -EFAULT;
1841 if (msg.size > RIO_MAX_MSG_SIZE)
1842 return -EINVAL;
1843
1844 buf = memdup_user((void __user *)(uintptr_t)msg.msg, msg.size);
1845 if (IS_ERR(buf))
1846 return PTR_ERR(buf);
1847
1848 ret = riocm_ch_send(msg.ch_num, buf, msg.size);
1849
1850 kfree(buf);
1851 return ret;
1852 }
1853
1854 /*
1855 * cm_chan_msg_rcv() - Receive a message through channel
1856 * @arg: Inbound message information
1857 */
1858 static int cm_chan_msg_rcv(void __user *arg)
1859 {
1860 struct rio_cm_msg msg;
1861 struct rio_channel *ch;
1862 void *buf;
1863 long rxto;
1864 int ret = 0, msg_size;
1865
1866 if (copy_from_user(&msg, arg, sizeof(msg)))
1867 return -EFAULT;
1868
1869 if (msg.ch_num == 0 || msg.size == 0)
1870 return -EINVAL;
1871
1872 ch = riocm_get_channel(msg.ch_num);
1873 if (!ch)
1874 return -ENODEV;
1875
1876 rxto = msg.rxto ? msecs_to_jiffies(msg.rxto) : MAX_SCHEDULE_TIMEOUT;
1877
1878 ret = riocm_ch_receive(ch, &buf, rxto);
1879 if (ret)
1880 goto out;
1881
1882 msg_size = min(msg.size, (u16)(RIO_MAX_MSG_SIZE));
1883
1884 if (copy_to_user((void __user *)(uintptr_t)msg.msg, buf, msg_size))
1885 ret = -EFAULT;
1886
1887 riocm_ch_free_rxbuf(ch, buf);
1888 out:
1889 riocm_put_channel(ch);
1890 return ret;
1891 }
1892
1893 /*
1894 * riocm_cdev_ioctl() - IOCTL requests handler
1895 */
1896 static long
1897 riocm_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1898 {
1899 switch (cmd) {
1900 case RIO_CM_EP_GET_LIST_SIZE:
1901 return cm_ep_get_list_size((void __user *)arg);
1902 case RIO_CM_EP_GET_LIST:
1903 return cm_ep_get_list((void __user *)arg);
1904 case RIO_CM_CHAN_CREATE:
1905 return cm_chan_create(filp, (void __user *)arg);
1906 case RIO_CM_CHAN_CLOSE:
1907 return cm_chan_close(filp, (void __user *)arg);
1908 case RIO_CM_CHAN_BIND:
1909 return cm_chan_bind((void __user *)arg);
1910 case RIO_CM_CHAN_LISTEN:
1911 return cm_chan_listen((void __user *)arg);
1912 case RIO_CM_CHAN_ACCEPT:
1913 return cm_chan_accept(filp, (void __user *)arg);
1914 case RIO_CM_CHAN_CONNECT:
1915 return cm_chan_connect((void __user *)arg);
1916 case RIO_CM_CHAN_SEND:
1917 return cm_chan_msg_send((void __user *)arg);
1918 case RIO_CM_CHAN_RECEIVE:
1919 return cm_chan_msg_rcv((void __user *)arg);
1920 case RIO_CM_MPORT_GET_LIST:
1921 return cm_mport_get_list((void __user *)arg);
1922 default:
1923 break;
1924 }
1925
1926 return -EINVAL;
1927 }
1928
1929 static const struct file_operations riocm_cdev_fops = {
1930 .owner = THIS_MODULE,
1931 .open = riocm_cdev_open,
1932 .release = riocm_cdev_release,
1933 .unlocked_ioctl = riocm_cdev_ioctl,
1934 };
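/*
 * Illustrative sketch (not part of this driver) of how a user-space client
 * might exercise the ioctl interface dispatched above, assuming the request
 * codes and structures from <linux/rio_cm_cdev.h> and the rio_cm device node
 * created by this driver:
 *
 *	int fd = open("/dev/rio_cm", O_RDWR);
 *	__u16 ch = 0;					// 0 = auto-assign channel ID
 *	ioctl(fd, RIO_CM_CHAN_CREATE, &ch);		// create channel
 *	struct rio_cm_channel chan = { .id = ch, .mport_id = 0 };
 *	ioctl(fd, RIO_CM_CHAN_BIND, &chan);		// bind channel to mport 0
 *	ioctl(fd, RIO_CM_CHAN_LISTEN, &ch);		// enter LISTEN state
 *	struct rio_cm_accept acc = { .ch_num = ch, .wait_to = 1000 };
 *	ioctl(fd, RIO_CM_CHAN_ACCEPT, &acc);		// acc.ch_num = new channel ID
 */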
1935
1936 /*
1937 * riocm_add_dev - add new remote RapidIO device into channel management core
1938 * @dev: device object associated with RapidIO device
1939 * @sif: subsystem interface
1940 *
1941 * Adds the specified RapidIO device (if applicable) into peers list of
1942 * the corresponding channel management device (cm_dev).
1943 */
1944 static int riocm_add_dev(struct device *dev, struct subsys_interface *sif)
1945 {
1946 struct cm_peer *peer;
1947 struct rio_dev *rdev = to_rio_dev(dev);
1948 struct cm_dev *cm;
1949
1950 /* Check if the remote device has capabilities required to support CM */
1951 if (!dev_cm_capable(rdev))
1952 return 0;
1953
1954 riocm_debug(RDEV, "(%s)", rio_name(rdev));
1955
1956 peer = kmalloc(sizeof(*peer), GFP_KERNEL);
1957 if (!peer)
1958 return -ENOMEM;
1959
1960 /* Find a corresponding cm_dev object */
1961 down_write(&rdev_sem);
1962 list_for_each_entry(cm, &cm_dev_list, list) {
1963 if (cm->mport == rdev->net->hport)
1964 goto found;
1965 }
1966
1967 up_write(&rdev_sem);
1968 kfree(peer);
1969 return -ENODEV;
1970
1971 found:
1972 peer->rdev = rdev;
1973 list_add_tail(&peer->node, &cm->peers);
1974 cm->npeers++;
1975
1976 up_write(&rdev_sem);
1977 return 0;
1978 }
1979
1980 /*
1981 * riocm_remove_dev - remove remote RapidIO device from channel management core
1982 * @dev: device object associated with RapidIO device
1983 * @sif: subsystem interface
1984 *
1985 * Removes the specified RapidIO device (if applicable) from peers list of
1986 * the corresponding channel management device (cm_dev).
1987 */
1988 static void riocm_remove_dev(struct device *dev, struct subsys_interface *sif)
1989 {
1990 struct rio_dev *rdev = to_rio_dev(dev);
1991 struct cm_dev *cm;
1992 struct cm_peer *peer;
1993 struct rio_channel *ch, *_c;
1994 unsigned int i;
1995 bool found = false;
1996 LIST_HEAD(list);
1997
1998 /* Check if the remote device has capabilities required to support CM */
1999 if (!dev_cm_capable(rdev))
2000 return;
2001
2002 riocm_debug(RDEV, "(%s)", rio_name(rdev));
2003
2004 /* Find matching cm_dev object */
2005 down_write(&rdev_sem);
2006 list_for_each_entry(cm, &cm_dev_list, list) {
2007 if (cm->mport == rdev->net->hport) {
2008 found = true;
2009 break;
2010 }
2011 }
2012
2013 if (!found) {
2014 up_write(&rdev_sem);
2015 return;
2016 }
2017
2018 /* Remove remote device from the list of peers */
2019 found = false;
2020 list_for_each_entry(peer, &cm->peers, node) {
2021 if (peer->rdev == rdev) {
2022 riocm_debug(RDEV, "removing peer %s", rio_name(rdev));
2023 found = true;
2024 list_del(&peer->node);
2025 cm->npeers--;
2026 kfree(peer);
2027 break;
2028 }
2029 }
2030
2031 up_write(&rdev_sem);
2032
2033 if (!found)
2034 return;
2035
2036 /*
2037 * Release channels associated with this peer
2038 */
2039
2040 spin_lock_bh(&idr_lock);
2041 idr_for_each_entry(&ch_idr, ch, i) {
2042 if (ch && ch->rdev == rdev) {
2043 if (atomic_read(&rdev->state) != RIO_DEVICE_SHUTDOWN)
2044 riocm_exch(ch, RIO_CM_DISCONNECT);
2045 idr_remove(&ch_idr, ch->id);
2046 list_add(&ch->ch_node, &list);
2047 }
2048 }
2049 spin_unlock_bh(&idr_lock);
2050
2051 if (!list_empty(&list)) {
2052 list_for_each_entry_safe(ch, _c, &list, ch_node) {
2053 list_del(&ch->ch_node);
2054 riocm_ch_close(ch);
2055 }
2056 }
2057 }
2058
2059 /*
2060 * riocm_cdev_add() - Create rio_cm char device
2061 * @devno: device number assigned to device (MAJ + MIN)
2062 */
2063 static int riocm_cdev_add(dev_t devno)
2064 {
2065 int ret;
2066
2067 cdev_init(&riocm_cdev.cdev, &riocm_cdev_fops);
2068 riocm_cdev.cdev.owner = THIS_MODULE;
2069 ret = cdev_add(&riocm_cdev.cdev, devno, 1);
2070 if (ret < 0) {
2071 riocm_error("Cannot register a device with error %d", ret);
2072 return ret;
2073 }
2074
2075 riocm_cdev.dev = device_create(dev_class, NULL, devno, NULL, DEV_NAME);
2076 if (IS_ERR(riocm_cdev.dev)) {
2077 cdev_del(&riocm_cdev.cdev);
2078 return PTR_ERR(riocm_cdev.dev);
2079 }
2080
2081 riocm_debug(MPORT, "Added %s cdev(%d:%d)",
2082 DEV_NAME, MAJOR(devno), MINOR(devno));
2083
2084 return 0;
2085 }
2086
2087 /*
2088 * riocm_add_mport - add new local mport device into channel management core
2089 * @dev: device object associated with mport
2090 *
2091 * When a new mport device is added, CM immediately reserves the inbound and
2092 * outbound RapidIO mailboxes that will be used for CM messaging on that port.
2093 */
2094 static int riocm_add_mport(struct device *dev)
2095 {
2096 int rc;
2097 int i;
2098 struct cm_dev *cm;
2099 struct rio_mport *mport = to_rio_mport(dev);
2100
2101 riocm_debug(MPORT, "add mport %s", mport->name);
2102
2103 cm = kzalloc(sizeof(*cm), GFP_KERNEL);
2104 if (!cm)
2105 return -ENOMEM;
2106
2107 cm->mport = mport;
2108
2109 rc = rio_request_outb_mbox(mport, cm, cmbox,
2110 RIOCM_TX_RING_SIZE, riocm_outb_msg_event);
2111 if (rc) {
2112 riocm_error("failed to allocate OBMBOX_%d on %s",
2113 cmbox, mport->name);
2114 kfree(cm);
2115 return -ENODEV;
2116 }
2117
2118 rc = rio_request_inb_mbox(mport, cm, cmbox,
2119 RIOCM_RX_RING_SIZE, riocm_inb_msg_event);
2120 if (rc) {
2121 riocm_error("failed to allocate IBMBOX_%d on %s",
2122 cmbox, mport->name);
2123 rio_release_outb_mbox(mport, cmbox);
2124 kfree(cm);
2125 return -ENODEV;
2126 }
2127
2128 cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
2129 if (!cm->rx_wq) {
2130 rio_release_inb_mbox(mport, cmbox);
2131 rio_release_outb_mbox(mport, cmbox);
2132 kfree(cm);
2133 return -ENOMEM;
2134 }
2135
2136 /*
2137 * Allocate and register inbound messaging buffers to be ready
2138 * to receive channel and system management requests
2139 */
2140 for (i = 0; i < RIOCM_RX_RING_SIZE; i++)
2141 cm->rx_buf[i] = NULL;
2142
2143 cm->rx_slots = RIOCM_RX_RING_SIZE;
2144 mutex_init(&cm->rx_lock);
2145 riocm_rx_fill(cm, RIOCM_RX_RING_SIZE);
2146 INIT_WORK(&cm->rx_work, rio_ibmsg_handler);
2147
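	/* Initialize outbound (TX) message ring bookkeeping */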
2148 cm->tx_slot = 0;
2149 cm->tx_cnt = 0;
2150 cm->tx_ack_slot = 0;
2151 spin_lock_init(&cm->tx_lock);
2152
2153 INIT_LIST_HEAD(&cm->peers);
2154 cm->npeers = 0;
2155 INIT_LIST_HEAD(&cm->tx_reqs);
2156
2157 down_write(&rdev_sem);
2158 list_add_tail(&cm->list, &cm_dev_list);
2159 up_write(&rdev_sem);
2160
2161 return 0;
2162 }
2163
2164 /*
2165 * riocm_remove_mport - remove local mport device from channel management core
2166 * @dev: device object associated with mport
2167 *
2168 * Removes a local mport device from the list of registered devices that provide
2169 * channel management services. If the specified mport is not registered with
2170 * the CM core, the function returns without taking any action.
2171 */
2172 static void riocm_remove_mport(struct device *dev)
2173 {
2174 struct rio_mport *mport = to_rio_mport(dev);
2175 struct cm_dev *cm;
2176 struct cm_peer *peer, *temp;
2177 struct rio_channel *ch, *_c;
2178 unsigned int i;
2179 bool found = false;
2180 LIST_HEAD(list);
2181
2182 riocm_debug(MPORT, "%s", mport->name);
2183
2184 /* Find a matching cm_dev object */
2185 down_write(&rdev_sem);
2186 list_for_each_entry(cm, &cm_dev_list, list) {
2187 if (cm->mport == mport) {
2188 list_del(&cm->list);
2189 found = true;
2190 break;
2191 }
2192 }
2193 up_write(&rdev_sem);
2194 if (!found)
2195 return;
2196
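	/* Stop inbound message processing and release the workqueue */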
2197 flush_workqueue(cm->rx_wq);
2198 destroy_workqueue(cm->rx_wq);
2199
2200 /* Release channels bound to this mport */
2201 spin_lock_bh(&idr_lock);
2202 idr_for_each_entry(&ch_idr, ch, i) {
2203 if (ch->cmdev == cm) {
2204 riocm_debug(RDEV, "%s drop ch_%d",
2205 mport->name, ch->id);
2206 idr_remove(&ch_idr, ch->id);
2207 list_add(&ch->ch_node, &list);
2208 }
2209 }
2210 spin_unlock_bh(&idr_lock);
2211
2212 if (!list_empty(&list)) {
2213 list_for_each_entry_safe(ch, _c, &list, ch_node) {
2214 list_del(&ch->ch_node);
2215 riocm_ch_close(ch);
2216 }
2217 }
2218
2219 rio_release_inb_mbox(mport, cmbox);
2220 rio_release_outb_mbox(mport, cmbox);
2221
2222 /* Remove and free peer entries */
2223 if (!list_empty(&cm->peers))
2224 riocm_debug(RDEV, "ATTN: peer list not empty");
2225 list_for_each_entry_safe(peer, temp, &cm->peers, node) {
2226 riocm_debug(RDEV, "removing peer %s", rio_name(peer->rdev));
2227 list_del(&peer->node);
2228 kfree(peer);
2229 }
2230
2231 riocm_rx_free(cm);
2232 kfree(cm);
2233 riocm_debug(MPORT, "%s done", mport->name);
2234 }
2235
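/*
 * rio_cm_shutdown - reboot notifier callback
 * @nb: notifier block (unused)
 * @code: reboot notification code (unused)
 * @unused: opaque notifier data (unused)
 *
 * On reboot/shutdown, sends a close notification to the connection partner
 * of every channel that is still in the connected state.
 */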
2236 static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code,
2237 void *unused)
2238 {
2239 struct rio_channel *ch;
2240 unsigned int i;
2241 LIST_HEAD(list);
2242
2243 riocm_debug(EXIT, ".");
2244
2245 /*
2246 * If any channels are left in the connected state, send a close
2247 * notification to the connection partner.
2248 * First build a list of channels that require a closing
2249 * notification, because riocm_send_close() must be called
2250 * outside of spinlock-protected code.
2251 */
2252 spin_lock_bh(&idr_lock);
2253 idr_for_each_entry(&ch_idr, ch, i) {
2254 if (ch->state == RIO_CM_CONNECTED) {
2255 riocm_debug(EXIT, "close ch %d", ch->id);
2256 idr_remove(&ch_idr, ch->id);
2257 list_add(&ch->ch_node, &list);
2258 }
2259 }
2260 spin_unlock_bh(&idr_lock);
2261
2262 list_for_each_entry(ch, &list, ch_node)
2263 riocm_send_close(ch);
2264
2265 return NOTIFY_DONE;
2266 }
2267
2268 /*
2269 * riocm_interface handles addition/removal of remote RapidIO devices
2270 */
2271 static struct subsys_interface riocm_interface = {
2272 .name = "rio_cm",
2273 .subsys = &rio_bus_type,
2274 .add_dev = riocm_add_dev,
2275 .remove_dev = riocm_remove_dev,
2276 };
2277
2278 /*
2279 * rio_mport_interface handles addition/removal of local mport devices
2280 */
2281 static struct class_interface rio_mport_interface __refdata = {
2282 .class = &rio_mport_class,
2283 .add_dev = riocm_add_mport,
2284 .remove_dev = riocm_remove_mport,
2285 };
2286
2287 static struct notifier_block rio_cm_notifier = {
2288 .notifier_call = rio_cm_shutdown,
2289 };
2290
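/*
 * riocm_init - driver initialization
 *
 * Creates the rio_cm device class and character device region, then
 * registers the mport class interface, the RapidIO bus interface and the
 * reboot notifier. Already completed steps are undone on failure.
 */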
2291 static int __init riocm_init(void)
2292 {
2293 int ret;
2294
2295 /* Create device class needed by udev */
2296 dev_class = class_create(DRV_NAME);
2297 if (IS_ERR(dev_class)) {
2298 riocm_error("Cannot create " DRV_NAME " class");
2299 return PTR_ERR(dev_class);
2300 }
2301
2302 ret = alloc_chrdev_region(&dev_number, 0, 1, DRV_NAME);
2303 if (ret) {
2304 class_destroy(dev_class);
2305 return ret;
2306 }
2307
2308 dev_major = MAJOR(dev_number);
2309 dev_minor_base = MINOR(dev_number);
2310 riocm_debug(INIT, "Registered class with %d major", dev_major);
2311
2312 /*
2313 * Register as rapidio_port class interface to get notifications about
2314 * mport additions and removals.
2315 */
2316 ret = class_interface_register(&rio_mport_interface);
2317 if (ret) {
2318 riocm_error("class_interface_register error: %d", ret);
2319 goto err_reg;
2320 }
2321
2322 /*
2323 * Register as RapidIO bus interface to get notifications about
2324 * addition/removal of remote RapidIO devices.
2325 */
2326 ret = subsys_interface_register(&riocm_interface);
2327 if (ret) {
2328 riocm_error("subsys_interface_register error: %d", ret);
2329 goto err_cl;
2330 }
2331
2332 ret = register_reboot_notifier(&rio_cm_notifier);
2333 if (ret) {
2334 riocm_error("failed to register reboot notifier (err=%d)", ret);
2335 goto err_sif;
2336 }
2337
2338 ret = riocm_cdev_add(dev_number);
2339 if (ret) {
2340 unregister_reboot_notifier(&rio_cm_notifier);
2341 ret = -ENODEV;
2342 goto err_sif;
2343 }
2344
2345 return 0;
2346 err_sif:
2347 subsys_interface_unregister(&riocm_interface);
2348 err_cl:
2349 class_interface_unregister(&rio_mport_interface);
2350 err_reg:
2351 unregister_chrdev_region(dev_number, 1);
2352 class_destroy(dev_class);
2353 return ret;
2354 }
2355
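/*
 * riocm_exit - driver cleanup
 *
 * Unregisters the reboot notifier and bus/class interfaces, destroys the
 * channel IDR, and removes the character device, device class and the
 * reserved char device region.
 */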
2356 static void __exit riocm_exit(void)
2357 {
2358 riocm_debug(EXIT, "enter");
2359 unregister_reboot_notifier(&rio_cm_notifier);
2360 subsys_interface_unregister(&riocm_interface);
2361 class_interface_unregister(&rio_mport_interface);
2362 idr_destroy(&ch_idr);
2363
2364 device_unregister(riocm_cdev.dev);
2365 cdev_del(&(riocm_cdev.cdev));
2366
2367 class_destroy(dev_class);
2368 unregister_chrdev_region(dev_number, 1);
2369 }
2370
2371 late_initcall(riocm_init);
2372 module_exit(riocm_exit);
2373