Error Trace
[Home]
Bug # 151
Show/hide error trace Error trace
{ 2682 ldv_s_riocm_cdev_fops_file_operations = 0; 2672 LDV_IN_INTERRUPT = 1; 2681 ldv_initialize() { /* Function call is skipped due to function is undefined */} 2690 goto ldv_35569; 2690 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */} 2693 goto ldv_35568; 2691 ldv_35568:; 2694 tmp = nondet_int() { /* Function call is skipped due to function is undefined */} 2694 switch (tmp); 3008 ldv_handler_precall() { /* Function call is skipped due to function is undefined */} { } { { } 311 _raw_spin_lock_bh(&(lock->__annonCompField20.rlock)) { /* Function call is skipped due to function is undefined */} 312 return ;; } 2253 i = 0U; 2253 goto ldv_35501; 2253 tmp___1 = idr_get_next(&ch_idr, (int *)(&i)) { /* Function call is skipped due to function is undefined */} 2253 ch = (struct rio_channel *)tmp___1; 2255 goto ldv_35500; 2254 ldv_35500:; 2255 unsigned int __CPAchecker_TMP_1 = (unsigned int)(ch->state); { } { } { }} | Source code 1
2 /*
3 * rio_cm - RapidIO Channelized Messaging Driver
4 *
5 * Copyright 2013-2016 Integrated Device Technology, Inc.
6 * Copyright (c) 2015, Prodrive Technologies
7 * Copyright (c) 2015, RapidIO Trade Association
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 * THIS PROGRAM IS DISTRIBUTED IN THE HOPE THAT IT WILL BE USEFUL,
15 * BUT WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED WARRANTY OF
16 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. SEE THE
17 * GNU GENERAL PUBLIC LICENSE FOR MORE DETAILS.
18 */
19
20 #include <linux/module.h>
21 #include <linux/kernel.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/delay.h>
24 #include <linux/sched.h>
25 #include <linux/rio.h>
26 #include <linux/rio_drv.h>
27 #include <linux/slab.h>
28 #include <linux/idr.h>
29 #include <linux/interrupt.h>
30 #include <linux/cdev.h>
31 #include <linux/fs.h>
32 #include <linux/poll.h>
33 #include <linux/reboot.h>
34 #include <linux/bitops.h>
35 #include <linux/printk.h>
36 #include <linux/rio_cm_cdev.h>
37
/* Driver identification strings used by module metadata and log prefixes */
#define DRV_NAME "rio_cm"
#define DRV_VERSION "1.0.0"
#define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>"
#define DRV_DESC "RapidIO Channelized Messaging Driver"
#define DEV_NAME "rio_cm"

/* Debug output filtering masks (bitwise OR into dbg_level module param) */
enum {
	DBG_NONE	= 0,
	DBG_INIT	= BIT(0), /* driver init */
	DBG_EXIT	= BIT(1), /* driver exit */
	DBG_MPORT	= BIT(2), /* mport add/remove */
	DBG_RDEV	= BIT(3), /* RapidIO device add/remove */
	DBG_CHOP	= BIT(4), /* channel operations */
	DBG_WAIT	= BIT(5), /* waiting for events */
	DBG_TX		= BIT(6), /* message TX */
	DBG_TX_EVENT	= BIT(7), /* message TX event */
	DBG_RX_DATA	= BIT(8), /* inbound data messages */
	DBG_RX_CMD	= BIT(9), /* inbound REQ/ACK/NACK messages */
	DBG_ALL		= ~0,
};
59
/*
 * Print helpers. riocm_debug() is compiled out unless DEBUG is defined,
 * and at runtime is filtered by the dbg_level bitmask (DBG_* values).
 * All variants prefix the message with the driver name and function name.
 */
#ifdef DEBUG
#define riocm_debug(level, fmt, arg...) \
	do { \
		if (DBG_##level & dbg_level) \
			pr_debug(DRV_NAME ": %s " fmt "\n", \
				__func__, ##arg); \
	} while (0)
#else
#define riocm_debug(level, fmt, arg...) \
		no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg)
#endif

#define riocm_warn(fmt, arg...) \
	pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg)

#define riocm_error(fmt, arg...) \
	pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg)
77
78
/* Module parameters: mailbox number and first dynamically-allocated channel */
static int cmbox = 1;
module_param(cmbox, int, S_IRUGO);
MODULE_PARM_DESC(cmbox, "RapidIO Mailbox number (default 1)");

static int chstart = 256;
module_param(chstart, int, S_IRUGO);
MODULE_PARM_DESC(chstart,
		 "Start channel number for dynamic allocation (default 256)");

#ifdef DEBUG
/* Runtime-writable debug mask consumed by the riocm_debug() macro */
static u32 dbg_level = DBG_NONE;
module_param(dbg_level, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
#endif

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
98
/* Fixed ring sizes and protocol limits */
#define RIOCM_TX_RING_SIZE	128
#define RIOCM_RX_RING_SIZE	128
#define RIOCM_CONNECT_TO	3 /* connect response TO (in sec) */

#define RIOCM_MAX_CHNUM		0xffff /* Use full range of u16 field */
#define RIOCM_CHNUM_AUTO	0
#define RIOCM_MAX_EP_COUNT	0x10000 /* Max number of endpoints */

/* Channel connection state machine states */
enum rio_cm_state {
	RIO_CM_IDLE,
	RIO_CM_CONNECT,
	RIO_CM_CONNECTED,
	RIO_CM_DISCONNECT,
	RIO_CM_CHAN_BOUND,
	RIO_CM_LISTEN,
	RIO_CM_DESTROYING,
};

/* Values carried in rio_ch_base_bhdr.type to classify inbound packets */
enum rio_cm_pkt_type {
	RIO_CM_SYS	= 0xaa,
	RIO_CM_CHAN	= 0x55,
};

/* Channel operation codes carried in rio_ch_chan_hdr.ch_op */
enum rio_cm_chop {
	CM_CONN_REQ,
	CM_CONN_ACK,
	CM_CONN_CLOSE,
	CM_DATA_MSG,
};
128
/*
 * On-wire base header present in every CM packet. Multi-byte fields are
 * stored in network byte order (see the ntohs/ntohl users below).
 */
struct rio_ch_base_bhdr {
	u32 src_id;
	u32 dst_id;
#define RIO_HDR_LETTER_MASK 0xffff0000
#define RIO_HDR_MBOX_MASK 0x0000ffff
	u8 src_mbox;
	u8 dst_mbox;
	u8 type;	/* enum rio_cm_pkt_type */
} __attribute__((__packed__));

/* On-wire channel-level header: base header plus channel routing fields */
struct rio_ch_chan_hdr {
	struct rio_ch_base_bhdr bhdr;
	u8 ch_op;	/* enum rio_cm_chop */
	u16 dst_ch;	/* destination channel ID (network order) */
	u16 src_ch;	/* source channel ID (network order) */
	u16 msg_len;	/* payload length incl. header (network order) */
	u16 rsrvd;
} __attribute__((__packed__));

/* Deferred TX request queued when the outbound ring is full */
struct tx_req {
	struct list_head node;
	struct rio_dev *rdev;	/* target device */
	void *buffer;		/* packet to send; freed after posting */
	size_t len;
};
154
/*
 * Per-mport CM device state: inbound/outbound message rings, deferred TX
 * requests, known peer devices, and the inbound-message workqueue.
 */
struct cm_dev {
	struct list_head list;		/* node in cm_dev_list */
	struct rio_mport *mport;	/* associated master port */
	void *rx_buf[RIOCM_RX_RING_SIZE];
	int rx_slots;			/* number of empty RX ring slots */
	struct mutex rx_lock;		/* serializes RX ring access */

	void *tx_buf[RIOCM_TX_RING_SIZE];
	int tx_slot;			/* next TX slot to fill */
	int tx_cnt;			/* TX buffers currently in flight */
	int tx_ack_slot;		/* next TX slot to be acknowledged */
	struct list_head tx_reqs;	/* deferred struct tx_req entries */
	spinlock_t tx_lock;		/* protects TX ring and tx_reqs */

	struct list_head peers;		/* struct cm_peer entries */
	u32 npeers;
	struct workqueue_struct *rx_wq;
	struct work_struct rx_work;	/* runs rio_ibmsg_handler() */
};

/* Per-channel RX ring: buffered inbound packets plus in-use tracking */
struct chan_rx_ring {
	void *buf[RIOCM_RX_RING_SIZE];
	int head;			/* producer index (wraps) */
	int tail;			/* consumer index (wraps) */
	int count;			/* buffered packets */

	/* Tracking RX buffers reported to upper level */
	void *inuse[RIOCM_RX_RING_SIZE];
	int inuse_cnt;
};

/* A CM channel endpoint; lifetime is reference-counted via @ref */
struct rio_channel {
	u16 id;				/* local channel ID */
	struct kref ref;		/* channel refcount */
	struct file *filp;
	struct cm_dev *cmdev;		/* associated CM device object */
	struct rio_dev *rdev;		/* remote RapidIO device */
	enum rio_cm_state state;
	int error;
	spinlock_t lock;		/* protects state and rx_ring */
	void *context;
	u32 loc_destid;			/* local destID */
	u32 rem_destid;			/* remote destID */
	u16 rem_channel;		/* remote channel ID */
	struct list_head accept_queue;	/* pending struct conn_req entries */
	struct list_head ch_node;
	struct completion comp;		/* RX data / connect-event signal */
	struct completion comp_close;
	struct chan_rx_ring rx_ring;
};

/* A remote RapidIO device known to a CM device */
struct cm_peer {
	struct list_head node;
	struct rio_dev *rdev;
};

/* Work item wrapper used for deferred CM packet processing */
struct rio_cm_work {
	struct work_struct work;
	struct cm_dev *cm;
	void *data;
};

/* Pending inbound connection request queued on a listening channel */
struct conn_req {
	struct list_head node;
	u32 destid;	/* requester destID */
	u16 chan;	/* requester channel ID */
	struct cm_dev *cmdev;
};

/*
 * A channel_dev structure represents a CM_CDEV
 * @cdev	Character device
 * @dev		Associated device object
 */
struct channel_dev {
	struct cdev cdev;
	struct device *dev;
};
233
/* Forward declarations for routines defined later in this file */
static struct rio_channel *riocm_ch_alloc(u16 ch_num);
static void riocm_ch_free(struct kref *ref);
static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev,
			   void *buffer, size_t len);
static int riocm_ch_close(struct rio_channel *ch);

/* idr_lock protects ch_idr, the ID-to-channel lookup table */
static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(ch_idr);

/* rdev_sem protects cm_dev_list and each cm_dev's peer list */
static LIST_HEAD(cm_dev_list);
static DECLARE_RWSEM(rdev_sem);

/* Character-device bookkeeping for the /dev interface */
static struct class *dev_class;
static unsigned int dev_major;
static unsigned int dev_minor_base;
static dev_t dev_number;
static struct channel_dev riocm_cdev;

/* True when both endpoints advertise RapidIO data-message capability */
#define is_msg_capable(src_ops, dst_ops) \
			((src_ops & RIO_SRC_OPS_DATA_MSG) && \
			 (dst_ops & RIO_DST_OPS_DATA_MSG))
#define dev_cm_capable(dev) \
	is_msg_capable(dev->src_ops, dev->dst_ops)
257
258 static int riocm_cmp(struct rio_channel *ch, enum rio_cm_state cmp)
259 {
260 int ret;
261
262 spin_lock_bh(&ch->lock);
263 ret = (ch->state == cmp);
264 spin_unlock_bh(&ch->lock);
265 return ret;
266 }
267
268 static int riocm_cmp_exch(struct rio_channel *ch,
269 enum rio_cm_state cmp, enum rio_cm_state exch)
270 {
271 int ret;
272
273 spin_lock_bh(&ch->lock);
274 ret = (ch->state == cmp);
275 if (ret)
276 ch->state = exch;
277 spin_unlock_bh(&ch->lock);
278 return ret;
279 }
280
281 static enum rio_cm_state riocm_exch(struct rio_channel *ch,
282 enum rio_cm_state exch)
283 {
284 enum rio_cm_state old;
285
286 spin_lock_bh(&ch->lock);
287 old = ch->state;
288 ch->state = exch;
289 spin_unlock_bh(&ch->lock);
290 return old;
291 }
292
293 static struct rio_channel *riocm_get_channel(u16 nr)
294 {
295 struct rio_channel *ch;
296
297 spin_lock_bh(&idr_lock);
298 ch = idr_find(&ch_idr, nr);
299 if (ch)
300 kref_get(&ch->ref);
301 spin_unlock_bh(&idr_lock);
302 return ch;
303 }
304
/*
 * riocm_put_channel - drop a channel reference
 * @ch: channel object
 *
 * Releases a reference taken by riocm_get_channel(); riocm_ch_free() runs
 * when the last reference is dropped.
 */
static void riocm_put_channel(struct rio_channel *ch)
{
	kref_put(&ch->ref, riocm_ch_free);
}
309
310 static void *riocm_rx_get_msg(struct cm_dev *cm)
311 {
312 void *msg;
313 int i;
314
315 msg = rio_get_inb_message(cm->mport, cmbox);
316 if (msg) {
317 for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
318 if (cm->rx_buf[i] == msg) {
319 cm->rx_buf[i] = NULL;
320 cm->rx_slots++;
321 break;
322 }
323 }
324
325 if (i == RIOCM_RX_RING_SIZE)
326 riocm_warn("no record for buffer 0x%p", msg);
327 }
328
329 return msg;
330 }
331
332 /*
333 * riocm_rx_fill - fills a ring of receive buffers for given cm device
334 * @cm: cm_dev object
335 * @nent: max number of entries to fill
336 *
337 * Returns: none
338 */
339 static void riocm_rx_fill(struct cm_dev *cm, int nent)
340 {
341 int i;
342
343 if (cm->rx_slots == 0)
344 return;
345
346 for (i = 0; i < RIOCM_RX_RING_SIZE && cm->rx_slots && nent; i++) {
347 if (cm->rx_buf[i] == NULL) {
348 cm->rx_buf[i] = kmalloc(RIO_MAX_MSG_SIZE, GFP_KERNEL);
349 if (cm->rx_buf[i] == NULL)
350 break;
351 rio_add_inb_buffer(cm->mport, cmbox, cm->rx_buf[i]);
352 cm->rx_slots--;
353 nent--;
354 }
355 }
356 }
357
358 /*
359 * riocm_rx_free - frees all receive buffers associated with given cm device
360 * @cm: cm_dev object
361 *
362 * Returns: none
363 */
364 static void riocm_rx_free(struct cm_dev *cm)
365 {
366 int i;
367
368 for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
369 if (cm->rx_buf[i] != NULL) {
370 kfree(cm->rx_buf[i]);
371 cm->rx_buf[i] = NULL;
372 }
373 }
374 }
375
/*
 * riocm_req_handler - connection request handler
 * @cm: cm_dev object
 * @req_data: pointer to the request packet
 *
 * Queues an inbound CM_CONN_REQ on the destination channel's accept queue
 * and wakes a waiter blocked in riocm_ch_accept(). The packet buffer
 * itself is NOT consumed here; the caller still owns @req_data.
 *
 * Returns: 0 if success, or
 *          -EINVAL if channel is not in correct state,
 *          -ENODEV if cannot find a channel with specified ID,
 *          -ENOMEM if unable to allocate memory to store the request
 */
static int riocm_req_handler(struct cm_dev *cm, void *req_data)
{
	struct rio_channel *ch;
	struct conn_req *req;
	struct rio_ch_chan_hdr *hh = req_data;
	u16 chnum;

	/* Header fields are carried in network byte order */
	chnum = ntohs(hh->dst_ch);

	ch = riocm_get_channel(chnum);

	if (!ch)
		return -ENODEV;

	/* Only a listening channel may accept connection requests */
	if (ch->state != RIO_CM_LISTEN) {
		riocm_debug(RX_CMD, "channel %d is not in listen state", chnum);
		riocm_put_channel(ch);
		return -EINVAL;
	}

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req) {
		riocm_put_channel(ch);
		return -ENOMEM;
	}

	/* Record requester identity so the accept path can send the ACK */
	req->destid = ntohl(hh->bhdr.src_id);
	req->chan = ntohs(hh->src_ch);
	req->cmdev = cm;

	spin_lock_bh(&ch->lock);
	list_add_tail(&req->node, &ch->accept_queue);
	spin_unlock_bh(&ch->lock);
	complete(&ch->comp);	/* wake riocm_ch_accept() waiter */
	riocm_put_channel(ch);

	return 0;
}
424
/*
 * riocm_resp_handler - response to connection request handler
 * @resp_data: pointer to the response packet
 *
 * Transitions the local channel from CONNECT to CONNECTED on receipt of
 * a CM_CONN_ACK, records the remote channel ID, and wakes the thread
 * waiting in riocm_ch_connect(). Does not consume @resp_data.
 *
 * Returns: 0 if success, or
 *          -EINVAL if channel is not in correct state,
 *          -ENODEV if cannot find a channel with specified ID,
 */
static int riocm_resp_handler(void *resp_data)
{
	struct rio_channel *ch;
	struct rio_ch_chan_hdr *hh = resp_data;
	u16 chnum;

	chnum = ntohs(hh->dst_ch);
	ch = riocm_get_channel(chnum);
	if (!ch)
		return -ENODEV;

	/* An ACK is only meaningful while we are waiting to connect */
	if (ch->state != RIO_CM_CONNECT) {
		riocm_put_channel(ch);
		return -EINVAL;
	}

	riocm_exch(ch, RIO_CM_CONNECTED);
	ch->rem_channel = ntohs(hh->src_ch);
	complete(&ch->comp);	/* wake riocm_ch_connect() waiter */
	riocm_put_channel(ch);

	return 0;
}
456
/*
 * riocm_close_handler - channel close request handler
 * @data: pointer to the request packet
 *
 * Handles a remote CM_CONN_CLOSE: removes the channel from the ID table
 * (so no new lookups can find it), marks it DISCONNECT, and tears it
 * down via riocm_ch_close(). Does not consume @data.
 *
 * Returns: 0 if success, or
 *          -ENODEV if cannot find a channel with specified ID,
 *          + error codes returned by riocm_ch_close.
 */
static int riocm_close_handler(void *data)
{
	struct rio_channel *ch;
	struct rio_ch_chan_hdr *hh = data;
	int ret;

	riocm_debug(RX_CMD, "for ch=%d", ntohs(hh->dst_ch));

	/* Look up and unlink under one lock hold so the removal is atomic */
	spin_lock_bh(&idr_lock);
	ch = idr_find(&ch_idr, ntohs(hh->dst_ch));
	if (!ch) {
		spin_unlock_bh(&idr_lock);
		return -ENODEV;
	}
	idr_remove(&ch_idr, ch->id);
	spin_unlock_bh(&idr_lock);

	riocm_exch(ch, RIO_CM_DISCONNECT);

	ret = riocm_ch_close(ch);
	if (ret)
		riocm_debug(RX_CMD, "riocm_ch_close() returned %d", ret);

	/* Close errors are logged but not propagated to the RX path */
	return 0;
}
490
491 /*
492 * rio_cm_handler - function that services request (non-data) packets
493 * @cm: cm_dev object
494 * @data: pointer to the packet
495 */
496 static void rio_cm_handler(struct cm_dev *cm, void *data)
497 {
498 struct rio_ch_chan_hdr *hdr;
499
500 if (!rio_mport_is_running(cm->mport))
501 goto out;
502
503 hdr = data;
504
505 riocm_debug(RX_CMD, "OP=%x for ch=%d from %d",
506 hdr->ch_op, ntohs(hdr->dst_ch), ntohs(hdr->src_ch));
507
508 switch (hdr->ch_op) {
509 case CM_CONN_REQ:
510 riocm_req_handler(cm, data);
511 break;
512 case CM_CONN_ACK:
513 riocm_resp_handler(data);
514 break;
515 case CM_CONN_CLOSE:
516 riocm_close_handler(data);
517 break;
518 default:
519 riocm_error("Invalid packet header");
520 break;
521 }
522 out:
523 kfree(data);
524 }
525
/*
 * rio_rx_data_handler - received data packet handler
 * @cm: cm_dev object
 * @buf: data packet
 *
 * Takes ownership of @buf: the buffer is either placed into the target
 * channel's RX ring or freed here on every error path.
 *
 * Returns: 0 if success, or
 *          -ENODEV if cannot find a channel with specified ID,
 *          -EIO if channel is not in CONNECTED state,
 *          -ENOMEM if channel RX queue is full (packet discarded)
 */
static int rio_rx_data_handler(struct cm_dev *cm, void *buf)
{
	struct rio_ch_chan_hdr *hdr;
	struct rio_channel *ch;

	hdr = buf;

	riocm_debug(RX_DATA, "for ch=%d", ntohs(hdr->dst_ch));

	ch = riocm_get_channel(ntohs(hdr->dst_ch));
	if (!ch) {
		/* Discard data message for non-existing channel */
		kfree(buf);
		return -ENODEV;
	}

	/* Place pointer to the buffer into channel's RX queue */
	spin_lock(&ch->lock);

	if (ch->state != RIO_CM_CONNECTED) {
		/* Channel is not ready to receive data, discard a packet */
		riocm_debug(RX_DATA, "ch=%d is in wrong state=%d",
			    ch->id, ch->state);
		spin_unlock(&ch->lock);
		kfree(buf);
		riocm_put_channel(ch);
		return -EIO;
	}

	if (ch->rx_ring.count == RIOCM_RX_RING_SIZE) {
		/* If RX ring is full, discard a packet */
		riocm_debug(RX_DATA, "ch=%d is full", ch->id);
		spin_unlock(&ch->lock);
		kfree(buf);
		riocm_put_channel(ch);
		return -ENOMEM;
	}

	/* Enqueue at head; head/tail wrap around the fixed-size ring */
	ch->rx_ring.buf[ch->rx_ring.head] = buf;
	ch->rx_ring.head++;
	ch->rx_ring.count++;
	ch->rx_ring.head %= RIOCM_RX_RING_SIZE;

	/* Wake a reader blocked in riocm_ch_receive() */
	complete(&ch->comp);

	spin_unlock(&ch->lock);
	riocm_put_channel(ch);

	return 0;
}
586
/*
 * rio_ibmsg_handler - inbound message packet handler
 * @work: work item embedded in the cm_dev (see riocm_inb_msg_event())
 *
 * Workqueue handler that drains the inbound mailbox. Each fetched buffer
 * is replaced in the RX ring (one-for-one refill) and then dispatched to
 * either the data path or the control-packet handler; in both cases the
 * dispatched handler owns (and eventually frees) the buffer.
 */
static void rio_ibmsg_handler(struct work_struct *work)
{
	struct cm_dev *cm = container_of(work, struct cm_dev, rx_work);
	void *data;
	struct rio_ch_chan_hdr *hdr;

	if (!rio_mport_is_running(cm->mport))
		return;

	while (1) {
		/* Fetch one message and refill its slot under rx_lock */
		mutex_lock(&cm->rx_lock);
		data = riocm_rx_get_msg(cm);
		if (data)
			riocm_rx_fill(cm, 1);
		mutex_unlock(&cm->rx_lock);

		if (data == NULL)
			break;	/* mailbox drained */

		hdr = data;

		if (hdr->bhdr.type != RIO_CM_CHAN) {
			/* For now simply discard packets other than channel */
			riocm_error("Unsupported TYPE code (0x%x). Msg dropped",
				    hdr->bhdr.type);
			kfree(data);
			continue;
		}

		/* Process a channel message */
		if (hdr->ch_op == CM_DATA_MSG)
			rio_rx_data_handler(cm, data);
		else
			rio_cm_handler(cm, data);
	}
}
626
627 static void riocm_inb_msg_event(struct rio_mport *mport, void *dev_id,
628 int mbox, int slot)
629 {
630 struct cm_dev *cm = dev_id;
631
632 if (rio_mport_is_running(cm->mport) && !work_pending(&cm->rx_work))
633 queue_work(cm->rx_wq, &cm->rx_work);
634 }
635
/*
 * rio_txcq_handler - TX completion handler
 * @cm: cm_dev object
 * @slot: TX queue slot
 *
 * TX completion handler also ensures that pending request packets are placed
 * into transmit queue as soon as a free slot becomes available. This is done
 * to give higher priority to request packets during high intensity data flow.
 */
static void rio_txcq_handler(struct cm_dev *cm, int slot)
{
	int ack_slot;

	/* ATTN: Add TX completion notification if/when direct buffer
	 * transfer is implemented. At this moment only correct tracking
	 * of tx_count is important.
	 */
	riocm_debug(TX_EVENT, "for mport_%d slot %d tx_cnt %d",
		    cm->mport->id, slot, cm->tx_cnt);

	spin_lock(&cm->tx_lock);
	ack_slot = cm->tx_ack_slot;

	if (ack_slot == slot)
		riocm_debug(TX_EVENT, "slot == ack_slot");

	/* Release every slot from the last acknowledged one up to @slot.
	 * The tx_cnt == RING_SIZE clause handles the full-ring case where
	 * ack_slot may already equal slot.
	 */
	while (cm->tx_cnt && ((ack_slot != slot) ||
	       (cm->tx_cnt == RIOCM_TX_RING_SIZE))) {

		cm->tx_buf[ack_slot] = NULL;
		++ack_slot;
		ack_slot &= (RIOCM_TX_RING_SIZE - 1);
		cm->tx_cnt--;
	}

	/* Sanity check on ring accounting; out-of-range tx_cnt indicates
	 * a lost or duplicated completion event.
	 */
	if (cm->tx_cnt < 0 || cm->tx_cnt > RIOCM_TX_RING_SIZE)
		riocm_error("tx_cnt %d out of sync", cm->tx_cnt);

	WARN_ON((cm->tx_cnt < 0) || (cm->tx_cnt > RIOCM_TX_RING_SIZE));

	cm->tx_ack_slot = ack_slot;

	/*
	 * If there are pending requests, insert them into transmit queue
	 */
	if (!list_empty(&cm->tx_reqs) && (cm->tx_cnt < RIOCM_TX_RING_SIZE)) {
		struct tx_req *req, *_req;
		int rc;

		list_for_each_entry_safe(req, _req, &cm->tx_reqs, node) {
			list_del(&req->node);
			cm->tx_buf[cm->tx_slot] = req->buffer;
			/* NOTE(review): rc is never checked — a failed
			 * rio_add_outb_message() is silently ignored here.
			 */
			rc = rio_add_outb_message(cm->mport, req->rdev, cmbox,
						  req->buffer, req->len);
			kfree(req->buffer);
			kfree(req);

			++cm->tx_cnt;
			++cm->tx_slot;
			cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1);
			if (cm->tx_cnt == RIOCM_TX_RING_SIZE)
				break;
		}
	}

	spin_unlock(&cm->tx_lock);
}
703
704 static void riocm_outb_msg_event(struct rio_mport *mport, void *dev_id,
705 int mbox, int slot)
706 {
707 struct cm_dev *cm = dev_id;
708
709 if (cm && rio_mport_is_running(cm->mport))
710 rio_txcq_handler(cm, slot);
711 }
712
713 static int riocm_queue_req(struct cm_dev *cm, struct rio_dev *rdev,
714 void *buffer, size_t len)
715 {
716 unsigned long flags;
717 struct tx_req *treq;
718
719 treq = kzalloc(sizeof(*treq), GFP_KERNEL);
720 if (treq == NULL)
721 return -ENOMEM;
722
723 treq->rdev = rdev;
724 treq->buffer = buffer;
725 treq->len = len;
726
727 spin_lock_irqsave(&cm->tx_lock, flags);
728 list_add_tail(&treq->node, &cm->tx_reqs);
729 spin_unlock_irqrestore(&cm->tx_lock, flags);
730 return 0;
731 }
732
/*
 * riocm_post_send - helper function that places packet into msg TX queue
 * @cm: cm_dev object
 * @rdev: target RapidIO device object (required by outbound msg interface)
 * @buffer: pointer to a packet buffer to send
 * @len: length of data to transfer
 *
 * Returns: 0 if success, or error code otherwise
 *          (-ENODEV if the mport is gone, -EBUSY if the TX ring is full,
 *          or the rio_add_outb_message() result).
 */
static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev,
			   void *buffer, size_t len)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&cm->tx_lock, flags);

	if (cm->mport == NULL) {
		rc = -ENODEV;
		goto err_out;
	}

	if (cm->tx_cnt == RIOCM_TX_RING_SIZE) {
		riocm_debug(TX, "Tx Queue is full");
		rc = -EBUSY;
		goto err_out;
	}

	cm->tx_buf[cm->tx_slot] = buffer;
	rc = rio_add_outb_message(cm->mport, rdev, cmbox, buffer, len);

	riocm_debug(TX, "Add buf@%p destid=%x tx_slot=%d tx_cnt=%d",
		    buffer, rdev->destid, cm->tx_slot, cm->tx_cnt);

	/* Advance ring counters; tx_slot wraps (ring size is a power of 2) */
	++cm->tx_cnt;
	++cm->tx_slot;
	cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1);

err_out:
	spin_unlock_irqrestore(&cm->tx_lock, flags);
	return rc;
}
776
/*
 * riocm_ch_send - sends a data packet to a remote device
 * @ch_id: local channel ID
 * @buf: pointer to a data buffer to send (including CM header)
 * @len: length of data to transfer (including CM header)
 *
 * ATTN: ASSUMES THAT THE HEADER SPACE IS RESERVED PART OF THE DATA PACKET
 *
 * Returns: 0 if success, or
 *          -EINVAL if one or more input parameters is/are not valid,
 *          -ENODEV if cannot find a channel with specified ID,
 *          -EAGAIN if a channel is not in CONNECTED state,
 *          + error codes returned by HW send routine.
 */
static int riocm_ch_send(u16 ch_id, void *buf, int len)
{
	struct rio_channel *ch;
	struct rio_ch_chan_hdr *hdr;
	int ret;

	if (buf == NULL || ch_id == 0 || len == 0 || len > RIO_MAX_MSG_SIZE)
		return -EINVAL;

	ch = riocm_get_channel(ch_id);
	if (!ch) {
		riocm_error("%s(%d) ch_%d not found", current->comm,
			    task_pid_nr(current), ch_id);
		return -ENODEV;
	}

	if (!riocm_cmp(ch, RIO_CM_CONNECTED)) {
		ret = -EAGAIN;
		goto err_out;
	}

	/*
	 * Fill buffer header section with corresponding channel data
	 * (multi-byte fields converted to network byte order)
	 */
	hdr = buf;

	hdr->bhdr.src_id = htonl(ch->loc_destid);
	hdr->bhdr.dst_id = htonl(ch->rem_destid);
	hdr->bhdr.src_mbox = cmbox;
	hdr->bhdr.dst_mbox = cmbox;
	hdr->bhdr.type = RIO_CM_CHAN;
	hdr->ch_op = CM_DATA_MSG;
	hdr->dst_ch = htons(ch->rem_channel);
	hdr->src_ch = htons(ch->id);
	hdr->msg_len = htons((u16)len);

	/* ATTN: the function call below relies on the fact that underlying
	 * HW-specific add_outb_message() routine copies TX data into its own
	 * internal transfer buffer (true for all RIONET compatible mport
	 * drivers). Must be reviewed if mport driver uses the buffer directly.
	 */

	ret = riocm_post_send(ch->cmdev, ch->rdev, buf, len);
	if (ret)
		riocm_debug(TX, "ch %d send_err=%d", ch->id, ret);
err_out:
	riocm_put_channel(ch);
	return ret;
}
840
841 static int riocm_ch_free_rxbuf(struct rio_channel *ch, void *buf)
842 {
843 int i, ret = -EINVAL;
844
845 spin_lock_bh(&ch->lock);
846
847 for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
848 if (ch->rx_ring.inuse[i] == buf) {
849 ch->rx_ring.inuse[i] = NULL;
850 ch->rx_ring.inuse_cnt--;
851 ret = 0;
852 break;
853 }
854 }
855
856 spin_unlock_bh(&ch->lock);
857
858 if (!ret)
859 kfree(buf);
860
861 return ret;
862 }
863
/*
 * riocm_ch_receive - fetch a data packet received for the specified channel
 * @ch: channel object
 * @buf: pointer for returning the packet buffer (NULL on failure)
 * @timeout: timeout to wait for incoming packet (in jiffies)
 *
 * On success the returned buffer is recorded in the channel's in-use
 * table; the caller must return it via riocm_ch_free_rxbuf().
 *
 * Returns: 0 and valid buffer pointer if success, or NULL pointer and one of:
 *          -EAGAIN if a channel is not in CONNECTED state,
 *          -ENOMEM if in-use tracking queue is full,
 *          -ETIME if wait timeout expired,
 *          -EINTR if wait was interrupted,
 *          -ECONNRESET if the channel left CONNECTED state while waiting.
 */
static int riocm_ch_receive(struct rio_channel *ch, void **buf, long timeout)
{
	void *rxmsg = NULL;
	int i, ret = 0;
	long wret;

	if (!riocm_cmp(ch, RIO_CM_CONNECTED)) {
		ret = -EAGAIN;
		goto out;
	}

	if (ch->rx_ring.inuse_cnt == RIOCM_RX_RING_SIZE) {
		/* If we do not have entries to track buffers given to upper
		 * layer, reject request.
		 */
		ret = -ENOMEM;
		goto out;
	}

	/* Completion is signalled by rio_rx_data_handler() on packet arrival */
	wret = wait_for_completion_interruptible_timeout(&ch->comp, timeout);

	riocm_debug(WAIT, "wait on %d returned %ld", ch->id, wret);

	if (!wret)
		ret = -ETIME;
	else if (wret == -ERESTARTSYS)
		ret = -EINTR;
	else
		ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -ECONNRESET;

	if (ret)
		goto out;

	spin_lock_bh(&ch->lock);

	/* Dequeue from tail of the wrapping RX ring */
	rxmsg = ch->rx_ring.buf[ch->rx_ring.tail];
	ch->rx_ring.buf[ch->rx_ring.tail] = NULL;
	ch->rx_ring.count--;
	ch->rx_ring.tail++;
	ch->rx_ring.tail %= RIOCM_RX_RING_SIZE;
	ret = -ENOMEM;

	/* Track the buffer as "in use" until riocm_ch_free_rxbuf() */
	for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
		if (ch->rx_ring.inuse[i] == NULL) {
			ch->rx_ring.inuse[i] = rxmsg;
			ch->rx_ring.inuse_cnt++;
			ret = 0;
			break;
		}
	}

	if (ret) {
		/* We have no entry to store pending message: drop it */
		kfree(rxmsg);
		rxmsg = NULL;
	}

	spin_unlock_bh(&ch->lock);
out:
	*buf = rxmsg;
	return ret;
}
938
/*
 * riocm_ch_connect - sends a connect request to a remote device
 * @loc_ch: local channel ID
 * @cm: CM device to send connect request
 * @peer: target RapidIO device
 * @rem_ch: remote channel ID
 *
 * Returns: 0 if success, or
 *          -ENODEV if the local channel cannot be found,
 *          -EINVAL if the channel is not in IDLE state,
 *          -ENOMEM if the request packet cannot be allocated,
 *          -ETIME if ACK response timeout expired,
 *          -EINTR if wait for response was interrupted,
 *          -1 if the channel did not reach CONNECTED state.
 */
static int riocm_ch_connect(u16 loc_ch, struct cm_dev *cm,
			    struct cm_peer *peer, u16 rem_ch)
{
	struct rio_channel *ch = NULL;
	struct rio_ch_chan_hdr *hdr;
	int ret;
	long wret;

	ch = riocm_get_channel(loc_ch);
	if (!ch)
		return -ENODEV;

	/* Claim the channel: IDLE -> CONNECT, or bail out */
	if (!riocm_cmp_exch(ch, RIO_CM_IDLE, RIO_CM_CONNECT)) {
		ret = -EINVAL;
		goto conn_done;
	}

	ch->cmdev = cm;
	ch->rdev = peer->rdev;
	ch->context = NULL;
	ch->loc_destid = cm->mport->host_deviceid;
	ch->rem_channel = rem_ch;

	/*
	 * Send connect request to the remote RapidIO device
	 */

	hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
	if (hdr == NULL) {
		ret = -ENOMEM;
		goto conn_done;
	}

	hdr->bhdr.src_id = htonl(ch->loc_destid);
	hdr->bhdr.dst_id = htonl(peer->rdev->destid);
	hdr->bhdr.src_mbox = cmbox;
	hdr->bhdr.dst_mbox = cmbox;
	hdr->bhdr.type = RIO_CM_CHAN;
	hdr->ch_op = CM_CONN_REQ;
	hdr->dst_ch = htons(rem_ch);
	hdr->src_ch = htons(loc_ch);

	/* ATTN: the function call below relies on the fact that underlying
	 * HW-specific add_outb_message() routine copies TX data into its
	 * internal transfer buffer. Must be reviewed if mport driver uses
	 * this buffer directly.
	 */
	ret = riocm_post_send(cm, peer->rdev, hdr, sizeof(*hdr));

	/* On -EBUSY the packet is queued for later TX and keeps ownership
	 * of hdr; in every other case hdr can be freed here.
	 */
	if (ret != -EBUSY) {
		kfree(hdr);
	} else {
		ret = riocm_queue_req(cm, peer->rdev, hdr, sizeof(*hdr));
		if (ret)
			kfree(hdr);
	}

	if (ret) {
		/* Roll the channel state back so it can be reused */
		riocm_cmp_exch(ch, RIO_CM_CONNECT, RIO_CM_IDLE);
		goto conn_done;
	}

	/* Wait for connect response from the remote device */
	wret = wait_for_completion_interruptible_timeout(&ch->comp,
							 RIOCM_CONNECT_TO * HZ);
	riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret);

	if (!wret)
		ret = -ETIME;
	else if (wret == -ERESTARTSYS)
		ret = -EINTR;
	else
		ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -1;

conn_done:
	riocm_put_channel(ch);
	return ret;
}
1030
/*
 * riocm_send_ack - send a CM_CONN_ACK packet for an accepted connection
 * @ch: channel object (must have loc/rem destids and channel IDs set)
 *
 * On TX ring congestion (-EBUSY) the packet is queued for deferred
 * transmission and ownership of the header buffer passes to the queue;
 * otherwise the buffer is freed here.
 *
 * Returns: 0 if the ACK was posted or queued, or a send error code.
 */
static int riocm_send_ack(struct rio_channel *ch)
{
	struct rio_ch_chan_hdr *hdr;
	int ret;

	hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
	if (hdr == NULL)
		return -ENOMEM;

	hdr->bhdr.src_id = htonl(ch->loc_destid);
	hdr->bhdr.dst_id = htonl(ch->rem_destid);
	hdr->dst_ch = htons(ch->rem_channel);
	hdr->src_ch = htons(ch->id);
	hdr->bhdr.src_mbox = cmbox;
	hdr->bhdr.dst_mbox = cmbox;
	hdr->bhdr.type = RIO_CM_CHAN;
	hdr->ch_op = CM_CONN_ACK;

	/* ATTN: the function call below relies on the fact that underlying
	 * add_outb_message() routine copies TX data into its internal transfer
	 * buffer. Review if switching to direct buffer version.
	 */
	ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr));

	if (ret == -EBUSY && !riocm_queue_req(ch->cmdev,
					      ch->rdev, hdr, sizeof(*hdr)))
		return 0;	/* hdr now owned by the deferred TX queue */
	kfree(hdr);

	if (ret)
		riocm_error("send ACK to ch_%d on %s failed (ret=%d)",
			    ch->id, rio_name(ch->rdev), ret);
	return ret;
}
1065
/*
 * riocm_ch_accept - accept incoming connection request
 * @ch_id: listening channel ID
 * @new_ch_id: pointer for returning the new channel's ID (0 on failure)
 * @timeout: wait timeout (if 0 non-blocking call, do not wait if connection
 *           request is not available).
 *
 * Returns: pointer to new channel struct if success, or error-valued pointer:
 *          -ENODEV - cannot find specified channel or mport,
 *          -EINVAL - the channel is not in IDLE state,
 *          -EAGAIN - no connection request available immediately (timeout=0),
 *          -ENOMEM - unable to allocate new channel,
 *          -ETIME - wait timeout expired,
 *          -EINTR - wait was interrupted.
 */
static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
					   long timeout)
{
	struct rio_channel *ch = NULL;
	struct rio_channel *new_ch = NULL;
	struct conn_req *req;
	struct cm_peer *peer;
	int found = 0;
	int err = 0;
	long wret;

	ch = riocm_get_channel(ch_id);
	if (!ch)
		return ERR_PTR(-EINVAL);

	if (!riocm_cmp(ch, RIO_CM_LISTEN)) {
		err = -EINVAL;
		goto err_put;
	}

	/* Don't sleep if this is a non blocking call */
	if (!timeout) {
		if (!try_wait_for_completion(&ch->comp)) {
			err = -EAGAIN;
			goto err_put;
		}
	} else {
		riocm_debug(WAIT, "on %d", ch->id);

		/* ch->comp is completed by riocm_req_handler() */
		wret = wait_for_completion_interruptible_timeout(&ch->comp,
								 timeout);
		if (!wret) {
			err = -ETIME;
			goto err_put;
		} else if (wret == -ERESTARTSYS) {
			err = -EINTR;
			goto err_put;
		}
	}

	/* Re-validate state: the channel may have been closed while waiting */
	spin_lock_bh(&ch->lock);

	if (ch->state != RIO_CM_LISTEN) {
		err = -ECANCELED;
	} else if (list_empty(&ch->accept_queue)) {
		riocm_debug(WAIT, "on %d accept_queue is empty on completion",
			    ch->id);
		err = -EIO;
	}

	spin_unlock_bh(&ch->lock);

	if (err) {
		riocm_debug(WAIT, "on %d returns %d", ch->id, err);
		goto err_put;
	}

	/* Create new channel for this connection */
	new_ch = riocm_ch_alloc(RIOCM_CHNUM_AUTO);

	if (IS_ERR(new_ch)) {
		riocm_error("failed to get channel for new req (%ld)",
			    PTR_ERR(new_ch));
		err = -ENOMEM;
		goto err_put;
	}

	spin_lock_bh(&ch->lock);

	/* Pop the oldest pending request and copy its routing data */
	req = list_first_entry(&ch->accept_queue, struct conn_req, node);
	list_del(&req->node);
	new_ch->cmdev = ch->cmdev;
	new_ch->loc_destid = ch->loc_destid;
	new_ch->rem_destid = req->destid;
	new_ch->rem_channel = req->chan;

	spin_unlock_bh(&ch->lock);
	riocm_put_channel(ch);	/* done with the listening channel */
	kfree(req);

	down_read(&rdev_sem);
	/* Find requester's device object */
	list_for_each_entry(peer, &new_ch->cmdev->peers, node) {
		if (peer->rdev->destid == new_ch->rem_destid) {
			riocm_debug(RX_CMD, "found matching device(%s)",
				    rio_name(peer->rdev));
			found = 1;
			break;
		}
	}
	up_read(&rdev_sem);

	if (!found) {
		/* If peer device object not found, simply ignore the request */
		err = -ENODEV;
		goto err_nodev;
	}

	new_ch->rdev = peer->rdev;
	new_ch->state = RIO_CM_CONNECTED;
	spin_lock_init(&new_ch->lock);

	/* Acknowledge the connection request. */
	riocm_send_ack(new_ch);

	*new_ch_id = new_ch->id;
	return new_ch;
err_put:
	riocm_put_channel(ch);
err_nodev:
	/* Tear down a partially-initialized new channel, if any */
	if (new_ch) {
		spin_lock_bh(&idr_lock);
		idr_remove(&ch_idr, new_ch->id);
		spin_unlock_bh(&idr_lock);
		riocm_put_channel(new_ch);
	}
	*new_ch_id = 0;
	return ERR_PTR(err);
}
1200
1201 /*
1202 * riocm_ch_listen - puts a channel into LISTEN state
1203 * @ch_id: channel ID
1204 *
1205 * Returns: 0 if success, or
1206 * -EINVAL if the specified channel does not exists or
1207 * is not in CHAN_BOUND state.
1208 */
1209 static int riocm_ch_listen(u16 ch_id)
1210 {
1211 struct rio_channel *ch = NULL;
1212 int ret = 0;
1213
1214 riocm_debug(CHOP, "(ch_%d)", ch_id);
1215
1216 ch = riocm_get_channel(ch_id);
1217 if (!ch || !riocm_cmp_exch(ch, RIO_CM_CHAN_BOUND, RIO_CM_LISTEN))
1218 ret = -EINVAL;
1219 riocm_put_channel(ch);
1220 return ret;
1221 }
1222
1223 /*
1224 * riocm_ch_bind - associate a channel object and an mport device
1225 * @ch_id: channel ID
1226 * @mport_id: local mport device ID
1227 * @context: pointer to the additional caller's context
1228 *
1229 * Returns: 0 if success, or
1230 * -ENODEV if cannot find specified mport,
1231 * -EINVAL if the specified channel does not exist or
1232 * is not in IDLE state.
1233 */
1234 static int riocm_ch_bind(u16 ch_id, u8 mport_id, void *context)
1235 {
1236 struct rio_channel *ch = NULL;
1237 struct cm_dev *cm;
1238 int rc = -ENODEV;
1239
1240 riocm_debug(CHOP, "ch_%d to mport_%d", ch_id, mport_id);
1241
1242 /* Find matching cm_dev object */
1243 down_read(&rdev_sem);
1244 list_for_each_entry(cm, &cm_dev_list, list) {
1245 if ((cm->mport->id == mport_id) &&
1246 rio_mport_is_running(cm->mport)) {
1247 rc = 0;
1248 break;
1249 }
1250 }
1251
1252 if (rc)
1253 goto exit;
1254
1255 ch = riocm_get_channel(ch_id);
1256 if (!ch) {
1257 rc = -EINVAL;
1258 goto exit;
1259 }
1260
1261 spin_lock_bh(&ch->lock);
1262 if (ch->state != RIO_CM_IDLE) {
1263 spin_unlock_bh(&ch->lock);
1264 rc = -EINVAL;
1265 goto err_put;
1266 }
1267
1268 ch->cmdev = cm;
1269 ch->loc_destid = cm->mport->host_deviceid;
1270 ch->context = context;
1271 ch->state = RIO_CM_CHAN_BOUND;
1272 spin_unlock_bh(&ch->lock);
1273 err_put:
1274 riocm_put_channel(ch);
1275 exit:
1276 up_read(&rdev_sem);
1277 return rc;
1278 }
1279
1280 /*
1281 * riocm_ch_alloc - channel object allocation helper routine
1282 * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic)
1283 *
1284 * Return value: pointer to newly created channel object,
1285 * or error-valued pointer
1286 */
1287 static struct rio_channel *riocm_ch_alloc(u16 ch_num)
1288 {
1289 int id;
1290 int start, end;
1291 struct rio_channel *ch;
1292
1293 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
1294 if (!ch)
1295 return ERR_PTR(-ENOMEM);
1296
1297 if (ch_num) {
1298 /* If requested, try to obtain the specified channel ID */
1299 start = ch_num;
1300 end = ch_num + 1;
1301 } else {
1302 /* Obtain channel ID from the dynamic allocation range */
1303 start = chstart;
1304 end = RIOCM_MAX_CHNUM + 1;
1305 }
1306
1307 idr_preload(GFP_KERNEL);
1308 spin_lock_bh(&idr_lock);
1309 id = idr_alloc_cyclic(&ch_idr, ch, start, end, GFP_NOWAIT);
1310 spin_unlock_bh(&idr_lock);
1311 idr_preload_end();
1312
1313 if (id < 0) {
1314 kfree(ch);
1315 return ERR_PTR(id == -ENOSPC ? -EBUSY : id);
1316 }
1317
1318 ch->id = (u16)id;
1319 ch->state = RIO_CM_IDLE;
1320 spin_lock_init(&ch->lock);
1321 INIT_LIST_HEAD(&ch->accept_queue);
1322 INIT_LIST_HEAD(&ch->ch_node);
1323 init_completion(&ch->comp);
1324 init_completion(&ch->comp_close);
1325 kref_init(&ch->ref);
1326 ch->rx_ring.head = 0;
1327 ch->rx_ring.tail = 0;
1328 ch->rx_ring.count = 0;
1329 ch->rx_ring.inuse_cnt = 0;
1330
1331 return ch;
1332 }
1333
1334 /*
1335 * riocm_ch_create - creates a new channel object and allocates ID for it
1336 * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic)
1337 *
1338 * Allocates and initializes a new channel object. If the parameter ch_num > 0
1339 * and is within the valid range, riocm_ch_create tries to allocate the
1340 * specified ID for the new channel. If ch_num = 0, channel ID will be assigned
1341 * automatically from the range (chstart ... RIOCM_MAX_CHNUM).
1342 * Module parameter 'chstart' defines start of an ID range available for dynamic
1343 * allocation. Range below 'chstart' is reserved for pre-defined ID numbers.
1344 * Available channel numbers are limited by 16-bit size of channel numbers used
1345 * in the packet header.
1346 *
1347 * Return value: PTR to rio_channel structure if successful (with channel number
1348 * updated via pointer) or error-valued pointer if error.
1349 */
1350 static struct rio_channel *riocm_ch_create(u16 *ch_num)
1351 {
1352 struct rio_channel *ch = NULL;
1353
1354 ch = riocm_ch_alloc(*ch_num);
1355
1356 if (IS_ERR(ch))
1357 riocm_debug(CHOP, "Failed to allocate channel %d (err=%ld)",
1358 *ch_num, PTR_ERR(ch));
1359 else
1360 *ch_num = ch->id;
1361
1362 return ch;
1363 }
1364
1365 /*
1366 * riocm_ch_free - channel object release routine
1367 * @ref: pointer to a channel's kref structure
1368 */
1369 static void riocm_ch_free(struct kref *ref)
1370 {
1371 struct rio_channel *ch = container_of(ref, struct rio_channel, ref);
1372 int i;
1373
1374 riocm_debug(CHOP, "(ch_%d)", ch->id);
1375
1376 if (ch->rx_ring.inuse_cnt) {
1377 for (i = 0;
1378 i < RIOCM_RX_RING_SIZE && ch->rx_ring.inuse_cnt; i++) {
1379 if (ch->rx_ring.inuse[i] != NULL) {
1380 kfree(ch->rx_ring.inuse[i]);
1381 ch->rx_ring.inuse_cnt--;
1382 }
1383 }
1384 }
1385
1386 if (ch->rx_ring.count)
1387 for (i = 0; i < RIOCM_RX_RING_SIZE && ch->rx_ring.count; i++) {
1388 if (ch->rx_ring.buf[i] != NULL) {
1389 kfree(ch->rx_ring.buf[i]);
1390 ch->rx_ring.count--;
1391 }
1392 }
1393
1394 complete(&ch->comp_close);
1395 }
1396
1397 static int riocm_send_close(struct rio_channel *ch)
1398 {
1399 struct rio_ch_chan_hdr *hdr;
1400 int ret;
1401
1402 /*
1403 * Send CH_CLOSE notification to the remote RapidIO device
1404 */
1405
1406 hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
1407 if (hdr == NULL)
1408 return -ENOMEM;
1409
1410 hdr->bhdr.src_id = htonl(ch->loc_destid);
1411 hdr->bhdr.dst_id = htonl(ch->rem_destid);
1412 hdr->bhdr.src_mbox = cmbox;
1413 hdr->bhdr.dst_mbox = cmbox;
1414 hdr->bhdr.type = RIO_CM_CHAN;
1415 hdr->ch_op = CM_CONN_CLOSE;
1416 hdr->dst_ch = htons(ch->rem_channel);
1417 hdr->src_ch = htons(ch->id);
1418
1419 /* ATTN: the function call below relies on the fact that underlying
1420 * add_outb_message() routine copies TX data into its internal transfer
1421 * buffer. Needs to be reviewed if switched to direct buffer mode.
1422 */
1423 ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr));
1424
1425 if (ret == -EBUSY && !riocm_queue_req(ch->cmdev, ch->rdev,
1426 hdr, sizeof(*hdr)))
1427 return 0;
1428 kfree(hdr);
1429
1430 if (ret)
1431 riocm_error("ch(%d) send CLOSE failed (ret=%d)", ch->id, ret);
1432
1433 return ret;
1434 }
1435
1436 /*
1437 * riocm_ch_close - closes a channel object with specified ID (by local request)
1438 * @ch: channel to be closed
1439 */
1440 static int riocm_ch_close(struct rio_channel *ch)
1441 {
1442 unsigned long tmo = msecs_to_jiffies(3000);
1443 enum rio_cm_state state;
1444 long wret;
1445 int ret = 0;
1446
1447 riocm_debug(CHOP, "ch_%d by %s(%d)",
1448 ch->id, current->comm, task_pid_nr(current));
1449
1450 state = riocm_exch(ch, RIO_CM_DESTROYING);
1451 if (state == RIO_CM_CONNECTED)
1452 riocm_send_close(ch);
1453
1454 complete_all(&ch->comp);
1455
1456 riocm_put_channel(ch);
1457 wret = wait_for_completion_interruptible_timeout(&ch->comp_close, tmo);
1458
1459 riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret);
1460
1461 if (wret == 0) {
1462 /* Timeout on wait occurred */
1463 riocm_debug(CHOP, "%s(%d) timed out waiting for ch %d",
1464 current->comm, task_pid_nr(current), ch->id);
1465 ret = -ETIMEDOUT;
1466 } else if (wret == -ERESTARTSYS) {
1467 /* Wait_for_completion was interrupted by a signal */
1468 riocm_debug(CHOP, "%s(%d) wait for ch %d was interrupted",
1469 current->comm, task_pid_nr(current), ch->id);
1470 ret = -EINTR;
1471 }
1472
1473 if (!ret) {
1474 riocm_debug(CHOP, "ch_%d resources released", ch->id);
1475 kfree(ch);
1476 } else {
1477 riocm_debug(CHOP, "failed to release ch_%d resources", ch->id);
1478 }
1479
1480 return ret;
1481 }
1482
1483 /*
1484 * riocm_cdev_open() - Open character device
1485 */
1486 static int riocm_cdev_open(struct inode *inode, struct file *filp)
1487 {
1488 riocm_debug(INIT, "by %s(%d) filp=%p ",
1489 current->comm, task_pid_nr(current), filp);
1490
1491 if (list_empty(&cm_dev_list))
1492 return -ENODEV;
1493
1494 return 0;
1495 }
1496
1497 /*
1498 * riocm_cdev_release() - Release character device
1499 */
1500 static int riocm_cdev_release(struct inode *inode, struct file *filp)
1501 {
1502 struct rio_channel *ch, *_c;
1503 unsigned int i;
1504 LIST_HEAD(list);
1505
1506 riocm_debug(EXIT, "by %s(%d) filp=%p",
1507 current->comm, task_pid_nr(current), filp);
1508
1509 /* Check if there are channels associated with this file descriptor */
1510 spin_lock_bh(&idr_lock);
1511 idr_for_each_entry(&ch_idr, ch, i) {
1512 if (ch && ch->filp == filp) {
1513 riocm_debug(EXIT, "ch_%d not released by %s(%d)",
1514 ch->id, current->comm,
1515 task_pid_nr(current));
1516 idr_remove(&ch_idr, ch->id);
1517 list_add(&ch->ch_node, &list);
1518 }
1519 }
1520 spin_unlock_bh(&idr_lock);
1521
1522 if (!list_empty(&list)) {
1523 list_for_each_entry_safe(ch, _c, &list, ch_node) {
1524 list_del(&ch->ch_node);
1525 riocm_ch_close(ch);
1526 }
1527 }
1528
1529 return 0;
1530 }
1531
1532 /*
1533 * cm_ep_get_list_size() - Reports number of endpoints in the network
1534 */
1535 static int cm_ep_get_list_size(void __user *arg)
1536 {
1537 u32 __user *p = arg;
1538 u32 mport_id;
1539 u32 count = 0;
1540 struct cm_dev *cm;
1541
1542 if (get_user(mport_id, p))
1543 return -EFAULT;
1544 if (mport_id >= RIO_MAX_MPORTS)
1545 return -EINVAL;
1546
1547 /* Find a matching cm_dev object */
1548 down_read(&rdev_sem);
1549 list_for_each_entry(cm, &cm_dev_list, list) {
1550 if (cm->mport->id == mport_id) {
1551 count = cm->npeers;
1552 up_read(&rdev_sem);
1553 if (copy_to_user(arg, &count, sizeof(u32)))
1554 return -EFAULT;
1555 return 0;
1556 }
1557 }
1558 up_read(&rdev_sem);
1559
1560 return -ENODEV;
1561 }
1562
1563 /*
1564 * cm_ep_get_list() - Returns list of attached endpoints
1565 */
1566 static int cm_ep_get_list(void __user *arg)
1567 {
1568 struct cm_dev *cm;
1569 struct cm_peer *peer;
1570 u32 info[2];
1571 void *buf;
1572 u32 nent;
1573 u32 *entry_ptr;
1574 u32 i = 0;
1575 int ret = 0;
1576
1577 if (copy_from_user(&info, arg, sizeof(info)))
1578 return -EFAULT;
1579
1580 if (info[1] >= RIO_MAX_MPORTS || info[0] > RIOCM_MAX_EP_COUNT)
1581 return -EINVAL;
1582
1583 /* Find a matching cm_dev object */
1584 down_read(&rdev_sem);
1585 list_for_each_entry(cm, &cm_dev_list, list)
1586 if (cm->mport->id == (u8)info[1])
1587 goto found;
1588
1589 up_read(&rdev_sem);
1590 return -ENODEV;
1591
1592 found:
1593 nent = min(info[0], cm->npeers);
1594 buf = kcalloc(nent + 2, sizeof(u32), GFP_KERNEL);
1595 if (!buf) {
1596 up_read(&rdev_sem);
1597 return -ENOMEM;
1598 }
1599
1600 entry_ptr = (u32 *)((uintptr_t)buf + 2*sizeof(u32));
1601
1602 list_for_each_entry(peer, &cm->peers, node) {
1603 *entry_ptr = (u32)peer->rdev->destid;
1604 entry_ptr++;
1605 if (++i == nent)
1606 break;
1607 }
1608 up_read(&rdev_sem);
1609
1610 ((u32 *)buf)[0] = i; /* report an updated number of entries */
1611 ((u32 *)buf)[1] = info[1]; /* put back an mport ID */
1612 if (copy_to_user(arg, buf, sizeof(u32) * (info[0] + 2)))
1613 ret = -EFAULT;
1614
1615 kfree(buf);
1616 return ret;
1617 }
1618
1619 /*
1620 * cm_mport_get_list() - Returns list of available local mport devices
1621 */
1622 static int cm_mport_get_list(void __user *arg)
1623 {
1624 int ret = 0;
1625 u32 entries;
1626 void *buf;
1627 struct cm_dev *cm;
1628 u32 *entry_ptr;
1629 int count = 0;
1630
1631 if (copy_from_user(&entries, arg, sizeof(entries)))
1632 return -EFAULT;
1633 if (entries == 0 || entries > RIO_MAX_MPORTS)
1634 return -EINVAL;
1635 buf = kcalloc(entries + 1, sizeof(u32), GFP_KERNEL);
1636 if (!buf)
1637 return -ENOMEM;
1638
1639 /* Scan all registered cm_dev objects */
1640 entry_ptr = (u32 *)((uintptr_t)buf + sizeof(u32));
1641 down_read(&rdev_sem);
1642 list_for_each_entry(cm, &cm_dev_list, list) {
1643 if (count++ < entries) {
1644 *entry_ptr = (cm->mport->id << 16) |
1645 cm->mport->host_deviceid;
1646 entry_ptr++;
1647 }
1648 }
1649 up_read(&rdev_sem);
1650
1651 *((u32 *)buf) = count; /* report a real number of entries */
1652 if (copy_to_user(arg, buf, sizeof(u32) * (count + 1)))
1653 ret = -EFAULT;
1654
1655 kfree(buf);
1656 return ret;
1657 }
1658
1659 /*
1660 * cm_chan_create() - Create a message exchange channel
1661 */
1662 static int cm_chan_create(struct file *filp, void __user *arg)
1663 {
1664 u16 __user *p = arg;
1665 u16 ch_num;
1666 struct rio_channel *ch;
1667
1668 if (get_user(ch_num, p))
1669 return -EFAULT;
1670
1671 riocm_debug(CHOP, "ch_%d requested by %s(%d)",
1672 ch_num, current->comm, task_pid_nr(current));
1673 ch = riocm_ch_create(&ch_num);
1674 if (IS_ERR(ch))
1675 return PTR_ERR(ch);
1676
1677 ch->filp = filp;
1678 riocm_debug(CHOP, "ch_%d created by %s(%d)",
1679 ch_num, current->comm, task_pid_nr(current));
1680 return put_user(ch_num, p);
1681 }
1682
1683 /*
1684 * cm_chan_close() - Close channel
1685 * @filp: Pointer to file object
1686 * @arg: Channel to close
1687 */
1688 static int cm_chan_close(struct file *filp, void __user *arg)
1689 {
1690 u16 __user *p = arg;
1691 u16 ch_num;
1692 struct rio_channel *ch;
1693
1694 if (get_user(ch_num, p))
1695 return -EFAULT;
1696
1697 riocm_debug(CHOP, "ch_%d by %s(%d)",
1698 ch_num, current->comm, task_pid_nr(current));
1699
1700 spin_lock_bh(&idr_lock);
1701 ch = idr_find(&ch_idr, ch_num);
1702 if (!ch) {
1703 spin_unlock_bh(&idr_lock);
1704 return 0;
1705 }
1706 if (ch->filp != filp) {
1707 spin_unlock_bh(&idr_lock);
1708 return -EINVAL;
1709 }
1710 idr_remove(&ch_idr, ch->id);
1711 spin_unlock_bh(&idr_lock);
1712
1713 return riocm_ch_close(ch);
1714 }
1715
1716 /*
1717 * cm_chan_bind() - Bind channel
1718 * @arg: Channel number
1719 */
1720 static int cm_chan_bind(void __user *arg)
1721 {
1722 struct rio_cm_channel chan;
1723
1724 if (copy_from_user(&chan, arg, sizeof(chan)))
1725 return -EFAULT;
1726 if (chan.mport_id >= RIO_MAX_MPORTS)
1727 return -EINVAL;
1728
1729 return riocm_ch_bind(chan.id, chan.mport_id, NULL);
1730 }
1731
1732 /*
1733 * cm_chan_listen() - Listen on channel
1734 * @arg: Channel number
1735 */
1736 static int cm_chan_listen(void __user *arg)
1737 {
1738 u16 __user *p = arg;
1739 u16 ch_num;
1740
1741 if (get_user(ch_num, p))
1742 return -EFAULT;
1743
1744 return riocm_ch_listen(ch_num);
1745 }
1746
1747 /*
1748 * cm_chan_accept() - Accept incoming connection
1749 * @filp: Pointer to file object
1750 * @arg: Channel number
1751 */
1752 static int cm_chan_accept(struct file *filp, void __user *arg)
1753 {
1754 struct rio_cm_accept param;
1755 long accept_to;
1756 struct rio_channel *ch;
1757
1758 if (copy_from_user(¶m, arg, sizeof(param)))
1759 return -EFAULT;
1760
1761 riocm_debug(CHOP, "on ch_%d by %s(%d)",
1762 param.ch_num, current->comm, task_pid_nr(current));
1763
1764 accept_to = param.wait_to ?
1765 msecs_to_jiffies(param.wait_to) : 0;
1766
1767 ch = riocm_ch_accept(param.ch_num, ¶m.ch_num, accept_to);
1768 if (IS_ERR(ch))
1769 return PTR_ERR(ch);
1770 ch->filp = filp;
1771
1772 riocm_debug(CHOP, "new ch_%d for %s(%d)",
1773 ch->id, current->comm, task_pid_nr(current));
1774
1775 if (copy_to_user(arg, ¶m, sizeof(param)))
1776 return -EFAULT;
1777 return 0;
1778 }
1779
1780 /*
1781 * cm_chan_connect() - Connect on channel
1782 * @arg: Channel information
1783 */
1784 static int cm_chan_connect(void __user *arg)
1785 {
1786 struct rio_cm_channel chan;
1787 struct cm_dev *cm;
1788 struct cm_peer *peer;
1789 int ret = -ENODEV;
1790
1791 if (copy_from_user(&chan, arg, sizeof(chan)))
1792 return -EFAULT;
1793 if (chan.mport_id >= RIO_MAX_MPORTS)
1794 return -EINVAL;
1795
1796 down_read(&rdev_sem);
1797
1798 /* Find matching cm_dev object */
1799 list_for_each_entry(cm, &cm_dev_list, list) {
1800 if (cm->mport->id == chan.mport_id) {
1801 ret = 0;
1802 break;
1803 }
1804 }
1805
1806 if (ret)
1807 goto err_out;
1808
1809 if (chan.remote_destid >= RIO_ANY_DESTID(cm->mport->sys_size)) {
1810 ret = -EINVAL;
1811 goto err_out;
1812 }
1813
1814 /* Find corresponding RapidIO endpoint device object */
1815 ret = -ENODEV;
1816
1817 list_for_each_entry(peer, &cm->peers, node) {
1818 if (peer->rdev->destid == chan.remote_destid) {
1819 ret = 0;
1820 break;
1821 }
1822 }
1823
1824 if (ret)
1825 goto err_out;
1826
1827 up_read(&rdev_sem);
1828
1829 return riocm_ch_connect(chan.id, cm, peer, chan.remote_channel);
1830 err_out:
1831 up_read(&rdev_sem);
1832 return ret;
1833 }
1834
1835 /*
1836 * cm_chan_msg_send() - Send a message through channel
1837 * @arg: Outbound message information
1838 */
1839 static int cm_chan_msg_send(void __user *arg)
1840 {
1841 struct rio_cm_msg msg;
1842 void *buf;
1843 int ret = 0;
1844
1845 if (copy_from_user(&msg, arg, sizeof(msg)))
1846 return -EFAULT;
1847 if (msg.size > RIO_MAX_MSG_SIZE)
1848 return -EINVAL;
1849
1850 buf = kmalloc(msg.size, GFP_KERNEL);
1851 if (!buf)
1852 return -ENOMEM;
1853
1854 if (copy_from_user(buf, (void __user *)(uintptr_t)msg.msg, msg.size)) {
1855 ret = -EFAULT;
1856 goto out;
1857 }
1858
1859 ret = riocm_ch_send(msg.ch_num, buf, msg.size);
1860 out:
1861 kfree(buf);
1862 return ret;
1863 }
1864
1865 /*
1866 * cm_chan_msg_rcv() - Receive a message through channel
1867 * @arg: Inbound message information
1868 */
1869 static int cm_chan_msg_rcv(void __user *arg)
1870 {
1871 struct rio_cm_msg msg;
1872 struct rio_channel *ch;
1873 void *buf;
1874 long rxto;
1875 int ret = 0, msg_size;
1876
1877 if (copy_from_user(&msg, arg, sizeof(msg)))
1878 return -EFAULT;
1879
1880 if (msg.ch_num == 0 || msg.size == 0)
1881 return -EINVAL;
1882
1883 ch = riocm_get_channel(msg.ch_num);
1884 if (!ch)
1885 return -ENODEV;
1886
1887 rxto = msg.rxto ? msecs_to_jiffies(msg.rxto) : MAX_SCHEDULE_TIMEOUT;
1888
1889 ret = riocm_ch_receive(ch, &buf, rxto);
1890 if (ret)
1891 goto out;
1892
1893 msg_size = min(msg.size, (u16)(RIO_MAX_MSG_SIZE));
1894
1895 if (copy_to_user((void __user *)(uintptr_t)msg.msg, buf, msg_size))
1896 ret = -EFAULT;
1897
1898 riocm_ch_free_rxbuf(ch, buf);
1899 out:
1900 riocm_put_channel(ch);
1901 return ret;
1902 }
1903
1904 /*
1905 * riocm_cdev_ioctl() - IOCTL requests handler
1906 */
1907 static long
1908 riocm_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1909 {
1910 switch (cmd) {
1911 case RIO_CM_EP_GET_LIST_SIZE:
1912 return cm_ep_get_list_size((void __user *)arg);
1913 case RIO_CM_EP_GET_LIST:
1914 return cm_ep_get_list((void __user *)arg);
1915 case RIO_CM_CHAN_CREATE:
1916 return cm_chan_create(filp, (void __user *)arg);
1917 case RIO_CM_CHAN_CLOSE:
1918 return cm_chan_close(filp, (void __user *)arg);
1919 case RIO_CM_CHAN_BIND:
1920 return cm_chan_bind((void __user *)arg);
1921 case RIO_CM_CHAN_LISTEN:
1922 return cm_chan_listen((void __user *)arg);
1923 case RIO_CM_CHAN_ACCEPT:
1924 return cm_chan_accept(filp, (void __user *)arg);
1925 case RIO_CM_CHAN_CONNECT:
1926 return cm_chan_connect((void __user *)arg);
1927 case RIO_CM_CHAN_SEND:
1928 return cm_chan_msg_send((void __user *)arg);
1929 case RIO_CM_CHAN_RECEIVE:
1930 return cm_chan_msg_rcv((void __user *)arg);
1931 case RIO_CM_MPORT_GET_LIST:
1932 return cm_mport_get_list((void __user *)arg);
1933 default:
1934 break;
1935 }
1936
1937 return -EINVAL;
1938 }
1939
1940 static const struct file_operations riocm_cdev_fops = {
1941 .owner = THIS_MODULE,
1942 .open = riocm_cdev_open,
1943 .release = riocm_cdev_release,
1944 .unlocked_ioctl = riocm_cdev_ioctl,
1945 };
1946
1947 /*
1948 * riocm_add_dev - add new remote RapidIO device into channel management core
1949 * @dev: device object associated with RapidIO device
1950 * @sif: subsystem interface
1951 *
1952 * Adds the specified RapidIO device (if applicable) into peers list of
1953 * the corresponding channel management device (cm_dev).
1954 */
1955 static int riocm_add_dev(struct device *dev, struct subsys_interface *sif)
1956 {
1957 struct cm_peer *peer;
1958 struct rio_dev *rdev = to_rio_dev(dev);
1959 struct cm_dev *cm;
1960
1961 /* Check if the remote device has capabilities required to support CM */
1962 if (!dev_cm_capable(rdev))
1963 return 0;
1964
1965 riocm_debug(RDEV, "(%s)", rio_name(rdev));
1966
1967 peer = kmalloc(sizeof(*peer), GFP_KERNEL);
1968 if (!peer)
1969 return -ENOMEM;
1970
1971 /* Find a corresponding cm_dev object */
1972 down_write(&rdev_sem);
1973 list_for_each_entry(cm, &cm_dev_list, list) {
1974 if (cm->mport == rdev->net->hport)
1975 goto found;
1976 }
1977
1978 up_write(&rdev_sem);
1979 kfree(peer);
1980 return -ENODEV;
1981
1982 found:
1983 peer->rdev = rdev;
1984 list_add_tail(&peer->node, &cm->peers);
1985 cm->npeers++;
1986
1987 up_write(&rdev_sem);
1988 return 0;
1989 }
1990
1991 /*
1992 * riocm_remove_dev - remove remote RapidIO device from channel management core
1993 * @dev: device object associated with RapidIO device
1994 * @sif: subsystem interface
1995 *
1996 * Removes the specified RapidIO device (if applicable) from peers list of
1997 * the corresponding channel management device (cm_dev).
1998 */
1999 static void riocm_remove_dev(struct device *dev, struct subsys_interface *sif)
2000 {
2001 struct rio_dev *rdev = to_rio_dev(dev);
2002 struct cm_dev *cm;
2003 struct cm_peer *peer;
2004 struct rio_channel *ch, *_c;
2005 unsigned int i;
2006 bool found = false;
2007 LIST_HEAD(list);
2008
2009 /* Check if the remote device has capabilities required to support CM */
2010 if (!dev_cm_capable(rdev))
2011 return;
2012
2013 riocm_debug(RDEV, "(%s)", rio_name(rdev));
2014
2015 /* Find matching cm_dev object */
2016 down_write(&rdev_sem);
2017 list_for_each_entry(cm, &cm_dev_list, list) {
2018 if (cm->mport == rdev->net->hport) {
2019 found = true;
2020 break;
2021 }
2022 }
2023
2024 if (!found) {
2025 up_write(&rdev_sem);
2026 return;
2027 }
2028
2029 /* Remove remote device from the list of peers */
2030 found = false;
2031 list_for_each_entry(peer, &cm->peers, node) {
2032 if (peer->rdev == rdev) {
2033 riocm_debug(RDEV, "removing peer %s", rio_name(rdev));
2034 found = true;
2035 list_del(&peer->node);
2036 cm->npeers--;
2037 kfree(peer);
2038 break;
2039 }
2040 }
2041
2042 up_write(&rdev_sem);
2043
2044 if (!found)
2045 return;
2046
2047 /*
2048 * Release channels associated with this peer
2049 */
2050
2051 spin_lock_bh(&idr_lock);
2052 idr_for_each_entry(&ch_idr, ch, i) {
2053 if (ch && ch->rdev == rdev) {
2054 if (atomic_read(&rdev->state) != RIO_DEVICE_SHUTDOWN)
2055 riocm_exch(ch, RIO_CM_DISCONNECT);
2056 idr_remove(&ch_idr, ch->id);
2057 list_add(&ch->ch_node, &list);
2058 }
2059 }
2060 spin_unlock_bh(&idr_lock);
2061
2062 if (!list_empty(&list)) {
2063 list_for_each_entry_safe(ch, _c, &list, ch_node) {
2064 list_del(&ch->ch_node);
2065 riocm_ch_close(ch);
2066 }
2067 }
2068 }
2069
2070 /*
2071 * riocm_cdev_add() - Create rio_cm char device
2072 * @devno: device number assigned to device (MAJ + MIN)
2073 */
2074 static int riocm_cdev_add(dev_t devno)
2075 {
2076 int ret;
2077
2078 cdev_init(&riocm_cdev.cdev, &riocm_cdev_fops);
2079 riocm_cdev.cdev.owner = THIS_MODULE;
2080 ret = cdev_add(&riocm_cdev.cdev, devno, 1);
2081 if (ret < 0) {
2082 riocm_error("Cannot register a device with error %d", ret);
2083 return ret;
2084 }
2085
2086 riocm_cdev.dev = device_create(dev_class, NULL, devno, NULL, DEV_NAME);
2087 if (IS_ERR(riocm_cdev.dev)) {
2088 cdev_del(&riocm_cdev.cdev);
2089 return PTR_ERR(riocm_cdev.dev);
2090 }
2091
2092 riocm_debug(MPORT, "Added %s cdev(%d:%d)",
2093 DEV_NAME, MAJOR(devno), MINOR(devno));
2094
2095 return 0;
2096 }
2097
2098 /*
2099 * riocm_add_mport - add new local mport device into channel management core
2100 * @dev: device object associated with mport
2101 * @class_intf: class interface
2102 *
2103 * When a new mport device is added, CM immediately reserves inbound and
2104 * outbound RapidIO mailboxes that will be used.
2105 */
2106 static int riocm_add_mport(struct device *dev,
2107 struct class_interface *class_intf)
2108 {
2109 int rc;
2110 int i;
2111 struct cm_dev *cm;
2112 struct rio_mport *mport = to_rio_mport(dev);
2113
2114 riocm_debug(MPORT, "add mport %s", mport->name);
2115
2116 cm = kzalloc(sizeof(*cm), GFP_KERNEL);
2117 if (!cm)
2118 return -ENOMEM;
2119
2120 cm->mport = mport;
2121
2122 rc = rio_request_outb_mbox(mport, cm, cmbox,
2123 RIOCM_TX_RING_SIZE, riocm_outb_msg_event);
2124 if (rc) {
2125 riocm_error("failed to allocate OBMBOX_%d on %s",
2126 cmbox, mport->name);
2127 kfree(cm);
2128 return -ENODEV;
2129 }
2130
2131 rc = rio_request_inb_mbox(mport, cm, cmbox,
2132 RIOCM_RX_RING_SIZE, riocm_inb_msg_event);
2133 if (rc) {
2134 riocm_error("failed to allocate IBMBOX_%d on %s",
2135 cmbox, mport->name);
2136 rio_release_outb_mbox(mport, cmbox);
2137 kfree(cm);
2138 return -ENODEV;
2139 }
2140
2141 /*
2142 * Allocate and register inbound messaging buffers to be ready
2143 * to receive channel and system management requests
2144 */
2145 for (i = 0; i < RIOCM_RX_RING_SIZE; i++)
2146 cm->rx_buf[i] = NULL;
2147
2148 cm->rx_slots = RIOCM_RX_RING_SIZE;
2149 mutex_init(&cm->rx_lock);
2150 riocm_rx_fill(cm, RIOCM_RX_RING_SIZE);
2151 cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
2152 INIT_WORK(&cm->rx_work, rio_ibmsg_handler);
2153
2154 cm->tx_slot = 0;
2155 cm->tx_cnt = 0;
2156 cm->tx_ack_slot = 0;
2157 spin_lock_init(&cm->tx_lock);
2158
2159 INIT_LIST_HEAD(&cm->peers);
2160 cm->npeers = 0;
2161 INIT_LIST_HEAD(&cm->tx_reqs);
2162
2163 down_write(&rdev_sem);
2164 list_add_tail(&cm->list, &cm_dev_list);
2165 up_write(&rdev_sem);
2166
2167 return 0;
2168 }
2169
2170 /*
2171 * riocm_remove_mport - remove local mport device from channel management core
2172 * @dev: device object associated with mport
2173 * @class_intf: class interface
2174 *
2175 * Removes a local mport device from the list of registered devices that provide
2176 * channel management services. Returns an error if the specified mport is not
2177 * registered with the CM core.
2178 */
2179 static void riocm_remove_mport(struct device *dev,
2180 struct class_interface *class_intf)
2181 {
2182 struct rio_mport *mport = to_rio_mport(dev);
2183 struct cm_dev *cm;
2184 struct cm_peer *peer, *temp;
2185 struct rio_channel *ch, *_c;
2186 unsigned int i;
2187 bool found = false;
2188 LIST_HEAD(list);
2189
2190 riocm_debug(MPORT, "%s", mport->name);
2191
2192 /* Find a matching cm_dev object */
2193 down_write(&rdev_sem);
2194 list_for_each_entry(cm, &cm_dev_list, list) {
2195 if (cm->mport == mport) {
2196 list_del(&cm->list);
2197 found = true;
2198 break;
2199 }
2200 }
2201 up_write(&rdev_sem);
2202 if (!found)
2203 return;
2204
2205 flush_workqueue(cm->rx_wq);
2206 destroy_workqueue(cm->rx_wq);
2207
2208 /* Release channels bound to this mport */
2209 spin_lock_bh(&idr_lock);
2210 idr_for_each_entry(&ch_idr, ch, i) {
2211 if (ch->cmdev == cm) {
2212 riocm_debug(RDEV, "%s drop ch_%d",
2213 mport->name, ch->id);
2214 idr_remove(&ch_idr, ch->id);
2215 list_add(&ch->ch_node, &list);
2216 }
2217 }
2218 spin_unlock_bh(&idr_lock);
2219
2220 if (!list_empty(&list)) {
2221 list_for_each_entry_safe(ch, _c, &list, ch_node) {
2222 list_del(&ch->ch_node);
2223 riocm_ch_close(ch);
2224 }
2225 }
2226
2227 rio_release_inb_mbox(mport, cmbox);
2228 rio_release_outb_mbox(mport, cmbox);
2229
2230 /* Remove and free peer entries */
2231 if (!list_empty(&cm->peers))
2232 riocm_debug(RDEV, "ATTN: peer list not empty");
2233 list_for_each_entry_safe(peer, temp, &cm->peers, node) {
2234 riocm_debug(RDEV, "removing peer %s", rio_name(peer->rdev));
2235 list_del(&peer->node);
2236 kfree(peer);
2237 }
2238
2239 riocm_rx_free(cm);
2240 kfree(cm);
2241 riocm_debug(MPORT, "%s done", mport->name);
2242 }
2243
2244 static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code,
2245 void *unused)
2246 {
2247 struct rio_channel *ch;
2248 unsigned int i;
2249
2250 riocm_debug(EXIT, ".");
2251
2252 spin_lock_bh(&idr_lock);
2253 idr_for_each_entry(&ch_idr, ch, i) {
2254 riocm_debug(EXIT, "close ch %d", ch->id);
2255 if (ch->state == RIO_CM_CONNECTED)
2256 riocm_send_close(ch);
2257 }
2258 spin_unlock_bh(&idr_lock);
2259
2260 return NOTIFY_DONE;
2261 }
2262
2263 /*
2264 * riocm_interface handles addition/removal of remote RapidIO devices
2265 */
2266 static struct subsys_interface riocm_interface = {
2267 .name = "rio_cm",
2268 .subsys = &rio_bus_type,
2269 .add_dev = riocm_add_dev,
2270 .remove_dev = riocm_remove_dev,
2271 };
2272
2273 /*
2274 * rio_mport_interface handles addition/removal local mport devices
2275 */
2276 static struct class_interface rio_mport_interface __refdata = {
2277 .class = &rio_mport_class,
2278 .add_dev = riocm_add_mport,
2279 .remove_dev = riocm_remove_mport,
2280 };
2281
2282 static struct notifier_block rio_cm_notifier = {
2283 .notifier_call = rio_cm_shutdown,
2284 };
2285
2286 static int __init riocm_init(void)
2287 {
2288 int ret;
2289
2290 /* Create device class needed by udev */
2291 dev_class = class_create(THIS_MODULE, DRV_NAME);
2292 if (IS_ERR(dev_class)) {
2293 riocm_error("Cannot create " DRV_NAME " class");
2294 return PTR_ERR(dev_class);
2295 }
2296
2297 ret = alloc_chrdev_region(&dev_number, 0, 1, DRV_NAME);
2298 if (ret) {
2299 class_destroy(dev_class);
2300 return ret;
2301 }
2302
2303 dev_major = MAJOR(dev_number);
2304 dev_minor_base = MINOR(dev_number);
2305 riocm_debug(INIT, "Registered class with %d major", dev_major);
2306
2307 /*
2308 * Register as rapidio_port class interface to get notifications about
2309 * mport additions and removals.
2310 */
2311 ret = class_interface_register(&rio_mport_interface);
2312 if (ret) {
2313 riocm_error("class_interface_register error: %d", ret);
2314 goto err_reg;
2315 }
2316
2317 /*
2318 * Register as RapidIO bus interface to get notifications about
2319 * addition/removal of remote RapidIO devices.
2320 */
2321 ret = subsys_interface_register(&riocm_interface);
2322 if (ret) {
2323 riocm_error("subsys_interface_register error: %d", ret);
2324 goto err_cl;
2325 }
2326
2327 ret = register_reboot_notifier(&rio_cm_notifier);
2328 if (ret) {
2329 riocm_error("failed to register reboot notifier (err=%d)", ret);
2330 goto err_sif;
2331 }
2332
2333 ret = riocm_cdev_add(dev_number);
2334 if (ret) {
2335 unregister_reboot_notifier(&rio_cm_notifier);
2336 ret = -ENODEV;
2337 goto err_sif;
2338 }
2339
2340 return 0;
2341 err_sif:
2342 subsys_interface_unregister(&riocm_interface);
2343 err_cl:
2344 class_interface_unregister(&rio_mport_interface);
2345 err_reg:
2346 unregister_chrdev_region(dev_number, 1);
2347 class_destroy(dev_class);
2348 return ret;
2349 }
2350
2351 static void __exit riocm_exit(void)
2352 {
2353 riocm_debug(EXIT, "enter");
2354 unregister_reboot_notifier(&rio_cm_notifier);
2355 subsys_interface_unregister(&riocm_interface);
2356 class_interface_unregister(&rio_mport_interface);
2357 idr_destroy(&ch_idr);
2358
2359 device_unregister(riocm_cdev.dev);
2360 cdev_del(&(riocm_cdev.cdev));
2361
2362 class_destroy(dev_class);
2363 unregister_chrdev_region(dev_number, 1);
2364 }
2365
2366 late_initcall(riocm_init);
2367 module_exit(riocm_exit);
2368
2369
2370
2371
2372
2373 /* LDV_COMMENT_BEGIN_MAIN */
2374 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
2375
2376 /*###########################################################################*/
2377
2378 /*############## Driver Environment Generator 0.2 output ####################*/
2379
2380 /*###########################################################################*/
2381
2382
2383
2384 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
2385 void ldv_check_final_state(void);
2386
2387 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
2388 void ldv_check_return_value(int res);
2389
2390 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
2391 void ldv_check_return_value_probe(int res);
2392
2393 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
2394 void ldv_initialize(void);
2395
2396 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
2397 void ldv_handler_precall(void);
2398
2399 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
2400 int nondet_int(void);
2401
2402 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
2403 int LDV_IN_INTERRUPT;
2404
2405 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
2406 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
2407
2408
2409
2410 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
2411 /*============================= VARIABLE DECLARATION PART =============================*/
2412 /** STRUCT: struct type: file_operations, struct name: riocm_cdev_fops **/
2413 /* content: static int riocm_cdev_open(struct inode *inode, struct file *filp)*/
2414 /* LDV_COMMENT_BEGIN_PREP */
2415 #define DRV_NAME "rio_cm"
2416 #define DRV_VERSION "1.0.0"
2417 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>"
2418 #define DRV_DESC "RapidIO Channelized Messaging Driver"
2419 #define DEV_NAME "rio_cm"
2420 #ifdef DEBUG
2421 #define riocm_debug(level, fmt, arg...) \
2422 do { \
2423 if (DBG_##level & dbg_level) \
2424 pr_debug(DRV_NAME ": %s " fmt "\n", \
2425 __func__, ##arg); \
2426 } while (0)
2427 #else
2428 #define riocm_debug(level, fmt, arg...) \
2429 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg)
2430 #endif
2431 #define riocm_warn(fmt, arg...) \
2432 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg)
2433 #define riocm_error(fmt, arg...) \
2434 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg)
2435 #ifdef DEBUG
2436 #endif
2437 #define RIOCM_TX_RING_SIZE 128
2438 #define RIOCM_RX_RING_SIZE 128
2439 #define RIOCM_CONNECT_TO 3
2440 #define RIOCM_MAX_CHNUM 0xffff
2441 #define RIOCM_CHNUM_AUTO 0
2442 #define RIOCM_MAX_EP_COUNT 0x10000
2443 #define RIO_HDR_LETTER_MASK 0xffff0000
2444 #define RIO_HDR_MBOX_MASK 0x0000ffff
2445 #define is_msg_capable(src_ops, dst_ops) \
2446 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
2447 (dst_ops & RIO_DST_OPS_DATA_MSG))
2448 #define dev_cm_capable(dev) \
2449 is_msg_capable(dev->src_ops, dev->dst_ops)
2450 /* LDV_COMMENT_END_PREP */
2451 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "riocm_cdev_open" */
2452 struct inode * var_group1;
2453 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "riocm_cdev_open" */
2454 struct file * var_group2;
2455 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "riocm_cdev_open" */
2456 static int res_riocm_cdev_open_32;
2457 /* content: static int riocm_cdev_release(struct inode *inode, struct file *filp)*/
2458 /* LDV_COMMENT_BEGIN_PREP */
2459 #define DRV_NAME "rio_cm"
2460 #define DRV_VERSION "1.0.0"
2461 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>"
2462 #define DRV_DESC "RapidIO Channelized Messaging Driver"
2463 #define DEV_NAME "rio_cm"
2464 #ifdef DEBUG
2465 #define riocm_debug(level, fmt, arg...) \
2466 do { \
2467 if (DBG_##level & dbg_level) \
2468 pr_debug(DRV_NAME ": %s " fmt "\n", \
2469 __func__, ##arg); \
2470 } while (0)
2471 #else
2472 #define riocm_debug(level, fmt, arg...) \
2473 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg)
2474 #endif
2475 #define riocm_warn(fmt, arg...) \
2476 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg)
2477 #define riocm_error(fmt, arg...) \
2478 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg)
2479 #ifdef DEBUG
2480 #endif
2481 #define RIOCM_TX_RING_SIZE 128
2482 #define RIOCM_RX_RING_SIZE 128
2483 #define RIOCM_CONNECT_TO 3
2484 #define RIOCM_MAX_CHNUM 0xffff
2485 #define RIOCM_CHNUM_AUTO 0
2486 #define RIOCM_MAX_EP_COUNT 0x10000
2487 #define RIO_HDR_LETTER_MASK 0xffff0000
2488 #define RIO_HDR_MBOX_MASK 0x0000ffff
2489 #define is_msg_capable(src_ops, dst_ops) \
2490 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
2491 (dst_ops & RIO_DST_OPS_DATA_MSG))
2492 #define dev_cm_capable(dev) \
2493 is_msg_capable(dev->src_ops, dev->dst_ops)
2494 /* LDV_COMMENT_END_PREP */
2495 /* content: static long riocm_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)*/
2496 /* LDV_COMMENT_BEGIN_PREP */
2497 #define DRV_NAME "rio_cm"
2498 #define DRV_VERSION "1.0.0"
2499 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>"
2500 #define DRV_DESC "RapidIO Channelized Messaging Driver"
2501 #define DEV_NAME "rio_cm"
2502 #ifdef DEBUG
2503 #define riocm_debug(level, fmt, arg...) \
2504 do { \
2505 if (DBG_##level & dbg_level) \
2506 pr_debug(DRV_NAME ": %s " fmt "\n", \
2507 __func__, ##arg); \
2508 } while (0)
2509 #else
2510 #define riocm_debug(level, fmt, arg...) \
2511 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg)
2512 #endif
2513 #define riocm_warn(fmt, arg...) \
2514 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg)
2515 #define riocm_error(fmt, arg...) \
2516 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg)
2517 #ifdef DEBUG
2518 #endif
2519 #define RIOCM_TX_RING_SIZE 128
2520 #define RIOCM_RX_RING_SIZE 128
2521 #define RIOCM_CONNECT_TO 3
2522 #define RIOCM_MAX_CHNUM 0xffff
2523 #define RIOCM_CHNUM_AUTO 0
2524 #define RIOCM_MAX_EP_COUNT 0x10000
2525 #define RIO_HDR_LETTER_MASK 0xffff0000
2526 #define RIO_HDR_MBOX_MASK 0x0000ffff
2527 #define is_msg_capable(src_ops, dst_ops) \
2528 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
2529 (dst_ops & RIO_DST_OPS_DATA_MSG))
2530 #define dev_cm_capable(dev) \
2531 is_msg_capable(dev->src_ops, dev->dst_ops)
2532 /* LDV_COMMENT_END_PREP */
2533 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "riocm_cdev_ioctl" */
2534 unsigned int var_riocm_cdev_ioctl_45_p1;
2535 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "riocm_cdev_ioctl" */
2536 unsigned long var_riocm_cdev_ioctl_45_p2;
2537
2538 /** STRUCT: struct type: subsys_interface, struct name: riocm_interface **/
2539 /* content: static int riocm_add_dev(struct device *dev, struct subsys_interface *sif)*/
2540 /* LDV_COMMENT_BEGIN_PREP */
2541 #define DRV_NAME "rio_cm"
2542 #define DRV_VERSION "1.0.0"
2543 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>"
2544 #define DRV_DESC "RapidIO Channelized Messaging Driver"
2545 #define DEV_NAME "rio_cm"
2546 #ifdef DEBUG
2547 #define riocm_debug(level, fmt, arg...) \
2548 do { \
2549 if (DBG_##level & dbg_level) \
2550 pr_debug(DRV_NAME ": %s " fmt "\n", \
2551 __func__, ##arg); \
2552 } while (0)
2553 #else
2554 #define riocm_debug(level, fmt, arg...) \
2555 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg)
2556 #endif
2557 #define riocm_warn(fmt, arg...) \
2558 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg)
2559 #define riocm_error(fmt, arg...) \
2560 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg)
2561 #ifdef DEBUG
2562 #endif
2563 #define RIOCM_TX_RING_SIZE 128
2564 #define RIOCM_RX_RING_SIZE 128
2565 #define RIOCM_CONNECT_TO 3
2566 #define RIOCM_MAX_CHNUM 0xffff
2567 #define RIOCM_CHNUM_AUTO 0
2568 #define RIOCM_MAX_EP_COUNT 0x10000
2569 #define RIO_HDR_LETTER_MASK 0xffff0000
2570 #define RIO_HDR_MBOX_MASK 0x0000ffff
2571 #define is_msg_capable(src_ops, dst_ops) \
2572 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
2573 (dst_ops & RIO_DST_OPS_DATA_MSG))
2574 #define dev_cm_capable(dev) \
2575 is_msg_capable(dev->src_ops, dev->dst_ops)
2576 /* LDV_COMMENT_END_PREP */
2577 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "riocm_add_dev" */
2578 struct device * var_group3;
2579 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "riocm_add_dev" */
2580 struct subsys_interface * var_group4;
2581 /* content: static void riocm_remove_dev(struct device *dev, struct subsys_interface *sif)*/
2582 /* LDV_COMMENT_BEGIN_PREP */
2583 #define DRV_NAME "rio_cm"
2584 #define DRV_VERSION "1.0.0"
2585 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>"
2586 #define DRV_DESC "RapidIO Channelized Messaging Driver"
2587 #define DEV_NAME "rio_cm"
2588 #ifdef DEBUG
2589 #define riocm_debug(level, fmt, arg...) \
2590 do { \
2591 if (DBG_##level & dbg_level) \
2592 pr_debug(DRV_NAME ": %s " fmt "\n", \
2593 __func__, ##arg); \
2594 } while (0)
2595 #else
2596 #define riocm_debug(level, fmt, arg...) \
2597 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg)
2598 #endif
2599 #define riocm_warn(fmt, arg...) \
2600 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg)
2601 #define riocm_error(fmt, arg...) \
2602 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg)
2603 #ifdef DEBUG
2604 #endif
2605 #define RIOCM_TX_RING_SIZE 128
2606 #define RIOCM_RX_RING_SIZE 128
2607 #define RIOCM_CONNECT_TO 3
2608 #define RIOCM_MAX_CHNUM 0xffff
2609 #define RIOCM_CHNUM_AUTO 0
2610 #define RIOCM_MAX_EP_COUNT 0x10000
2611 #define RIO_HDR_LETTER_MASK 0xffff0000
2612 #define RIO_HDR_MBOX_MASK 0x0000ffff
2613 #define is_msg_capable(src_ops, dst_ops) \
2614 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
2615 (dst_ops & RIO_DST_OPS_DATA_MSG))
2616 #define dev_cm_capable(dev) \
2617 is_msg_capable(dev->src_ops, dev->dst_ops)
2618 /* LDV_COMMENT_END_PREP */
2619
2620 /** STRUCT: struct type: notifier_block, struct name: rio_cm_notifier **/
2621 /* content: static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code, void *unused)*/
2622 /* LDV_COMMENT_BEGIN_PREP */
2623 #define DRV_NAME "rio_cm"
2624 #define DRV_VERSION "1.0.0"
2625 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>"
2626 #define DRV_DESC "RapidIO Channelized Messaging Driver"
2627 #define DEV_NAME "rio_cm"
2628 #ifdef DEBUG
2629 #define riocm_debug(level, fmt, arg...) \
2630 do { \
2631 if (DBG_##level & dbg_level) \
2632 pr_debug(DRV_NAME ": %s " fmt "\n", \
2633 __func__, ##arg); \
2634 } while (0)
2635 #else
2636 #define riocm_debug(level, fmt, arg...) \
2637 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg)
2638 #endif
2639 #define riocm_warn(fmt, arg...) \
2640 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg)
2641 #define riocm_error(fmt, arg...) \
2642 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg)
2643 #ifdef DEBUG
2644 #endif
2645 #define RIOCM_TX_RING_SIZE 128
2646 #define RIOCM_RX_RING_SIZE 128
2647 #define RIOCM_CONNECT_TO 3
2648 #define RIOCM_MAX_CHNUM 0xffff
2649 #define RIOCM_CHNUM_AUTO 0
2650 #define RIOCM_MAX_EP_COUNT 0x10000
2651 #define RIO_HDR_LETTER_MASK 0xffff0000
2652 #define RIO_HDR_MBOX_MASK 0x0000ffff
2653 #define is_msg_capable(src_ops, dst_ops) \
2654 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
2655 (dst_ops & RIO_DST_OPS_DATA_MSG))
2656 #define dev_cm_capable(dev) \
2657 is_msg_capable(dev->src_ops, dev->dst_ops)
2658 /* LDV_COMMENT_END_PREP */
2659 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "rio_cm_shutdown" */
2660 struct notifier_block * var_group5;
2661 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "rio_cm_shutdown" */
2662 unsigned long var_rio_cm_shutdown_51_p1;
2663 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "rio_cm_shutdown" */
2664 void * var_rio_cm_shutdown_51_p2;
2665
2666
2667
2668
2669 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
2670 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
2671 /*============================= VARIABLE INITIALIZING PART =============================*/
2672 LDV_IN_INTERRUPT=1;
2673
2674
2675
2676
2677 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
2678 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
2679 /*============================= FUNCTION CALL SECTION =============================*/
2680 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
2681 ldv_initialize();
2682 int ldv_s_riocm_cdev_fops_file_operations = 0;
2683
2684
2685
2686
2687
2688
2689
2690 while( nondet_int()
2691 || !(ldv_s_riocm_cdev_fops_file_operations == 0)
2692 ) {
2693
2694 switch(nondet_int()) {
2695
2696 case 0: {
2697
2698 /** STRUCT: struct type: file_operations, struct name: riocm_cdev_fops **/
2699 if(ldv_s_riocm_cdev_fops_file_operations==0) {
2700
2701 /* content: static int riocm_cdev_open(struct inode *inode, struct file *filp)*/
2702 /* LDV_COMMENT_BEGIN_PREP */
2703 #define DRV_NAME "rio_cm"
2704 #define DRV_VERSION "1.0.0"
2705 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>"
2706 #define DRV_DESC "RapidIO Channelized Messaging Driver"
2707 #define DEV_NAME "rio_cm"
2708 #ifdef DEBUG
2709 #define riocm_debug(level, fmt, arg...) \
2710 do { \
2711 if (DBG_##level & dbg_level) \
2712 pr_debug(DRV_NAME ": %s " fmt "\n", \
2713 __func__, ##arg); \
2714 } while (0)
2715 #else
2716 #define riocm_debug(level, fmt, arg...) \
2717 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg)
2718 #endif
2719 #define riocm_warn(fmt, arg...) \
2720 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg)
2721 #define riocm_error(fmt, arg...) \
2722 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg)
2723 #ifdef DEBUG
2724 #endif
2725 #define RIOCM_TX_RING_SIZE 128
2726 #define RIOCM_RX_RING_SIZE 128
2727 #define RIOCM_CONNECT_TO 3
2728 #define RIOCM_MAX_CHNUM 0xffff
2729 #define RIOCM_CHNUM_AUTO 0
2730 #define RIOCM_MAX_EP_COUNT 0x10000
2731 #define RIO_HDR_LETTER_MASK 0xffff0000
2732 #define RIO_HDR_MBOX_MASK 0x0000ffff
2733 #define is_msg_capable(src_ops, dst_ops) \
2734 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
2735 (dst_ops & RIO_DST_OPS_DATA_MSG))
2736 #define dev_cm_capable(dev) \
2737 is_msg_capable(dev->src_ops, dev->dst_ops)
2738 /* LDV_COMMENT_END_PREP */
2739 /* LDV_COMMENT_FUNCTION_CALL Function from field "open" from driver structure with callbacks "riocm_cdev_fops". Standart function test for correct return result. */
2740 ldv_handler_precall();
2741 res_riocm_cdev_open_32 = riocm_cdev_open( var_group1, var_group2);
2742 ldv_check_return_value(res_riocm_cdev_open_32);
2743 if(res_riocm_cdev_open_32)
2744 goto ldv_module_exit;
2745 ldv_s_riocm_cdev_fops_file_operations++;
2746
2747 }
2748
2749 }
2750
2751 break;
2752 case 1: {
2753
2754 /** STRUCT: struct type: file_operations, struct name: riocm_cdev_fops **/
2755 if(ldv_s_riocm_cdev_fops_file_operations==1) {
2756
2757 /* content: static int riocm_cdev_release(struct inode *inode, struct file *filp)*/
2758 /* LDV_COMMENT_BEGIN_PREP */
2759 #define DRV_NAME "rio_cm"
2760 #define DRV_VERSION "1.0.0"
2761 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>"
2762 #define DRV_DESC "RapidIO Channelized Messaging Driver"
2763 #define DEV_NAME "rio_cm"
2764 #ifdef DEBUG
2765 #define riocm_debug(level, fmt, arg...) \
2766 do { \
2767 if (DBG_##level & dbg_level) \
2768 pr_debug(DRV_NAME ": %s " fmt "\n", \
2769 __func__, ##arg); \
2770 } while (0)
2771 #else
2772 #define riocm_debug(level, fmt, arg...) \
2773 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg)
2774 #endif
2775 #define riocm_warn(fmt, arg...) \
2776 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg)
2777 #define riocm_error(fmt, arg...) \
2778 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg)
2779 #ifdef DEBUG
2780 #endif
2781 #define RIOCM_TX_RING_SIZE 128
2782 #define RIOCM_RX_RING_SIZE 128
2783 #define RIOCM_CONNECT_TO 3
2784 #define RIOCM_MAX_CHNUM 0xffff
2785 #define RIOCM_CHNUM_AUTO 0
2786 #define RIOCM_MAX_EP_COUNT 0x10000
2787 #define RIO_HDR_LETTER_MASK 0xffff0000
2788 #define RIO_HDR_MBOX_MASK 0x0000ffff
2789 #define is_msg_capable(src_ops, dst_ops) \
2790 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
2791 (dst_ops & RIO_DST_OPS_DATA_MSG))
2792 #define dev_cm_capable(dev) \
2793 is_msg_capable(dev->src_ops, dev->dst_ops)
2794 /* LDV_COMMENT_END_PREP */
2795 /* LDV_COMMENT_FUNCTION_CALL Function from field "release" from driver structure with callbacks "riocm_cdev_fops" */
2796 ldv_handler_precall();
2797 riocm_cdev_release( var_group1, var_group2);
2798 ldv_s_riocm_cdev_fops_file_operations=0;
2799
2800 }
2801
2802 }
2803
2804 break;
2805 case 2: {
2806
2807 /** STRUCT: struct type: file_operations, struct name: riocm_cdev_fops **/
2808
2809
2810 /* content: static long riocm_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)*/
2811 /* LDV_COMMENT_BEGIN_PREP */
2812 #define DRV_NAME "rio_cm"
2813 #define DRV_VERSION "1.0.0"
2814 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>"
2815 #define DRV_DESC "RapidIO Channelized Messaging Driver"
2816 #define DEV_NAME "rio_cm"
2817 #ifdef DEBUG
2818 #define riocm_debug(level, fmt, arg...) \
2819 do { \
2820 if (DBG_##level & dbg_level) \
2821 pr_debug(DRV_NAME ": %s " fmt "\n", \
2822 __func__, ##arg); \
2823 } while (0)
2824 #else
2825 #define riocm_debug(level, fmt, arg...) \
2826 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg)
2827 #endif
2828 #define riocm_warn(fmt, arg...) \
2829 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg)
2830 #define riocm_error(fmt, arg...) \
2831 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg)
2832 #ifdef DEBUG
2833 #endif
2834 #define RIOCM_TX_RING_SIZE 128
2835 #define RIOCM_RX_RING_SIZE 128
2836 #define RIOCM_CONNECT_TO 3
2837 #define RIOCM_MAX_CHNUM 0xffff
2838 #define RIOCM_CHNUM_AUTO 0
2839 #define RIOCM_MAX_EP_COUNT 0x10000
2840 #define RIO_HDR_LETTER_MASK 0xffff0000
2841 #define RIO_HDR_MBOX_MASK 0x0000ffff
2842 #define is_msg_capable(src_ops, dst_ops) \
2843 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
2844 (dst_ops & RIO_DST_OPS_DATA_MSG))
2845 #define dev_cm_capable(dev) \
2846 is_msg_capable(dev->src_ops, dev->dst_ops)
2847 /* LDV_COMMENT_END_PREP */
2848 /* LDV_COMMENT_FUNCTION_CALL Function from field "unlocked_ioctl" from driver structure with callbacks "riocm_cdev_fops" */
2849 ldv_handler_precall();
2850 riocm_cdev_ioctl( var_group2, var_riocm_cdev_ioctl_45_p1, var_riocm_cdev_ioctl_45_p2);
2851
2852
2853
2854
2855 }
2856
2857 break;
2858 case 3: {
2859
2860 /** STRUCT: struct type: subsys_interface, struct name: riocm_interface **/
2861
2862
2863 /* content: static int riocm_add_dev(struct device *dev, struct subsys_interface *sif)*/
2864 /* LDV_COMMENT_BEGIN_PREP */
2865 #define DRV_NAME "rio_cm"
2866 #define DRV_VERSION "1.0.0"
2867 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>"
2868 #define DRV_DESC "RapidIO Channelized Messaging Driver"
2869 #define DEV_NAME "rio_cm"
2870 #ifdef DEBUG
2871 #define riocm_debug(level, fmt, arg...) \
2872 do { \
2873 if (DBG_##level & dbg_level) \
2874 pr_debug(DRV_NAME ": %s " fmt "\n", \
2875 __func__, ##arg); \
2876 } while (0)
2877 #else
2878 #define riocm_debug(level, fmt, arg...) \
2879 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg)
2880 #endif
2881 #define riocm_warn(fmt, arg...) \
2882 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg)
2883 #define riocm_error(fmt, arg...) \
2884 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg)
2885 #ifdef DEBUG
2886 #endif
2887 #define RIOCM_TX_RING_SIZE 128
2888 #define RIOCM_RX_RING_SIZE 128
2889 #define RIOCM_CONNECT_TO 3
2890 #define RIOCM_MAX_CHNUM 0xffff
2891 #define RIOCM_CHNUM_AUTO 0
2892 #define RIOCM_MAX_EP_COUNT 0x10000
2893 #define RIO_HDR_LETTER_MASK 0xffff0000
2894 #define RIO_HDR_MBOX_MASK 0x0000ffff
2895 #define is_msg_capable(src_ops, dst_ops) \
2896 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
2897 (dst_ops & RIO_DST_OPS_DATA_MSG))
2898 #define dev_cm_capable(dev) \
2899 is_msg_capable(dev->src_ops, dev->dst_ops)
2900 /* LDV_COMMENT_END_PREP */
2901 /* LDV_COMMENT_FUNCTION_CALL Function from field "add_dev" from driver structure with callbacks "riocm_interface" */
2902 ldv_handler_precall();
2903 riocm_add_dev( var_group3, var_group4);
2904
2905
2906
2907
2908 }
2909
2910 break;
2911 case 4: {
2912
2913 /** STRUCT: struct type: subsys_interface, struct name: riocm_interface **/
2914
2915
2916 /* content: static void riocm_remove_dev(struct device *dev, struct subsys_interface *sif)*/
2917 /* LDV_COMMENT_BEGIN_PREP */
2918 #define DRV_NAME "rio_cm"
2919 #define DRV_VERSION "1.0.0"
2920 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>"
2921 #define DRV_DESC "RapidIO Channelized Messaging Driver"
2922 #define DEV_NAME "rio_cm"
2923 #ifdef DEBUG
2924 #define riocm_debug(level, fmt, arg...) \
2925 do { \
2926 if (DBG_##level & dbg_level) \
2927 pr_debug(DRV_NAME ": %s " fmt "\n", \
2928 __func__, ##arg); \
2929 } while (0)
2930 #else
2931 #define riocm_debug(level, fmt, arg...) \
2932 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg)
2933 #endif
2934 #define riocm_warn(fmt, arg...) \
2935 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg)
2936 #define riocm_error(fmt, arg...) \
2937 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg)
2938 #ifdef DEBUG
2939 #endif
2940 #define RIOCM_TX_RING_SIZE 128
2941 #define RIOCM_RX_RING_SIZE 128
2942 #define RIOCM_CONNECT_TO 3
2943 #define RIOCM_MAX_CHNUM 0xffff
2944 #define RIOCM_CHNUM_AUTO 0
2945 #define RIOCM_MAX_EP_COUNT 0x10000
2946 #define RIO_HDR_LETTER_MASK 0xffff0000
2947 #define RIO_HDR_MBOX_MASK 0x0000ffff
2948 #define is_msg_capable(src_ops, dst_ops) \
2949 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
2950 (dst_ops & RIO_DST_OPS_DATA_MSG))
2951 #define dev_cm_capable(dev) \
2952 is_msg_capable(dev->src_ops, dev->dst_ops)
2953 /* LDV_COMMENT_END_PREP */
2954 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove_dev" from driver structure with callbacks "riocm_interface" */
2955 ldv_handler_precall();
2956 riocm_remove_dev( var_group3, var_group4);
2957
2958
2959
2960
2961 }
2962
2963 break;
2964 case 5: {
2965
2966 /** STRUCT: struct type: notifier_block, struct name: rio_cm_notifier **/
2967
2968
2969 /* content: static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code, void *unused)*/
2970 /* LDV_COMMENT_BEGIN_PREP */
2971 #define DRV_NAME "rio_cm"
2972 #define DRV_VERSION "1.0.0"
2973 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>"
2974 #define DRV_DESC "RapidIO Channelized Messaging Driver"
2975 #define DEV_NAME "rio_cm"
2976 #ifdef DEBUG
2977 #define riocm_debug(level, fmt, arg...) \
2978 do { \
2979 if (DBG_##level & dbg_level) \
2980 pr_debug(DRV_NAME ": %s " fmt "\n", \
2981 __func__, ##arg); \
2982 } while (0)
2983 #else
2984 #define riocm_debug(level, fmt, arg...) \
2985 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg)
2986 #endif
2987 #define riocm_warn(fmt, arg...) \
2988 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg)
2989 #define riocm_error(fmt, arg...) \
2990 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg)
2991 #ifdef DEBUG
2992 #endif
2993 #define RIOCM_TX_RING_SIZE 128
2994 #define RIOCM_RX_RING_SIZE 128
2995 #define RIOCM_CONNECT_TO 3
2996 #define RIOCM_MAX_CHNUM 0xffff
2997 #define RIOCM_CHNUM_AUTO 0
2998 #define RIOCM_MAX_EP_COUNT 0x10000
2999 #define RIO_HDR_LETTER_MASK 0xffff0000
3000 #define RIO_HDR_MBOX_MASK 0x0000ffff
3001 #define is_msg_capable(src_ops, dst_ops) \
3002 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
3003 (dst_ops & RIO_DST_OPS_DATA_MSG))
3004 #define dev_cm_capable(dev) \
3005 is_msg_capable(dev->src_ops, dev->dst_ops)
3006 /* LDV_COMMENT_END_PREP */
3007 /* LDV_COMMENT_FUNCTION_CALL Function from field "notifier_call" from driver structure with callbacks "rio_cm_notifier" */
3008 ldv_handler_precall();
3009 rio_cm_shutdown( var_group5, var_rio_cm_shutdown_51_p1, var_rio_cm_shutdown_51_p2);
3010
3011
3012
3013
3014 }
3015
3016 break;
3017 default: break;
3018
3019 }
3020
3021 }
3022
3023 ldv_module_exit:
3024
3025 /** INIT: init_type: ST_MODULE_EXIT **/
3026 /* content: static void __exit riocm_exit(void)*/
3027 /* LDV_COMMENT_BEGIN_PREP */
3028 #define DRV_NAME "rio_cm"
3029 #define DRV_VERSION "1.0.0"
3030 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>"
3031 #define DRV_DESC "RapidIO Channelized Messaging Driver"
3032 #define DEV_NAME "rio_cm"
3033 #ifdef DEBUG
3034 #define riocm_debug(level, fmt, arg...) \
3035 do { \
3036 if (DBG_##level & dbg_level) \
3037 pr_debug(DRV_NAME ": %s " fmt "\n", \
3038 __func__, ##arg); \
3039 } while (0)
3040 #else
3041 #define riocm_debug(level, fmt, arg...) \
3042 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg)
3043 #endif
3044 #define riocm_warn(fmt, arg...) \
3045 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg)
3046 #define riocm_error(fmt, arg...) \
3047 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg)
3048 #ifdef DEBUG
3049 #endif
3050 #define RIOCM_TX_RING_SIZE 128
3051 #define RIOCM_RX_RING_SIZE 128
3052 #define RIOCM_CONNECT_TO 3
3053 #define RIOCM_MAX_CHNUM 0xffff
3054 #define RIOCM_CHNUM_AUTO 0
3055 #define RIOCM_MAX_EP_COUNT 0x10000
3056 #define RIO_HDR_LETTER_MASK 0xffff0000
3057 #define RIO_HDR_MBOX_MASK 0x0000ffff
3058 #define is_msg_capable(src_ops, dst_ops) \
3059 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
3060 (dst_ops & RIO_DST_OPS_DATA_MSG))
3061 #define dev_cm_capable(dev) \
3062 is_msg_capable(dev->src_ops, dev->dst_ops)
3063 /* LDV_COMMENT_END_PREP */
3064 /* LDV_COMMENT_FUNCTION_CALL Kernel calls driver release function before driver will be uploaded from kernel. This function declared as "MODULE_EXIT(function name)". */
3065 ldv_handler_precall();
3066 riocm_exit();
3067
3068 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
3069 ldv_final: ldv_check_final_state();
3070
3071 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
3072 return;
3073
3074 }
3075 #endif
3076
3077 /* LDV_COMMENT_END_MAIN */ |
Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e. there may not be a real error at all. Please analyze the given error trace and the related source code to determine whether there is an error in your driver.
The Error trace column contains the path on which the given rule is violated. You can expand/collapse some entity classes by clicking the corresponding checkboxes in the main menu or in the advanced Others menu. You can also expand/collapse each particular entity by clicking on +/-. By hovering over some entities you can see tips. The error trace is also linked to the related source code. Line numbers may be shown as links on the left; you can click them to open the corresponding lines in the source code.
The Source code column contains the contents of the files related to the error trace. This includes the source code of your driver (note that there are some LDV modifications at the end), kernel headers, and the rule model. Tabs show the currently opened file and the other available files. By hovering over them you can see the full file names. Clicking a tab shows the corresponding file's contents.
Kernel | Module | Rule | Verifier | Verdict | Status | Timestamp | Bug report |
linux-4.8-rc1.tar.xz | drivers/rapidio/rio_cm.ko | 43_1a | CPAchecker | Bug | Fixed | 2016-09-09 23:37:19 | L0245 |
Comment
Reported: 9 Sep 2016
[Home]