Error Trace
[Home]
Bug # 79
Show/hide error trace Error trace
-__BLAST_initialize_/work/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.1-rc1.tar.xz--X--152_1a/linux-4.1-rc1.tar.xz/csd_deg_dscv/2900/dscv_tempdir/dscv/rcv/152_1a/main-ldv_main0_sequence_infinite_withcheck_stateful/preprocess/1-cpp/drivers/usb/gadget/udc/mv_udc_core.o.i() { 7 -LDV_IO_MEMS = 0; release_done.done = 0; release_done.wait.lock.__annonCompField19.rlock.raw_lock.__annonCompField7.head_tail = 0; release_done.wait.lock.__annonCompField19.rlock.magic = 3735899821; release_done.wait.lock.__annonCompField19.rlock.owner_cpu = 4294967295; release_done.wait.lock.__annonCompField19.rlock.owner = -1; release_done.wait.lock.__annonCompField19.rlock.dep_map.key = 0; release_done.wait.lock.__annonCompField19.rlock.dep_map.class_cache[ 0 ] = 0; release_done.wait.lock.__annonCompField19.rlock.dep_map.class_cache[ 1 ] = 0; release_done.wait.lock.__annonCompField19.rlock.dep_map.name = "(release_done).wait.lock"; release_done.wait.lock.__annonCompField19.rlock.dep_map.cpu = 0; release_done.wait.lock.__annonCompField19.rlock.dep_map.ip = 0; release_done.wait.task_list.next = &(&(&(release_done))->wait)->task_list; release_done.wait.task_list.prev = &(&(&(release_done))->wait)->task_list; driver_name[ 0 ] = 109; driver_name[ 1 ] = 118; driver_name[ 2 ] = 95; driver_name[ 3 ] = 117; driver_name[ 4 ] = 100; driver_name[ 5 ] = 99; driver_name[ 6 ] = 0; mv_ep0_desc.bLength = 7; mv_ep0_desc.bDescriptorType = 5; mv_ep0_desc.bEndpointAddress = 0; mv_ep0_desc.bmAttributes = 0; mv_ep0_desc.wMaxPacketSize = 64; mv_ep0_desc.bInterval = 0; mv_ep0_desc.bRefresh = 0; mv_ep0_desc.bSynchAddress = 0; mv_ep_ops.enable = &(mv_ep_enable); mv_ep_ops.disable = &(mv_ep_disable); mv_ep_ops.alloc_request = &(mv_alloc_request); mv_ep_ops.free_request = &(mv_free_request); mv_ep_ops.queue = &(mv_ep_queue); mv_ep_ops.dequeue = &(mv_ep_dequeue); mv_ep_ops.set_halt = &(mv_ep_set_halt); mv_ep_ops.set_wedge = &(mv_ep_set_wedge); mv_ep_ops.fifo_status = 0; mv_ep_ops.fifo_flush = 
&(mv_ep_fifo_flush); mv_ops.get_frame = &(mv_udc_get_frame); mv_ops.wakeup = &(mv_udc_wakeup); mv_ops.set_selfpowered = 0; mv_ops.vbus_session = &(mv_udc_vbus_session); mv_ops.vbus_draw = 0; mv_ops.pullup = &(mv_udc_pullup); mv_ops.ioctl = 0; mv_ops.get_config_params = 0; mv_ops.udc_start = &(mv_udc_start); mv_ops.udc_stop = &(mv_udc_stop); return ; } -__BLAST_initialize_/work/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.1-rc1.tar.xz--X--152_1a/linux-4.1-rc1.tar.xz/csd_deg_dscv/2900/dscv_tempdir/dscv/rcv/152_1a/main-ldv_main0_sequence_infinite_withcheck_stateful/preprocess/1-cpp//work/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.1-rc1.tar.xz--X--152_1a/linux-4.1-rc1.tar.xz/csd_deg_dscv/2900/dscv_tempdir/rule-instrumentor/152_1a/common-model/ldv_common_model.o.i() { return ; } { 2994 ldv_initialize() { /* Function call is skipped due to function is undefined */} 3006 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */} 3010 tmp = nondet_int() { /* Function call is skipped due to function is undefined */} { { 616 tmp = devm_kmalloc(dev, size, gfp | 32768) { /* Function call is skipped due to function is undefined */} } 2123 udc = tmp___0; 2127 *(udc).done = &(release_done); 2128 *(udc).pdata = tmp___1; 2129 __raw_spin_lock_init(&(&(&(udc)->lock)->__annonCompField19)->rlock, "&(&udc->lock)->rlock", &(__key)) { /* Function call is skipped due to function is undefined */} 2131 *(udc).dev = pdev; 2148 *(udc).clk = devm_clk_get(&(pdev)->dev, 0) { /* Function call is skipped due to function is undefined */} 2152 r = platform_get_resource_byname(*(udc).dev, 512, "capregs") { /* Function call is skipped due to function is undefined */} 2158 tmp___7 = devm_ioremap(&(pdev)->dev, *(r).start, tmp___6) { /* Function call is skipped due to function is undefined */} 2158 *(udc).cap_regs = tmp___7; 2165 r = platform_get_resource_byname(*(udc).dev, 512, "phyregs") { /* Function call is skipped due to function is 
undefined */} { { } 14 tmp = ldv_undef_ptr() { /* Function call is skipped due to function is undefined */} 14 ptr = tmp; 16 LDV_IO_MEMS = LDV_IO_MEMS + 1; 17 __retres4 = ptr; } { 1080 -descriptor.modname = "mv_udc"; descriptor.function = "mv_udc_enable_internal"; descriptor.filename = "/work/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.1-rc1.tar.xz--X--152_1a/linux-4.1-rc1.tar.xz/csd_deg_dscv/2900/dscv_tempdir/dscv/ri/152_1a/drivers/usb/gadget/udc/mv_udc_core.c"; descriptor.format = "enable udc\n"; descriptor.lineno = 1080; descriptor.flags = 0; { { 457 ret = clk_prepare(clk) { /* Function call is skipped due to function is undefined */} 459 __retres3 = ret; } 971 return ; } 1083 cil_9 = *(*(udc).pdata).phy_init; 1083 retval = *(cil_9)(*(udc).phy_regs) { /* Function call is skipped due to function is undefined */} 1085 dev_err(&(*(udc).dev)->dev, "init phy error %d\n", retval) { /* Function call is skipped due to function is undefined */} { { 470 clk_disable(clk) { /* Function call is skipped due to function is undefined */} 471 clk_unprepare(clk) { /* Function call is skipped due to function is undefined */} 468 return ; } 976 return ; } 1088 __retres11 = retval; } 2180 __retres40 = retval; } 3646 ldv_check_return_value(res_mv_udc_probe_59) { /* Function call is skipped due to function is undefined */} 3647 ldv_check_return_value_probe(res_mv_udc_probe_59) { /* Function call is skipped due to function is undefined */} } | Source code 1
2 /*
3 * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
4 * Author: Chao Xie <chao.xie@marvell.com>
5 * Neil Zhang <zhangwm@marvell.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13 #include <linux/module.h>
14 #include <linux/pci.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/dmapool.h>
17 #include <linux/kernel.h>
18 #include <linux/delay.h>
19 #include <linux/ioport.h>
20 #include <linux/sched.h>
21 #include <linux/slab.h>
22 #include <linux/errno.h>
23 #include <linux/err.h>
24 #include <linux/timer.h>
25 #include <linux/list.h>
26 #include <linux/interrupt.h>
27 #include <linux/moduleparam.h>
28 #include <linux/device.h>
29 #include <linux/usb/ch9.h>
30 #include <linux/usb/gadget.h>
31 #include <linux/usb/otg.h>
32 #include <linux/pm.h>
33 #include <linux/io.h>
34 #include <linux/irq.h>
35 #include <linux/platform_device.h>
36 #include <linux/clk.h>
37 #include <linux/platform_data/mv_usb.h>
38 #include <asm/unaligned.h>
39
40 #include "mv_udc.h"
41
42 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
43 #define DRIVER_VERSION "8 Nov 2010"
44
45 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
46 ((ep)->udc->ep0_dir) : ((ep)->direction))
47
48 /* timeout value -- usec */
49 #define RESET_TIMEOUT 10000
50 #define FLUSH_TIMEOUT 10000
51 #define EPSTATUS_TIMEOUT 10000
52 #define PRIME_TIMEOUT 10000
53 #define READSAFE_TIMEOUT 1000
54
55 #define LOOPS_USEC_SHIFT 1
56 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
57 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
58
59 static DECLARE_COMPLETION(release_done);
60
61 static const char driver_name[] = "mv_udc";
62 static const char driver_desc[] = DRIVER_DESC;
63
64 static void nuke(struct mv_ep *ep, int status);
65 static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);
66
67 /* for endpoint 0 operations */
68 static const struct usb_endpoint_descriptor mv_ep0_desc = {
69 .bLength = USB_DT_ENDPOINT_SIZE,
70 .bDescriptorType = USB_DT_ENDPOINT,
71 .bEndpointAddress = 0,
72 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
73 .wMaxPacketSize = EP0_MAX_PKT_SIZE,
74 };
75
76 static void ep0_reset(struct mv_udc *udc)
77 {
78 struct mv_ep *ep;
79 u32 epctrlx;
80 int i = 0;
81
82 /* ep0 in and out */
83 for (i = 0; i < 2; i++) {
84 ep = &udc->eps[i];
85 ep->udc = udc;
86
87 /* ep0 dQH */
88 ep->dqh = &udc->ep_dqh[i];
89
90 /* configure ep0 endpoint capabilities in dQH */
91 ep->dqh->max_packet_length =
92 (EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
93 | EP_QUEUE_HEAD_IOS;
94
95 ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;
96
97 epctrlx = readl(&udc->op_regs->epctrlx[0]);
98 if (i) { /* TX */
99 epctrlx |= EPCTRL_TX_ENABLE
100 | (USB_ENDPOINT_XFER_CONTROL
101 << EPCTRL_TX_EP_TYPE_SHIFT);
102
103 } else { /* RX */
104 epctrlx |= EPCTRL_RX_ENABLE
105 | (USB_ENDPOINT_XFER_CONTROL
106 << EPCTRL_RX_EP_TYPE_SHIFT);
107 }
108
109 writel(epctrlx, &udc->op_regs->epctrlx[0]);
110 }
111 }
112
113 /* protocol ep0 stall, will automatically be cleared on new transaction */
114 static void ep0_stall(struct mv_udc *udc)
115 {
116 u32 epctrlx;
117
118 /* set TX and RX to stall */
119 epctrlx = readl(&udc->op_regs->epctrlx[0]);
120 epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
121 writel(epctrlx, &udc->op_regs->epctrlx[0]);
122
123 /* update ep0 state */
124 udc->ep0_state = WAIT_FOR_SETUP;
125 udc->ep0_dir = EP_DIR_OUT;
126 }
127
128 static int process_ep_req(struct mv_udc *udc, int index,
129 struct mv_req *curr_req)
130 {
131 struct mv_dtd *curr_dtd;
132 struct mv_dqh *curr_dqh;
133 int td_complete, actual, remaining_length;
134 int i, direction;
135 int retval = 0;
136 u32 errors;
137 u32 bit_pos;
138
139 curr_dqh = &udc->ep_dqh[index];
140 direction = index % 2;
141
142 curr_dtd = curr_req->head;
143 td_complete = 0;
144 actual = curr_req->req.length;
145
146 for (i = 0; i < curr_req->dtd_count; i++) {
147 if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
148 dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
149 udc->eps[index].name);
150 return 1;
151 }
152
153 errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
154 if (!errors) {
155 remaining_length =
156 (curr_dtd->size_ioc_sts & DTD_PACKET_SIZE)
157 >> DTD_LENGTH_BIT_POS;
158 actual -= remaining_length;
159
160 if (remaining_length) {
161 if (direction) {
162 dev_dbg(&udc->dev->dev,
163 "TX dTD remains data\n");
164 retval = -EPROTO;
165 break;
166 } else
167 break;
168 }
169 } else {
170 dev_info(&udc->dev->dev,
171 "complete_tr error: ep=%d %s: error = 0x%x\n",
172 index >> 1, direction ? "SEND" : "RECV",
173 errors);
174 if (errors & DTD_STATUS_HALTED) {
175 /* Clear the errors and Halt condition */
176 curr_dqh->size_ioc_int_sts &= ~errors;
177 retval = -EPIPE;
178 } else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
179 retval = -EPROTO;
180 } else if (errors & DTD_STATUS_TRANSACTION_ERR) {
181 retval = -EILSEQ;
182 }
183 }
184 if (i != curr_req->dtd_count - 1)
185 curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
186 }
187 if (retval)
188 return retval;
189
190 if (direction == EP_DIR_OUT)
191 bit_pos = 1 << curr_req->ep->ep_num;
192 else
193 bit_pos = 1 << (16 + curr_req->ep->ep_num);
194
195 while ((curr_dqh->curr_dtd_ptr == curr_dtd->td_dma)) {
196 if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
197 while (readl(&udc->op_regs->epstatus) & bit_pos)
198 udelay(1);
199 break;
200 }
201 udelay(1);
202 }
203
204 curr_req->req.actual = actual;
205
206 return 0;
207 }
208
209 /*
210 * done() - retire a request; caller blocked irqs
211 * @status : request status to be set, only works when
212 * request is still in progress.
213 */
214 static void done(struct mv_ep *ep, struct mv_req *req, int status)
215 __releases(&ep->udc->lock)
216 __acquires(&ep->udc->lock)
217 {
218 struct mv_udc *udc = NULL;
219 unsigned char stopped = ep->stopped;
220 struct mv_dtd *curr_td, *next_td;
221 int j;
222
223 udc = (struct mv_udc *)ep->udc;
224 /* Removed the req from fsl_ep->queue */
225 list_del_init(&req->queue);
226
227 /* req.status should be set as -EINPROGRESS in ep_queue() */
228 if (req->req.status == -EINPROGRESS)
229 req->req.status = status;
230 else
231 status = req->req.status;
232
233 /* Free dtd for the request */
234 next_td = req->head;
235 for (j = 0; j < req->dtd_count; j++) {
236 curr_td = next_td;
237 if (j != req->dtd_count - 1)
238 next_td = curr_td->next_dtd_virt;
239 dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
240 }
241
242 usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));
243
244 if (status && (status != -ESHUTDOWN))
245 dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
246 ep->ep.name, &req->req, status,
247 req->req.actual, req->req.length);
248
249 ep->stopped = 1;
250
251 spin_unlock(&ep->udc->lock);
252
253 usb_gadget_giveback_request(&ep->ep, &req->req);
254
255 spin_lock(&ep->udc->lock);
256 ep->stopped = stopped;
257 }
258
259 static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
260 {
261 struct mv_udc *udc;
262 struct mv_dqh *dqh;
263 u32 bit_pos, direction;
264 u32 usbcmd, epstatus;
265 unsigned int loops;
266 int retval = 0;
267
268 udc = ep->udc;
269 direction = ep_dir(ep);
270 dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
271 bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
272
273 /* check if the pipe is empty */
274 if (!(list_empty(&ep->queue))) {
275 struct mv_req *lastreq;
276 lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
277 lastreq->tail->dtd_next =
278 req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
279
280 wmb();
281
282 if (readl(&udc->op_regs->epprime) & bit_pos)
283 goto done;
284
285 loops = LOOPS(READSAFE_TIMEOUT);
286 while (1) {
287 /* start with setting the semaphores */
288 usbcmd = readl(&udc->op_regs->usbcmd);
289 usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET;
290 writel(usbcmd, &udc->op_regs->usbcmd);
291
292 /* read the endpoint status */
293 epstatus = readl(&udc->op_regs->epstatus) & bit_pos;
294
295 /*
296 * Reread the ATDTW semaphore bit to check if it is
297 * cleared. When hardware see a hazard, it will clear
298 * the bit or else we remain set to 1 and we can
299 * proceed with priming of endpoint if not already
300 * primed.
301 */
302 if (readl(&udc->op_regs->usbcmd)
303 & USBCMD_ATDTW_TRIPWIRE_SET)
304 break;
305
306 loops--;
307 if (loops == 0) {
308 dev_err(&udc->dev->dev,
309 "Timeout for ATDTW_TRIPWIRE...\n");
310 retval = -ETIME;
311 goto done;
312 }
313 udelay(LOOPS_USEC);
314 }
315
316 /* Clear the semaphore */
317 usbcmd = readl(&udc->op_regs->usbcmd);
318 usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
319 writel(usbcmd, &udc->op_regs->usbcmd);
320
321 if (epstatus)
322 goto done;
323 }
324
325 /* Write dQH next pointer and terminate bit to 0 */
326 dqh->next_dtd_ptr = req->head->td_dma
327 & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
328
329 /* clear active and halt bit, in case set from a previous error */
330 dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
331
332 /* Ensure that updates to the QH will occur before priming. */
333 wmb();
334
335 /* Prime the Endpoint */
336 writel(bit_pos, &udc->op_regs->epprime);
337
338 done:
339 return retval;
340 }
341
342 static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
343 dma_addr_t *dma, int *is_last)
344 {
345 struct mv_dtd *dtd;
346 struct mv_udc *udc;
347 struct mv_dqh *dqh;
348 u32 temp, mult = 0;
349
350 /* how big will this transfer be? */
351 if (usb_endpoint_xfer_isoc(req->ep->ep.desc)) {
352 dqh = req->ep->dqh;
353 mult = (dqh->max_packet_length >> EP_QUEUE_HEAD_MULT_POS)
354 & 0x3;
355 *length = min(req->req.length - req->req.actual,
356 (unsigned)(mult * req->ep->ep.maxpacket));
357 } else
358 *length = min(req->req.length - req->req.actual,
359 (unsigned)EP_MAX_LENGTH_TRANSFER);
360
361 udc = req->ep->udc;
362
363 /*
364 * Be careful that no _GFP_HIGHMEM is set,
365 * or we can not use dma_to_virt
366 */
367 dtd = dma_pool_alloc(udc->dtd_pool, GFP_ATOMIC, dma);
368 if (dtd == NULL)
369 return dtd;
370
371 dtd->td_dma = *dma;
372 /* initialize buffer page pointers */
373 temp = (u32)(req->req.dma + req->req.actual);
374 dtd->buff_ptr0 = cpu_to_le32(temp);
375 temp &= ~0xFFF;
376 dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
377 dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
378 dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
379 dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);
380
381 req->req.actual += *length;
382
383 /* zlp is needed if req->req.zero is set */
384 if (req->req.zero) {
385 if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
386 *is_last = 1;
387 else
388 *is_last = 0;
389 } else if (req->req.length == req->req.actual)
390 *is_last = 1;
391 else
392 *is_last = 0;
393
394 /* Fill in the transfer size; set active bit */
395 temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);
396
397 /* Enable interrupt for the last dtd of a request */
398 if (*is_last && !req->req.no_interrupt)
399 temp |= DTD_IOC;
400
401 temp |= mult << 10;
402
403 dtd->size_ioc_sts = temp;
404
405 mb();
406
407 return dtd;
408 }
409
410 /* generate dTD linked list for a request */
411 static int req_to_dtd(struct mv_req *req)
412 {
413 unsigned count;
414 int is_last, is_first = 1;
415 struct mv_dtd *dtd, *last_dtd = NULL;
416 struct mv_udc *udc;
417 dma_addr_t dma;
418
419 udc = req->ep->udc;
420
421 do {
422 dtd = build_dtd(req, &count, &dma, &is_last);
423 if (dtd == NULL)
424 return -ENOMEM;
425
426 if (is_first) {
427 is_first = 0;
428 req->head = dtd;
429 } else {
430 last_dtd->dtd_next = dma;
431 last_dtd->next_dtd_virt = dtd;
432 }
433 last_dtd = dtd;
434 req->dtd_count++;
435 } while (!is_last);
436
437 /* set terminate bit to 1 for the last dTD */
438 dtd->dtd_next = DTD_NEXT_TERMINATE;
439
440 req->tail = dtd;
441
442 return 0;
443 }
444
445 static int mv_ep_enable(struct usb_ep *_ep,
446 const struct usb_endpoint_descriptor *desc)
447 {
448 struct mv_udc *udc;
449 struct mv_ep *ep;
450 struct mv_dqh *dqh;
451 u16 max = 0;
452 u32 bit_pos, epctrlx, direction;
453 unsigned char zlt = 0, ios = 0, mult = 0;
454 unsigned long flags;
455
456 ep = container_of(_ep, struct mv_ep, ep);
457 udc = ep->udc;
458
459 if (!_ep || !desc
460 || desc->bDescriptorType != USB_DT_ENDPOINT)
461 return -EINVAL;
462
463 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
464 return -ESHUTDOWN;
465
466 direction = ep_dir(ep);
467 max = usb_endpoint_maxp(desc);
468
469 /*
470 * disable HW zero length termination select
471 * driver handles zero length packet through req->req.zero
472 */
473 zlt = 1;
474
475 bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
476
477 /* Check if the Endpoint is Primed */
478 if ((readl(&udc->op_regs->epprime) & bit_pos)
479 || (readl(&udc->op_regs->epstatus) & bit_pos)) {
480 dev_info(&udc->dev->dev,
481 "ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
482 " ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
483 (unsigned)ep->ep_num, direction ? "SEND" : "RECV",
484 (unsigned)readl(&udc->op_regs->epprime),
485 (unsigned)readl(&udc->op_regs->epstatus),
486 (unsigned)bit_pos);
487 goto en_done;
488 }
489 /* Set the max packet length, interrupt on Setup and Mult fields */
490 switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
491 case USB_ENDPOINT_XFER_BULK:
492 zlt = 1;
493 mult = 0;
494 break;
495 case USB_ENDPOINT_XFER_CONTROL:
496 ios = 1;
497 case USB_ENDPOINT_XFER_INT:
498 mult = 0;
499 break;
500 case USB_ENDPOINT_XFER_ISOC:
501 /* Calculate transactions needed for high bandwidth iso */
502 mult = (unsigned char)(1 + ((max >> 11) & 0x03));
503 max = max & 0x7ff; /* bit 0~10 */
504 /* 3 transactions at most */
505 if (mult > 3)
506 goto en_done;
507 break;
508 default:
509 goto en_done;
510 }
511
512 spin_lock_irqsave(&udc->lock, flags);
513 /* Get the endpoint queue head address */
514 dqh = ep->dqh;
515 dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
516 | (mult << EP_QUEUE_HEAD_MULT_POS)
517 | (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
518 | (ios ? EP_QUEUE_HEAD_IOS : 0);
519 dqh->next_dtd_ptr = 1;
520 dqh->size_ioc_int_sts = 0;
521
522 ep->ep.maxpacket = max;
523 ep->ep.desc = desc;
524 ep->stopped = 0;
525
526 /* Enable the endpoint for Rx or Tx and set the endpoint type */
527 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
528 if (direction == EP_DIR_IN) {
529 epctrlx &= ~EPCTRL_TX_ALL_MASK;
530 epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
531 | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
532 << EPCTRL_TX_EP_TYPE_SHIFT);
533 } else {
534 epctrlx &= ~EPCTRL_RX_ALL_MASK;
535 epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
536 | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
537 << EPCTRL_RX_EP_TYPE_SHIFT);
538 }
539 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
540
541 /*
542 * Implement Guideline (GL# USB-7) The unused endpoint type must
543 * be programmed to bulk.
544 */
545 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
546 if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
547 epctrlx |= (USB_ENDPOINT_XFER_BULK
548 << EPCTRL_RX_EP_TYPE_SHIFT);
549 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
550 }
551
552 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
553 if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
554 epctrlx |= (USB_ENDPOINT_XFER_BULK
555 << EPCTRL_TX_EP_TYPE_SHIFT);
556 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
557 }
558
559 spin_unlock_irqrestore(&udc->lock, flags);
560
561 return 0;
562 en_done:
563 return -EINVAL;
564 }
565
566 static int mv_ep_disable(struct usb_ep *_ep)
567 {
568 struct mv_udc *udc;
569 struct mv_ep *ep;
570 struct mv_dqh *dqh;
571 u32 bit_pos, epctrlx, direction;
572 unsigned long flags;
573
574 ep = container_of(_ep, struct mv_ep, ep);
575 if ((_ep == NULL) || !ep->ep.desc)
576 return -EINVAL;
577
578 udc = ep->udc;
579
580 /* Get the endpoint queue head address */
581 dqh = ep->dqh;
582
583 spin_lock_irqsave(&udc->lock, flags);
584
585 direction = ep_dir(ep);
586 bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
587
588 /* Reset the max packet length and the interrupt on Setup */
589 dqh->max_packet_length = 0;
590
591 /* Disable the endpoint for Rx or Tx and reset the endpoint type */
592 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
593 epctrlx &= ~((direction == EP_DIR_IN)
594 ? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
595 : (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
596 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
597
598 /* nuke all pending requests (does flush) */
599 nuke(ep, -ESHUTDOWN);
600
601 ep->ep.desc = NULL;
602 ep->stopped = 1;
603
604 spin_unlock_irqrestore(&udc->lock, flags);
605
606 return 0;
607 }
608
609 static struct usb_request *
610 mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
611 {
612 struct mv_req *req = NULL;
613
614 req = kzalloc(sizeof *req, gfp_flags);
615 if (!req)
616 return NULL;
617
618 req->req.dma = DMA_ADDR_INVALID;
619 INIT_LIST_HEAD(&req->queue);
620
621 return &req->req;
622 }
623
624 static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
625 {
626 struct mv_req *req = NULL;
627
628 req = container_of(_req, struct mv_req, req);
629
630 if (_req)
631 kfree(req);
632 }
633
634 static void mv_ep_fifo_flush(struct usb_ep *_ep)
635 {
636 struct mv_udc *udc;
637 u32 bit_pos, direction;
638 struct mv_ep *ep;
639 unsigned int loops;
640
641 if (!_ep)
642 return;
643
644 ep = container_of(_ep, struct mv_ep, ep);
645 if (!ep->ep.desc)
646 return;
647
648 udc = ep->udc;
649 direction = ep_dir(ep);
650
651 if (ep->ep_num == 0)
652 bit_pos = (1 << 16) | 1;
653 else if (direction == EP_DIR_OUT)
654 bit_pos = 1 << ep->ep_num;
655 else
656 bit_pos = 1 << (16 + ep->ep_num);
657
658 loops = LOOPS(EPSTATUS_TIMEOUT);
659 do {
660 unsigned int inter_loops;
661
662 if (loops == 0) {
663 dev_err(&udc->dev->dev,
664 "TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
665 (unsigned)readl(&udc->op_regs->epstatus),
666 (unsigned)bit_pos);
667 return;
668 }
669 /* Write 1 to the Flush register */
670 writel(bit_pos, &udc->op_regs->epflush);
671
672 /* Wait until flushing completed */
673 inter_loops = LOOPS(FLUSH_TIMEOUT);
674 while (readl(&udc->op_regs->epflush)) {
675 /*
676 * ENDPTFLUSH bit should be cleared to indicate this
677 * operation is complete
678 */
679 if (inter_loops == 0) {
680 dev_err(&udc->dev->dev,
681 "TIMEOUT for ENDPTFLUSH=0x%x,"
682 "bit_pos=0x%x\n",
683 (unsigned)readl(&udc->op_regs->epflush),
684 (unsigned)bit_pos);
685 return;
686 }
687 inter_loops--;
688 udelay(LOOPS_USEC);
689 }
690 loops--;
691 } while (readl(&udc->op_regs->epstatus) & bit_pos);
692 }
693
694 /* queues (submits) an I/O request to an endpoint */
695 static int
696 mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
697 {
698 struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
699 struct mv_req *req = container_of(_req, struct mv_req, req);
700 struct mv_udc *udc = ep->udc;
701 unsigned long flags;
702 int retval;
703
704 /* catch various bogus parameters */
705 if (!_req || !req->req.complete || !req->req.buf
706 || !list_empty(&req->queue)) {
707 dev_err(&udc->dev->dev, "%s, bad params", __func__);
708 return -EINVAL;
709 }
710 if (unlikely(!_ep || !ep->ep.desc)) {
711 dev_err(&udc->dev->dev, "%s, bad ep", __func__);
712 return -EINVAL;
713 }
714
715 udc = ep->udc;
716 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
717 return -ESHUTDOWN;
718
719 req->ep = ep;
720
721 /* map virtual address to hardware */
722 retval = usb_gadget_map_request(&udc->gadget, _req, ep_dir(ep));
723 if (retval)
724 return retval;
725
726 req->req.status = -EINPROGRESS;
727 req->req.actual = 0;
728 req->dtd_count = 0;
729
730 spin_lock_irqsave(&udc->lock, flags);
731
732 /* build dtds and push them to device queue */
733 if (!req_to_dtd(req)) {
734 retval = queue_dtd(ep, req);
735 if (retval) {
736 spin_unlock_irqrestore(&udc->lock, flags);
737 dev_err(&udc->dev->dev, "Failed to queue dtd\n");
738 goto err_unmap_dma;
739 }
740 } else {
741 spin_unlock_irqrestore(&udc->lock, flags);
742 dev_err(&udc->dev->dev, "Failed to dma_pool_alloc\n");
743 retval = -ENOMEM;
744 goto err_unmap_dma;
745 }
746
747 /* Update ep0 state */
748 if (ep->ep_num == 0)
749 udc->ep0_state = DATA_STATE_XMIT;
750
751 /* irq handler advances the queue */
752 list_add_tail(&req->queue, &ep->queue);
753 spin_unlock_irqrestore(&udc->lock, flags);
754
755 return 0;
756
757 err_unmap_dma:
758 usb_gadget_unmap_request(&udc->gadget, _req, ep_dir(ep));
759
760 return retval;
761 }
762
763 static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
764 {
765 struct mv_dqh *dqh = ep->dqh;
766 u32 bit_pos;
767
768 /* Write dQH next pointer and terminate bit to 0 */
769 dqh->next_dtd_ptr = req->head->td_dma
770 & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
771
772 /* clear active and halt bit, in case set from a previous error */
773 dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
774
775 /* Ensure that updates to the QH will occure before priming. */
776 wmb();
777
778 bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
779
780 /* Prime the Endpoint */
781 writel(bit_pos, &ep->udc->op_regs->epprime);
782 }
783
784 /* dequeues (cancels, unlinks) an I/O request from an endpoint */
785 static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
786 {
787 struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
788 struct mv_req *req;
789 struct mv_udc *udc = ep->udc;
790 unsigned long flags;
791 int stopped, ret = 0;
792 u32 epctrlx;
793
794 if (!_ep || !_req)
795 return -EINVAL;
796
797 spin_lock_irqsave(&ep->udc->lock, flags);
798 stopped = ep->stopped;
799
800 /* Stop the ep before we deal with the queue */
801 ep->stopped = 1;
802 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
803 if (ep_dir(ep) == EP_DIR_IN)
804 epctrlx &= ~EPCTRL_TX_ENABLE;
805 else
806 epctrlx &= ~EPCTRL_RX_ENABLE;
807 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
808
809 /* make sure it's actually queued on this endpoint */
810 list_for_each_entry(req, &ep->queue, queue) {
811 if (&req->req == _req)
812 break;
813 }
814 if (&req->req != _req) {
815 ret = -EINVAL;
816 goto out;
817 }
818
819 /* The request is in progress, or completed but not dequeued */
820 if (ep->queue.next == &req->queue) {
821 _req->status = -ECONNRESET;
822 mv_ep_fifo_flush(_ep); /* flush current transfer */
823
824 /* The request isn't the last request in this ep queue */
825 if (req->queue.next != &ep->queue) {
826 struct mv_req *next_req;
827
828 next_req = list_entry(req->queue.next,
829 struct mv_req, queue);
830
831 /* Point the QH to the first TD of next request */
832 mv_prime_ep(ep, next_req);
833 } else {
834 struct mv_dqh *qh;
835
836 qh = ep->dqh;
837 qh->next_dtd_ptr = 1;
838 qh->size_ioc_int_sts = 0;
839 }
840
841 /* The request hasn't been processed, patch up the TD chain */
842 } else {
843 struct mv_req *prev_req;
844
845 prev_req = list_entry(req->queue.prev, struct mv_req, queue);
846 writel(readl(&req->tail->dtd_next),
847 &prev_req->tail->dtd_next);
848
849 }
850
851 done(ep, req, -ECONNRESET);
852
853 /* Enable EP */
854 out:
855 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
856 if (ep_dir(ep) == EP_DIR_IN)
857 epctrlx |= EPCTRL_TX_ENABLE;
858 else
859 epctrlx |= EPCTRL_RX_ENABLE;
860 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
861 ep->stopped = stopped;
862
863 spin_unlock_irqrestore(&ep->udc->lock, flags);
864 return ret;
865 }
866
867 static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
868 {
869 u32 epctrlx;
870
871 epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
872
873 if (stall) {
874 if (direction == EP_DIR_IN)
875 epctrlx |= EPCTRL_TX_EP_STALL;
876 else
877 epctrlx |= EPCTRL_RX_EP_STALL;
878 } else {
879 if (direction == EP_DIR_IN) {
880 epctrlx &= ~EPCTRL_TX_EP_STALL;
881 epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
882 } else {
883 epctrlx &= ~EPCTRL_RX_EP_STALL;
884 epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
885 }
886 }
887 writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
888 }
889
890 static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
891 {
892 u32 epctrlx;
893
894 epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
895
896 if (direction == EP_DIR_OUT)
897 return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
898 else
899 return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
900 }
901
902 static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
903 {
904 struct mv_ep *ep;
905 unsigned long flags = 0;
906 int status = 0;
907 struct mv_udc *udc;
908
909 ep = container_of(_ep, struct mv_ep, ep);
910 udc = ep->udc;
911 if (!_ep || !ep->ep.desc) {
912 status = -EINVAL;
913 goto out;
914 }
915
916 if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
917 status = -EOPNOTSUPP;
918 goto out;
919 }
920
921 /*
922 * Attempt to halt IN ep will fail if any transfer requests
923 * are still queue
924 */
925 if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
926 status = -EAGAIN;
927 goto out;
928 }
929
930 spin_lock_irqsave(&ep->udc->lock, flags);
931 ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
932 if (halt && wedge)
933 ep->wedge = 1;
934 else if (!halt)
935 ep->wedge = 0;
936 spin_unlock_irqrestore(&ep->udc->lock, flags);
937
938 if (ep->ep_num == 0) {
939 udc->ep0_state = WAIT_FOR_SETUP;
940 udc->ep0_dir = EP_DIR_OUT;
941 }
942 out:
943 return status;
944 }
945
/* set/clear halt on an endpoint without marking it wedged */
static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
{
	return mv_ep_set_halt_wedge(_ep, halt, 0);
}
950
/* halt an endpoint and mark it wedged */
static int mv_ep_set_wedge(struct usb_ep *_ep)
{
	return mv_ep_set_halt_wedge(_ep, 1, 1);
}
955
956 static struct usb_ep_ops mv_ep_ops = {
957 .enable = mv_ep_enable,
958 .disable = mv_ep_disable,
959
960 .alloc_request = mv_alloc_request,
961 .free_request = mv_free_request,
962
963 .queue = mv_ep_queue,
964 .dequeue = mv_ep_dequeue,
965
966 .set_wedge = mv_ep_set_wedge,
967 .set_halt = mv_ep_set_halt,
968 .fifo_flush = mv_ep_fifo_flush, /* flush fifo */
969 };
970
971 static void udc_clock_enable(struct mv_udc *udc)
972 {
973 clk_prepare_enable(udc->clk);
974 }
975
976 static void udc_clock_disable(struct mv_udc *udc)
977 {
978 clk_disable_unprepare(udc->clk);
979 }
980
981 static void udc_stop(struct mv_udc *udc)
982 {
983 u32 tmp;
984
985 /* Disable interrupts */
986 tmp = readl(&udc->op_regs->usbintr);
987 tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
988 USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
989 writel(tmp, &udc->op_regs->usbintr);
990
991 udc->stopped = 1;
992
993 /* Reset the Run the bit in the command register to stop VUSB */
994 tmp = readl(&udc->op_regs->usbcmd);
995 tmp &= ~USBCMD_RUN_STOP;
996 writel(tmp, &udc->op_regs->usbcmd);
997 }
998
999 static void udc_start(struct mv_udc *udc)
1000 {
1001 u32 usbintr;
1002
1003 usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
1004 | USBINTR_PORT_CHANGE_DETECT_EN
1005 | USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
1006 /* Enable interrupts */
1007 writel(usbintr, &udc->op_regs->usbintr);
1008
1009 udc->stopped = 0;
1010
1011 /* Set the Run bit in the command register */
1012 writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
1013 }
1014
1015 static int udc_reset(struct mv_udc *udc)
1016 {
1017 unsigned int loops;
1018 u32 tmp, portsc;
1019
1020 /* Stop the controller */
1021 tmp = readl(&udc->op_regs->usbcmd);
1022 tmp &= ~USBCMD_RUN_STOP;
1023 writel(tmp, &udc->op_regs->usbcmd);
1024
1025 /* Reset the controller to get default values */
1026 writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);
1027
1028 /* wait for reset to complete */
1029 loops = LOOPS(RESET_TIMEOUT);
1030 while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
1031 if (loops == 0) {
1032 dev_err(&udc->dev->dev,
1033 "Wait for RESET completed TIMEOUT\n");
1034 return -ETIMEDOUT;
1035 }
1036 loops--;
1037 udelay(LOOPS_USEC);
1038 }
1039
1040 /* set controller to device mode */
1041 tmp = readl(&udc->op_regs->usbmode);
1042 tmp |= USBMODE_CTRL_MODE_DEVICE;
1043
1044 /* turn setup lockout off, require setup tripwire in usbcmd */
1045 tmp |= USBMODE_SETUP_LOCK_OFF;
1046
1047 writel(tmp, &udc->op_regs->usbmode);
1048
1049 writel(0x0, &udc->op_regs->epsetupstat);
1050
1051 /* Configure the Endpoint List Address */
1052 writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
1053 &udc->op_regs->eplistaddr);
1054
1055 portsc = readl(&udc->op_regs->portsc[0]);
1056 if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
1057 portsc &= (~PORTSCX_W1C_BITS | ~PORTSCX_PORT_POWER);
1058
1059 if (udc->force_fs)
1060 portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
1061 else
1062 portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);
1063
1064 writel(portsc, &udc->op_regs->portsc[0]);
1065
1066 tmp = readl(&udc->op_regs->epctrlx[0]);
1067 tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
1068 writel(tmp, &udc->op_regs->epctrlx[0]);
1069
1070 return 0;
1071 }
1072
1073 static int mv_udc_enable_internal(struct mv_udc *udc)
1074 {
1075 int retval;
1076
1077 if (udc->active)
1078 return 0;
1079
1080 dev_dbg(&udc->dev->dev, "enable udc\n");
1081 udc_clock_enable(udc);
1082 if (udc->pdata->phy_init) {
1083 retval = udc->pdata->phy_init(udc->phy_regs);
1084 if (retval) {
1085 dev_err(&udc->dev->dev,
1086 "init phy error %d\n", retval);
1087 udc_clock_disable(udc);
1088 return retval;
1089 }
1090 }
1091 udc->active = 1;
1092
1093 return 0;
1094 }
1095
1096 static int mv_udc_enable(struct mv_udc *udc)
1097 {
1098 if (udc->clock_gating)
1099 return mv_udc_enable_internal(udc);
1100
1101 return 0;
1102 }
1103
1104 static void mv_udc_disable_internal(struct mv_udc *udc)
1105 {
1106 if (udc->active) {
1107 dev_dbg(&udc->dev->dev, "disable udc\n");
1108 if (udc->pdata->phy_deinit)
1109 udc->pdata->phy_deinit(udc->phy_regs);
1110 udc_clock_disable(udc);
1111 udc->active = 0;
1112 }
1113 }
1114
1115 static void mv_udc_disable(struct mv_udc *udc)
1116 {
1117 if (udc->clock_gating)
1118 mv_udc_disable_internal(udc);
1119 }
1120
1121 static int mv_udc_get_frame(struct usb_gadget *gadget)
1122 {
1123 struct mv_udc *udc;
1124 u16 retval;
1125
1126 if (!gadget)
1127 return -ENODEV;
1128
1129 udc = container_of(gadget, struct mv_udc, gadget);
1130
1131 retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;
1132
1133 return retval;
1134 }
1135
1136 /* Tries to wake up the host connected to this gadget */
1137 static int mv_udc_wakeup(struct usb_gadget *gadget)
1138 {
1139 struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
1140 u32 portsc;
1141
1142 /* Remote wakeup feature not enabled by host */
1143 if (!udc->remote_wakeup)
1144 return -ENOTSUPP;
1145
1146 portsc = readl(&udc->op_regs->portsc);
1147 /* not suspended? */
1148 if (!(portsc & PORTSCX_PORT_SUSPEND))
1149 return 0;
1150 /* trigger force resume */
1151 portsc |= PORTSCX_PORT_FORCE_RESUME;
1152 writel(portsc, &udc->op_regs->portsc[0]);
1153 return 0;
1154 }
1155
1156 static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
1157 {
1158 struct mv_udc *udc;
1159 unsigned long flags;
1160 int retval = 0;
1161
1162 udc = container_of(gadget, struct mv_udc, gadget);
1163 spin_lock_irqsave(&udc->lock, flags);
1164
1165 udc->vbus_active = (is_active != 0);
1166
1167 dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
1168 __func__, udc->softconnect, udc->vbus_active);
1169
1170 if (udc->driver && udc->softconnect && udc->vbus_active) {
1171 retval = mv_udc_enable(udc);
1172 if (retval == 0) {
1173 /* Clock is disabled, need re-init registers */
1174 udc_reset(udc);
1175 ep0_reset(udc);
1176 udc_start(udc);
1177 }
1178 } else if (udc->driver && udc->softconnect) {
1179 if (!udc->active)
1180 goto out;
1181
1182 /* stop all the transfer in queue*/
1183 stop_activity(udc, udc->driver);
1184 udc_stop(udc);
1185 mv_udc_disable(udc);
1186 }
1187
1188 out:
1189 spin_unlock_irqrestore(&udc->lock, flags);
1190 return retval;
1191 }
1192
1193 static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
1194 {
1195 struct mv_udc *udc;
1196 unsigned long flags;
1197 int retval = 0;
1198
1199 udc = container_of(gadget, struct mv_udc, gadget);
1200 spin_lock_irqsave(&udc->lock, flags);
1201
1202 udc->softconnect = (is_on != 0);
1203
1204 dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
1205 __func__, udc->softconnect, udc->vbus_active);
1206
1207 if (udc->driver && udc->softconnect && udc->vbus_active) {
1208 retval = mv_udc_enable(udc);
1209 if (retval == 0) {
1210 /* Clock is disabled, need re-init registers */
1211 udc_reset(udc);
1212 ep0_reset(udc);
1213 udc_start(udc);
1214 }
1215 } else if (udc->driver && udc->vbus_active) {
1216 /* stop all the transfer in queue*/
1217 stop_activity(udc, udc->driver);
1218 udc_stop(udc);
1219 mv_udc_disable(udc);
1220 }
1221
1222 spin_unlock_irqrestore(&udc->lock, flags);
1223 return retval;
1224 }
1225
1226 static int mv_udc_start(struct usb_gadget *, struct usb_gadget_driver *);
1227 static int mv_udc_stop(struct usb_gadget *);
/* device controller usb_gadget_ops structure */
static const struct usb_gadget_ops mv_ops = {

	/* returns the current frame number */
	.get_frame = mv_udc_get_frame,

	/* tries to wake up the host connected to this gadget */
	.wakeup = mv_udc_wakeup,

	/* notify controller that VBUS is powered or not */
	.vbus_session = mv_udc_vbus_session,

	/* D+ pullup, software-controlled connect/disconnect to USB host */
	.pullup = mv_udc_pullup,

	/* bind/unbind a gadget driver (see mv_udc_start/mv_udc_stop) */
	.udc_start = mv_udc_start,
	.udc_stop = mv_udc_stop,
};
1245
1246 static int eps_init(struct mv_udc *udc)
1247 {
1248 struct mv_ep *ep;
1249 char name[14];
1250 int i;
1251
1252 /* initialize ep0 */
1253 ep = &udc->eps[0];
1254 ep->udc = udc;
1255 strncpy(ep->name, "ep0", sizeof(ep->name));
1256 ep->ep.name = ep->name;
1257 ep->ep.ops = &mv_ep_ops;
1258 ep->wedge = 0;
1259 ep->stopped = 0;
1260 usb_ep_set_maxpacket_limit(&ep->ep, EP0_MAX_PKT_SIZE);
1261 ep->ep_num = 0;
1262 ep->ep.desc = &mv_ep0_desc;
1263 INIT_LIST_HEAD(&ep->queue);
1264
1265 ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
1266
1267 /* initialize other endpoints */
1268 for (i = 2; i < udc->max_eps * 2; i++) {
1269 ep = &udc->eps[i];
1270 if (i % 2) {
1271 snprintf(name, sizeof(name), "ep%din", i / 2);
1272 ep->direction = EP_DIR_IN;
1273 } else {
1274 snprintf(name, sizeof(name), "ep%dout", i / 2);
1275 ep->direction = EP_DIR_OUT;
1276 }
1277 ep->udc = udc;
1278 strncpy(ep->name, name, sizeof(ep->name));
1279 ep->ep.name = ep->name;
1280
1281 ep->ep.ops = &mv_ep_ops;
1282 ep->stopped = 0;
1283 usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
1284 ep->ep_num = i / 2;
1285
1286 INIT_LIST_HEAD(&ep->queue);
1287 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
1288
1289 ep->dqh = &udc->ep_dqh[i];
1290 }
1291
1292 return 0;
1293 }
1294
1295 /* delete all endpoint requests, called with spinlock held */
1296 static void nuke(struct mv_ep *ep, int status)
1297 {
1298 /* called with spinlock held */
1299 ep->stopped = 1;
1300
1301 /* endpoint fifo flush */
1302 mv_ep_fifo_flush(&ep->ep);
1303
1304 while (!list_empty(&ep->queue)) {
1305 struct mv_req *req = NULL;
1306 req = list_entry(ep->queue.next, struct mv_req, queue);
1307 done(ep, req, status);
1308 }
1309 }
1310
1311 static void gadget_reset(struct mv_udc *udc, struct usb_gadget_driver *driver)
1312 {
1313 struct mv_ep *ep;
1314
1315 nuke(&udc->eps[0], -ESHUTDOWN);
1316
1317 list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
1318 nuke(ep, -ESHUTDOWN);
1319 }
1320
1321 /* report reset; the driver is already quiesced */
1322 if (driver) {
1323 spin_unlock(&udc->lock);
1324 usb_gadget_udc_reset(&udc->gadget, driver);
1325 spin_lock(&udc->lock);
1326 }
1327 }
1328 /* stop all USB activities */
1329 static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
1330 {
1331 struct mv_ep *ep;
1332
1333 nuke(&udc->eps[0], -ESHUTDOWN);
1334
1335 list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
1336 nuke(ep, -ESHUTDOWN);
1337 }
1338
1339 /* report disconnect; the driver is already quiesced */
1340 if (driver) {
1341 spin_unlock(&udc->lock);
1342 driver->disconnect(&udc->gadget);
1343 spin_lock(&udc->lock);
1344 }
1345 }
1346
1347 static int mv_udc_start(struct usb_gadget *gadget,
1348 struct usb_gadget_driver *driver)
1349 {
1350 struct mv_udc *udc;
1351 int retval = 0;
1352 unsigned long flags;
1353
1354 udc = container_of(gadget, struct mv_udc, gadget);
1355
1356 if (udc->driver)
1357 return -EBUSY;
1358
1359 spin_lock_irqsave(&udc->lock, flags);
1360
1361 /* hook up the driver ... */
1362 driver->driver.bus = NULL;
1363 udc->driver = driver;
1364
1365 udc->usb_state = USB_STATE_ATTACHED;
1366 udc->ep0_state = WAIT_FOR_SETUP;
1367 udc->ep0_dir = EP_DIR_OUT;
1368
1369 spin_unlock_irqrestore(&udc->lock, flags);
1370
1371 if (udc->transceiver) {
1372 retval = otg_set_peripheral(udc->transceiver->otg,
1373 &udc->gadget);
1374 if (retval) {
1375 dev_err(&udc->dev->dev,
1376 "unable to register peripheral to otg\n");
1377 udc->driver = NULL;
1378 return retval;
1379 }
1380 }
1381
1382 /* When boot with cable attached, there will be no vbus irq occurred */
1383 if (udc->qwork)
1384 queue_work(udc->qwork, &udc->vbus_work);
1385
1386 return 0;
1387 }
1388
1389 static int mv_udc_stop(struct usb_gadget *gadget)
1390 {
1391 struct mv_udc *udc;
1392 unsigned long flags;
1393
1394 udc = container_of(gadget, struct mv_udc, gadget);
1395
1396 spin_lock_irqsave(&udc->lock, flags);
1397
1398 mv_udc_enable(udc);
1399 udc_stop(udc);
1400
1401 /* stop all usb activities */
1402 udc->gadget.speed = USB_SPEED_UNKNOWN;
1403 stop_activity(udc, NULL);
1404 mv_udc_disable(udc);
1405
1406 spin_unlock_irqrestore(&udc->lock, flags);
1407
1408 /* unbind gadget driver */
1409 udc->driver = NULL;
1410
1411 return 0;
1412 }
1413
1414 static void mv_set_ptc(struct mv_udc *udc, u32 mode)
1415 {
1416 u32 portsc;
1417
1418 portsc = readl(&udc->op_regs->portsc[0]);
1419 portsc |= mode << 16;
1420 writel(portsc, &udc->op_regs->portsc[0]);
1421 }
1422
1423 static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
1424 {
1425 struct mv_ep *mvep = container_of(ep, struct mv_ep, ep);
1426 struct mv_req *req = container_of(_req, struct mv_req, req);
1427 struct mv_udc *udc;
1428 unsigned long flags;
1429
1430 udc = mvep->udc;
1431
1432 dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);
1433
1434 spin_lock_irqsave(&udc->lock, flags);
1435 if (req->test_mode) {
1436 mv_set_ptc(udc, req->test_mode);
1437 req->test_mode = 0;
1438 }
1439 spin_unlock_irqrestore(&udc->lock, flags);
1440 }
1441
/*
 * Prime an ep0 status (or 2-byte GET_STATUS data) transaction.
 * @direction: EP_DIR_IN or EP_DIR_OUT for the status stage.
 * @status: value copied into the ep0 buffer when @empty is false.
 * @empty: true sends a zero-length packet instead of @status.
 * Returns 0 on success or a negative errno. Called with udc->lock held.
 */
static int
udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
{
	int retval = 0;
	struct mv_req *req;
	struct mv_ep *ep;

	ep = &udc->eps[0];
	udc->ep0_dir = direction;
	udc->ep0_state = WAIT_FOR_OUT_STATUS;

	/* the single pre-allocated ep0 status request (set up in probe) */
	req = udc->status_req;

	/* fill in the reqest structure */
	if (empty == false) {
		*((u16 *) req->req.buf) = cpu_to_le16(status);
		req->req.length = 2;
	} else
		req->req.length = 0;

	req->ep = ep;
	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	if (udc->test_mode) {
		/* enter test mode only after this status stage completes */
		req->req.complete = prime_status_complete;
		req->test_mode = udc->test_mode;
		udc->test_mode = 0;
	} else
		req->req.complete = NULL;
	req->dtd_count = 0;

	if (req->req.dma == DMA_ADDR_INVALID) {
		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
				req->req.buf, req->req.length,
				ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = 1;
	}

	/* prime the data phase */
	if (!req_to_dtd(req)) {
		retval = queue_dtd(ep, req);
		if (retval) {
			dev_err(&udc->dev->dev,
				"Failed to queue dtd when prime status\n");
			goto out;
		}
	} else{	/* no mem */
		retval = -ENOMEM;
		dev_err(&udc->dev->dev,
			"Failed to dma_pool_alloc when prime status\n");
		goto out;
	}

	list_add_tail(&req->queue, &ep->queue);

	return 0;
out:
	/*
	 * NOTE(review): the buffer is mapped above with dma_map_single()
	 * but unmapped here via usb_gadget_unmap_request() -- verify both
	 * paths track req.dma/req.mapped the same way.
	 */
	usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));

	return retval;
}
1503
1504 static void mv_udc_testmode(struct mv_udc *udc, u16 index)
1505 {
1506 if (index <= TEST_FORCE_EN) {
1507 udc->test_mode = index;
1508 if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1509 ep0_stall(udc);
1510 } else
1511 dev_err(&udc->dev->dev,
1512 "This test mode(%d) is not supported\n", index);
1513 }
1514
1515 static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1516 {
1517 udc->dev_addr = (u8)setup->wValue;
1518
1519 /* update usb state */
1520 udc->usb_state = USB_STATE_ADDRESS;
1521
1522 if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1523 ep0_stall(udc);
1524 }
1525
1526 static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
1527 struct usb_ctrlrequest *setup)
1528 {
1529 u16 status = 0;
1530 int retval;
1531
1532 if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
1533 != (USB_DIR_IN | USB_TYPE_STANDARD))
1534 return;
1535
1536 if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
1537 status = 1 << USB_DEVICE_SELF_POWERED;
1538 status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
1539 } else if ((setup->bRequestType & USB_RECIP_MASK)
1540 == USB_RECIP_INTERFACE) {
1541 /* get interface status */
1542 status = 0;
1543 } else if ((setup->bRequestType & USB_RECIP_MASK)
1544 == USB_RECIP_ENDPOINT) {
1545 u8 ep_num, direction;
1546
1547 ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1548 direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1549 ? EP_DIR_IN : EP_DIR_OUT;
1550 status = ep_is_stall(udc, ep_num, direction)
1551 << USB_ENDPOINT_HALT;
1552 }
1553
1554 retval = udc_prime_status(udc, EP_DIR_IN, status, false);
1555 if (retval)
1556 ep0_stall(udc);
1557 else
1558 udc->ep0_state = DATA_STATE_XMIT;
1559 }
1560
/*
 * USB_REQ_CLEAR_FEATURE for device and endpoint recipients.
 * Called with udc->lock held; dropped around ep_set_stall().
 */
static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	u8 ep_num;
	u8 direction;
	struct mv_ep *ep;

	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
		switch (setup->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			udc->remote_wakeup = 0;
			break;
		default:
			goto out;
		}
	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
		switch (setup->wValue) {
		case USB_ENDPOINT_HALT:
			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
			/*
			 * NOTE(review): wValue equals USB_ENDPOINT_HALT in
			 * this arm, so the wValue != 0 half of the check is
			 * dead; only wLength and ep_num are effective.
			 */
			if (setup->wValue != 0 || setup->wLength != 0
				|| ep_num > udc->max_eps)
				goto out;
			ep = &udc->eps[ep_num * 2 + direction];
			/* a wedged endpoint ignores host CLEAR_FEATURE(HALT) */
			if (ep->wedge == 1)
				break;
			spin_unlock(&udc->lock);
			ep_set_stall(udc, ep_num, direction, 0);
			spin_lock(&udc->lock);
			break;
		default:
			goto out;
		}
	} else
		goto out;

	/* zero-length IN status stage acknowledges the request */
	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
out:
	return;
}
1604
/*
 * USB_REQ_SET_FEATURE for device and endpoint recipients.
 * Called with udc->lock held; dropped around ep_set_stall().
 */
static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	u8 ep_num;
	u8 direction;

	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
		switch (setup->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			udc->remote_wakeup = 1;
			break;
		case USB_DEVICE_TEST_MODE:
			/* test selector lives in the high byte of wIndex */
			if (setup->wIndex & 0xFF
				|| udc->gadget.speed != USB_SPEED_HIGH)
				ep0_stall(udc);

			/*
			 * NOTE(review): ep0_stall() does not abort this
			 * handler; mv_udc_testmode() below still runs even
			 * after a failed precondition -- confirm intended.
			 */
			if (udc->usb_state != USB_STATE_CONFIGURED
				&& udc->usb_state != USB_STATE_ADDRESS
				&& udc->usb_state != USB_STATE_DEFAULT)
				ep0_stall(udc);

			mv_udc_testmode(udc, (setup->wIndex >> 8));
			goto out;
		default:
			goto out;
		}
	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
		switch (setup->wValue) {
		case USB_ENDPOINT_HALT:
			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
			/*
			 * NOTE(review): wValue equals USB_ENDPOINT_HALT (0)
			 * here, so the wValue != 0 half of this check is
			 * always false.
			 */
			if (setup->wValue != 0 || setup->wLength != 0
				|| ep_num > udc->max_eps)
				goto out;
			spin_unlock(&udc->lock);
			ep_set_stall(udc, ep_num, direction, 1);
			spin_lock(&udc->lock);
			break;
		default:
			goto out;
		}
	} else
		goto out;

	/* zero-length IN status stage acknowledges the request */
	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
out:
	return;
}
1656
/*
 * Dispatch a SETUP packet received on ep_num. Standard requests the
 * UDC must intercept (GET_STATUS, SET_ADDRESS, CLEAR/SET_FEATURE) are
 * handled here; everything else is delegated to the gadget driver's
 * setup() callback. Called with udc->lock held; the lock is dropped
 * around the driver callback (see __releases/__acquires).
 */
static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
	__releases(&ep->udc->lock)
	__acquires(&ep->udc->lock)
{
	bool delegate = false;

	/* drop any stale requests still queued on the ep's OUT half */
	nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);

	dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
			setup->bRequestType, setup->bRequest,
			setup->wValue, setup->wIndex, setup->wLength);
	/* We process some standard setup requests here */
	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup->bRequest) {
		case USB_REQ_GET_STATUS:
			ch9getstatus(udc, ep_num, setup);
			break;

		case USB_REQ_SET_ADDRESS:
			ch9setaddress(udc, setup);
			break;

		case USB_REQ_CLEAR_FEATURE:
			ch9clearfeature(udc, setup);
			break;

		case USB_REQ_SET_FEATURE:
			ch9setfeature(udc, setup);
			break;

		default:
			delegate = true;
		}
	} else
		delegate = true;

	/* delegate USB standard requests to the gadget driver */
	if (delegate == true) {
		/* USB requests handled by gadget */
		if (setup->wLength) {
			/* DATA phase from gadget, STATUS phase from udc */
			udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
					? EP_DIR_IN : EP_DIR_OUT;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
					? DATA_STATE_XMIT : DATA_STATE_RECV;
		} else {
			/* no DATA phase, IN STATUS phase from gadget */
			udc->ep0_dir = EP_DIR_IN;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = WAIT_FOR_OUT_STATUS;
		}
	}
}
1720
/* complete DATA or STATUS phase of ep0 prime status phase if needed */
static void ep0_req_complete(struct mv_udc *udc,
	struct mv_ep *ep0, struct mv_req *req)
{
	u32 new_addr;

	/* a SET_ADDRESS is committed to hw only after its status stage */
	if (udc->usb_state == USB_STATE_ADDRESS) {
		/* set the new address */
		new_addr = (u32)udc->dev_addr;
		writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
			&udc->op_regs->deviceaddr);
	}

	done(ep0, req, 0);

	/* advance the ep0 state machine based on which phase finished */
	switch (udc->ep0_state) {
	case DATA_STATE_XMIT:
		/* receive status phase */
		if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
			ep0_stall(udc);
		break;
	case DATA_STATE_RECV:
		/* send status phase */
		if (udc_prime_status(udc, EP_DIR_IN, 0 , true))
			ep0_stall(udc);
		break;
	case WAIT_FOR_OUT_STATUS:
		udc->ep0_state = WAIT_FOR_SETUP;
		break;
	case WAIT_FOR_SETUP:
		dev_err(&udc->dev->dev, "unexpect ep0 packets\n");
		break;
	default:
		ep0_stall(udc);
		break;
	}
}
1758
/*
 * Copy the 8-byte SETUP packet for ep_num out of its OUT dQH into
 * buffer_ptr, using the controller's setup-tripwire protocol: the copy
 * is retried until the tripwire bit survives the copy, proving a new
 * setup packet did not overwrite the buffer mid-read.
 */
static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
{
	u32 temp;
	struct mv_dqh *dqh;

	dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];

	/* Clear bit in ENDPTSETUPSTAT */
	writel((1 << ep_num), &udc->op_regs->epsetupstat);

	/* while a hazard exists when setup package arrives */
	do {
		/* Set Setup Tripwire */
		temp = readl(&udc->op_regs->usbcmd);
		writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);

		/* Copy the setup packet to local buffer */
		memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
	} while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));

	/* Clear Setup Tripwire */
	temp = readl(&udc->op_regs->usbcmd);
	writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
}
1783
/*
 * USBSTS_INT service routine: first drain pending SETUP packets, then
 * walk ENDPTCOMPLETE and retire finished requests on each endpoint
 * queue. Called with udc->lock held.
 */
static void irq_process_tr_complete(struct mv_udc *udc)
{
	u32 tmp, bit_pos;
	int i, ep_num = 0, direction = 0;
	struct mv_ep *curr_ep;
	struct mv_req *curr_req, *temp_req;
	int status;

	/*
	 * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
	 * because the setup packets are to be read ASAP
	 */

	/* Process all Setup packet received interrupts */
	tmp = readl(&udc->op_regs->epsetupstat);

	if (tmp) {
		for (i = 0; i < udc->max_eps; i++) {
			if (tmp & (1 << i)) {
				get_setup_data(udc, i,
					(u8 *)(&udc->local_setup_buff));
				handle_setup_packet(udc, i,
					&udc->local_setup_buff);
			}
		}
	}

	/* Don't clear the endpoint setup status register here.
	 * It is cleared as a setup packet is read out of the buffer
	 */

	/* Process non-setup transaction complete interrupts */
	tmp = readl(&udc->op_regs->epcomplete);

	if (!tmp)
		return;

	writel(tmp, &udc->op_regs->epcomplete);

	for (i = 0; i < udc->max_eps * 2; i++) {
		/* even i = OUT half, odd i = IN half of endpoint i/2 */
		ep_num = i >> 1;
		direction = i % 2;

		/* IN completions sit 16 bits above the OUT ones */
		bit_pos = 1 << (ep_num + 16 * direction);

		if (!(bit_pos & tmp))
			continue;

		/* index 1 is ep0's IN half, which shares eps[0] */
		if (i == 1)
			curr_ep = &udc->eps[0];
		else
			curr_ep = &udc->eps[i];
		/* process the req queue until an uncomplete request */
		list_for_each_entry_safe(curr_req, temp_req,
			&curr_ep->queue, queue) {
			status = process_ep_req(udc, i, curr_req);
			if (status)
				break;

			/* write back status to req */
			curr_req->req.status = status;

			/* ep0 request completion */
			if (ep_num == 0) {
				ep0_req_complete(udc, curr_ep, curr_req);
				break;
			} else {
				done(curr_ep, curr_req, status);
			}
		}
	}
}
1856
/*
 * USBSTS_RESET service routine. Re-arms the ep0 state machine, clears
 * the device address and all setup/complete status, waits for primes
 * to drain and flushes every endpoint; then either reports a bus reset
 * to the gadget (port still in reset signalling) or fully
 * re-initializes the controller (reset already over). Called with
 * udc->lock held.
 */
static void irq_process_reset(struct mv_udc *udc)
{
	u32 tmp;
	unsigned int loops;

	udc->ep0_dir = EP_DIR_OUT;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->remote_wakeup = 0;	/* default to 0 on reset */

	/* The address bits are past bit 25-31. Set the address */
	tmp = readl(&udc->op_regs->deviceaddr);
	tmp &= ~(USB_DEVICE_ADDRESS_MASK);
	writel(tmp, &udc->op_regs->deviceaddr);

	/* Clear all the setup token semaphores */
	tmp = readl(&udc->op_regs->epsetupstat);
	writel(tmp, &udc->op_regs->epsetupstat);

	/* Clear all the endpoint complete status bits */
	tmp = readl(&udc->op_regs->epcomplete);
	writel(tmp, &udc->op_regs->epcomplete);

	/* wait until all endptprime bits cleared */
	loops = LOOPS(PRIME_TIMEOUT);
	while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) {
		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"Timeout for ENDPTPRIME = 0x%x\n",
				readl(&udc->op_regs->epprime));
			break;
		}
		loops--;
		udelay(LOOPS_USEC);
	}

	/* Write 1s to the Flush register */
	writel((u32)~0, &udc->op_regs->epflush);

	if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
		dev_info(&udc->dev->dev, "usb bus reset\n");
		udc->usb_state = USB_STATE_DEFAULT;
		/* reset all the queues, stop all USB activities */
		gadget_reset(udc, udc->driver);
	} else {
		dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
			readl(&udc->op_regs->portsc));

		/*
		 * re-initialize
		 * controller reset
		 */
		udc_reset(udc);

		/* reset all the queues, stop all USB activities */
		stop_activity(udc, udc->driver);

		/* reset ep0 dQH and endptctrl */
		ep0_reset(udc);

		/* enable interrupt and set controller to run state */
		udc_start(udc);

		udc->usb_state = USB_STATE_ATTACHED;
	}
}
1922
1923 static void handle_bus_resume(struct mv_udc *udc)
1924 {
1925 udc->usb_state = udc->resume_state;
1926 udc->resume_state = 0;
1927
1928 /* report resume to the driver */
1929 if (udc->driver) {
1930 if (udc->driver->resume) {
1931 spin_unlock(&udc->lock);
1932 udc->driver->resume(&udc->gadget);
1933 spin_lock(&udc->lock);
1934 }
1935 }
1936 }
1937
1938 static void irq_process_suspend(struct mv_udc *udc)
1939 {
1940 udc->resume_state = udc->usb_state;
1941 udc->usb_state = USB_STATE_SUSPENDED;
1942
1943 if (udc->driver->suspend) {
1944 spin_unlock(&udc->lock);
1945 udc->driver->suspend(&udc->gadget);
1946 spin_lock(&udc->lock);
1947 }
1948 }
1949
/*
 * USBSTS_PORT_CHANGE service routine: latch the negotiated speed once
 * reset signalling ends, and track suspend/resume transitions. Called
 * with udc->lock held; dropped around the driver suspend callback.
 */
static void irq_process_port_change(struct mv_udc *udc)
{
	u32 portsc;

	portsc = readl(&udc->op_regs->portsc[0]);
	if (!(portsc & PORTSCX_PORT_RESET)) {
		/* Get the speed */
		u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
		switch (speed) {
		case PORTSCX_PORT_SPEED_HIGH:
			udc->gadget.speed = USB_SPEED_HIGH;
			break;
		case PORTSCX_PORT_SPEED_FULL:
			udc->gadget.speed = USB_SPEED_FULL;
			break;
		case PORTSCX_PORT_SPEED_LOW:
			udc->gadget.speed = USB_SPEED_LOW;
			break;
		default:
			udc->gadget.speed = USB_SPEED_UNKNOWN;
			break;
		}
	}

	if (portsc & PORTSCX_PORT_SUSPEND) {
		udc->resume_state = udc->usb_state;
		udc->usb_state = USB_STATE_SUSPENDED;
		/*
		 * NOTE(review): udc->driver is not NULL-checked here,
		 * unlike handle_bus_resume() -- confirm a port-change irq
		 * cannot arrive before a gadget driver is bound.
		 */
		if (udc->driver->suspend) {
			spin_unlock(&udc->lock);
			udc->driver->suspend(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	/* port left suspend while we thought we were suspended: resume */
	if (!(portsc & PORTSCX_PORT_SUSPEND)
		&& udc->usb_state == USB_STATE_SUSPENDED) {
		handle_bus_resume(udc);
	}

	if (!udc->resume_state)
		udc->usb_state = USB_STATE_DEFAULT;
}
1992
/* USBSTS_ERR: nothing recoverable at this layer; just count it. */
static void irq_process_error(struct mv_udc *udc)
{
	/* Increment the error count */
	udc->errors++;
}
1998
1999 static irqreturn_t mv_udc_irq(int irq, void *dev)
2000 {
2001 struct mv_udc *udc = (struct mv_udc *)dev;
2002 u32 status, intr;
2003
2004 /* Disable ISR when stopped bit is set */
2005 if (udc->stopped)
2006 return IRQ_NONE;
2007
2008 spin_lock(&udc->lock);
2009
2010 status = readl(&udc->op_regs->usbsts);
2011 intr = readl(&udc->op_regs->usbintr);
2012 status &= intr;
2013
2014 if (status == 0) {
2015 spin_unlock(&udc->lock);
2016 return IRQ_NONE;
2017 }
2018
2019 /* Clear all the interrupts occurred */
2020 writel(status, &udc->op_regs->usbsts);
2021
2022 if (status & USBSTS_ERR)
2023 irq_process_error(udc);
2024
2025 if (status & USBSTS_RESET)
2026 irq_process_reset(udc);
2027
2028 if (status & USBSTS_PORT_CHANGE)
2029 irq_process_port_change(udc);
2030
2031 if (status & USBSTS_INT)
2032 irq_process_tr_complete(udc);
2033
2034 if (status & USBSTS_SUSPEND)
2035 irq_process_suspend(udc);
2036
2037 spin_unlock(&udc->lock);
2038
2039 return IRQ_HANDLED;
2040 }
2041
2042 static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)
2043 {
2044 struct mv_udc *udc = (struct mv_udc *)dev;
2045
2046 /* polling VBUS and init phy may cause too much time*/
2047 if (udc->qwork)
2048 queue_work(udc->qwork, &udc->vbus_work);
2049
2050 return IRQ_HANDLED;
2051 }
2052
2053 static void mv_udc_vbus_work(struct work_struct *work)
2054 {
2055 struct mv_udc *udc;
2056 unsigned int vbus;
2057
2058 udc = container_of(work, struct mv_udc, vbus_work);
2059 if (!udc->pdata->vbus)
2060 return;
2061
2062 vbus = udc->pdata->vbus->poll();
2063 dev_info(&udc->dev->dev, "vbus is %d\n", vbus);
2064
2065 if (vbus == VBUS_HIGH)
2066 mv_udc_vbus_session(&udc->gadget, 1);
2067 else if (vbus == VBUS_LOW)
2068 mv_udc_vbus_session(&udc->gadget, 0);
2069 }
2070
2071 /* release device structure */
2072 static void gadget_release(struct device *_dev)
2073 {
2074 struct mv_udc *udc;
2075
2076 udc = dev_get_drvdata(_dev);
2077
2078 complete(udc->done);
2079 }
2080
2081 static int mv_udc_remove(struct platform_device *pdev)
2082 {
2083 struct mv_udc *udc;
2084
2085 udc = platform_get_drvdata(pdev);
2086
2087 usb_del_gadget_udc(&udc->gadget);
2088
2089 if (udc->qwork) {
2090 flush_workqueue(udc->qwork);
2091 destroy_workqueue(udc->qwork);
2092 }
2093
2094 /* free memory allocated in probe */
2095 if (udc->dtd_pool)
2096 dma_pool_destroy(udc->dtd_pool);
2097
2098 if (udc->ep_dqh)
2099 dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
2100 udc->ep_dqh, udc->ep_dqh_dma);
2101
2102 mv_udc_disable(udc);
2103
2104 /* free dev, wait for the release() finished */
2105 wait_for_completion(udc->done);
2106
2107 return 0;
2108 }
2109
2110 static int mv_udc_probe(struct platform_device *pdev)
2111 {
2112 struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
2113 struct mv_udc *udc;
2114 int retval = 0;
2115 struct resource *r;
2116 size_t size;
2117
2118 if (pdata == NULL) {
2119 dev_err(&pdev->dev, "missing platform_data\n");
2120 return -ENODEV;
2121 }
2122
2123 udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
2124 if (udc == NULL)
2125 return -ENOMEM;
2126
2127 udc->done = &release_done;
2128 udc->pdata = dev_get_platdata(&pdev->dev);
2129 spin_lock_init(&udc->lock);
2130
2131 udc->dev = pdev;
2132
2133 if (pdata->mode == MV_USB_MODE_OTG) {
2134 udc->transceiver = devm_usb_get_phy(&pdev->dev,
2135 USB_PHY_TYPE_USB2);
2136 if (IS_ERR(udc->transceiver)) {
2137 retval = PTR_ERR(udc->transceiver);
2138
2139 if (retval == -ENXIO)
2140 return retval;
2141
2142 udc->transceiver = NULL;
2143 return -EPROBE_DEFER;
2144 }
2145 }
2146
2147 /* udc only have one sysclk. */
2148 udc->clk = devm_clk_get(&pdev->dev, NULL);
2149 if (IS_ERR(udc->clk))
2150 return PTR_ERR(udc->clk);
2151
2152 r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
2153 if (r == NULL) {
2154 dev_err(&pdev->dev, "no I/O memory resource defined\n");
2155 return -ENODEV;
2156 }
2157
2158 udc->cap_regs = (struct mv_cap_regs __iomem *)
2159 devm_ioremap(&pdev->dev, r->start, resource_size(r));
2160 if (udc->cap_regs == NULL) {
2161 dev_err(&pdev->dev, "failed to map I/O memory\n");
2162 return -EBUSY;
2163 }
2164
2165 r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
2166 if (r == NULL) {
2167 dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
2168 return -ENODEV;
2169 }
2170
2171 udc->phy_regs = ioremap(r->start, resource_size(r));
2172 if (udc->phy_regs == NULL) {
2173 dev_err(&pdev->dev, "failed to map phy I/O memory\n");
2174 return -EBUSY;
2175 }
2176
2177 /* we will acces controller register, so enable the clk */
2178 retval = mv_udc_enable_internal(udc);
2179 if (retval)
2180 return retval;
2181
2182 udc->op_regs =
2183 (struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
2184 + (readl(&udc->cap_regs->caplength_hciversion)
2185 & CAPLENGTH_MASK));
2186 udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;
2187
2188 /*
2189 * some platform will use usb to download image, it may not disconnect
2190 * usb gadget before loading kernel. So first stop udc here.
2191 */
2192 udc_stop(udc);
2193 writel(0xFFFFFFFF, &udc->op_regs->usbsts);
2194
2195 size = udc->max_eps * sizeof(struct mv_dqh) *2;
2196 size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
2197 udc->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
2198 &udc->ep_dqh_dma, GFP_KERNEL);
2199
2200 if (udc->ep_dqh == NULL) {
2201 dev_err(&pdev->dev, "allocate dQH memory failed\n");
2202 retval = -ENOMEM;
2203 goto err_disable_clock;
2204 }
2205 udc->ep_dqh_size = size;
2206
2207 /* create dTD dma_pool resource */
2208 udc->dtd_pool = dma_pool_create("mv_dtd",
2209 &pdev->dev,
2210 sizeof(struct mv_dtd),
2211 DTD_ALIGNMENT,
2212 DMA_BOUNDARY);
2213
2214 if (!udc->dtd_pool) {
2215 retval = -ENOMEM;
2216 goto err_free_dma;
2217 }
2218
2219 size = udc->max_eps * sizeof(struct mv_ep) *2;
2220 udc->eps = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
2221 if (udc->eps == NULL) {
2222 retval = -ENOMEM;
2223 goto err_destroy_dma;
2224 }
2225
2226 /* initialize ep0 status request structure */
2227 udc->status_req = devm_kzalloc(&pdev->dev, sizeof(struct mv_req),
2228 GFP_KERNEL);
2229 if (!udc->status_req) {
2230 retval = -ENOMEM;
2231 goto err_destroy_dma;
2232 }
2233 INIT_LIST_HEAD(&udc->status_req->queue);
2234
2235 /* allocate a small amount of memory to get valid address */
2236 udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
2237 udc->status_req->req.dma = DMA_ADDR_INVALID;
2238
2239 udc->resume_state = USB_STATE_NOTATTACHED;
2240 udc->usb_state = USB_STATE_POWERED;
2241 udc->ep0_dir = EP_DIR_OUT;
2242 udc->remote_wakeup = 0;
2243
2244 r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
2245 if (r == NULL) {
2246 dev_err(&pdev->dev, "no IRQ resource defined\n");
2247 retval = -ENODEV;
2248 goto err_destroy_dma;
2249 }
2250 udc->irq = r->start;
2251 if (devm_request_irq(&pdev->dev, udc->irq, mv_udc_irq,
2252 IRQF_SHARED, driver_name, udc)) {
2253 dev_err(&pdev->dev, "Request irq %d for UDC failed\n",
2254 udc->irq);
2255 retval = -ENODEV;
2256 goto err_destroy_dma;
2257 }
2258
2259 /* initialize gadget structure */
2260 udc->gadget.ops = &mv_ops; /* usb_gadget_ops */
2261 udc->gadget.ep0 = &udc->eps[0].ep; /* gadget ep0 */
2262 INIT_LIST_HEAD(&udc->gadget.ep_list); /* ep_list */
2263 udc->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
2264 udc->gadget.max_speed = USB_SPEED_HIGH; /* support dual speed */
2265
2266 /* the "gadget" abstracts/virtualizes the controller */
2267 udc->gadget.name = driver_name; /* gadget name */
2268
2269 eps_init(udc);
2270
2271 /* VBUS detect: we can disable/enable clock on demand.*/
2272 if (udc->transceiver)
2273 udc->clock_gating = 1;
2274 else if (pdata->vbus) {
2275 udc->clock_gating = 1;
2276 retval = devm_request_threaded_irq(&pdev->dev,
2277 pdata->vbus->irq, NULL,
2278 mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
2279 if (retval) {
2280 dev_info(&pdev->dev,
2281 "Can not request irq for VBUS, "
2282 "disable clock gating\n");
2283 udc->clock_gating = 0;
2284 }
2285
2286 udc->qwork = create_singlethread_workqueue("mv_udc_queue");
2287 if (!udc->qwork) {
2288 dev_err(&pdev->dev, "cannot create workqueue\n");
2289 retval = -ENOMEM;
2290 goto err_destroy_dma;
2291 }
2292
2293 INIT_WORK(&udc->vbus_work, mv_udc_vbus_work);
2294 }
2295
2296 /*
2297 * When clock gating is supported, we can disable clk and phy.
2298 * If not, it means that VBUS detection is not supported, we
2299 * have to enable vbus active all the time to let controller work.
2300 */
2301 if (udc->clock_gating)
2302 mv_udc_disable_internal(udc);
2303 else
2304 udc->vbus_active = 1;
2305
2306 retval = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
2307 gadget_release);
2308 if (retval)
2309 goto err_create_workqueue;
2310
2311 platform_set_drvdata(pdev, udc);
2312 dev_info(&pdev->dev, "successful probe UDC device %s clock gating.\n",
2313 udc->clock_gating ? "with" : "without");
2314
2315 return 0;
2316
2317 err_create_workqueue:
2318 destroy_workqueue(udc->qwork);
2319 err_destroy_dma:
2320 dma_pool_destroy(udc->dtd_pool);
2321 err_free_dma:
2322 dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
2323 udc->ep_dqh, udc->ep_dqh_dma);
2324 err_disable_clock:
2325 mv_udc_disable_internal(udc);
2326
2327 return retval;
2328 }
2329
2330 #ifdef CONFIG_PM
2331 static int mv_udc_suspend(struct device *dev)
2332 {
2333 struct mv_udc *udc;
2334
2335 udc = dev_get_drvdata(dev);
2336
2337 /* if OTG is enabled, the following will be done in OTG driver*/
2338 if (udc->transceiver)
2339 return 0;
2340
2341 if (udc->pdata->vbus && udc->pdata->vbus->poll)
2342 if (udc->pdata->vbus->poll() == VBUS_HIGH) {
2343 dev_info(&udc->dev->dev, "USB cable is connected!\n");
2344 return -EAGAIN;
2345 }
2346
2347 /*
2348 * only cable is unplugged, udc can suspend.
2349 * So do not care about clock_gating == 1.
2350 */
2351 if (!udc->clock_gating) {
2352 udc_stop(udc);
2353
2354 spin_lock_irq(&udc->lock);
2355 /* stop all usb activities */
2356 stop_activity(udc, udc->driver);
2357 spin_unlock_irq(&udc->lock);
2358
2359 mv_udc_disable_internal(udc);
2360 }
2361
2362 return 0;
2363 }
2364
2365 static int mv_udc_resume(struct device *dev)
2366 {
2367 struct mv_udc *udc;
2368 int retval;
2369
2370 udc = dev_get_drvdata(dev);
2371
2372 /* if OTG is enabled, the following will be done in OTG driver*/
2373 if (udc->transceiver)
2374 return 0;
2375
2376 if (!udc->clock_gating) {
2377 retval = mv_udc_enable_internal(udc);
2378 if (retval)
2379 return retval;
2380
2381 if (udc->driver && udc->softconnect) {
2382 udc_reset(udc);
2383 ep0_reset(udc);
2384 udc_start(udc);
2385 }
2386 }
2387
2388 return 0;
2389 }
2390
2391 static const struct dev_pm_ops mv_udc_pm_ops = {
2392 .suspend = mv_udc_suspend,
2393 .resume = mv_udc_resume,
2394 };
2395 #endif
2396
2397 static void mv_udc_shutdown(struct platform_device *pdev)
2398 {
2399 struct mv_udc *udc;
2400 u32 mode;
2401
2402 udc = platform_get_drvdata(pdev);
2403 /* reset controller mode to IDLE */
2404 mv_udc_enable(udc);
2405 mode = readl(&udc->op_regs->usbmode);
2406 mode &= ~3;
2407 writel(mode, &udc->op_regs->usbmode);
2408 mv_udc_disable(udc);
2409 }
2410
2411 static struct platform_driver udc_driver = {
2412 .probe = mv_udc_probe,
2413 .remove = mv_udc_remove,
2414 .shutdown = mv_udc_shutdown,
2415 .driver = {
2416 .name = "mv-udc",
2417 #ifdef CONFIG_PM
2418 .pm = &mv_udc_pm_ops,
2419 #endif
2420 },
2421 };
2422
2423 module_platform_driver(udc_driver);
2424 MODULE_ALIAS("platform:mv-udc");
2425 MODULE_DESCRIPTION(DRIVER_DESC);
2426 MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
2427 MODULE_VERSION(DRIVER_VERSION);
2428 MODULE_LICENSE("GPL");
2429
2430
2431
2432
2433
2434 /* LDV_COMMENT_BEGIN_MAIN */
2435 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
2436
2437 /*###########################################################################*/
2438
2439 /*############## Driver Environment Generator 0.2 output ####################*/
2440
2441 /*###########################################################################*/
2442
2443
2444
2445 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
2446 void ldv_check_final_state(void);
2447
2448 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
2449 void ldv_check_return_value(int res);
2450
2451 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
2452 void ldv_check_return_value_probe(int res);
2453
2454 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
2455 void ldv_initialize(void);
2456
2457 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
2458 void ldv_handler_precall(void);
2459
2460 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */
2461 int nondet_int(void);
2462
2463 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
2464 int LDV_IN_INTERRUPT;
2465
2466 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
2467 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
2468
2469
2470
2471 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
2472 /*============================= VARIABLE DECLARATION PART =============================*/
2473 /** STRUCT: struct type: usb_ep_ops, struct name: mv_ep_ops **/
2474 /* content: static int mv_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)*/
2475 /* LDV_COMMENT_BEGIN_PREP */
2476 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2477 #define DRIVER_VERSION "8 Nov 2010"
2478 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2479 ((ep)->udc->ep0_dir) : ((ep)->direction))
2480 #define RESET_TIMEOUT 10000
2481 #define FLUSH_TIMEOUT 10000
2482 #define EPSTATUS_TIMEOUT 10000
2483 #define PRIME_TIMEOUT 10000
2484 #define READSAFE_TIMEOUT 1000
2485 #define LOOPS_USEC_SHIFT 1
2486 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2487 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2488 /* LDV_COMMENT_END_PREP */
2489 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_ep_enable" */
2490 struct usb_ep * var_group1;
2491 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_ep_enable" */
2492 const struct usb_endpoint_descriptor * var_mv_ep_enable_6_p1;
2493 /* LDV_COMMENT_BEGIN_PREP */
2494 #ifdef CONFIG_PM
2495 #endif
2496 #ifdef CONFIG_PM
2497 #endif
2498 /* LDV_COMMENT_END_PREP */
2499 /* content: static int mv_ep_disable(struct usb_ep *_ep)*/
2500 /* LDV_COMMENT_BEGIN_PREP */
2501 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2502 #define DRIVER_VERSION "8 Nov 2010"
2503 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2504 ((ep)->udc->ep0_dir) : ((ep)->direction))
2505 #define RESET_TIMEOUT 10000
2506 #define FLUSH_TIMEOUT 10000
2507 #define EPSTATUS_TIMEOUT 10000
2508 #define PRIME_TIMEOUT 10000
2509 #define READSAFE_TIMEOUT 1000
2510 #define LOOPS_USEC_SHIFT 1
2511 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2512 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2513 /* LDV_COMMENT_END_PREP */
2514 /* LDV_COMMENT_BEGIN_PREP */
2515 #ifdef CONFIG_PM
2516 #endif
2517 #ifdef CONFIG_PM
2518 #endif
2519 /* LDV_COMMENT_END_PREP */
2520 /* content: static struct usb_request * mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)*/
2521 /* LDV_COMMENT_BEGIN_PREP */
2522 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2523 #define DRIVER_VERSION "8 Nov 2010"
2524 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2525 ((ep)->udc->ep0_dir) : ((ep)->direction))
2526 #define RESET_TIMEOUT 10000
2527 #define FLUSH_TIMEOUT 10000
2528 #define EPSTATUS_TIMEOUT 10000
2529 #define PRIME_TIMEOUT 10000
2530 #define READSAFE_TIMEOUT 1000
2531 #define LOOPS_USEC_SHIFT 1
2532 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2533 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2534 /* LDV_COMMENT_END_PREP */
2535 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_alloc_request" */
2536 gfp_t var_mv_alloc_request_8_p1;
2537 /* LDV_COMMENT_BEGIN_PREP */
2538 #ifdef CONFIG_PM
2539 #endif
2540 #ifdef CONFIG_PM
2541 #endif
2542 /* LDV_COMMENT_END_PREP */
2543 /* content: static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)*/
2544 /* LDV_COMMENT_BEGIN_PREP */
2545 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2546 #define DRIVER_VERSION "8 Nov 2010"
2547 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2548 ((ep)->udc->ep0_dir) : ((ep)->direction))
2549 #define RESET_TIMEOUT 10000
2550 #define FLUSH_TIMEOUT 10000
2551 #define EPSTATUS_TIMEOUT 10000
2552 #define PRIME_TIMEOUT 10000
2553 #define READSAFE_TIMEOUT 1000
2554 #define LOOPS_USEC_SHIFT 1
2555 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2556 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2557 /* LDV_COMMENT_END_PREP */
2558 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_free_request" */
2559 struct usb_request * var_group2;
2560 /* LDV_COMMENT_BEGIN_PREP */
2561 #ifdef CONFIG_PM
2562 #endif
2563 #ifdef CONFIG_PM
2564 #endif
2565 /* LDV_COMMENT_END_PREP */
2566 /* content: static int mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)*/
2567 /* LDV_COMMENT_BEGIN_PREP */
2568 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2569 #define DRIVER_VERSION "8 Nov 2010"
2570 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2571 ((ep)->udc->ep0_dir) : ((ep)->direction))
2572 #define RESET_TIMEOUT 10000
2573 #define FLUSH_TIMEOUT 10000
2574 #define EPSTATUS_TIMEOUT 10000
2575 #define PRIME_TIMEOUT 10000
2576 #define READSAFE_TIMEOUT 1000
2577 #define LOOPS_USEC_SHIFT 1
2578 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2579 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2580 /* LDV_COMMENT_END_PREP */
2581 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_ep_queue" */
2582 gfp_t var_mv_ep_queue_11_p2;
2583 /* LDV_COMMENT_BEGIN_PREP */
2584 #ifdef CONFIG_PM
2585 #endif
2586 #ifdef CONFIG_PM
2587 #endif
2588 /* LDV_COMMENT_END_PREP */
2589 /* content: static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)*/
2590 /* LDV_COMMENT_BEGIN_PREP */
2591 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2592 #define DRIVER_VERSION "8 Nov 2010"
2593 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2594 ((ep)->udc->ep0_dir) : ((ep)->direction))
2595 #define RESET_TIMEOUT 10000
2596 #define FLUSH_TIMEOUT 10000
2597 #define EPSTATUS_TIMEOUT 10000
2598 #define PRIME_TIMEOUT 10000
2599 #define READSAFE_TIMEOUT 1000
2600 #define LOOPS_USEC_SHIFT 1
2601 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2602 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2603 /* LDV_COMMENT_END_PREP */
2604 /* LDV_COMMENT_BEGIN_PREP */
2605 #ifdef CONFIG_PM
2606 #endif
2607 #ifdef CONFIG_PM
2608 #endif
2609 /* LDV_COMMENT_END_PREP */
2610 /* content: static int mv_ep_set_wedge(struct usb_ep *_ep)*/
2611 /* LDV_COMMENT_BEGIN_PREP */
2612 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2613 #define DRIVER_VERSION "8 Nov 2010"
2614 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2615 ((ep)->udc->ep0_dir) : ((ep)->direction))
2616 #define RESET_TIMEOUT 10000
2617 #define FLUSH_TIMEOUT 10000
2618 #define EPSTATUS_TIMEOUT 10000
2619 #define PRIME_TIMEOUT 10000
2620 #define READSAFE_TIMEOUT 1000
2621 #define LOOPS_USEC_SHIFT 1
2622 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2623 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2624 /* LDV_COMMENT_END_PREP */
2625 /* LDV_COMMENT_BEGIN_PREP */
2626 #ifdef CONFIG_PM
2627 #endif
2628 #ifdef CONFIG_PM
2629 #endif
2630 /* LDV_COMMENT_END_PREP */
2631 /* content: static int mv_ep_set_halt(struct usb_ep *_ep, int halt)*/
2632 /* LDV_COMMENT_BEGIN_PREP */
2633 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2634 #define DRIVER_VERSION "8 Nov 2010"
2635 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2636 ((ep)->udc->ep0_dir) : ((ep)->direction))
2637 #define RESET_TIMEOUT 10000
2638 #define FLUSH_TIMEOUT 10000
2639 #define EPSTATUS_TIMEOUT 10000
2640 #define PRIME_TIMEOUT 10000
2641 #define READSAFE_TIMEOUT 1000
2642 #define LOOPS_USEC_SHIFT 1
2643 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2644 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2645 /* LDV_COMMENT_END_PREP */
2646 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_ep_set_halt" */
2647 int var_mv_ep_set_halt_17_p1;
2648 /* LDV_COMMENT_BEGIN_PREP */
2649 #ifdef CONFIG_PM
2650 #endif
2651 #ifdef CONFIG_PM
2652 #endif
2653 /* LDV_COMMENT_END_PREP */
2654 /* content: static void mv_ep_fifo_flush(struct usb_ep *_ep)*/
2655 /* LDV_COMMENT_BEGIN_PREP */
2656 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2657 #define DRIVER_VERSION "8 Nov 2010"
2658 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2659 ((ep)->udc->ep0_dir) : ((ep)->direction))
2660 #define RESET_TIMEOUT 10000
2661 #define FLUSH_TIMEOUT 10000
2662 #define EPSTATUS_TIMEOUT 10000
2663 #define PRIME_TIMEOUT 10000
2664 #define READSAFE_TIMEOUT 1000
2665 #define LOOPS_USEC_SHIFT 1
2666 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2667 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2668 /* LDV_COMMENT_END_PREP */
2669 /* LDV_COMMENT_BEGIN_PREP */
2670 #ifdef CONFIG_PM
2671 #endif
2672 #ifdef CONFIG_PM
2673 #endif
2674 /* LDV_COMMENT_END_PREP */
2675
2676 /** STRUCT: struct type: usb_gadget_ops, struct name: mv_ops **/
2677 /* content: static int mv_udc_get_frame(struct usb_gadget *gadget)*/
2678 /* LDV_COMMENT_BEGIN_PREP */
2679 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2680 #define DRIVER_VERSION "8 Nov 2010"
2681 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2682 ((ep)->udc->ep0_dir) : ((ep)->direction))
2683 #define RESET_TIMEOUT 10000
2684 #define FLUSH_TIMEOUT 10000
2685 #define EPSTATUS_TIMEOUT 10000
2686 #define PRIME_TIMEOUT 10000
2687 #define READSAFE_TIMEOUT 1000
2688 #define LOOPS_USEC_SHIFT 1
2689 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2690 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2691 /* LDV_COMMENT_END_PREP */
2692 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_udc_get_frame" */
2693 struct usb_gadget * var_group3;
2694 /* LDV_COMMENT_BEGIN_PREP */
2695 #ifdef CONFIG_PM
2696 #endif
2697 #ifdef CONFIG_PM
2698 #endif
2699 /* LDV_COMMENT_END_PREP */
2700 /* content: static int mv_udc_wakeup(struct usb_gadget *gadget)*/
2701 /* LDV_COMMENT_BEGIN_PREP */
2702 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2703 #define DRIVER_VERSION "8 Nov 2010"
2704 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2705 ((ep)->udc->ep0_dir) : ((ep)->direction))
2706 #define RESET_TIMEOUT 10000
2707 #define FLUSH_TIMEOUT 10000
2708 #define EPSTATUS_TIMEOUT 10000
2709 #define PRIME_TIMEOUT 10000
2710 #define READSAFE_TIMEOUT 1000
2711 #define LOOPS_USEC_SHIFT 1
2712 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2713 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2714 /* LDV_COMMENT_END_PREP */
2715 /* LDV_COMMENT_BEGIN_PREP */
2716 #ifdef CONFIG_PM
2717 #endif
2718 #ifdef CONFIG_PM
2719 #endif
2720 /* LDV_COMMENT_END_PREP */
2721 /* content: static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)*/
2722 /* LDV_COMMENT_BEGIN_PREP */
2723 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2724 #define DRIVER_VERSION "8 Nov 2010"
2725 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2726 ((ep)->udc->ep0_dir) : ((ep)->direction))
2727 #define RESET_TIMEOUT 10000
2728 #define FLUSH_TIMEOUT 10000
2729 #define EPSTATUS_TIMEOUT 10000
2730 #define PRIME_TIMEOUT 10000
2731 #define READSAFE_TIMEOUT 1000
2732 #define LOOPS_USEC_SHIFT 1
2733 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2734 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2735 /* LDV_COMMENT_END_PREP */
2736 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_udc_vbus_session" */
2737 int var_mv_udc_vbus_session_30_p1;
2738 /* LDV_COMMENT_BEGIN_PREP */
2739 #ifdef CONFIG_PM
2740 #endif
2741 #ifdef CONFIG_PM
2742 #endif
2743 /* LDV_COMMENT_END_PREP */
2744 /* content: static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)*/
2745 /* LDV_COMMENT_BEGIN_PREP */
2746 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2747 #define DRIVER_VERSION "8 Nov 2010"
2748 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2749 ((ep)->udc->ep0_dir) : ((ep)->direction))
2750 #define RESET_TIMEOUT 10000
2751 #define FLUSH_TIMEOUT 10000
2752 #define EPSTATUS_TIMEOUT 10000
2753 #define PRIME_TIMEOUT 10000
2754 #define READSAFE_TIMEOUT 1000
2755 #define LOOPS_USEC_SHIFT 1
2756 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2757 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2758 /* LDV_COMMENT_END_PREP */
2759 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_udc_pullup" */
2760 int var_mv_udc_pullup_31_p1;
2761 /* LDV_COMMENT_BEGIN_PREP */
2762 #ifdef CONFIG_PM
2763 #endif
2764 #ifdef CONFIG_PM
2765 #endif
2766 /* LDV_COMMENT_END_PREP */
2767 /* content: static int mv_udc_start(struct usb_gadget *gadget, struct usb_gadget_driver *driver)*/
2768 /* LDV_COMMENT_BEGIN_PREP */
2769 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2770 #define DRIVER_VERSION "8 Nov 2010"
2771 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2772 ((ep)->udc->ep0_dir) : ((ep)->direction))
2773 #define RESET_TIMEOUT 10000
2774 #define FLUSH_TIMEOUT 10000
2775 #define EPSTATUS_TIMEOUT 10000
2776 #define PRIME_TIMEOUT 10000
2777 #define READSAFE_TIMEOUT 1000
2778 #define LOOPS_USEC_SHIFT 1
2779 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2780 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2781 /* LDV_COMMENT_END_PREP */
2782 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_udc_start" */
2783 struct usb_gadget_driver * var_group4;
2784 /* LDV_COMMENT_BEGIN_PREP */
2785 #ifdef CONFIG_PM
2786 #endif
2787 #ifdef CONFIG_PM
2788 #endif
2789 /* LDV_COMMENT_END_PREP */
2790 /* content: static int mv_udc_stop(struct usb_gadget *gadget)*/
2791 /* LDV_COMMENT_BEGIN_PREP */
2792 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2793 #define DRIVER_VERSION "8 Nov 2010"
2794 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2795 ((ep)->udc->ep0_dir) : ((ep)->direction))
2796 #define RESET_TIMEOUT 10000
2797 #define FLUSH_TIMEOUT 10000
2798 #define EPSTATUS_TIMEOUT 10000
2799 #define PRIME_TIMEOUT 10000
2800 #define READSAFE_TIMEOUT 1000
2801 #define LOOPS_USEC_SHIFT 1
2802 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2803 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2804 /* LDV_COMMENT_END_PREP */
2805 /* LDV_COMMENT_BEGIN_PREP */
2806 #ifdef CONFIG_PM
2807 #endif
2808 #ifdef CONFIG_PM
2809 #endif
2810 /* LDV_COMMENT_END_PREP */
2811
2812 /** STRUCT: struct type: dev_pm_ops, struct name: mv_udc_pm_ops **/
2813 /* content: static int mv_udc_suspend(struct device *dev)*/
2814 /* LDV_COMMENT_BEGIN_PREP */
2815 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2816 #define DRIVER_VERSION "8 Nov 2010"
2817 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2818 ((ep)->udc->ep0_dir) : ((ep)->direction))
2819 #define RESET_TIMEOUT 10000
2820 #define FLUSH_TIMEOUT 10000
2821 #define EPSTATUS_TIMEOUT 10000
2822 #define PRIME_TIMEOUT 10000
2823 #define READSAFE_TIMEOUT 1000
2824 #define LOOPS_USEC_SHIFT 1
2825 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2826 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2827 #ifdef CONFIG_PM
2828 /* LDV_COMMENT_END_PREP */
2829 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_udc_suspend" */
2830 struct device * var_group5;
2831 /* LDV_COMMENT_BEGIN_PREP */
2832 #endif
2833 #ifdef CONFIG_PM
2834 #endif
2835 /* LDV_COMMENT_END_PREP */
2836 /* content: static int mv_udc_resume(struct device *dev)*/
2837 /* LDV_COMMENT_BEGIN_PREP */
2838 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2839 #define DRIVER_VERSION "8 Nov 2010"
2840 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2841 ((ep)->udc->ep0_dir) : ((ep)->direction))
2842 #define RESET_TIMEOUT 10000
2843 #define FLUSH_TIMEOUT 10000
2844 #define EPSTATUS_TIMEOUT 10000
2845 #define PRIME_TIMEOUT 10000
2846 #define READSAFE_TIMEOUT 1000
2847 #define LOOPS_USEC_SHIFT 1
2848 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2849 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2850 #ifdef CONFIG_PM
2851 /* LDV_COMMENT_END_PREP */
2852 /* LDV_COMMENT_BEGIN_PREP */
2853 #endif
2854 #ifdef CONFIG_PM
2855 #endif
2856 /* LDV_COMMENT_END_PREP */
2857
2858 /** STRUCT: struct type: platform_driver, struct name: udc_driver **/
2859 /* content: static int mv_udc_probe(struct platform_device *pdev)*/
2860 /* LDV_COMMENT_BEGIN_PREP */
2861 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2862 #define DRIVER_VERSION "8 Nov 2010"
2863 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2864 ((ep)->udc->ep0_dir) : ((ep)->direction))
2865 #define RESET_TIMEOUT 10000
2866 #define FLUSH_TIMEOUT 10000
2867 #define EPSTATUS_TIMEOUT 10000
2868 #define PRIME_TIMEOUT 10000
2869 #define READSAFE_TIMEOUT 1000
2870 #define LOOPS_USEC_SHIFT 1
2871 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2872 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2873 /* LDV_COMMENT_END_PREP */
2874 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_udc_probe" */
2875 struct platform_device * var_group6;
2876 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "mv_udc_probe" */
2877 static int res_mv_udc_probe_59;
2878 /* LDV_COMMENT_BEGIN_PREP */
2879 #ifdef CONFIG_PM
2880 #endif
2881 #ifdef CONFIG_PM
2882 #endif
2883 /* LDV_COMMENT_END_PREP */
2884 /* content: static int mv_udc_remove(struct platform_device *pdev)*/
2885 /* LDV_COMMENT_BEGIN_PREP */
2886 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2887 #define DRIVER_VERSION "8 Nov 2010"
2888 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2889 ((ep)->udc->ep0_dir) : ((ep)->direction))
2890 #define RESET_TIMEOUT 10000
2891 #define FLUSH_TIMEOUT 10000
2892 #define EPSTATUS_TIMEOUT 10000
2893 #define PRIME_TIMEOUT 10000
2894 #define READSAFE_TIMEOUT 1000
2895 #define LOOPS_USEC_SHIFT 1
2896 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2897 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2898 /* LDV_COMMENT_END_PREP */
2899 /* LDV_COMMENT_BEGIN_PREP */
2900 #ifdef CONFIG_PM
2901 #endif
2902 #ifdef CONFIG_PM
2903 #endif
2904 /* LDV_COMMENT_END_PREP */
2905 /* content: static void mv_udc_shutdown(struct platform_device *pdev)*/
2906 /* LDV_COMMENT_BEGIN_PREP */
2907 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2908 #define DRIVER_VERSION "8 Nov 2010"
2909 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2910 ((ep)->udc->ep0_dir) : ((ep)->direction))
2911 #define RESET_TIMEOUT 10000
2912 #define FLUSH_TIMEOUT 10000
2913 #define EPSTATUS_TIMEOUT 10000
2914 #define PRIME_TIMEOUT 10000
2915 #define READSAFE_TIMEOUT 1000
2916 #define LOOPS_USEC_SHIFT 1
2917 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2918 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2919 #ifdef CONFIG_PM
2920 #endif
2921 /* LDV_COMMENT_END_PREP */
2922 /* LDV_COMMENT_BEGIN_PREP */
2923 #ifdef CONFIG_PM
2924 #endif
2925 /* LDV_COMMENT_END_PREP */
2926
2927 /** CALLBACK SECTION request_irq **/
2928 /* content: static irqreturn_t mv_udc_irq(int irq, void *dev)*/
2929 /* LDV_COMMENT_BEGIN_PREP */
2930 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2931 #define DRIVER_VERSION "8 Nov 2010"
2932 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2933 ((ep)->udc->ep0_dir) : ((ep)->direction))
2934 #define RESET_TIMEOUT 10000
2935 #define FLUSH_TIMEOUT 10000
2936 #define EPSTATUS_TIMEOUT 10000
2937 #define PRIME_TIMEOUT 10000
2938 #define READSAFE_TIMEOUT 1000
2939 #define LOOPS_USEC_SHIFT 1
2940 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2941 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2942 /* LDV_COMMENT_END_PREP */
2943 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_udc_irq" */
2944 int var_mv_udc_irq_54_p0;
2945 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_udc_irq" */
2946 void * var_mv_udc_irq_54_p1;
2947 /* LDV_COMMENT_BEGIN_PREP */
2948 #ifdef CONFIG_PM
2949 #endif
2950 #ifdef CONFIG_PM
2951 #endif
2952 /* LDV_COMMENT_END_PREP */
2953 /* content: static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)*/
2954 /* LDV_COMMENT_BEGIN_PREP */
2955 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2956 #define DRIVER_VERSION "8 Nov 2010"
2957 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2958 ((ep)->udc->ep0_dir) : ((ep)->direction))
2959 #define RESET_TIMEOUT 10000
2960 #define FLUSH_TIMEOUT 10000
2961 #define EPSTATUS_TIMEOUT 10000
2962 #define PRIME_TIMEOUT 10000
2963 #define READSAFE_TIMEOUT 1000
2964 #define LOOPS_USEC_SHIFT 1
2965 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2966 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2967 /* LDV_COMMENT_END_PREP */
2968 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_udc_vbus_irq" */
2969 int var_mv_udc_vbus_irq_55_p0;
2970 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_udc_vbus_irq" */
2971 void * var_mv_udc_vbus_irq_55_p1;
2972 /* LDV_COMMENT_BEGIN_PREP */
2973 #ifdef CONFIG_PM
2974 #endif
2975 #ifdef CONFIG_PM
2976 #endif
2977 /* LDV_COMMENT_END_PREP */
2978
2979
2980
2981
2982 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
2983 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
2984 /*============================= VARIABLE INITIALIZING PART =============================*/
2985 LDV_IN_INTERRUPT=1;
2986
2987
2988
2989
2990 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
2991 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
2992 /*============================= FUNCTION CALL SECTION =============================*/
2993 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
2994 ldv_initialize();
2995
2996
2997
2998
2999
3000
3001 int ldv_s_udc_driver_platform_driver = 0;
3002
3003
3004
3005
3006 while( nondet_int()
3007 || !(ldv_s_udc_driver_platform_driver == 0)
3008 ) {
3009
3010 switch(nondet_int()) {
3011
3012 case 0: {
3013
3014 /** STRUCT: struct type: usb_ep_ops, struct name: mv_ep_ops **/
3015
3016
3017 /* content: static int mv_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)*/
3018 /* LDV_COMMENT_BEGIN_PREP */
3019 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3020 #define DRIVER_VERSION "8 Nov 2010"
3021 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3022 ((ep)->udc->ep0_dir) : ((ep)->direction))
3023 #define RESET_TIMEOUT 10000
3024 #define FLUSH_TIMEOUT 10000
3025 #define EPSTATUS_TIMEOUT 10000
3026 #define PRIME_TIMEOUT 10000
3027 #define READSAFE_TIMEOUT 1000
3028 #define LOOPS_USEC_SHIFT 1
3029 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3030 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3031 /* LDV_COMMENT_END_PREP */
3032 /* LDV_COMMENT_FUNCTION_CALL Function from field "enable" from driver structure with callbacks "mv_ep_ops" */
3033 ldv_handler_precall();
3034 mv_ep_enable( var_group1, var_mv_ep_enable_6_p1);
3035 /* LDV_COMMENT_BEGIN_PREP */
3036 #ifdef CONFIG_PM
3037 #endif
3038 #ifdef CONFIG_PM
3039 #endif
3040 /* LDV_COMMENT_END_PREP */
3041
3042
3043
3044
3045 }
3046
3047 break;
3048 case 1: {
3049
3050 /** STRUCT: struct type: usb_ep_ops, struct name: mv_ep_ops **/
3051
3052
3053 /* content: static int mv_ep_disable(struct usb_ep *_ep)*/
3054 /* LDV_COMMENT_BEGIN_PREP */
3055 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3056 #define DRIVER_VERSION "8 Nov 2010"
3057 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3058 ((ep)->udc->ep0_dir) : ((ep)->direction))
3059 #define RESET_TIMEOUT 10000
3060 #define FLUSH_TIMEOUT 10000
3061 #define EPSTATUS_TIMEOUT 10000
3062 #define PRIME_TIMEOUT 10000
3063 #define READSAFE_TIMEOUT 1000
3064 #define LOOPS_USEC_SHIFT 1
3065 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3066 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3067 /* LDV_COMMENT_END_PREP */
3068 /* LDV_COMMENT_FUNCTION_CALL Function from field "disable" from driver structure with callbacks "mv_ep_ops" */
3069 ldv_handler_precall();
3070 mv_ep_disable( var_group1);
3071 /* LDV_COMMENT_BEGIN_PREP */
3072 #ifdef CONFIG_PM
3073 #endif
3074 #ifdef CONFIG_PM
3075 #endif
3076 /* LDV_COMMENT_END_PREP */
3077
3078
3079
3080
3081 }
3082
3083 break;
3084 case 2: {
3085
3086 /** STRUCT: struct type: usb_ep_ops, struct name: mv_ep_ops **/
3087
3088
3089 /* content: static struct usb_request * mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)*/
3090 /* LDV_COMMENT_BEGIN_PREP */
3091 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3092 #define DRIVER_VERSION "8 Nov 2010"
3093 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3094 ((ep)->udc->ep0_dir) : ((ep)->direction))
3095 #define RESET_TIMEOUT 10000
3096 #define FLUSH_TIMEOUT 10000
3097 #define EPSTATUS_TIMEOUT 10000
3098 #define PRIME_TIMEOUT 10000
3099 #define READSAFE_TIMEOUT 1000
3100 #define LOOPS_USEC_SHIFT 1
3101 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3102 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3103 /* LDV_COMMENT_END_PREP */
3104 /* LDV_COMMENT_FUNCTION_CALL Function from field "alloc_request" from driver structure with callbacks "mv_ep_ops" */
3105 ldv_handler_precall();
3106 mv_alloc_request( var_group1, var_mv_alloc_request_8_p1);
3107 /* LDV_COMMENT_BEGIN_PREP */
3108 #ifdef CONFIG_PM
3109 #endif
3110 #ifdef CONFIG_PM
3111 #endif
3112 /* LDV_COMMENT_END_PREP */
3113
3114
3115
3116
3117 }
3118
3119 break;
3120 case 3: {
3121
3122 /** STRUCT: struct type: usb_ep_ops, struct name: mv_ep_ops **/
3123
3124
3125 /* content: static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)*/
3126 /* LDV_COMMENT_BEGIN_PREP */
3127 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3128 #define DRIVER_VERSION "8 Nov 2010"
3129 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3130 ((ep)->udc->ep0_dir) : ((ep)->direction))
3131 #define RESET_TIMEOUT 10000
3132 #define FLUSH_TIMEOUT 10000
3133 #define EPSTATUS_TIMEOUT 10000
3134 #define PRIME_TIMEOUT 10000
3135 #define READSAFE_TIMEOUT 1000
3136 #define LOOPS_USEC_SHIFT 1
3137 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3138 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3139 /* LDV_COMMENT_END_PREP */
3140 /* LDV_COMMENT_FUNCTION_CALL Function from field "free_request" from driver structure with callbacks "mv_ep_ops" */
3141 ldv_handler_precall();
3142 mv_free_request( var_group1, var_group2);
3143 /* LDV_COMMENT_BEGIN_PREP */
3144 #ifdef CONFIG_PM
3145 #endif
3146 #ifdef CONFIG_PM
3147 #endif
3148 /* LDV_COMMENT_END_PREP */
3149
3150
3151
3152
3153 }
3154
3155 break;
3156 case 4: {
3157
3158 /** STRUCT: struct type: usb_ep_ops, struct name: mv_ep_ops **/
3159
3160
3161 /* content: static int mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)*/
3162 /* LDV_COMMENT_BEGIN_PREP */
3163 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3164 #define DRIVER_VERSION "8 Nov 2010"
3165 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3166 ((ep)->udc->ep0_dir) : ((ep)->direction))
3167 #define RESET_TIMEOUT 10000
3168 #define FLUSH_TIMEOUT 10000
3169 #define EPSTATUS_TIMEOUT 10000
3170 #define PRIME_TIMEOUT 10000
3171 #define READSAFE_TIMEOUT 1000
3172 #define LOOPS_USEC_SHIFT 1
3173 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3174 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3175 /* LDV_COMMENT_END_PREP */
3176 /* LDV_COMMENT_FUNCTION_CALL Function from field "queue" from driver structure with callbacks "mv_ep_ops" */
3177 ldv_handler_precall();
3178 mv_ep_queue( var_group1, var_group2, var_mv_ep_queue_11_p2);
3179 /* LDV_COMMENT_BEGIN_PREP */
3180 #ifdef CONFIG_PM
3181 #endif
3182 #ifdef CONFIG_PM
3183 #endif
3184 /* LDV_COMMENT_END_PREP */
3185
3186
3187
3188
3189 }
3190
3191 break;
3192 case 5: {
3193
3194 /** STRUCT: struct type: usb_ep_ops, struct name: mv_ep_ops **/
3195
3196
3197 /* content: static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)*/
3198 /* LDV_COMMENT_BEGIN_PREP */
3199 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3200 #define DRIVER_VERSION "8 Nov 2010"
3201 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3202 ((ep)->udc->ep0_dir) : ((ep)->direction))
3203 #define RESET_TIMEOUT 10000
3204 #define FLUSH_TIMEOUT 10000
3205 #define EPSTATUS_TIMEOUT 10000
3206 #define PRIME_TIMEOUT 10000
3207 #define READSAFE_TIMEOUT 1000
3208 #define LOOPS_USEC_SHIFT 1
3209 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3210 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3211 /* LDV_COMMENT_END_PREP */
3212 /* LDV_COMMENT_FUNCTION_CALL Function from field "dequeue" from driver structure with callbacks "mv_ep_ops" */
3213 ldv_handler_precall();
3214 mv_ep_dequeue( var_group1, var_group2);
3215 /* LDV_COMMENT_BEGIN_PREP */
3216 #ifdef CONFIG_PM
3217 #endif
3218 #ifdef CONFIG_PM
3219 #endif
3220 /* LDV_COMMENT_END_PREP */
3221
3222
3223
3224
3225 }
3226
3227 break;
3228 case 6: {
3229
3230 /** STRUCT: struct type: usb_ep_ops, struct name: mv_ep_ops **/
3231
3232
3233 /* content: static int mv_ep_set_wedge(struct usb_ep *_ep)*/
3234 /* LDV_COMMENT_BEGIN_PREP */
3235 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3236 #define DRIVER_VERSION "8 Nov 2010"
3237 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3238 ((ep)->udc->ep0_dir) : ((ep)->direction))
3239 #define RESET_TIMEOUT 10000
3240 #define FLUSH_TIMEOUT 10000
3241 #define EPSTATUS_TIMEOUT 10000
3242 #define PRIME_TIMEOUT 10000
3243 #define READSAFE_TIMEOUT 1000
3244 #define LOOPS_USEC_SHIFT 1
3245 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3246 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3247 /* LDV_COMMENT_END_PREP */
3248 /* LDV_COMMENT_FUNCTION_CALL Function from field "set_wedge" from driver structure with callbacks "mv_ep_ops" */
3249 ldv_handler_precall();
3250 mv_ep_set_wedge( var_group1);
3251 /* LDV_COMMENT_BEGIN_PREP */
3252 #ifdef CONFIG_PM
3253 #endif
3254 #ifdef CONFIG_PM
3255 #endif
3256 /* LDV_COMMENT_END_PREP */
3257
3258
3259
3260
3261 }
3262
3263 break;
3264 case 7: {
3265
3266 /** STRUCT: struct type: usb_ep_ops, struct name: mv_ep_ops **/
3267
3268
3269 /* content: static int mv_ep_set_halt(struct usb_ep *_ep, int halt)*/
3270 /* LDV_COMMENT_BEGIN_PREP */
3271 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3272 #define DRIVER_VERSION "8 Nov 2010"
3273 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3274 ((ep)->udc->ep0_dir) : ((ep)->direction))
3275 #define RESET_TIMEOUT 10000
3276 #define FLUSH_TIMEOUT 10000
3277 #define EPSTATUS_TIMEOUT 10000
3278 #define PRIME_TIMEOUT 10000
3279 #define READSAFE_TIMEOUT 1000
3280 #define LOOPS_USEC_SHIFT 1
3281 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3282 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3283 /* LDV_COMMENT_END_PREP */
3284 /* LDV_COMMENT_FUNCTION_CALL Function from field "set_halt" from driver structure with callbacks "mv_ep_ops" */
3285 ldv_handler_precall();
3286 mv_ep_set_halt( var_group1, var_mv_ep_set_halt_17_p1);
3287 /* LDV_COMMENT_BEGIN_PREP */
3288 #ifdef CONFIG_PM
3289 #endif
3290 #ifdef CONFIG_PM
3291 #endif
3292 /* LDV_COMMENT_END_PREP */
3293
3294
3295
3296
3297 }
3298
3299 break;
3300 case 8: {
3301
3302 /** STRUCT: struct type: usb_ep_ops, struct name: mv_ep_ops **/
3303
3304
3305 /* content: static void mv_ep_fifo_flush(struct usb_ep *_ep)*/
3306 /* LDV_COMMENT_BEGIN_PREP */
3307 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3308 #define DRIVER_VERSION "8 Nov 2010"
3309 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3310 ((ep)->udc->ep0_dir) : ((ep)->direction))
3311 #define RESET_TIMEOUT 10000
3312 #define FLUSH_TIMEOUT 10000
3313 #define EPSTATUS_TIMEOUT 10000
3314 #define PRIME_TIMEOUT 10000
3315 #define READSAFE_TIMEOUT 1000
3316 #define LOOPS_USEC_SHIFT 1
3317 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3318 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3319 /* LDV_COMMENT_END_PREP */
3320 /* LDV_COMMENT_FUNCTION_CALL Function from field "fifo_flush" from driver structure with callbacks "mv_ep_ops" */
3321 ldv_handler_precall();
3322 mv_ep_fifo_flush( var_group1);
3323 /* LDV_COMMENT_BEGIN_PREP */
3324 #ifdef CONFIG_PM
3325 #endif
3326 #ifdef CONFIG_PM
3327 #endif
3328 /* LDV_COMMENT_END_PREP */
3329
3330
3331
3332
3333 }
3334
3335 break;
3336 case 9: {
3337
3338 /** STRUCT: struct type: usb_gadget_ops, struct name: mv_ops **/
3339
3340
3341 /* content: static int mv_udc_get_frame(struct usb_gadget *gadget)*/
3342 /* LDV_COMMENT_BEGIN_PREP */
3343 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3344 #define DRIVER_VERSION "8 Nov 2010"
3345 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3346 ((ep)->udc->ep0_dir) : ((ep)->direction))
3347 #define RESET_TIMEOUT 10000
3348 #define FLUSH_TIMEOUT 10000
3349 #define EPSTATUS_TIMEOUT 10000
3350 #define PRIME_TIMEOUT 10000
3351 #define READSAFE_TIMEOUT 1000
3352 #define LOOPS_USEC_SHIFT 1
3353 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3354 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3355 /* LDV_COMMENT_END_PREP */
3356 /* LDV_COMMENT_FUNCTION_CALL Function from field "get_frame" from driver structure with callbacks "mv_ops" */
3357 ldv_handler_precall();
3358 mv_udc_get_frame( var_group3);
3359 /* LDV_COMMENT_BEGIN_PREP */
3360 #ifdef CONFIG_PM
3361 #endif
3362 #ifdef CONFIG_PM
3363 #endif
3364 /* LDV_COMMENT_END_PREP */
3365
3366
3367
3368
3369 }
3370
3371 break;
3372 case 10: {
3373
3374 /** STRUCT: struct type: usb_gadget_ops, struct name: mv_ops **/
3375
3376
3377 /* content: static int mv_udc_wakeup(struct usb_gadget *gadget)*/
3378 /* LDV_COMMENT_BEGIN_PREP */
3379 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3380 #define DRIVER_VERSION "8 Nov 2010"
3381 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3382 ((ep)->udc->ep0_dir) : ((ep)->direction))
3383 #define RESET_TIMEOUT 10000
3384 #define FLUSH_TIMEOUT 10000
3385 #define EPSTATUS_TIMEOUT 10000
3386 #define PRIME_TIMEOUT 10000
3387 #define READSAFE_TIMEOUT 1000
3388 #define LOOPS_USEC_SHIFT 1
3389 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3390 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3391 /* LDV_COMMENT_END_PREP */
3392 /* LDV_COMMENT_FUNCTION_CALL Function from field "wakeup" from driver structure with callbacks "mv_ops" */
3393 ldv_handler_precall();
3394 mv_udc_wakeup( var_group3);
3395 /* LDV_COMMENT_BEGIN_PREP */
3396 #ifdef CONFIG_PM
3397 #endif
3398 #ifdef CONFIG_PM
3399 #endif
3400 /* LDV_COMMENT_END_PREP */
3401
3402
3403
3404
3405 }
3406
3407 break;
3408 case 11: {
3409
3410 /** STRUCT: struct type: usb_gadget_ops, struct name: mv_ops **/
3411
3412
3413 /* content: static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)*/
3414 /* LDV_COMMENT_BEGIN_PREP */
3415 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3416 #define DRIVER_VERSION "8 Nov 2010"
3417 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3418 ((ep)->udc->ep0_dir) : ((ep)->direction))
3419 #define RESET_TIMEOUT 10000
3420 #define FLUSH_TIMEOUT 10000
3421 #define EPSTATUS_TIMEOUT 10000
3422 #define PRIME_TIMEOUT 10000
3423 #define READSAFE_TIMEOUT 1000
3424 #define LOOPS_USEC_SHIFT 1
3425 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3426 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3427 /* LDV_COMMENT_END_PREP */
3428 /* LDV_COMMENT_FUNCTION_CALL Function from field "vbus_session" from driver structure with callbacks "mv_ops" */
3429 ldv_handler_precall();
3430 mv_udc_vbus_session( var_group3, var_mv_udc_vbus_session_30_p1);
3431 /* LDV_COMMENT_BEGIN_PREP */
3432 #ifdef CONFIG_PM
3433 #endif
3434 #ifdef CONFIG_PM
3435 #endif
3436 /* LDV_COMMENT_END_PREP */
3437
3438
3439
3440
3441 }
3442
3443 break;
3444 case 12: {
3445
3446 /** STRUCT: struct type: usb_gadget_ops, struct name: mv_ops **/
3447
3448
3449 /* content: static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)*/
3450 /* LDV_COMMENT_BEGIN_PREP */
3451 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3452 #define DRIVER_VERSION "8 Nov 2010"
3453 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3454 ((ep)->udc->ep0_dir) : ((ep)->direction))
3455 #define RESET_TIMEOUT 10000
3456 #define FLUSH_TIMEOUT 10000
3457 #define EPSTATUS_TIMEOUT 10000
3458 #define PRIME_TIMEOUT 10000
3459 #define READSAFE_TIMEOUT 1000
3460 #define LOOPS_USEC_SHIFT 1
3461 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3462 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3463 /* LDV_COMMENT_END_PREP */
3464 /* LDV_COMMENT_FUNCTION_CALL Function from field "pullup" from driver structure with callbacks "mv_ops" */
3465 ldv_handler_precall();
3466 mv_udc_pullup( var_group3, var_mv_udc_pullup_31_p1);
3467 /* LDV_COMMENT_BEGIN_PREP */
3468 #ifdef CONFIG_PM
3469 #endif
3470 #ifdef CONFIG_PM
3471 #endif
3472 /* LDV_COMMENT_END_PREP */
3473
3474
3475
3476
3477 }
3478
3479 break;
3480 case 13: {
3481
3482 /** STRUCT: struct type: usb_gadget_ops, struct name: mv_ops **/
3483
3484
3485 /* content: static int mv_udc_start(struct usb_gadget *gadget, struct usb_gadget_driver *driver)*/
3486 /* LDV_COMMENT_BEGIN_PREP */
3487 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3488 #define DRIVER_VERSION "8 Nov 2010"
3489 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3490 ((ep)->udc->ep0_dir) : ((ep)->direction))
3491 #define RESET_TIMEOUT 10000
3492 #define FLUSH_TIMEOUT 10000
3493 #define EPSTATUS_TIMEOUT 10000
3494 #define PRIME_TIMEOUT 10000
3495 #define READSAFE_TIMEOUT 1000
3496 #define LOOPS_USEC_SHIFT 1
3497 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3498 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3499 /* LDV_COMMENT_END_PREP */
3500 /* LDV_COMMENT_FUNCTION_CALL Function from field "udc_start" from driver structure with callbacks "mv_ops" */
3501 ldv_handler_precall();
3502 mv_udc_start( var_group3, var_group4);
3503 /* LDV_COMMENT_BEGIN_PREP */
3504 #ifdef CONFIG_PM
3505 #endif
3506 #ifdef CONFIG_PM
3507 #endif
3508 /* LDV_COMMENT_END_PREP */
3509
3510
3511
3512
3513 }
3514
3515 break;
3516 case 14: {
3517
3518 /** STRUCT: struct type: usb_gadget_ops, struct name: mv_ops **/
3519
3520
3521 /* content: static int mv_udc_stop(struct usb_gadget *gadget)*/
3522 /* LDV_COMMENT_BEGIN_PREP */
3523 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3524 #define DRIVER_VERSION "8 Nov 2010"
3525 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3526 ((ep)->udc->ep0_dir) : ((ep)->direction))
3527 #define RESET_TIMEOUT 10000
3528 #define FLUSH_TIMEOUT 10000
3529 #define EPSTATUS_TIMEOUT 10000
3530 #define PRIME_TIMEOUT 10000
3531 #define READSAFE_TIMEOUT 1000
3532 #define LOOPS_USEC_SHIFT 1
3533 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3534 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3535 /* LDV_COMMENT_END_PREP */
3536 /* LDV_COMMENT_FUNCTION_CALL Function from field "udc_stop" from driver structure with callbacks "mv_ops" */
3537 ldv_handler_precall();
3538 mv_udc_stop( var_group3);
3539 /* LDV_COMMENT_BEGIN_PREP */
3540 #ifdef CONFIG_PM
3541 #endif
3542 #ifdef CONFIG_PM
3543 #endif
3544 /* LDV_COMMENT_END_PREP */
3545
3546
3547
3548
3549 }
3550
3551 break;
3552 case 15: {
3553
3554 /** STRUCT: struct type: dev_pm_ops, struct name: mv_udc_pm_ops **/
3555
3556
3557 /* content: static int mv_udc_suspend(struct device *dev)*/
3558 /* LDV_COMMENT_BEGIN_PREP */
3559 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3560 #define DRIVER_VERSION "8 Nov 2010"
3561 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3562 ((ep)->udc->ep0_dir) : ((ep)->direction))
3563 #define RESET_TIMEOUT 10000
3564 #define FLUSH_TIMEOUT 10000
3565 #define EPSTATUS_TIMEOUT 10000
3566 #define PRIME_TIMEOUT 10000
3567 #define READSAFE_TIMEOUT 1000
3568 #define LOOPS_USEC_SHIFT 1
3569 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3570 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3571 #ifdef CONFIG_PM
3572 /* LDV_COMMENT_END_PREP */
3573 /* LDV_COMMENT_FUNCTION_CALL Function from field "suspend" from driver structure with callbacks "mv_udc_pm_ops" */
3574 ldv_handler_precall();
3575 mv_udc_suspend( var_group5);
3576 /* LDV_COMMENT_BEGIN_PREP */
3577 #endif
3578 #ifdef CONFIG_PM
3579 #endif
3580 /* LDV_COMMENT_END_PREP */
3581
3582
3583
3584
3585 }
3586
3587 break;
3588 case 16: {
3589
3590 /** STRUCT: struct type: dev_pm_ops, struct name: mv_udc_pm_ops **/
3591
3592
3593 /* content: static int mv_udc_resume(struct device *dev)*/
3594 /* LDV_COMMENT_BEGIN_PREP */
3595 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3596 #define DRIVER_VERSION "8 Nov 2010"
3597 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3598 ((ep)->udc->ep0_dir) : ((ep)->direction))
3599 #define RESET_TIMEOUT 10000
3600 #define FLUSH_TIMEOUT 10000
3601 #define EPSTATUS_TIMEOUT 10000
3602 #define PRIME_TIMEOUT 10000
3603 #define READSAFE_TIMEOUT 1000
3604 #define LOOPS_USEC_SHIFT 1
3605 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3606 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3607 #ifdef CONFIG_PM
3608 /* LDV_COMMENT_END_PREP */
3609 /* LDV_COMMENT_FUNCTION_CALL Function from field "resume" from driver structure with callbacks "mv_udc_pm_ops" */
3610 ldv_handler_precall();
3611 mv_udc_resume( var_group5);
3612 /* LDV_COMMENT_BEGIN_PREP */
3613 #endif
3614 #ifdef CONFIG_PM
3615 #endif
3616 /* LDV_COMMENT_END_PREP */
3617
3618
3619
3620
3621 }
3622
3623 break;
3624 case 17: {
3625
3626 /** STRUCT: struct type: platform_driver, struct name: udc_driver **/
3627 if(ldv_s_udc_driver_platform_driver==0) {
3628
3629 /* content: static int mv_udc_probe(struct platform_device *pdev)*/
3630 /* LDV_COMMENT_BEGIN_PREP */
3631 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3632 #define DRIVER_VERSION "8 Nov 2010"
3633 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3634 ((ep)->udc->ep0_dir) : ((ep)->direction))
3635 #define RESET_TIMEOUT 10000
3636 #define FLUSH_TIMEOUT 10000
3637 #define EPSTATUS_TIMEOUT 10000
3638 #define PRIME_TIMEOUT 10000
3639 #define READSAFE_TIMEOUT 1000
3640 #define LOOPS_USEC_SHIFT 1
3641 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3642 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3643 /* LDV_COMMENT_END_PREP */
3644 /* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "udc_driver". Standart function test for correct return result. */
3645 res_mv_udc_probe_59 = mv_udc_probe( var_group6);
3646 ldv_check_return_value(res_mv_udc_probe_59);
3647 ldv_check_return_value_probe(res_mv_udc_probe_59);
3648 if(res_mv_udc_probe_59)
3649 goto ldv_module_exit;
3650 /* LDV_COMMENT_BEGIN_PREP */
3651 #ifdef CONFIG_PM
3652 #endif
3653 #ifdef CONFIG_PM
3654 #endif
3655 /* LDV_COMMENT_END_PREP */
3656 ldv_s_udc_driver_platform_driver++;
3657
3658 }
3659
3660 }
3661
3662 break;
3663 case 18: {
3664
3665 /** STRUCT: struct type: platform_driver, struct name: udc_driver **/
3666 if(ldv_s_udc_driver_platform_driver==1) {
3667
3668 /* content: static int mv_udc_remove(struct platform_device *pdev)*/
3669 /* LDV_COMMENT_BEGIN_PREP */
3670 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3671 #define DRIVER_VERSION "8 Nov 2010"
3672 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3673 ((ep)->udc->ep0_dir) : ((ep)->direction))
3674 #define RESET_TIMEOUT 10000
3675 #define FLUSH_TIMEOUT 10000
3676 #define EPSTATUS_TIMEOUT 10000
3677 #define PRIME_TIMEOUT 10000
3678 #define READSAFE_TIMEOUT 1000
3679 #define LOOPS_USEC_SHIFT 1
3680 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3681 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3682 /* LDV_COMMENT_END_PREP */
3683 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "udc_driver" */
3684 ldv_handler_precall();
3685 mv_udc_remove( var_group6);
3686 /* LDV_COMMENT_BEGIN_PREP */
3687 #ifdef CONFIG_PM
3688 #endif
3689 #ifdef CONFIG_PM
3690 #endif
3691 /* LDV_COMMENT_END_PREP */
3692 ldv_s_udc_driver_platform_driver++;
3693
3694 }
3695
3696 }
3697
3698 break;
3699 case 19: {
3700
3701 /** STRUCT: struct type: platform_driver, struct name: udc_driver **/
3702 if(ldv_s_udc_driver_platform_driver==2) {
3703
3704 /* content: static void mv_udc_shutdown(struct platform_device *pdev)*/
3705 /* LDV_COMMENT_BEGIN_PREP */
3706 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3707 #define DRIVER_VERSION "8 Nov 2010"
3708 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3709 ((ep)->udc->ep0_dir) : ((ep)->direction))
3710 #define RESET_TIMEOUT 10000
3711 #define FLUSH_TIMEOUT 10000
3712 #define EPSTATUS_TIMEOUT 10000
3713 #define PRIME_TIMEOUT 10000
3714 #define READSAFE_TIMEOUT 1000
3715 #define LOOPS_USEC_SHIFT 1
3716 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3717 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3718 #ifdef CONFIG_PM
3719 #endif
3720 /* LDV_COMMENT_END_PREP */
3721 /* LDV_COMMENT_FUNCTION_CALL Function from field "shutdown" from driver structure with callbacks "udc_driver" */
3722 ldv_handler_precall();
3723 mv_udc_shutdown( var_group6);
3724 /* LDV_COMMENT_BEGIN_PREP */
3725 #ifdef CONFIG_PM
3726 #endif
3727 /* LDV_COMMENT_END_PREP */
3728 ldv_s_udc_driver_platform_driver=0;
3729
3730 }
3731
3732 }
3733
3734 break;
3735 case 20: {
3736
3737 /** CALLBACK SECTION request_irq **/
3738 LDV_IN_INTERRUPT=2;
3739
3740 /* content: static irqreturn_t mv_udc_irq(int irq, void *dev)*/
3741 /* LDV_COMMENT_BEGIN_PREP */
3742 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3743 #define DRIVER_VERSION "8 Nov 2010"
3744 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3745 ((ep)->udc->ep0_dir) : ((ep)->direction))
3746 #define RESET_TIMEOUT 10000
3747 #define FLUSH_TIMEOUT 10000
3748 #define EPSTATUS_TIMEOUT 10000
3749 #define PRIME_TIMEOUT 10000
3750 #define READSAFE_TIMEOUT 1000
3751 #define LOOPS_USEC_SHIFT 1
3752 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3753 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3754 /* LDV_COMMENT_END_PREP */
3755 /* LDV_COMMENT_FUNCTION_CALL */
3756 ldv_handler_precall();
3757 mv_udc_irq( var_mv_udc_irq_54_p0, var_mv_udc_irq_54_p1);
3758 /* LDV_COMMENT_BEGIN_PREP */
3759 #ifdef CONFIG_PM
3760 #endif
3761 #ifdef CONFIG_PM
3762 #endif
3763 /* LDV_COMMENT_END_PREP */
3764 LDV_IN_INTERRUPT=1;
3765
3766
3767
3768 }
3769
3770 break;
3771 case 21: {
3772
3773 /** CALLBACK SECTION request_irq **/
3774 LDV_IN_INTERRUPT=2;
3775
3776 /* content: static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)*/
3777 /* LDV_COMMENT_BEGIN_PREP */
3778 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3779 #define DRIVER_VERSION "8 Nov 2010"
3780 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3781 ((ep)->udc->ep0_dir) : ((ep)->direction))
3782 #define RESET_TIMEOUT 10000
3783 #define FLUSH_TIMEOUT 10000
3784 #define EPSTATUS_TIMEOUT 10000
3785 #define PRIME_TIMEOUT 10000
3786 #define READSAFE_TIMEOUT 1000
3787 #define LOOPS_USEC_SHIFT 1
3788 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3789 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3790 /* LDV_COMMENT_END_PREP */
3791 /* LDV_COMMENT_FUNCTION_CALL */
3792 ldv_handler_precall();
3793 mv_udc_vbus_irq( var_mv_udc_vbus_irq_55_p0, var_mv_udc_vbus_irq_55_p1);
3794 /* LDV_COMMENT_BEGIN_PREP */
3795 #ifdef CONFIG_PM
3796 #endif
3797 #ifdef CONFIG_PM
3798 #endif
3799 /* LDV_COMMENT_END_PREP */
3800 LDV_IN_INTERRUPT=1;
3801
3802
3803
3804 }
3805
3806 break;
3807 default: break;
3808
3809 }
3810
3811 }
3812
3813 ldv_module_exit:
3814
3815 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
3816 ldv_final: ldv_check_final_state();
3817
3818 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
3819 return;
3820
3821 }
3822 #endif
3823
3824 /* LDV_COMMENT_END_MAIN */ 1
2 #include <asm/io.h>
3 #include <verifier/rcv.h>
4 #include <verifier/set.h>
5
6
7 Set LDV_IO_MEMS = 0;
8
9
10 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_io_mem_remap') Create some io-memory map for specified address */
11 void *ldv_io_mem_remap(void *addr) {
12 ldv_assert(ldv_set_not_contains(LDV_IO_MEMS, addr));
13
14 void *ptr = ldv_undef_ptr();
15 if (ptr != NULL) {
16 ldv_set_add(LDV_IO_MEMS, addr);
17 return ptr;
18 }
19 return NULL;
20 }
21
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_io_mem_unmap') Delete some io-memory map for specified address */
void ldv_io_mem_unmap(const volatile void *addr) {
	/* Only a currently-mapped address may be unmapped: catches a stray
	 * or double iounmap() in the driver under verification. */
	ldv_assert(ldv_set_contains(LDV_IO_MEMS, addr));
	ldv_set_remove(LDV_IO_MEMS, addr);
}
27
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that all io-memory map are unmapped properly */
void ldv_check_final_state(void) {
	/* At module unload every ioremap() must have been matched by an
	 * iounmap(); a non-empty set here means the driver leaked a mapping. */
	ldv_assert(ldv_set_is_empty(LDV_IO_MEMS));
}
32 #line 1 "/work/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.1-rc1.tar.xz--X--152_1a/linux-4.1-rc1.tar.xz/csd_deg_dscv/2900/dscv_tempdir/dscv/ri/152_1a/drivers/usb/gadget/udc/mv_udc_core.c"
33
34 /*
35 * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
36 * Author: Chao Xie <chao.xie@marvell.com>
37 * Neil Zhang <zhangwm@marvell.com>
38 *
39 * This program is free software; you can redistribute it and/or modify it
40 * under the terms of the GNU General Public License as published by the
41 * Free Software Foundation; either version 2 of the License, or (at your
42 * option) any later version.
43 */
44
45 #include <linux/module.h>
46 #include <linux/pci.h>
47 #include <linux/dma-mapping.h>
48 #include <linux/dmapool.h>
49 #include <linux/kernel.h>
50 #include <linux/delay.h>
51 #include <linux/ioport.h>
52 #include <linux/sched.h>
53 #include <linux/slab.h>
54 #include <linux/errno.h>
55 #include <linux/err.h>
56 #include <linux/timer.h>
57 #include <linux/list.h>
58 #include <linux/interrupt.h>
59 #include <linux/moduleparam.h>
60 #include <linux/device.h>
61 #include <linux/usb/ch9.h>
62 #include <linux/usb/gadget.h>
63 #include <linux/usb/otg.h>
64 #include <linux/pm.h>
65 #include <linux/io.h>
66 #include <linux/irq.h>
67 #include <linux/platform_device.h>
68 #include <linux/clk.h>
69 #include <linux/platform_data/mv_usb.h>
70 #include <asm/unaligned.h>
71
72 #include "mv_udc.h"
73
74 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
75 #define DRIVER_VERSION "8 Nov 2010"
76
77 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
78 ((ep)->udc->ep0_dir) : ((ep)->direction))
79
80 /* timeout value -- usec */
81 #define RESET_TIMEOUT 10000
82 #define FLUSH_TIMEOUT 10000
83 #define EPSTATUS_TIMEOUT 10000
84 #define PRIME_TIMEOUT 10000
85 #define READSAFE_TIMEOUT 1000
86
87 #define LOOPS_USEC_SHIFT 1
88 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
89 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
90
91 static DECLARE_COMPLETION(release_done);
92
93 static const char driver_name[] = "mv_udc";
94 static const char driver_desc[] = DRIVER_DESC;
95
96 static void nuke(struct mv_ep *ep, int status);
97 static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);
98
/* for endpoint 0 operations */
/*
 * Fixed descriptor used when (re)enabling ep0: a control endpoint at
 * address 0 with the controller's ep0 maximum packet size.
 */
static const struct usb_endpoint_descriptor mv_ep0_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= 0,
	.bmAttributes		= USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize		= EP0_MAX_PKT_SIZE,
};
107
/*
 * ep0_reset - reinitialise both halves (OUT and IN) of endpoint 0.
 *
 * Rebuilds the two ep0 queue heads (eps[0] = RX/OUT, eps[1] = TX/IN) and
 * re-enables the corresponding directions in the shared EPCTRL0 register.
 * Called with the controller quiesced; no locking is done here.
 */
static void ep0_reset(struct mv_udc *udc)
{
	struct mv_ep *ep;
	u32 epctrlx;
	int i = 0;

	/* ep0 in and out */
	for (i = 0; i < 2; i++) {
		ep = &udc->eps[i];
		ep->udc = udc;

		/* ep0 dQH */
		ep->dqh = &udc->ep_dqh[i];

		/* configure ep0 endpoint capabilities in dQH:
		 * max packet size plus IOS (interrupt-on-setup). */
		ep->dqh->max_packet_length =
			(EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
			| EP_QUEUE_HEAD_IOS;

		/* no dTD queued yet: terminate the next-dTD chain */
		ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;

		/* read-modify-write EPCTRL0; both directions live in the
		 * same register, so the second pass keeps the first's bits */
		epctrlx = readl(&udc->op_regs->epctrlx[0]);
		if (i) {	/* TX */
			epctrlx |= EPCTRL_TX_ENABLE
				| (USB_ENDPOINT_XFER_CONTROL
					<< EPCTRL_TX_EP_TYPE_SHIFT);

		} else {	/* RX */
			epctrlx |= EPCTRL_RX_ENABLE
				| (USB_ENDPOINT_XFER_CONTROL
					<< EPCTRL_RX_EP_TYPE_SHIFT);
		}

		writel(epctrlx, &udc->op_regs->epctrlx[0]);
	}
}
144
/* protocol ep0 stall, will automatically be cleared on new transaction */
static void ep0_stall(struct mv_udc *udc)
{
	u32 epctrlx;

	/* set TX and RX to stall (read-modify-write of shared EPCTRL0) */
	epctrlx = readl(&udc->op_regs->epctrlx[0]);
	epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
	writel(epctrlx, &udc->op_regs->epctrlx[0]);

	/* update ep0 state: back to waiting for the next SETUP, OUT dir */
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = EP_DIR_OUT;
}
159
/*
 * process_ep_req - check completion status of one queued request.
 *
 * Walks every dTD belonging to @curr_req for the endpoint/direction
 * selected by @index (even index = OUT/RX, odd = IN/TX) and translates
 * the hardware status into a result code.
 *
 * Returns:
 *   1        - a dTD is still marked ACTIVE, i.e. not yet completed;
 *   0        - request completed; curr_req->req.actual has been updated;
 *   -EPIPE   - endpoint halted by the controller (halt bit cleared here);
 *   -EPROTO  - data buffer error, or a TX dTD finished with data left over;
 *   -EILSEQ  - transaction (bus) error.
 *
 * Called with the udc lock held, typically from the completion interrupt.
 */
static int process_ep_req(struct mv_udc *udc, int index,
	struct mv_req *curr_req)
{
	struct mv_dtd *curr_dtd;
	struct mv_dqh *curr_dqh;
	int td_complete, actual, remaining_length;
	int i, direction;
	int retval = 0;
	u32 errors;
	u32 bit_pos;

	curr_dqh = &udc->ep_dqh[index];
	direction = index % 2;

	curr_dtd = curr_req->head;
	td_complete = 0;
	/* start from the requested length and subtract what the hardware
	 * reports as still remaining in each dTD */
	actual = curr_req->req.length;

	for (i = 0; i < curr_req->dtd_count; i++) {
		if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
			dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
				udc->eps[index].name);
			return 1;
		}

		errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
		if (!errors) {
			remaining_length =
				(curr_dtd->size_ioc_sts	& DTD_PACKET_SIZE)
					>> DTD_LENGTH_BIT_POS;
			actual -= remaining_length;

			if (remaining_length) {
				if (direction) {
					/* IN transfer that the controller
					 * could not fully send: protocol
					 * error */
					dev_dbg(&udc->dev->dev,
						"TX dTD remains data\n");
					retval = -EPROTO;
					break;
				} else
					/* short OUT packet: normal early
					 * termination, not an error */
					break;
			}
		} else {
			dev_info(&udc->dev->dev,
				"complete_tr error: ep=%d %s: error = 0x%x\n",
				index >> 1, direction ? "SEND" : "RECV",
				errors);
			if (errors & DTD_STATUS_HALTED) {
				/* Clear the errors and Halt condition */
				curr_dqh->size_ioc_int_sts &= ~errors;
				retval = -EPIPE;
			} else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
				retval = -EPROTO;
			} else if (errors & DTD_STATUS_TRANSACTION_ERR) {
				retval = -EILSEQ;
			}
		}
		/* advance along the software dTD chain, except past the tail */
		if (i != curr_req->dtd_count - 1)
			curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
	}
	if (retval)
		return retval;

	if (direction == EP_DIR_OUT)
		bit_pos = 1 << curr_req->ep->ep_num;
	else
		bit_pos = 1 << (16 + curr_req->ep->ep_num);

	/* wait until the controller's current-dTD pointer has moved off the
	 * last dTD of this request (or, at end of chain, until the endpoint
	 * status bit clears).
	 * NOTE(review): both busy-wait loops are unbounded; presumably the
	 * hardware guarantees forward progress here -- confirm, since a wedged
	 * controller would spin forever with the udc lock held. */
	while ((curr_dqh->curr_dtd_ptr == curr_dtd->td_dma)) {
		if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
			while (readl(&udc->op_regs->epstatus) & bit_pos)
				udelay(1);
			break;
		}
		udelay(1);
	}

	curr_req->req.actual = actual;

	return 0;
}
240
/*
 * done() - retire a request; caller blocked irqs
 * @status : request status to be set, only works when
 *	request is still in progress.
 *
 * Removes @req from the endpoint queue, frees its dTD chain back to the
 * DMA pool, unmaps the request buffer and hands the request back to the
 * gadget driver.  The udc lock is dropped around the giveback callback
 * (the gadget driver may requeue from its completion handler) and then
 * reacquired -- the __releases/__acquires annotations document this.
 *
 * Must be called with ep->udc->lock held and interrupts disabled.
 */
static void done(struct mv_ep *ep, struct mv_req *req, int status)
	__releases(&ep->udc->lock)
	__acquires(&ep->udc->lock)
{
	struct mv_udc *udc = NULL;
	unsigned char stopped = ep->stopped;
	struct mv_dtd *curr_td, *next_td;
	int j;

	udc = (struct mv_udc *)ep->udc;
	/* Remove the req from the endpoint's queue */
	list_del_init(&req->queue);

	/* req.status should be set as -EINPROGRESS in ep_queue() */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	/* Free dtd for the request */
	next_td = req->head;
	for (j = 0; j < req->dtd_count; j++) {
		curr_td = next_td;
		/* fetch the successor before freeing, except for the tail */
		if (j != req->dtd_count - 1)
			next_td = curr_td->next_dtd_virt;
		dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
	}

	usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));

	/* -ESHUTDOWN (nuke on disable/disconnect) is expected; stay quiet */
	if (status && (status != -ESHUTDOWN))
		dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* mark stopped so a re-queue from the callback won't prime early;
	 * restored to its previous value after the callback returns */
	ep->stopped = 1;

	spin_unlock(&ep->udc->lock);

	usb_gadget_giveback_request(&ep->ep, &req->req);

	spin_lock(&ep->udc->lock);
	ep->stopped = stopped;
}
290
/*
 * queue_dtd - link a request's dTD chain into the endpoint queue head
 * and prime the endpoint.
 *
 * If the endpoint queue is empty, the queue head's next-dTD pointer is
 * written directly and the endpoint is primed.  Otherwise the new chain
 * is appended to the tail of the last queued request, and the ATDTW
 * (Add-dTD-TripWire) semaphore protocol is used to decide safely whether
 * the hardware is still processing the list (no re-prime needed) or has
 * already gone idle (prime again).
 *
 * Returns 0 on success or -ETIME if the tripwire never latched.
 * Must be called with the udc lock held.
 */
static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_udc *udc;
	struct mv_dqh *dqh;
	u32 bit_pos, direction;
	u32 usbcmd, epstatus;
	unsigned int loops;
	int retval = 0;

	udc = ep->udc;
	direction = ep_dir(ep);
	/* queue heads are laid out in OUT/IN pairs per endpoint number */
	dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
	bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);

	/* check if the pipe is empty */
	if (!(list_empty(&ep->queue))) {
		struct mv_req *lastreq;
		/* append our chain to the tail of the last queued request */
		lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
		lastreq->tail->dtd_next =
			req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;

		/* make the link visible to the DMA engine before probing */
		wmb();

		/* already primed: hardware will pick the new dTD up itself */
		if (readl(&udc->op_regs->epprime) & bit_pos)
			goto done;

		loops = LOOPS(READSAFE_TIMEOUT);
		while (1) {
			/* start with setting the semaphores */
			usbcmd = readl(&udc->op_regs->usbcmd);
			usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET;
			writel(usbcmd, &udc->op_regs->usbcmd);

			/* read the endpoint status */
			epstatus = readl(&udc->op_regs->epstatus) & bit_pos;

			/*
			 * Reread the ATDTW semaphore bit to check if it is
			 * cleared. When hardware see a hazard, it will clear
			 * the bit or else we remain set to 1 and we can
			 * proceed with priming of endpoint if not already
			 * primed.
			 */
			if (readl(&udc->op_regs->usbcmd)
				& USBCMD_ATDTW_TRIPWIRE_SET)
				break;

			loops--;
			if (loops == 0) {
				dev_err(&udc->dev->dev,
					"Timeout for ATDTW_TRIPWIRE...\n");
				retval = -ETIME;
				goto done;
			}
			udelay(LOOPS_USEC);
		}

		/* Clear the semaphore */
		usbcmd = readl(&udc->op_regs->usbcmd);
		usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
		writel(usbcmd, &udc->op_regs->usbcmd);

		/* endpoint still active: no need to re-prime */
		if (epstatus)
			goto done;
	}

	/* Write dQH next pointer and terminate bit to 0 */
	dqh->next_dtd_ptr = req->head->td_dma
				& EP_QUEUE_HEAD_NEXT_POINTER_MASK;

	/* clear active and halt bit, in case set from a previous error */
	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);

	/* Ensure that updates to the QH will occur before priming. */
	wmb();

	/* Prime the Endpoint */
	writel(bit_pos, &udc->op_regs->epprime);

done:
	return retval;
}
373
/*
 * build_dtd() - allocate and fill one dTD for the next slice of @req.
 *
 * The slice covers up to EP_MAX_LENGTH_TRANSFER bytes (for isochronous
 * endpoints: mult * maxpacket, as programmed in the dQH).  Advances
 * req->req.actual by the slice length and reports it in *length; *dma
 * receives the dTD's bus address and *is_last is set when this dTD
 * finishes the request (taking req->req.zero into account).
 *
 * Returns the new dTD, or NULL when the DMA pool is exhausted.
 */
static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
		dma_addr_t *dma, int *is_last)
{
	struct mv_dtd *dtd;
	struct mv_udc *udc;
	struct mv_dqh *dqh;
	u32 temp, mult = 0;

	/* how big will this transfer be? */
	if (usb_endpoint_xfer_isoc(req->ep->ep.desc)) {
		dqh = req->ep->dqh;
		mult = (dqh->max_packet_length >> EP_QUEUE_HEAD_MULT_POS)
				& 0x3;
		*length = min(req->req.length - req->req.actual,
				(unsigned)(mult * req->ep->ep.maxpacket));
	} else
		*length = min(req->req.length - req->req.actual,
				(unsigned)EP_MAX_LENGTH_TRANSFER);

	udc = req->ep->udc;

	/*
	 * Be careful that no _GFP_HIGHMEM is set,
	 * or we can not use dma_to_virt
	 */
	dtd = dma_pool_alloc(udc->dtd_pool, GFP_ATOMIC, dma);
	if (dtd == NULL)
		return dtd;

	dtd->td_dma = *dma;
	/* initialize buffer page pointers: five 4 KiB page slots */
	temp = (u32)(req->req.dma + req->req.actual);
	dtd->buff_ptr0 = cpu_to_le32(temp);
	temp &= ~0xFFF;
	dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
	dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
	dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
	dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);

	req->req.actual += *length;

	/* zlp is needed if req->req.zero is set */
	if (req->req.zero) {
		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
			*is_last = 1;
		else
			*is_last = 0;
	} else if (req->req.length == req->req.actual)
		*is_last = 1;
	else
		*is_last = 0;

	/* Fill in the transfer size; set active bit */
	temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);

	/* Enable interrupt for the last dtd of a request */
	if (*is_last && !req->req.no_interrupt)
		temp |= DTD_IOC;

	/* isochronous transaction count (MULT) field */
	temp |= mult << 10;

	dtd->size_ioc_sts = temp;

	/* make the descriptor visible before it is linked/primed */
	mb();

	return dtd;
}
441
442 /* generate dTD linked list for a request */
443 static int req_to_dtd(struct mv_req *req)
444 {
445 unsigned count;
446 int is_last, is_first = 1;
447 struct mv_dtd *dtd, *last_dtd = NULL;
448 struct mv_udc *udc;
449 dma_addr_t dma;
450
451 udc = req->ep->udc;
452
453 do {
454 dtd = build_dtd(req, &count, &dma, &is_last);
455 if (dtd == NULL)
456 return -ENOMEM;
457
458 if (is_first) {
459 is_first = 0;
460 req->head = dtd;
461 } else {
462 last_dtd->dtd_next = dma;
463 last_dtd->next_dtd_virt = dtd;
464 }
465 last_dtd = dtd;
466 req->dtd_count++;
467 } while (!is_last);
468
469 /* set terminate bit to 1 for the last dTD */
470 dtd->dtd_next = DTD_NEXT_TERMINATE;
471
472 req->tail = dtd;
473
474 return 0;
475 }
476
477 static int mv_ep_enable(struct usb_ep *_ep,
478 const struct usb_endpoint_descriptor *desc)
479 {
480 struct mv_udc *udc;
481 struct mv_ep *ep;
482 struct mv_dqh *dqh;
483 u16 max = 0;
484 u32 bit_pos, epctrlx, direction;
485 unsigned char zlt = 0, ios = 0, mult = 0;
486 unsigned long flags;
487
488 ep = container_of(_ep, struct mv_ep, ep);
489 udc = ep->udc;
490
491 if (!_ep || !desc
492 || desc->bDescriptorType != USB_DT_ENDPOINT)
493 return -EINVAL;
494
495 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
496 return -ESHUTDOWN;
497
498 direction = ep_dir(ep);
499 max = usb_endpoint_maxp(desc);
500
501 /*
502 * disable HW zero length termination select
503 * driver handles zero length packet through req->req.zero
504 */
505 zlt = 1;
506
507 bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
508
509 /* Check if the Endpoint is Primed */
510 if ((readl(&udc->op_regs->epprime) & bit_pos)
511 || (readl(&udc->op_regs->epstatus) & bit_pos)) {
512 dev_info(&udc->dev->dev,
513 "ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
514 " ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
515 (unsigned)ep->ep_num, direction ? "SEND" : "RECV",
516 (unsigned)readl(&udc->op_regs->epprime),
517 (unsigned)readl(&udc->op_regs->epstatus),
518 (unsigned)bit_pos);
519 goto en_done;
520 }
521 /* Set the max packet length, interrupt on Setup and Mult fields */
522 switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
523 case USB_ENDPOINT_XFER_BULK:
524 zlt = 1;
525 mult = 0;
526 break;
527 case USB_ENDPOINT_XFER_CONTROL:
528 ios = 1;
529 case USB_ENDPOINT_XFER_INT:
530 mult = 0;
531 break;
532 case USB_ENDPOINT_XFER_ISOC:
533 /* Calculate transactions needed for high bandwidth iso */
534 mult = (unsigned char)(1 + ((max >> 11) & 0x03));
535 max = max & 0x7ff; /* bit 0~10 */
536 /* 3 transactions at most */
537 if (mult > 3)
538 goto en_done;
539 break;
540 default:
541 goto en_done;
542 }
543
544 spin_lock_irqsave(&udc->lock, flags);
545 /* Get the endpoint queue head address */
546 dqh = ep->dqh;
547 dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
548 | (mult << EP_QUEUE_HEAD_MULT_POS)
549 | (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
550 | (ios ? EP_QUEUE_HEAD_IOS : 0);
551 dqh->next_dtd_ptr = 1;
552 dqh->size_ioc_int_sts = 0;
553
554 ep->ep.maxpacket = max;
555 ep->ep.desc = desc;
556 ep->stopped = 0;
557
558 /* Enable the endpoint for Rx or Tx and set the endpoint type */
559 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
560 if (direction == EP_DIR_IN) {
561 epctrlx &= ~EPCTRL_TX_ALL_MASK;
562 epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
563 | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
564 << EPCTRL_TX_EP_TYPE_SHIFT);
565 } else {
566 epctrlx &= ~EPCTRL_RX_ALL_MASK;
567 epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
568 | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
569 << EPCTRL_RX_EP_TYPE_SHIFT);
570 }
571 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
572
573 /*
574 * Implement Guideline (GL# USB-7) The unused endpoint type must
575 * be programmed to bulk.
576 */
577 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
578 if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
579 epctrlx |= (USB_ENDPOINT_XFER_BULK
580 << EPCTRL_RX_EP_TYPE_SHIFT);
581 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
582 }
583
584 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
585 if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
586 epctrlx |= (USB_ENDPOINT_XFER_BULK
587 << EPCTRL_TX_EP_TYPE_SHIFT);
588 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
589 }
590
591 spin_unlock_irqrestore(&udc->lock, flags);
592
593 return 0;
594 en_done:
595 return -EINVAL;
596 }
597
/*
 * mv_ep_disable() - usb_ep_ops.disable; shut an endpoint down.
 *
 * Clears the dQH max-packet field, disables the direction bits in
 * EPCTRLx, then retires all queued requests with -ESHUTDOWN.  The
 * hardware is disabled first so no new transfers start while the
 * queue is drained.  Returns 0, or -EINVAL on a bad/unconfigured ep.
 */
static int mv_ep_disable(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	struct mv_ep *ep;
	struct mv_dqh *dqh;
	u32 bit_pos, epctrlx, direction;
	unsigned long flags;

	ep = container_of(_ep, struct mv_ep, ep);
	if ((_ep == NULL) || !ep->ep.desc)
		return -EINVAL;

	udc = ep->udc;

	/* Get the endpoint queue head address */
	dqh = ep->dqh;

	spin_lock_irqsave(&udc->lock, flags);

	direction = ep_dir(ep);
	/* low 16 bits = OUT endpoints, high 16 bits = IN endpoints */
	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);

	/* Reset the max packet length and the interrupt on Setup */
	dqh->max_packet_length = 0;

	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	epctrlx &= ~((direction == EP_DIR_IN)
			? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
			: (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* nuke all pending requests (does flush) */
	nuke(ep, -ESHUTDOWN);

	ep->ep.desc = NULL;
	ep->stopped = 1;

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
640
641 static struct usb_request *
642 mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
643 {
644 struct mv_req *req = NULL;
645
646 req = kzalloc(sizeof *req, gfp_flags);
647 if (!req)
648 return NULL;
649
650 req->req.dma = DMA_ADDR_INVALID;
651 INIT_LIST_HEAD(&req->queue);
652
653 return &req->req;
654 }
655
656 static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
657 {
658 struct mv_req *req = NULL;
659
660 req = container_of(_req, struct mv_req, req);
661
662 if (_req)
663 kfree(req);
664 }
665
/*
 * mv_ep_fifo_flush() - usb_ep_ops.fifo_flush; flush an endpoint's
 * primed transfers.  ep0 flushes both directions at once.  The outer
 * loop re-issues the flush while ENDPTSTATUS still shows the endpoint
 * active; the inner loop waits for the controller to clear ENDPTFLUSH.
 * Both loops bail out with an error message on timeout.
 */
static void mv_ep_fifo_flush(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	u32 bit_pos, direction;
	struct mv_ep *ep;
	unsigned int loops;

	if (!_ep)
		return;

	ep = container_of(_ep, struct mv_ep, ep);
	if (!ep->ep.desc)
		return;

	udc = ep->udc;
	direction = ep_dir(ep);

	/* ep0: flush RX bit 0 and TX bit 16 together */
	if (ep->ep_num == 0)
		bit_pos = (1 << 16) | 1;
	else if (direction == EP_DIR_OUT)
		bit_pos = 1 << ep->ep_num;
	else
		bit_pos = 1 << (16 + ep->ep_num);

	loops = LOOPS(EPSTATUS_TIMEOUT);
	do {
		unsigned int inter_loops;

		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
				(unsigned)readl(&udc->op_regs->epstatus),
				(unsigned)bit_pos);
			return;
		}
		/* Write 1 to the Flush register */
		writel(bit_pos, &udc->op_regs->epflush);

		/* Wait until flushing completed */
		inter_loops = LOOPS(FLUSH_TIMEOUT);
		while (readl(&udc->op_regs->epflush)) {
			/*
			 * ENDPTFLUSH bit should be cleared to indicate this
			 * operation is complete
			 */
			if (inter_loops == 0) {
				dev_err(&udc->dev->dev,
					"TIMEOUT for ENDPTFLUSH=0x%x,"
					"bit_pos=0x%x\n",
					(unsigned)readl(&udc->op_regs->epflush),
					(unsigned)bit_pos);
				return;
			}
			inter_loops--;
			udelay(LOOPS_USEC);
		}
		loops--;
	} while (readl(&udc->op_regs->epstatus) & bit_pos);
}
725
726 /* queues (submits) an I/O request to an endpoint */
727 static int
728 mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
729 {
730 struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
731 struct mv_req *req = container_of(_req, struct mv_req, req);
732 struct mv_udc *udc = ep->udc;
733 unsigned long flags;
734 int retval;
735
736 /* catch various bogus parameters */
737 if (!_req || !req->req.complete || !req->req.buf
738 || !list_empty(&req->queue)) {
739 dev_err(&udc->dev->dev, "%s, bad params", __func__);
740 return -EINVAL;
741 }
742 if (unlikely(!_ep || !ep->ep.desc)) {
743 dev_err(&udc->dev->dev, "%s, bad ep", __func__);
744 return -EINVAL;
745 }
746
747 udc = ep->udc;
748 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
749 return -ESHUTDOWN;
750
751 req->ep = ep;
752
753 /* map virtual address to hardware */
754 retval = usb_gadget_map_request(&udc->gadget, _req, ep_dir(ep));
755 if (retval)
756 return retval;
757
758 req->req.status = -EINPROGRESS;
759 req->req.actual = 0;
760 req->dtd_count = 0;
761
762 spin_lock_irqsave(&udc->lock, flags);
763
764 /* build dtds and push them to device queue */
765 if (!req_to_dtd(req)) {
766 retval = queue_dtd(ep, req);
767 if (retval) {
768 spin_unlock_irqrestore(&udc->lock, flags);
769 dev_err(&udc->dev->dev, "Failed to queue dtd\n");
770 goto err_unmap_dma;
771 }
772 } else {
773 spin_unlock_irqrestore(&udc->lock, flags);
774 dev_err(&udc->dev->dev, "Failed to dma_pool_alloc\n");
775 retval = -ENOMEM;
776 goto err_unmap_dma;
777 }
778
779 /* Update ep0 state */
780 if (ep->ep_num == 0)
781 udc->ep0_state = DATA_STATE_XMIT;
782
783 /* irq handler advances the queue */
784 list_add_tail(&req->queue, &ep->queue);
785 spin_unlock_irqrestore(&udc->lock, flags);
786
787 return 0;
788
789 err_unmap_dma:
790 usb_gadget_unmap_request(&udc->gadget, _req, ep_dir(ep));
791
792 return retval;
793 }
794
/*
 * mv_prime_ep() - point the endpoint's dQH at @req's dTD chain and
 * prime the endpoint.  Used when re-priming after a dequeue left
 * further requests pending.  Caller is expected to hold udc->lock.
 */
static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_dqh *dqh = ep->dqh;
	u32 bit_pos;

	/* Write dQH next pointer and terminate bit to 0 */
	dqh->next_dtd_ptr = req->head->td_dma
		& EP_QUEUE_HEAD_NEXT_POINTER_MASK;

	/* clear active and halt bit, in case set from a previous error */
	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);

	/* Ensure that updates to the QH will occur before priming. */
	wmb();

	bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);

	/* Prime the Endpoint */
	writel(bit_pos, &ep->udc->op_regs->epprime);
}
815
/* dequeues (cancels, unlinks) an I/O request from an endpoint */
static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
	struct mv_req *req;
	struct mv_udc *udc = ep->udc;
	unsigned long flags;
	int stopped, ret = 0;
	u32 epctrlx;

	if (!_ep || !_req)
		return -EINVAL;

	spin_lock_irqsave(&ep->udc->lock, flags);
	stopped = ep->stopped;

	/* Stop the ep before we deal with the queue */
	ep->stopped = 1;
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx &= ~EPCTRL_TX_ENABLE;
	else
		epctrlx &= ~EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	/*
	 * If the loop ran off the end, req points at the container of
	 * the list head itself, so this mismatch test detects "not
	 * found" without a separate flag.
	 */
	if (&req->req != _req) {
		ret = -EINVAL;
		goto out;
	}

	/* The request is in progress, or completed but not dequeued */
	if (ep->queue.next == &req->queue) {
		_req->status = -ECONNRESET;
		mv_ep_fifo_flush(_ep);	/* flush current transfer */

		/* The request isn't the last request in this ep queue */
		if (req->queue.next != &ep->queue) {
			struct mv_req *next_req;

			next_req = list_entry(req->queue.next,
				struct mv_req, queue);

			/* Point the QH to the first TD of next request */
			mv_prime_ep(ep, next_req);
		} else {
			struct mv_dqh *qh;

			/* nothing left: park the dQH (terminate bit set) */
			qh = ep->dqh;
			qh->next_dtd_ptr = 1;
			qh->size_ioc_int_sts = 0;
		}

	/* The request hasn't been processed, patch up the TD chain */
	} else {
		struct mv_req *prev_req;

		/*
		 * Splice this request out by copying its tail's next
		 * pointer into the previous request's tail.
		 * NOTE(review): readl/writel on dTD memory (coherent
		 * DMA, not MMIO) — presumably used for access ordering;
		 * confirm before restructuring.
		 */
		prev_req = list_entry(req->queue.prev, struct mv_req, queue);
		writel(readl(&req->tail->dtd_next),
			&prev_req->tail->dtd_next);

	}

	/* done() drops and re-takes udc->lock around the completion */
	done(ep, req, -ECONNRESET);

	/* Enable EP */
out:
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx |= EPCTRL_TX_ENABLE;
	else
		epctrlx |= EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	ep->stopped = stopped;

	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return ret;
}
898
899 static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
900 {
901 u32 epctrlx;
902
903 epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
904
905 if (stall) {
906 if (direction == EP_DIR_IN)
907 epctrlx |= EPCTRL_TX_EP_STALL;
908 else
909 epctrlx |= EPCTRL_RX_EP_STALL;
910 } else {
911 if (direction == EP_DIR_IN) {
912 epctrlx &= ~EPCTRL_TX_EP_STALL;
913 epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
914 } else {
915 epctrlx &= ~EPCTRL_RX_EP_STALL;
916 epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
917 }
918 }
919 writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
920 }
921
922 static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
923 {
924 u32 epctrlx;
925
926 epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
927
928 if (direction == EP_DIR_OUT)
929 return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
930 else
931 return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
932 }
933
934 static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
935 {
936 struct mv_ep *ep;
937 unsigned long flags = 0;
938 int status = 0;
939 struct mv_udc *udc;
940
941 ep = container_of(_ep, struct mv_ep, ep);
942 udc = ep->udc;
943 if (!_ep || !ep->ep.desc) {
944 status = -EINVAL;
945 goto out;
946 }
947
948 if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
949 status = -EOPNOTSUPP;
950 goto out;
951 }
952
953 /*
954 * Attempt to halt IN ep will fail if any transfer requests
955 * are still queue
956 */
957 if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
958 status = -EAGAIN;
959 goto out;
960 }
961
962 spin_lock_irqsave(&ep->udc->lock, flags);
963 ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
964 if (halt && wedge)
965 ep->wedge = 1;
966 else if (!halt)
967 ep->wedge = 0;
968 spin_unlock_irqrestore(&ep->udc->lock, flags);
969
970 if (ep->ep_num == 0) {
971 udc->ep0_state = WAIT_FOR_SETUP;
972 udc->ep0_dir = EP_DIR_OUT;
973 }
974 out:
975 return status;
976 }
977
/* usb_ep_ops.set_halt: (un)stall the endpoint without wedging it. */
static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
{
	return mv_ep_set_halt_wedge(_ep, halt, 0);
}
982
/* usb_ep_ops.set_wedge: stall the endpoint and wedge it. */
static int mv_ep_set_wedge(struct usb_ep *_ep)
{
	return mv_ep_set_halt_wedge(_ep, 1, 1);
}
987
/* endpoint operations table installed on every ep via ep->ep.ops */
static struct usb_ep_ops mv_ep_ops = {
	.enable = mv_ep_enable,
	.disable = mv_ep_disable,

	.alloc_request = mv_alloc_request,
	.free_request = mv_free_request,

	.queue = mv_ep_queue,
	.dequeue = mv_ep_dequeue,

	.set_wedge = mv_ep_set_wedge,
	.set_halt = mv_ep_set_halt,
	.fifo_flush = mv_ep_fifo_flush,	/* flush fifo */
};
1002
/*
 * Turn on the controller clock.
 * NOTE(review): clk_prepare_enable() can fail and its return value is
 * ignored here — confirm this is acceptable for this platform.
 */
static void udc_clock_enable(struct mv_udc *udc)
{
	clk_prepare_enable(udc->clk);
}
1007
/* Turn off the controller clock (counterpart of udc_clock_enable()). */
static void udc_clock_disable(struct mv_udc *udc)
{
	clk_disable_unprepare(udc->clk);
}
1012
1013 static void udc_stop(struct mv_udc *udc)
1014 {
1015 u32 tmp;
1016
1017 /* Disable interrupts */
1018 tmp = readl(&udc->op_regs->usbintr);
1019 tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
1020 USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
1021 writel(tmp, &udc->op_regs->usbintr);
1022
1023 udc->stopped = 1;
1024
1025 /* Reset the Run the bit in the command register to stop VUSB */
1026 tmp = readl(&udc->op_regs->usbcmd);
1027 tmp &= ~USBCMD_RUN_STOP;
1028 writel(tmp, &udc->op_regs->usbcmd);
1029 }
1030
1031 static void udc_start(struct mv_udc *udc)
1032 {
1033 u32 usbintr;
1034
1035 usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
1036 | USBINTR_PORT_CHANGE_DETECT_EN
1037 | USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
1038 /* Enable interrupts */
1039 writel(usbintr, &udc->op_regs->usbintr);
1040
1041 udc->stopped = 0;
1042
1043 /* Set the Run bit in the command register */
1044 writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
1045 }
1046
/*
 * udc_reset() - hard-reset the controller and restore device-mode
 * defaults: device mode with setup-lockout off, endpoint list address,
 * PORTSC speed forcing, and ep0 stall bits cleared.
 * Returns 0, or -ETIMEDOUT if the reset bit never self-clears.
 */
static int udc_reset(struct mv_udc *udc)
{
	unsigned int loops;
	u32 tmp, portsc;

	/* Stop the controller */
	tmp = readl(&udc->op_regs->usbcmd);
	tmp &= ~USBCMD_RUN_STOP;
	writel(tmp, &udc->op_regs->usbcmd);

	/* Reset the controller to get default values */
	writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);

	/* wait for reset to complete */
	loops = LOOPS(RESET_TIMEOUT);
	while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"Wait for RESET completed TIMEOUT\n");
			return -ETIMEDOUT;
		}
		loops--;
		udelay(LOOPS_USEC);
	}

	/* set controller to device mode */
	tmp = readl(&udc->op_regs->usbmode);
	tmp |= USBMODE_CTRL_MODE_DEVICE;

	/* turn setup lockout off, require setup tripwire in usbcmd */
	tmp |= USBMODE_SETUP_LOCK_OFF;

	writel(tmp, &udc->op_regs->usbmode);

	writel(0x0, &udc->op_regs->epsetupstat);

	/* Configure the Endpoint List Address */
	writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
		&udc->op_regs->eplistaddr);

	portsc = readl(&udc->op_regs->portsc[0]);
	if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
		/*
		 * NOTE(review): ~A | ~B == ~(A & B); if the two masks are
		 * disjoint this evaluates to all-ones and the &= is a
		 * no-op.  Possibly ~(PORTSCX_W1C_BITS | PORTSCX_PORT_POWER)
		 * was intended — confirm the hardware intent before
		 * changing, since "fixing" it would clear port power.
		 */
		portsc &= (~PORTSCX_W1C_BITS | ~PORTSCX_PORT_POWER);

	if (udc->force_fs)
		portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
	else
		portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);

	writel(portsc, &udc->op_regs->portsc[0]);

	/* clear any leftover stall state on ep0 */
	tmp = readl(&udc->op_regs->epctrlx[0]);
	tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
	writel(tmp, &udc->op_regs->epctrlx[0]);

	return 0;
}
1104
1105 static int mv_udc_enable_internal(struct mv_udc *udc)
1106 {
1107 int retval;
1108
1109 if (udc->active)
1110 return 0;
1111
1112 dev_dbg(&udc->dev->dev, "enable udc\n");
1113 udc_clock_enable(udc);
1114 if (udc->pdata->phy_init) {
1115 retval = udc->pdata->phy_init(udc->phy_regs);
1116 if (retval) {
1117 dev_err(&udc->dev->dev,
1118 "init phy error %d\n", retval);
1119 udc_clock_disable(udc);
1120 return retval;
1121 }
1122 }
1123 udc->active = 1;
1124
1125 return 0;
1126 }
1127
1128 static int mv_udc_enable(struct mv_udc *udc)
1129 {
1130 if (udc->clock_gating)
1131 return mv_udc_enable_internal(udc);
1132
1133 return 0;
1134 }
1135
1136 static void mv_udc_disable_internal(struct mv_udc *udc)
1137 {
1138 if (udc->active) {
1139 dev_dbg(&udc->dev->dev, "disable udc\n");
1140 if (udc->pdata->phy_deinit)
1141 udc->pdata->phy_deinit(udc->phy_regs);
1142 udc_clock_disable(udc);
1143 udc->active = 0;
1144 }
1145 }
1146
1147 static void mv_udc_disable(struct mv_udc *udc)
1148 {
1149 if (udc->clock_gating)
1150 mv_udc_disable_internal(udc);
1151 }
1152
1153 static int mv_udc_get_frame(struct usb_gadget *gadget)
1154 {
1155 struct mv_udc *udc;
1156 u16 retval;
1157
1158 if (!gadget)
1159 return -ENODEV;
1160
1161 udc = container_of(gadget, struct mv_udc, gadget);
1162
1163 retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;
1164
1165 return retval;
1166 }
1167
1168 /* Tries to wake up the host connected to this gadget */
1169 static int mv_udc_wakeup(struct usb_gadget *gadget)
1170 {
1171 struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
1172 u32 portsc;
1173
1174 /* Remote wakeup feature not enabled by host */
1175 if (!udc->remote_wakeup)
1176 return -ENOTSUPP;
1177
1178 portsc = readl(&udc->op_regs->portsc);
1179 /* not suspended? */
1180 if (!(portsc & PORTSCX_PORT_SUSPEND))
1181 return 0;
1182 /* trigger force resume */
1183 portsc |= PORTSCX_PORT_FORCE_RESUME;
1184 writel(portsc, &udc->op_regs->portsc[0]);
1185 return 0;
1186 }
1187
/*
 * usb_gadget_ops.vbus_session: called when VBUS presence changes.
 * With a bound driver and soft-connect asserted, VBUS-on re-enables
 * and re-initialises the controller; VBUS-off stops all activity and
 * powers the controller down.
 */
static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct mv_udc *udc;
	unsigned long flags;
	int retval = 0;

	udc = container_of(gadget, struct mv_udc, gadget);
	spin_lock_irqsave(&udc->lock, flags);

	udc->vbus_active = (is_active != 0);

	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
		__func__, udc->softconnect, udc->vbus_active);

	if (udc->driver && udc->softconnect && udc->vbus_active) {
		retval = mv_udc_enable(udc);
		if (retval == 0) {
			/* Clock is disabled, need re-init registers */
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	} else if (udc->driver && udc->softconnect) {
		/* VBUS went away; nothing to tear down if never enabled */
		if (!udc->active)
			goto out;

		/* stop all the transfer in queue*/
		stop_activity(udc, udc->driver);
		udc_stop(udc);
		mv_udc_disable(udc);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);
	return retval;
}
1224
/*
 * usb_gadget_ops.pullup: software connect/disconnect of the D+ pullup.
 * Mirrors mv_udc_vbus_session() with the softconnect flag as trigger.
 * NOTE(review): unlike vbus_session, the disconnect path here does not
 * check udc->active before stopping — confirm whether that asymmetry
 * is intentional.
 */
static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct mv_udc *udc;
	unsigned long flags;
	int retval = 0;

	udc = container_of(gadget, struct mv_udc, gadget);
	spin_lock_irqsave(&udc->lock, flags);

	udc->softconnect = (is_on != 0);

	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
		__func__, udc->softconnect, udc->vbus_active);

	if (udc->driver && udc->softconnect && udc->vbus_active) {
		retval = mv_udc_enable(udc);
		if (retval == 0) {
			/* Clock is disabled, need re-init registers */
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	} else if (udc->driver && udc->vbus_active) {
		/* stop all the transfer in queue*/
		stop_activity(udc, udc->driver);
		udc_stop(udc);
		mv_udc_disable(udc);
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return retval;
}
1257
1258 static int mv_udc_start(struct usb_gadget *, struct usb_gadget_driver *);
1259 static int mv_udc_stop(struct usb_gadget *);
/* device controller usb_gadget_ops structure */
static const struct usb_gadget_ops mv_ops = {

	/* returns the current frame number */
	.get_frame = mv_udc_get_frame,

	/* tries to wake up the host connected to this gadget */
	.wakeup = mv_udc_wakeup,

	/* notify controller that VBUS is powered or not */
	.vbus_session = mv_udc_vbus_session,

	/* D+ pullup, software-controlled connect/disconnect to USB host */
	.pullup = mv_udc_pullup,

	/* gadget-driver bind/unbind entry points */
	.udc_start = mv_udc_start,
	.udc_stop = mv_udc_stop,
};
1277
/*
 * eps_init() - populate the driver-side endpoint array.
 *
 * eps[0] is the bidirectional control endpoint ep0; eps[2..2*max_eps-1]
 * are the unidirectional endpoints (even index = OUT, odd = IN).
 * NOTE(review): index 1 is skipped because ep0 covers both directions;
 * confirm udc->eps was sized with that unused slot in mind.
 */
static int eps_init(struct mv_udc *udc)
{
	struct mv_ep *ep;
	char name[14];
	int i;

	/* initialize ep0 */
	ep = &udc->eps[0];
	ep->udc = udc;
	strncpy(ep->name, "ep0", sizeof(ep->name));
	ep->ep.name = ep->name;
	ep->ep.ops = &mv_ep_ops;
	ep->wedge = 0;
	ep->stopped = 0;
	usb_ep_set_maxpacket_limit(&ep->ep, EP0_MAX_PKT_SIZE);
	ep->ep_num = 0;
	ep->ep.desc = &mv_ep0_desc;
	INIT_LIST_HEAD(&ep->queue);

	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;

	/* initialize other endpoints */
	for (i = 2; i < udc->max_eps * 2; i++) {
		ep = &udc->eps[i];
		if (i % 2) {
			snprintf(name, sizeof(name), "ep%din", i / 2);
			ep->direction = EP_DIR_IN;
		} else {
			snprintf(name, sizeof(name), "ep%dout", i / 2);
			ep->direction = EP_DIR_OUT;
		}
		ep->udc = udc;
		strncpy(ep->name, name, sizeof(ep->name));
		ep->ep.name = ep->name;

		ep->ep.ops = &mv_ep_ops;
		ep->stopped = 0;
		/* real limit is programmed at ep_enable time */
		usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
		ep->ep_num = i / 2;

		INIT_LIST_HEAD(&ep->queue);
		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);

		/* per-endpoint hardware queue head; ep0's dqh is
		 * presumably assigned elsewhere — TODO confirm */
		ep->dqh = &udc->ep_dqh[i];
	}

	return 0;
}
1326
1327 /* delete all endpoint requests, called with spinlock held */
1328 static void nuke(struct mv_ep *ep, int status)
1329 {
1330 /* called with spinlock held */
1331 ep->stopped = 1;
1332
1333 /* endpoint fifo flush */
1334 mv_ep_fifo_flush(&ep->ep);
1335
1336 while (!list_empty(&ep->queue)) {
1337 struct mv_req *req = NULL;
1338 req = list_entry(ep->queue.next, struct mv_req, queue);
1339 done(ep, req, status);
1340 }
1341 }
1342
1343 static void gadget_reset(struct mv_udc *udc, struct usb_gadget_driver *driver)
1344 {
1345 struct mv_ep *ep;
1346
1347 nuke(&udc->eps[0], -ESHUTDOWN);
1348
1349 list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
1350 nuke(ep, -ESHUTDOWN);
1351 }
1352
1353 /* report reset; the driver is already quiesced */
1354 if (driver) {
1355 spin_unlock(&udc->lock);
1356 usb_gadget_udc_reset(&udc->gadget, driver);
1357 spin_lock(&udc->lock);
1358 }
1359 }
1360 /* stop all USB activities */
1361 static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
1362 {
1363 struct mv_ep *ep;
1364
1365 nuke(&udc->eps[0], -ESHUTDOWN);
1366
1367 list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
1368 nuke(ep, -ESHUTDOWN);
1369 }
1370
1371 /* report disconnect; the driver is already quiesced */
1372 if (driver) {
1373 spin_unlock(&udc->lock);
1374 driver->disconnect(&udc->gadget);
1375 spin_lock(&udc->lock);
1376 }
1377 }
1378
/*
 * usb_gadget_ops.udc_start: bind a gadget driver to this UDC.
 * Returns -EBUSY when a driver is already bound.  On OTG setups the
 * peripheral is also registered with the transceiver; failure there
 * rolls back the binding.
 */
static int mv_udc_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct mv_udc *udc;
	int retval = 0;
	unsigned long flags;

	udc = container_of(gadget, struct mv_udc, gadget);

	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

	/* hook up the driver ... */
	driver->driver.bus = NULL;
	udc->driver = driver;

	udc->usb_state = USB_STATE_ATTACHED;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = EP_DIR_OUT;

	spin_unlock_irqrestore(&udc->lock, flags);

	if (udc->transceiver) {
		retval = otg_set_peripheral(udc->transceiver->otg,
					&udc->gadget);
		if (retval) {
			dev_err(&udc->dev->dev,
				"unable to register peripheral to otg\n");
			/* undo the binding made above */
			udc->driver = NULL;
			return retval;
		}
	}

	/* When boot with cable attached, there will be no vbus irq occurred */
	if (udc->qwork)
		queue_work(udc->qwork, &udc->vbus_work);

	return 0;
}
1420
/*
 * usb_gadget_ops.udc_stop: unbind the gadget driver.  The controller
 * is (re-)enabled first so the udc_stop() register writes reach live
 * hardware, all activity is retired, then the controller is disabled.
 * NOTE(review): udc->driver is cleared after the lock is dropped —
 * presumably safe because the core serialises start/stop; confirm.
 */
static int mv_udc_stop(struct usb_gadget *gadget)
{
	struct mv_udc *udc;
	unsigned long flags;

	udc = container_of(gadget, struct mv_udc, gadget);

	spin_lock_irqsave(&udc->lock, flags);

	mv_udc_enable(udc);
	udc_stop(udc);

	/* stop all usb activities */
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	stop_activity(udc, NULL);
	mv_udc_disable(udc);

	spin_unlock_irqrestore(&udc->lock, flags);

	/* unbind gadget driver */
	udc->driver = NULL;

	return 0;
}
1445
1446 static void mv_set_ptc(struct mv_udc *udc, u32 mode)
1447 {
1448 u32 portsc;
1449
1450 portsc = readl(&udc->op_regs->portsc[0]);
1451 portsc |= mode << 16;
1452 writel(portsc, &udc->op_regs->portsc[0]);
1453 }
1454
1455 static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
1456 {
1457 struct mv_ep *mvep = container_of(ep, struct mv_ep, ep);
1458 struct mv_req *req = container_of(_req, struct mv_req, req);
1459 struct mv_udc *udc;
1460 unsigned long flags;
1461
1462 udc = mvep->udc;
1463
1464 dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);
1465
1466 spin_lock_irqsave(&udc->lock, flags);
1467 if (req->test_mode) {
1468 mv_set_ptc(udc, req->test_mode);
1469 req->test_mode = 0;
1470 }
1471 spin_unlock_irqrestore(&udc->lock, flags);
1472 }
1473
/*
 * udc_prime_status() - queue the ep0 status (or 2-byte data) stage.
 * @direction: EP_DIR_IN/OUT of the stage.
 * @status:    16-bit payload when @empty is false (e.g. GET_STATUS).
 * @empty:     true for a zero-length status packet.
 *
 * Reuses the preallocated udc->status_req.  When a test mode is
 * pending, the completion handler programs it after the stage is done.
 * NOTE(review): the mapping is done with dma_map_single() but the
 * error path unmaps with usb_gadget_unmap_request() — confirm the two
 * agree on req->mapped bookkeeping.
 */
static int
udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
{
	int retval = 0;
	struct mv_req *req;
	struct mv_ep *ep;

	ep = &udc->eps[0];
	udc->ep0_dir = direction;
	udc->ep0_state = WAIT_FOR_OUT_STATUS;

	req = udc->status_req;

	/* fill in the reqest structure */
	if (empty == false) {
		/* assumes req->req.buf holds at least 2 bytes — TODO confirm */
		*((u16 *) req->req.buf) = cpu_to_le16(status);
		req->req.length = 2;
	} else
		req->req.length = 0;

	req->ep = ep;
	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	if (udc->test_mode) {
		/* defer the PORTSC test-mode write to the completion */
		req->req.complete = prime_status_complete;
		req->test_mode = udc->test_mode;
		udc->test_mode = 0;
	} else
		req->req.complete = NULL;
	req->dtd_count = 0;

	if (req->req.dma == DMA_ADDR_INVALID) {
		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
				req->req.buf, req->req.length,
				ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = 1;
	}

	/* prime the data phase */
	if (!req_to_dtd(req)) {
		retval = queue_dtd(ep, req);
		if (retval) {
			dev_err(&udc->dev->dev,
				"Failed to queue dtd when prime status\n");
			goto out;
		}
	} else{	/* no mem */
		retval = -ENOMEM;
		dev_err(&udc->dev->dev,
			"Failed to dma_pool_alloc when prime status\n");
		goto out;
	}

	list_add_tail(&req->queue, &ep->queue);

	return 0;
out:
	usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));

	return retval;
}
1535
1536 static void mv_udc_testmode(struct mv_udc *udc, u16 index)
1537 {
1538 if (index <= TEST_FORCE_EN) {
1539 udc->test_mode = index;
1540 if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1541 ep0_stall(udc);
1542 } else
1543 dev_err(&udc->dev->dev,
1544 "This test mode(%d) is not supported\n", index);
1545 }
1546
1547 static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1548 {
1549 udc->dev_addr = (u8)setup->wValue;
1550
1551 /* update usb state */
1552 udc->usb_state = USB_STATE_ADDRESS;
1553
1554 if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1555 ep0_stall(udc);
1556 }
1557
1558 static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
1559 struct usb_ctrlrequest *setup)
1560 {
1561 u16 status = 0;
1562 int retval;
1563
1564 if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
1565 != (USB_DIR_IN | USB_TYPE_STANDARD))
1566 return;
1567
1568 if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
1569 status = 1 << USB_DEVICE_SELF_POWERED;
1570 status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
1571 } else if ((setup->bRequestType & USB_RECIP_MASK)
1572 == USB_RECIP_INTERFACE) {
1573 /* get interface status */
1574 status = 0;
1575 } else if ((setup->bRequestType & USB_RECIP_MASK)
1576 == USB_RECIP_ENDPOINT) {
1577 u8 ep_num, direction;
1578
1579 ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1580 direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1581 ? EP_DIR_IN : EP_DIR_OUT;
1582 status = ep_is_stall(udc, ep_num, direction)
1583 << USB_ENDPOINT_HALT;
1584 }
1585
1586 retval = udc_prime_status(udc, EP_DIR_IN, status, false);
1587 if (retval)
1588 ep0_stall(udc);
1589 else
1590 udc->ep0_state = DATA_STATE_XMIT;
1591 }
1592
/*
 * Handle the CLEAR_FEATURE standard request: clear device remote wakeup
 * or an endpoint halt, then queue the IN status stage.  Called with
 * udc->lock held; the lock is dropped around ep_set_stall().
 */
static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	u8 ep_num;
	u8 direction;
	struct mv_ep *ep;

	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
		switch (setup->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			udc->remote_wakeup = 0;
			break;
		default:
			goto out;
		}
	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
		switch (setup->wValue) {
		case USB_ENDPOINT_HALT:
			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
			/*
			 * NOTE(review): the wValue != 0 check looks
			 * unreachable inside this case arm if
			 * USB_ENDPOINT_HALT == 0 — confirm against the
			 * USB ch9 header.
			 */
			if (setup->wValue != 0 || setup->wLength != 0
				|| ep_num > udc->max_eps)
				goto out;
			ep = &udc->eps[ep_num * 2 + direction];
			/* wedged endpoints must stay halted */
			if (ep->wedge == 1)
				break;
			spin_unlock(&udc->lock);
			ep_set_stall(udc, ep_num, direction, 0);
			spin_lock(&udc->lock);
			break;
		default:
			goto out;
		}
	} else
		goto out;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
out:
	return;
}
1636
/*
 * Handle the SET_FEATURE standard request: set device remote wakeup,
 * enter a USB test mode, or halt an endpoint, then queue the IN status
 * stage.  Called with udc->lock held; the lock is dropped around
 * ep_set_stall().
 */
static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	u8 ep_num;
	u8 direction;

	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
		switch (setup->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			udc->remote_wakeup = 1;
			break;
		case USB_DEVICE_TEST_MODE:
			/* the test selector is in the high byte of wIndex */
			if (setup->wIndex & 0xFF
				|| udc->gadget.speed != USB_SPEED_HIGH)
				ep0_stall(udc);

			/*
			 * NOTE(review): execution continues into
			 * mv_udc_testmode() even after ep0_stall() above —
			 * confirm this is intended.
			 */
			if (udc->usb_state != USB_STATE_CONFIGURED
				&& udc->usb_state != USB_STATE_ADDRESS
				&& udc->usb_state != USB_STATE_DEFAULT)
				ep0_stall(udc);

			mv_udc_testmode(udc, (setup->wIndex >> 8));
			goto out;
		default:
			goto out;
		}
	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
		switch (setup->wValue) {
		case USB_ENDPOINT_HALT:
			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
			if (setup->wValue != 0 || setup->wLength != 0
				|| ep_num > udc->max_eps)
				goto out;
			spin_unlock(&udc->lock);
			ep_set_stall(udc, ep_num, direction, 1);
			spin_lock(&udc->lock);
			break;
		default:
			goto out;
		}
	} else
		goto out;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
out:
	return;
}
1688
/*
 * Dispatch an 8-byte setup packet received on @ep_num.  Standard
 * requests the controller must answer itself (GET_STATUS, SET_ADDRESS,
 * CLEAR/SET_FEATURE) are handled here; everything else is delegated to
 * the gadget driver's setup() callback with udc->lock temporarily
 * dropped (hence the sparse __releases/__acquires annotations).
 */
static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
	__releases(&ep->udc->lock)
	__acquires(&ep->udc->lock)
{
	bool delegate = false;

	/* a new setup transaction cancels anything still queued on ep0 OUT */
	nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);

	dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
			setup->bRequestType, setup->bRequest,
			setup->wValue, setup->wIndex, setup->wLength);
	/* We process some standard setup requests here */
	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup->bRequest) {
		case USB_REQ_GET_STATUS:
			ch9getstatus(udc, ep_num, setup);
			break;

		case USB_REQ_SET_ADDRESS:
			ch9setaddress(udc, setup);
			break;

		case USB_REQ_CLEAR_FEATURE:
			ch9clearfeature(udc, setup);
			break;

		case USB_REQ_SET_FEATURE:
			ch9setfeature(udc, setup);
			break;

		default:
			delegate = true;
		}
	} else
		delegate = true;

	/* delegate USB standard requests to the gadget driver */
	if (delegate == true) {
		/* USB requests handled by gadget */
		if (setup->wLength) {
			/* DATA phase from gadget, STATUS phase from udc */
			udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
					? EP_DIR_IN : EP_DIR_OUT;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
					? DATA_STATE_XMIT : DATA_STATE_RECV;
		} else {
			/* no DATA phase, IN STATUS phase from gadget */
			udc->ep0_dir = EP_DIR_IN;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = WAIT_FOR_OUT_STATUS;
		}
	}
}
1752
1753 /* complete DATA or STATUS phase of ep0 prime status phase if needed */
1754 static void ep0_req_complete(struct mv_udc *udc,
1755 struct mv_ep *ep0, struct mv_req *req)
1756 {
1757 u32 new_addr;
1758
1759 if (udc->usb_state == USB_STATE_ADDRESS) {
1760 /* set the new address */
1761 new_addr = (u32)udc->dev_addr;
1762 writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
1763 &udc->op_regs->deviceaddr);
1764 }
1765
1766 done(ep0, req, 0);
1767
1768 switch (udc->ep0_state) {
1769 case DATA_STATE_XMIT:
1770 /* receive status phase */
1771 if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
1772 ep0_stall(udc);
1773 break;
1774 case DATA_STATE_RECV:
1775 /* send status phase */
1776 if (udc_prime_status(udc, EP_DIR_IN, 0 , true))
1777 ep0_stall(udc);
1778 break;
1779 case WAIT_FOR_OUT_STATUS:
1780 udc->ep0_state = WAIT_FOR_SETUP;
1781 break;
1782 case WAIT_FOR_SETUP:
1783 dev_err(&udc->dev->dev, "unexpect ep0 packets\n");
1784 break;
1785 default:
1786 ep0_stall(udc);
1787 break;
1788 }
1789 }
1790
/*
 * Copy the 8-byte setup packet for @ep_num from its OUT dQH into
 * @buffer_ptr.  The "setup tripwire" protocol guards against the
 * controller overwriting the buffer mid-copy: set the tripwire, copy,
 * and if the controller cleared the tripwire during the copy a new
 * setup packet arrived, so re-read.
 */
static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
{
	u32 temp;
	struct mv_dqh *dqh;

	dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];

	/* Clear bit in ENDPTSETUPSTAT */
	writel((1 << ep_num), &udc->op_regs->epsetupstat);

	/* while a hazard exists when setup package arrives */
	do {
		/* Set Setup Tripwire */
		temp = readl(&udc->op_regs->usbcmd);
		writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);

		/* Copy the setup packet to local buffer */
		memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
	} while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));

	/* Clear Setup Tripwire */
	temp = readl(&udc->op_regs->usbcmd);
	writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
}
1815
/*
 * Handle the transfer-complete interrupt: first drain any pending setup
 * packets (time-critical), then walk every endpoint flagged in
 * ENDPTCOMPLETE and retire its finished requests.
 */
static void irq_process_tr_complete(struct mv_udc *udc)
{
	u32 tmp, bit_pos;
	int i, ep_num = 0, direction = 0;
	struct mv_ep	*curr_ep;
	struct mv_req *curr_req, *temp_req;
	int status;

	/*
	 * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
	 * because the setup packets are to be read ASAP
	 */

	/* Process all Setup packet received interrupts */
	tmp = readl(&udc->op_regs->epsetupstat);

	if (tmp) {
		for (i = 0; i < udc->max_eps; i++) {
			if (tmp & (1 << i)) {
				get_setup_data(udc, i,
					(u8 *)(&udc->local_setup_buff));
				handle_setup_packet(udc, i,
					&udc->local_setup_buff);
			}
		}
	}

	/* Don't clear the endpoint setup status register here.
	 * It is cleared as a setup packet is read out of the buffer
	 */

	/* Process non-setup transaction complete interrupts */
	tmp = readl(&udc->op_regs->epcomplete);

	if (!tmp)
		return;

	writel(tmp, &udc->op_regs->epcomplete);

	/* even i = OUT, odd i = IN; OUT bits are 15:0, IN bits 31:16 */
	for (i = 0; i < udc->max_eps * 2; i++) {
		ep_num = i >> 1;
		direction = i % 2;

		bit_pos = 1 << (ep_num + 16 * direction);

		if (!(bit_pos & tmp))
			continue;

		/* i == 1 is ep0 IN, which shares eps[0] with ep0 OUT */
		if (i == 1)
			curr_ep = &udc->eps[0];
		else
			curr_ep = &udc->eps[i];
		/* process the req queue until an uncomplete request */
		list_for_each_entry_safe(curr_req, temp_req,
			&curr_ep->queue, queue) {
			status = process_ep_req(udc, i, curr_req);
			if (status)
				break;

			/* write back status to req */
			curr_req->req.status = status;

			/* ep0 request completion */
			if (ep_num == 0) {
				ep0_req_complete(udc, curr_ep, curr_req);
				break;
			} else {
				done(curr_ep, curr_req, status);
			}
		}
	}
}
1888
/*
 * Handle the USB reset interrupt: clear the device address and all
 * pending setup/complete/prime state, then either treat it as a bus
 * reset (port still in reset) or fully re-initialize the controller.
 */
static void irq_process_reset(struct mv_udc *udc)
{
	u32 tmp;
	unsigned int loops;

	udc->ep0_dir = EP_DIR_OUT;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->remote_wakeup = 0;		/* default to 0 on reset */

	/* The address bits are past bit 25-31. Set the address */
	tmp = readl(&udc->op_regs->deviceaddr);
	tmp &= ~(USB_DEVICE_ADDRESS_MASK);
	writel(tmp, &udc->op_regs->deviceaddr);

	/* Clear all the setup token semaphores */
	tmp = readl(&udc->op_regs->epsetupstat);
	writel(tmp, &udc->op_regs->epsetupstat);

	/* Clear all the endpoint complete status bits */
	tmp = readl(&udc->op_regs->epcomplete);
	writel(tmp, &udc->op_regs->epcomplete);

	/* wait until all endptprime bits cleared */
	loops = LOOPS(PRIME_TIMEOUT);
	while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) {
		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"Timeout for ENDPTPRIME = 0x%x\n",
				readl(&udc->op_regs->epprime));
			break;
		}
		loops--;
		udelay(LOOPS_USEC);
	}

	/* Write 1s to the Flush register */
	writel((u32)~0, &udc->op_regs->epflush);

	if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
		dev_info(&udc->dev->dev, "usb bus reset\n");
		udc->usb_state = USB_STATE_DEFAULT;
		/* reset all the queues, stop all USB activities */
		gadget_reset(udc, udc->driver);
	} else {
		/*
		 * NOTE(review): this readl uses &...->portsc (the array),
		 * not &...->portsc[0] as elsewhere — equivalent address,
		 * but confirm the inconsistency is intentional.
		 */
		dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
			readl(&udc->op_regs->portsc));

		/*
		 * re-initialize
		 * controller reset
		 */
		udc_reset(udc);

		/* reset all the queues, stop all USB activities */
		stop_activity(udc, udc->driver);

		/* reset ep0 dQH and endptctrl */
		ep0_reset(udc);

		/* enable interrupt and set controller to run state */
		udc_start(udc);

		udc->usb_state = USB_STATE_ATTACHED;
	}
}
1954
1955 static void handle_bus_resume(struct mv_udc *udc)
1956 {
1957 udc->usb_state = udc->resume_state;
1958 udc->resume_state = 0;
1959
1960 /* report resume to the driver */
1961 if (udc->driver) {
1962 if (udc->driver->resume) {
1963 spin_unlock(&udc->lock);
1964 udc->driver->resume(&udc->gadget);
1965 spin_lock(&udc->lock);
1966 }
1967 }
1968 }
1969
1970 static void irq_process_suspend(struct mv_udc *udc)
1971 {
1972 udc->resume_state = udc->usb_state;
1973 udc->usb_state = USB_STATE_SUSPENDED;
1974
1975 if (udc->driver->suspend) {
1976 spin_unlock(&udc->lock);
1977 udc->driver->suspend(&udc->gadget);
1978 spin_lock(&udc->lock);
1979 }
1980 }
1981
/*
 * Handle the port-change interrupt: latch the negotiated bus speed and
 * track suspend/resume transitions signalled through PORTSC.
 */
static void irq_process_port_change(struct mv_udc *udc)
{
	u32 portsc;

	portsc = readl(&udc->op_regs->portsc[0]);
	if (!(portsc & PORTSCX_PORT_RESET)) {
		/* Get the speed */
		u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
		switch (speed) {
		case PORTSCX_PORT_SPEED_HIGH:
			udc->gadget.speed = USB_SPEED_HIGH;
			break;
		case PORTSCX_PORT_SPEED_FULL:
			udc->gadget.speed = USB_SPEED_FULL;
			break;
		case PORTSCX_PORT_SPEED_LOW:
			udc->gadget.speed = USB_SPEED_LOW;
			break;
		default:
			udc->gadget.speed = USB_SPEED_UNKNOWN;
			break;
		}
	}

	if (portsc & PORTSCX_PORT_SUSPEND) {
		udc->resume_state = udc->usb_state;
		udc->usb_state = USB_STATE_SUSPENDED;
		/*
		 * NOTE(review): udc->driver is dereferenced without a
		 * NULL check here, unlike handle_bus_resume() — confirm
		 * this IRQ cannot fire after the driver is unbound.
		 */
		if (udc->driver->suspend) {
			spin_unlock(&udc->lock);
			udc->driver->suspend(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	if (!(portsc & PORTSCX_PORT_SUSPEND)
		&& udc->usb_state == USB_STATE_SUSPENDED) {
		handle_bus_resume(udc);
	}

	if (!udc->resume_state)
		udc->usb_state = USB_STATE_DEFAULT;
}
2024
2025 static void irq_process_error(struct mv_udc *udc)
2026 {
2027 /* Increment the error count */
2028 udc->errors++;
2029 }
2030
/*
 * Top-level interrupt handler: mask USBSTS by the enabled interrupt
 * sources, acknowledge what fired, then dispatch in order: error,
 * reset, port change, transfer complete, suspend.
 */
static irqreturn_t mv_udc_irq(int irq, void *dev)
{
	struct mv_udc *udc = (struct mv_udc *)dev;
	u32 status, intr;

	/* Disable ISR when stopped bit is set */
	if (udc->stopped)
		return IRQ_NONE;

	spin_lock(&udc->lock);

	status = readl(&udc->op_regs->usbsts);
	intr = readl(&udc->op_regs->usbintr);
	/* only act on sources that are both pending and enabled */
	status &= intr;

	if (status == 0) {
		spin_unlock(&udc->lock);
		return IRQ_NONE;
	}

	/* Clear all the interrupts occurred */
	writel(status, &udc->op_regs->usbsts);

	if (status & USBSTS_ERR)
		irq_process_error(udc);

	if (status & USBSTS_RESET)
		irq_process_reset(udc);

	if (status & USBSTS_PORT_CHANGE)
		irq_process_port_change(udc);

	if (status & USBSTS_INT)
		irq_process_tr_complete(udc);

	if (status & USBSTS_SUSPEND)
		irq_process_suspend(udc);

	spin_unlock(&udc->lock);

	return IRQ_HANDLED;
}
2073
2074 static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)
2075 {
2076 struct mv_udc *udc = (struct mv_udc *)dev;
2077
2078 /* polling VBUS and init phy may cause too much time*/
2079 if (udc->qwork)
2080 queue_work(udc->qwork, &udc->vbus_work);
2081
2082 return IRQ_HANDLED;
2083 }
2084
2085 static void mv_udc_vbus_work(struct work_struct *work)
2086 {
2087 struct mv_udc *udc;
2088 unsigned int vbus;
2089
2090 udc = container_of(work, struct mv_udc, vbus_work);
2091 if (!udc->pdata->vbus)
2092 return;
2093
2094 vbus = udc->pdata->vbus->poll();
2095 dev_info(&udc->dev->dev, "vbus is %d\n", vbus);
2096
2097 if (vbus == VBUS_HIGH)
2098 mv_udc_vbus_session(&udc->gadget, 1);
2099 else if (vbus == VBUS_LOW)
2100 mv_udc_vbus_session(&udc->gadget, 0);
2101 }
2102
2103 /* release device structure */
2104 static void gadget_release(struct device *_dev)
2105 {
2106 struct mv_udc *udc;
2107
2108 udc = dev_get_drvdata(_dev);
2109
2110 complete(udc->done);
2111 }
2112
2113 static int mv_udc_remove(struct platform_device *pdev)
2114 {
2115 struct mv_udc *udc;
2116
2117 udc = platform_get_drvdata(pdev);
2118
2119 usb_del_gadget_udc(&udc->gadget);
2120
2121 if (udc->qwork) {
2122 flush_workqueue(udc->qwork);
2123 destroy_workqueue(udc->qwork);
2124 }
2125
2126 /* free memory allocated in probe */
2127 if (udc->dtd_pool)
2128 dma_pool_destroy(udc->dtd_pool);
2129
2130 if (udc->ep_dqh)
2131 dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
2132 udc->ep_dqh, udc->ep_dqh_dma);
2133
2134 mv_udc_disable(udc);
2135
2136 /* free dev, wait for the release() finished */
2137 wait_for_completion(udc->done);
2138
2139 return 0;
2140 }
2141
2142 static int mv_udc_probe(struct platform_device *pdev)
2143 {
2144 struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
2145 struct mv_udc *udc;
2146 int retval = 0;
2147 struct resource *r;
2148 size_t size;
2149
2150 if (pdata == NULL) {
2151 dev_err(&pdev->dev, "missing platform_data\n");
2152 return -ENODEV;
2153 }
2154
2155 udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
2156 if (udc == NULL)
2157 return -ENOMEM;
2158
2159 udc->done = &release_done;
2160 udc->pdata = dev_get_platdata(&pdev->dev);
2161 spin_lock_init(&udc->lock);
2162
2163 udc->dev = pdev;
2164
2165 if (pdata->mode == MV_USB_MODE_OTG) {
2166 udc->transceiver = devm_usb_get_phy(&pdev->dev,
2167 USB_PHY_TYPE_USB2);
2168 if (IS_ERR(udc->transceiver)) {
2169 retval = PTR_ERR(udc->transceiver);
2170
2171 if (retval == -ENXIO)
2172 return retval;
2173
2174 udc->transceiver = NULL;
2175 return -EPROBE_DEFER;
2176 }
2177 }
2178
2179 /* udc only have one sysclk. */
2180 udc->clk = devm_clk_get(&pdev->dev, NULL);
2181 if (IS_ERR(udc->clk))
2182 return PTR_ERR(udc->clk);
2183
2184 r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
2185 if (r == NULL) {
2186 dev_err(&pdev->dev, "no I/O memory resource defined\n");
2187 return -ENODEV;
2188 }
2189
2190 udc->cap_regs = (struct mv_cap_regs __iomem *)
2191 devm_ioremap(&pdev->dev, r->start, resource_size(r));
2192 if (udc->cap_regs == NULL) {
2193 dev_err(&pdev->dev, "failed to map I/O memory\n");
2194 return -EBUSY;
2195 }
2196
2197 r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
2198 if (r == NULL) {
2199 dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
2200 return -ENODEV;
2201 }
2202
2203 udc->phy_regs = ioremap(r->start, resource_size(r));
2204 if (udc->phy_regs == NULL) {
2205 dev_err(&pdev->dev, "failed to map phy I/O memory\n");
2206 return -EBUSY;
2207 }
2208
2209 /* we will acces controller register, so enable the clk */
2210 retval = mv_udc_enable_internal(udc);
2211 if (retval)
2212 return retval;
2213
2214 udc->op_regs =
2215 (struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
2216 + (readl(&udc->cap_regs->caplength_hciversion)
2217 & CAPLENGTH_MASK));
2218 udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;
2219
2220 /*
2221 * some platform will use usb to download image, it may not disconnect
2222 * usb gadget before loading kernel. So first stop udc here.
2223 */
2224 udc_stop(udc);
2225 writel(0xFFFFFFFF, &udc->op_regs->usbsts);
2226
2227 size = udc->max_eps * sizeof(struct mv_dqh) *2;
2228 size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
2229 udc->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
2230 &udc->ep_dqh_dma, GFP_KERNEL);
2231
2232 if (udc->ep_dqh == NULL) {
2233 dev_err(&pdev->dev, "allocate dQH memory failed\n");
2234 retval = -ENOMEM;
2235 goto err_disable_clock;
2236 }
2237 udc->ep_dqh_size = size;
2238
2239 /* create dTD dma_pool resource */
2240 udc->dtd_pool = dma_pool_create("mv_dtd",
2241 &pdev->dev,
2242 sizeof(struct mv_dtd),
2243 DTD_ALIGNMENT,
2244 DMA_BOUNDARY);
2245
2246 if (!udc->dtd_pool) {
2247 retval = -ENOMEM;
2248 goto err_free_dma;
2249 }
2250
2251 size = udc->max_eps * sizeof(struct mv_ep) *2;
2252 udc->eps = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
2253 if (udc->eps == NULL) {
2254 retval = -ENOMEM;
2255 goto err_destroy_dma;
2256 }
2257
2258 /* initialize ep0 status request structure */
2259 udc->status_req = devm_kzalloc(&pdev->dev, sizeof(struct mv_req),
2260 GFP_KERNEL);
2261 if (!udc->status_req) {
2262 retval = -ENOMEM;
2263 goto err_destroy_dma;
2264 }
2265 INIT_LIST_HEAD(&udc->status_req->queue);
2266
2267 /* allocate a small amount of memory to get valid address */
2268 udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
2269 udc->status_req->req.dma = DMA_ADDR_INVALID;
2270
2271 udc->resume_state = USB_STATE_NOTATTACHED;
2272 udc->usb_state = USB_STATE_POWERED;
2273 udc->ep0_dir = EP_DIR_OUT;
2274 udc->remote_wakeup = 0;
2275
2276 r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
2277 if (r == NULL) {
2278 dev_err(&pdev->dev, "no IRQ resource defined\n");
2279 retval = -ENODEV;
2280 goto err_destroy_dma;
2281 }
2282 udc->irq = r->start;
2283 if (devm_request_irq(&pdev->dev, udc->irq, mv_udc_irq,
2284 IRQF_SHARED, driver_name, udc)) {
2285 dev_err(&pdev->dev, "Request irq %d for UDC failed\n",
2286 udc->irq);
2287 retval = -ENODEV;
2288 goto err_destroy_dma;
2289 }
2290
2291 /* initialize gadget structure */
2292 udc->gadget.ops = &mv_ops; /* usb_gadget_ops */
2293 udc->gadget.ep0 = &udc->eps[0].ep; /* gadget ep0 */
2294 INIT_LIST_HEAD(&udc->gadget.ep_list); /* ep_list */
2295 udc->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
2296 udc->gadget.max_speed = USB_SPEED_HIGH; /* support dual speed */
2297
2298 /* the "gadget" abstracts/virtualizes the controller */
2299 udc->gadget.name = driver_name; /* gadget name */
2300
2301 eps_init(udc);
2302
2303 /* VBUS detect: we can disable/enable clock on demand.*/
2304 if (udc->transceiver)
2305 udc->clock_gating = 1;
2306 else if (pdata->vbus) {
2307 udc->clock_gating = 1;
2308 retval = devm_request_threaded_irq(&pdev->dev,
2309 pdata->vbus->irq, NULL,
2310 mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
2311 if (retval) {
2312 dev_info(&pdev->dev,
2313 "Can not request irq for VBUS, "
2314 "disable clock gating\n");
2315 udc->clock_gating = 0;
2316 }
2317
2318 udc->qwork = create_singlethread_workqueue("mv_udc_queue");
2319 if (!udc->qwork) {
2320 dev_err(&pdev->dev, "cannot create workqueue\n");
2321 retval = -ENOMEM;
2322 goto err_destroy_dma;
2323 }
2324
2325 INIT_WORK(&udc->vbus_work, mv_udc_vbus_work);
2326 }
2327
2328 /*
2329 * When clock gating is supported, we can disable clk and phy.
2330 * If not, it means that VBUS detection is not supported, we
2331 * have to enable vbus active all the time to let controller work.
2332 */
2333 if (udc->clock_gating)
2334 mv_udc_disable_internal(udc);
2335 else
2336 udc->vbus_active = 1;
2337
2338 retval = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
2339 gadget_release);
2340 if (retval)
2341 goto err_create_workqueue;
2342
2343 platform_set_drvdata(pdev, udc);
2344 dev_info(&pdev->dev, "successful probe UDC device %s clock gating.\n",
2345 udc->clock_gating ? "with" : "without");
2346
2347 return 0;
2348
2349 err_create_workqueue:
2350 destroy_workqueue(udc->qwork);
2351 err_destroy_dma:
2352 dma_pool_destroy(udc->dtd_pool);
2353 err_free_dma:
2354 dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
2355 udc->ep_dqh, udc->ep_dqh_dma);
2356 err_disable_clock:
2357 mv_udc_disable_internal(udc);
2358
2359 return retval;
2360 }
2361
2362 #ifdef CONFIG_PM
2363 static int mv_udc_suspend(struct device *dev)
2364 {
2365 struct mv_udc *udc;
2366
2367 udc = dev_get_drvdata(dev);
2368
2369 /* if OTG is enabled, the following will be done in OTG driver*/
2370 if (udc->transceiver)
2371 return 0;
2372
2373 if (udc->pdata->vbus && udc->pdata->vbus->poll)
2374 if (udc->pdata->vbus->poll() == VBUS_HIGH) {
2375 dev_info(&udc->dev->dev, "USB cable is connected!\n");
2376 return -EAGAIN;
2377 }
2378
2379 /*
2380 * only cable is unplugged, udc can suspend.
2381 * So do not care about clock_gating == 1.
2382 */
2383 if (!udc->clock_gating) {
2384 udc_stop(udc);
2385
2386 spin_lock_irq(&udc->lock);
2387 /* stop all usb activities */
2388 stop_activity(udc, udc->driver);
2389 spin_unlock_irq(&udc->lock);
2390
2391 mv_udc_disable_internal(udc);
2392 }
2393
2394 return 0;
2395 }
2396
2397 static int mv_udc_resume(struct device *dev)
2398 {
2399 struct mv_udc *udc;
2400 int retval;
2401
2402 udc = dev_get_drvdata(dev);
2403
2404 /* if OTG is enabled, the following will be done in OTG driver*/
2405 if (udc->transceiver)
2406 return 0;
2407
2408 if (!udc->clock_gating) {
2409 retval = mv_udc_enable_internal(udc);
2410 if (retval)
2411 return retval;
2412
2413 if (udc->driver && udc->softconnect) {
2414 udc_reset(udc);
2415 ep0_reset(udc);
2416 udc_start(udc);
2417 }
2418 }
2419
2420 return 0;
2421 }
2422
/* system sleep hooks; only compiled in when CONFIG_PM is set */
static const struct dev_pm_ops mv_udc_pm_ops = {
	.suspend	= mv_udc_suspend,
	.resume		= mv_udc_resume,
};
2427 #endif
2428
2429 static void mv_udc_shutdown(struct platform_device *pdev)
2430 {
2431 struct mv_udc *udc;
2432 u32 mode;
2433
2434 udc = platform_get_drvdata(pdev);
2435 /* reset controller mode to IDLE */
2436 mv_udc_enable(udc);
2437 mode = readl(&udc->op_regs->usbmode);
2438 mode &= ~3;
2439 writel(mode, &udc->op_regs->usbmode);
2440 mv_udc_disable(udc);
2441 }
2442
/* platform driver glue; PM callbacks only when CONFIG_PM is enabled */
static struct platform_driver udc_driver = {
	.probe		= mv_udc_probe,
	.remove		= mv_udc_remove,
	.shutdown	= mv_udc_shutdown,
	.driver		= {
		.name	= "mv-udc",
#ifdef CONFIG_PM
		.pm	= &mv_udc_pm_ops,
#endif
	},
};
2454
2455 module_platform_driver(udc_driver);
2456 MODULE_ALIAS("platform:mv-udc");
2457 MODULE_DESCRIPTION(DRIVER_DESC);
2458 MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
2459 MODULE_VERSION(DRIVER_VERSION);
2460 MODULE_LICENSE("GPL");
2461
2462
2463
2464
2465
2466 /* LDV_COMMENT_BEGIN_MAIN */
2467 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
2468
2469 /*###########################################################################*/
2470
2471 /*############## Driver Environment Generator 0.2 output ####################*/
2472
2473 /*###########################################################################*/
2474
2475
2476
2477 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
2478 void ldv_check_final_state(void);
2479
2480 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
2481 void ldv_check_return_value(int res);
2482
2483 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
2484 void ldv_check_return_value_probe(int res);
2485
2486 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
2487 void ldv_initialize(void);
2488
2489 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
2490 void ldv_handler_precall(void);
2491
2492 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */
2493 int nondet_int(void);
2494
2495 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
2496 int LDV_IN_INTERRUPT;
2497
2498 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
2499 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
2500
2501
2502
2503 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
2504 /*============================= VARIABLE DECLARATION PART =============================*/
2505 /** STRUCT: struct type: usb_ep_ops, struct name: mv_ep_ops **/
2506 /* content: static int mv_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)*/
2507 /* LDV_COMMENT_BEGIN_PREP */
2508 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2509 #define DRIVER_VERSION "8 Nov 2010"
2510 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2511 ((ep)->udc->ep0_dir) : ((ep)->direction))
2512 #define RESET_TIMEOUT 10000
2513 #define FLUSH_TIMEOUT 10000
2514 #define EPSTATUS_TIMEOUT 10000
2515 #define PRIME_TIMEOUT 10000
2516 #define READSAFE_TIMEOUT 1000
2517 #define LOOPS_USEC_SHIFT 1
2518 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2519 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2520 /* LDV_COMMENT_END_PREP */
2521 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_ep_enable" */
2522 struct usb_ep * var_group1;
2523 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_ep_enable" */
2524 const struct usb_endpoint_descriptor * var_mv_ep_enable_6_p1;
2525 /* LDV_COMMENT_BEGIN_PREP */
2526 #ifdef CONFIG_PM
2527 #endif
2528 #ifdef CONFIG_PM
2529 #endif
2530 /* LDV_COMMENT_END_PREP */
2531 /* content: static int mv_ep_disable(struct usb_ep *_ep)*/
2532 /* LDV_COMMENT_BEGIN_PREP */
2533 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2534 #define DRIVER_VERSION "8 Nov 2010"
2535 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2536 ((ep)->udc->ep0_dir) : ((ep)->direction))
2537 #define RESET_TIMEOUT 10000
2538 #define FLUSH_TIMEOUT 10000
2539 #define EPSTATUS_TIMEOUT 10000
2540 #define PRIME_TIMEOUT 10000
2541 #define READSAFE_TIMEOUT 1000
2542 #define LOOPS_USEC_SHIFT 1
2543 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2544 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2545 /* LDV_COMMENT_END_PREP */
2546 /* LDV_COMMENT_BEGIN_PREP */
2547 #ifdef CONFIG_PM
2548 #endif
2549 #ifdef CONFIG_PM
2550 #endif
2551 /* LDV_COMMENT_END_PREP */
2552 /* content: static struct usb_request * mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)*/
2553 /* LDV_COMMENT_BEGIN_PREP */
2554 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2555 #define DRIVER_VERSION "8 Nov 2010"
2556 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2557 ((ep)->udc->ep0_dir) : ((ep)->direction))
2558 #define RESET_TIMEOUT 10000
2559 #define FLUSH_TIMEOUT 10000
2560 #define EPSTATUS_TIMEOUT 10000
2561 #define PRIME_TIMEOUT 10000
2562 #define READSAFE_TIMEOUT 1000
2563 #define LOOPS_USEC_SHIFT 1
2564 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2565 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2566 /* LDV_COMMENT_END_PREP */
2567 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_alloc_request" */
2568 gfp_t var_mv_alloc_request_8_p1;
2569 /* LDV_COMMENT_BEGIN_PREP */
2570 #ifdef CONFIG_PM
2571 #endif
2572 #ifdef CONFIG_PM
2573 #endif
2574 /* LDV_COMMENT_END_PREP */
2575 /* content: static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)*/
2576 /* LDV_COMMENT_BEGIN_PREP */
2577 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2578 #define DRIVER_VERSION "8 Nov 2010"
2579 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2580 ((ep)->udc->ep0_dir) : ((ep)->direction))
2581 #define RESET_TIMEOUT 10000
2582 #define FLUSH_TIMEOUT 10000
2583 #define EPSTATUS_TIMEOUT 10000
2584 #define PRIME_TIMEOUT 10000
2585 #define READSAFE_TIMEOUT 1000
2586 #define LOOPS_USEC_SHIFT 1
2587 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2588 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2589 /* LDV_COMMENT_END_PREP */
2590 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_free_request" */
2591 struct usb_request * var_group2;
2592 /* LDV_COMMENT_BEGIN_PREP */
2593 #ifdef CONFIG_PM
2594 #endif
2595 #ifdef CONFIG_PM
2596 #endif
2597 /* LDV_COMMENT_END_PREP */
2598 /* content: static int mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)*/
2599 /* LDV_COMMENT_BEGIN_PREP */
2600 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2601 #define DRIVER_VERSION "8 Nov 2010"
2602 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2603 ((ep)->udc->ep0_dir) : ((ep)->direction))
2604 #define RESET_TIMEOUT 10000
2605 #define FLUSH_TIMEOUT 10000
2606 #define EPSTATUS_TIMEOUT 10000
2607 #define PRIME_TIMEOUT 10000
2608 #define READSAFE_TIMEOUT 1000
2609 #define LOOPS_USEC_SHIFT 1
2610 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2611 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2612 /* LDV_COMMENT_END_PREP */
2613 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_ep_queue" */
2614 gfp_t var_mv_ep_queue_11_p2;
2615 /* LDV_COMMENT_BEGIN_PREP */
2616 #ifdef CONFIG_PM
2617 #endif
2618 #ifdef CONFIG_PM
2619 #endif
2620 /* LDV_COMMENT_END_PREP */
2621 /* content: static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)*/
2622 /* LDV_COMMENT_BEGIN_PREP */
2623 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2624 #define DRIVER_VERSION "8 Nov 2010"
2625 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2626 ((ep)->udc->ep0_dir) : ((ep)->direction))
2627 #define RESET_TIMEOUT 10000
2628 #define FLUSH_TIMEOUT 10000
2629 #define EPSTATUS_TIMEOUT 10000
2630 #define PRIME_TIMEOUT 10000
2631 #define READSAFE_TIMEOUT 1000
2632 #define LOOPS_USEC_SHIFT 1
2633 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2634 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2635 /* LDV_COMMENT_END_PREP */
2636 /* LDV_COMMENT_BEGIN_PREP */
2637 #ifdef CONFIG_PM
2638 #endif
2639 #ifdef CONFIG_PM
2640 #endif
2641 /* LDV_COMMENT_END_PREP */
2642 /* content: static int mv_ep_set_wedge(struct usb_ep *_ep)*/
2643 /* LDV_COMMENT_BEGIN_PREP */
2644 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2645 #define DRIVER_VERSION "8 Nov 2010"
2646 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2647 ((ep)->udc->ep0_dir) : ((ep)->direction))
2648 #define RESET_TIMEOUT 10000
2649 #define FLUSH_TIMEOUT 10000
2650 #define EPSTATUS_TIMEOUT 10000
2651 #define PRIME_TIMEOUT 10000
2652 #define READSAFE_TIMEOUT 1000
2653 #define LOOPS_USEC_SHIFT 1
2654 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2655 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2656 /* LDV_COMMENT_END_PREP */
2657 /* LDV_COMMENT_BEGIN_PREP */
2658 #ifdef CONFIG_PM
2659 #endif
2660 #ifdef CONFIG_PM
2661 #endif
2662 /* LDV_COMMENT_END_PREP */
2663 /* content: static int mv_ep_set_halt(struct usb_ep *_ep, int halt)*/
2664 /* LDV_COMMENT_BEGIN_PREP */
2665 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2666 #define DRIVER_VERSION "8 Nov 2010"
2667 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2668 ((ep)->udc->ep0_dir) : ((ep)->direction))
2669 #define RESET_TIMEOUT 10000
2670 #define FLUSH_TIMEOUT 10000
2671 #define EPSTATUS_TIMEOUT 10000
2672 #define PRIME_TIMEOUT 10000
2673 #define READSAFE_TIMEOUT 1000
2674 #define LOOPS_USEC_SHIFT 1
2675 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2676 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2677 /* LDV_COMMENT_END_PREP */
2678 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_ep_set_halt" */
2679 int var_mv_ep_set_halt_17_p1;
2680 /* LDV_COMMENT_BEGIN_PREP */
2681 #ifdef CONFIG_PM
2682 #endif
2683 #ifdef CONFIG_PM
2684 #endif
2685 /* LDV_COMMENT_END_PREP */
2686 /* content: static void mv_ep_fifo_flush(struct usb_ep *_ep)*/
2687 /* LDV_COMMENT_BEGIN_PREP */
2688 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2689 #define DRIVER_VERSION "8 Nov 2010"
2690 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2691 ((ep)->udc->ep0_dir) : ((ep)->direction))
2692 #define RESET_TIMEOUT 10000
2693 #define FLUSH_TIMEOUT 10000
2694 #define EPSTATUS_TIMEOUT 10000
2695 #define PRIME_TIMEOUT 10000
2696 #define READSAFE_TIMEOUT 1000
2697 #define LOOPS_USEC_SHIFT 1
2698 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2699 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2700 /* LDV_COMMENT_END_PREP */
2701 /* LDV_COMMENT_BEGIN_PREP */
2702 #ifdef CONFIG_PM
2703 #endif
2704 #ifdef CONFIG_PM
2705 #endif
2706 /* LDV_COMMENT_END_PREP */
2707
2708 /** STRUCT: struct type: usb_gadget_ops, struct name: mv_ops **/
2709 /* content: static int mv_udc_get_frame(struct usb_gadget *gadget)*/
2710 /* LDV_COMMENT_BEGIN_PREP */
2711 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2712 #define DRIVER_VERSION "8 Nov 2010"
2713 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2714 ((ep)->udc->ep0_dir) : ((ep)->direction))
2715 #define RESET_TIMEOUT 10000
2716 #define FLUSH_TIMEOUT 10000
2717 #define EPSTATUS_TIMEOUT 10000
2718 #define PRIME_TIMEOUT 10000
2719 #define READSAFE_TIMEOUT 1000
2720 #define LOOPS_USEC_SHIFT 1
2721 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2722 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2723 /* LDV_COMMENT_END_PREP */
2724 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_udc_get_frame" */
2725 struct usb_gadget * var_group3;
2726 /* LDV_COMMENT_BEGIN_PREP */
2727 #ifdef CONFIG_PM
2728 #endif
2729 #ifdef CONFIG_PM
2730 #endif
2731 /* LDV_COMMENT_END_PREP */
2732 /* content: static int mv_udc_wakeup(struct usb_gadget *gadget)*/
2733 /* LDV_COMMENT_BEGIN_PREP */
2734 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2735 #define DRIVER_VERSION "8 Nov 2010"
2736 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2737 ((ep)->udc->ep0_dir) : ((ep)->direction))
2738 #define RESET_TIMEOUT 10000
2739 #define FLUSH_TIMEOUT 10000
2740 #define EPSTATUS_TIMEOUT 10000
2741 #define PRIME_TIMEOUT 10000
2742 #define READSAFE_TIMEOUT 1000
2743 #define LOOPS_USEC_SHIFT 1
2744 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2745 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2746 /* LDV_COMMENT_END_PREP */
2747 /* LDV_COMMENT_BEGIN_PREP */
2748 #ifdef CONFIG_PM
2749 #endif
2750 #ifdef CONFIG_PM
2751 #endif
2752 /* LDV_COMMENT_END_PREP */
2753 /* content: static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)*/
2754 /* LDV_COMMENT_BEGIN_PREP */
2755 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2756 #define DRIVER_VERSION "8 Nov 2010"
2757 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2758 ((ep)->udc->ep0_dir) : ((ep)->direction))
2759 #define RESET_TIMEOUT 10000
2760 #define FLUSH_TIMEOUT 10000
2761 #define EPSTATUS_TIMEOUT 10000
2762 #define PRIME_TIMEOUT 10000
2763 #define READSAFE_TIMEOUT 1000
2764 #define LOOPS_USEC_SHIFT 1
2765 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2766 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2767 /* LDV_COMMENT_END_PREP */
2768 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_udc_vbus_session" */
2769 int var_mv_udc_vbus_session_30_p1;
2770 /* LDV_COMMENT_BEGIN_PREP */
2771 #ifdef CONFIG_PM
2772 #endif
2773 #ifdef CONFIG_PM
2774 #endif
2775 /* LDV_COMMENT_END_PREP */
2776 /* content: static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)*/
2777 /* LDV_COMMENT_BEGIN_PREP */
2778 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2779 #define DRIVER_VERSION "8 Nov 2010"
2780 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2781 ((ep)->udc->ep0_dir) : ((ep)->direction))
2782 #define RESET_TIMEOUT 10000
2783 #define FLUSH_TIMEOUT 10000
2784 #define EPSTATUS_TIMEOUT 10000
2785 #define PRIME_TIMEOUT 10000
2786 #define READSAFE_TIMEOUT 1000
2787 #define LOOPS_USEC_SHIFT 1
2788 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2789 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2790 /* LDV_COMMENT_END_PREP */
2791 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_udc_pullup" */
2792 int var_mv_udc_pullup_31_p1;
2793 /* LDV_COMMENT_BEGIN_PREP */
2794 #ifdef CONFIG_PM
2795 #endif
2796 #ifdef CONFIG_PM
2797 #endif
2798 /* LDV_COMMENT_END_PREP */
2799 /* content: static int mv_udc_start(struct usb_gadget *gadget, struct usb_gadget_driver *driver)*/
2800 /* LDV_COMMENT_BEGIN_PREP */
2801 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2802 #define DRIVER_VERSION "8 Nov 2010"
2803 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2804 ((ep)->udc->ep0_dir) : ((ep)->direction))
2805 #define RESET_TIMEOUT 10000
2806 #define FLUSH_TIMEOUT 10000
2807 #define EPSTATUS_TIMEOUT 10000
2808 #define PRIME_TIMEOUT 10000
2809 #define READSAFE_TIMEOUT 1000
2810 #define LOOPS_USEC_SHIFT 1
2811 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2812 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2813 /* LDV_COMMENT_END_PREP */
2814 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_udc_start" */
2815 struct usb_gadget_driver * var_group4;
2816 /* LDV_COMMENT_BEGIN_PREP */
2817 #ifdef CONFIG_PM
2818 #endif
2819 #ifdef CONFIG_PM
2820 #endif
2821 /* LDV_COMMENT_END_PREP */
2822 /* content: static int mv_udc_stop(struct usb_gadget *gadget)*/
2823 /* LDV_COMMENT_BEGIN_PREP */
2824 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2825 #define DRIVER_VERSION "8 Nov 2010"
2826 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2827 ((ep)->udc->ep0_dir) : ((ep)->direction))
2828 #define RESET_TIMEOUT 10000
2829 #define FLUSH_TIMEOUT 10000
2830 #define EPSTATUS_TIMEOUT 10000
2831 #define PRIME_TIMEOUT 10000
2832 #define READSAFE_TIMEOUT 1000
2833 #define LOOPS_USEC_SHIFT 1
2834 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2835 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2836 /* LDV_COMMENT_END_PREP */
2837 /* LDV_COMMENT_BEGIN_PREP */
2838 #ifdef CONFIG_PM
2839 #endif
2840 #ifdef CONFIG_PM
2841 #endif
2842 /* LDV_COMMENT_END_PREP */
2843
2844 /** STRUCT: struct type: dev_pm_ops, struct name: mv_udc_pm_ops **/
2845 /* content: static int mv_udc_suspend(struct device *dev)*/
2846 /* LDV_COMMENT_BEGIN_PREP */
2847 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2848 #define DRIVER_VERSION "8 Nov 2010"
2849 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2850 ((ep)->udc->ep0_dir) : ((ep)->direction))
2851 #define RESET_TIMEOUT 10000
2852 #define FLUSH_TIMEOUT 10000
2853 #define EPSTATUS_TIMEOUT 10000
2854 #define PRIME_TIMEOUT 10000
2855 #define READSAFE_TIMEOUT 1000
2856 #define LOOPS_USEC_SHIFT 1
2857 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2858 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2859 #ifdef CONFIG_PM
2860 /* LDV_COMMENT_END_PREP */
2861 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_udc_suspend" */
2862 struct device * var_group5;
2863 /* LDV_COMMENT_BEGIN_PREP */
2864 #endif
2865 #ifdef CONFIG_PM
2866 #endif
2867 /* LDV_COMMENT_END_PREP */
2868 /* content: static int mv_udc_resume(struct device *dev)*/
2869 /* LDV_COMMENT_BEGIN_PREP */
2870 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2871 #define DRIVER_VERSION "8 Nov 2010"
2872 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2873 ((ep)->udc->ep0_dir) : ((ep)->direction))
2874 #define RESET_TIMEOUT 10000
2875 #define FLUSH_TIMEOUT 10000
2876 #define EPSTATUS_TIMEOUT 10000
2877 #define PRIME_TIMEOUT 10000
2878 #define READSAFE_TIMEOUT 1000
2879 #define LOOPS_USEC_SHIFT 1
2880 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2881 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2882 #ifdef CONFIG_PM
2883 /* LDV_COMMENT_END_PREP */
2884 /* LDV_COMMENT_BEGIN_PREP */
2885 #endif
2886 #ifdef CONFIG_PM
2887 #endif
2888 /* LDV_COMMENT_END_PREP */
2889
2890 /** STRUCT: struct type: platform_driver, struct name: udc_driver **/
2891 /* content: static int mv_udc_probe(struct platform_device *pdev)*/
2892 /* LDV_COMMENT_BEGIN_PREP */
2893 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2894 #define DRIVER_VERSION "8 Nov 2010"
2895 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2896 ((ep)->udc->ep0_dir) : ((ep)->direction))
2897 #define RESET_TIMEOUT 10000
2898 #define FLUSH_TIMEOUT 10000
2899 #define EPSTATUS_TIMEOUT 10000
2900 #define PRIME_TIMEOUT 10000
2901 #define READSAFE_TIMEOUT 1000
2902 #define LOOPS_USEC_SHIFT 1
2903 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2904 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2905 /* LDV_COMMENT_END_PREP */
2906 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_udc_probe" */
2907 struct platform_device * var_group6;
2908 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "mv_udc_probe" */
2909 static int res_mv_udc_probe_59;
2910 /* LDV_COMMENT_BEGIN_PREP */
2911 #ifdef CONFIG_PM
2912 #endif
2913 #ifdef CONFIG_PM
2914 #endif
2915 /* LDV_COMMENT_END_PREP */
2916 /* content: static int mv_udc_remove(struct platform_device *pdev)*/
2917 /* LDV_COMMENT_BEGIN_PREP */
2918 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2919 #define DRIVER_VERSION "8 Nov 2010"
2920 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2921 ((ep)->udc->ep0_dir) : ((ep)->direction))
2922 #define RESET_TIMEOUT 10000
2923 #define FLUSH_TIMEOUT 10000
2924 #define EPSTATUS_TIMEOUT 10000
2925 #define PRIME_TIMEOUT 10000
2926 #define READSAFE_TIMEOUT 1000
2927 #define LOOPS_USEC_SHIFT 1
2928 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2929 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2930 /* LDV_COMMENT_END_PREP */
2931 /* LDV_COMMENT_BEGIN_PREP */
2932 #ifdef CONFIG_PM
2933 #endif
2934 #ifdef CONFIG_PM
2935 #endif
2936 /* LDV_COMMENT_END_PREP */
2937 /* content: static void mv_udc_shutdown(struct platform_device *pdev)*/
2938 /* LDV_COMMENT_BEGIN_PREP */
2939 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2940 #define DRIVER_VERSION "8 Nov 2010"
2941 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2942 ((ep)->udc->ep0_dir) : ((ep)->direction))
2943 #define RESET_TIMEOUT 10000
2944 #define FLUSH_TIMEOUT 10000
2945 #define EPSTATUS_TIMEOUT 10000
2946 #define PRIME_TIMEOUT 10000
2947 #define READSAFE_TIMEOUT 1000
2948 #define LOOPS_USEC_SHIFT 1
2949 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2950 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2951 #ifdef CONFIG_PM
2952 #endif
2953 /* LDV_COMMENT_END_PREP */
2954 /* LDV_COMMENT_BEGIN_PREP */
2955 #ifdef CONFIG_PM
2956 #endif
2957 /* LDV_COMMENT_END_PREP */
2958
2959 /** CALLBACK SECTION request_irq **/
2960 /* content: static irqreturn_t mv_udc_irq(int irq, void *dev)*/
2961 /* LDV_COMMENT_BEGIN_PREP */
2962 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2963 #define DRIVER_VERSION "8 Nov 2010"
2964 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2965 ((ep)->udc->ep0_dir) : ((ep)->direction))
2966 #define RESET_TIMEOUT 10000
2967 #define FLUSH_TIMEOUT 10000
2968 #define EPSTATUS_TIMEOUT 10000
2969 #define PRIME_TIMEOUT 10000
2970 #define READSAFE_TIMEOUT 1000
2971 #define LOOPS_USEC_SHIFT 1
2972 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2973 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2974 /* LDV_COMMENT_END_PREP */
2975 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_udc_irq" */
2976 int var_mv_udc_irq_54_p0;
2977 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_udc_irq" */
2978 void * var_mv_udc_irq_54_p1;
2979 /* LDV_COMMENT_BEGIN_PREP */
2980 #ifdef CONFIG_PM
2981 #endif
2982 #ifdef CONFIG_PM
2983 #endif
2984 /* LDV_COMMENT_END_PREP */
2985 /* content: static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)*/
2986 /* LDV_COMMENT_BEGIN_PREP */
2987 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
2988 #define DRIVER_VERSION "8 Nov 2010"
2989 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
2990 ((ep)->udc->ep0_dir) : ((ep)->direction))
2991 #define RESET_TIMEOUT 10000
2992 #define FLUSH_TIMEOUT 10000
2993 #define EPSTATUS_TIMEOUT 10000
2994 #define PRIME_TIMEOUT 10000
2995 #define READSAFE_TIMEOUT 1000
2996 #define LOOPS_USEC_SHIFT 1
2997 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
2998 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
2999 /* LDV_COMMENT_END_PREP */
3000 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_udc_vbus_irq" */
3001 int var_mv_udc_vbus_irq_55_p0;
3002 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_udc_vbus_irq" */
3003 void * var_mv_udc_vbus_irq_55_p1;
3004 /* LDV_COMMENT_BEGIN_PREP */
3005 #ifdef CONFIG_PM
3006 #endif
3007 #ifdef CONFIG_PM
3008 #endif
3009 /* LDV_COMMENT_END_PREP */
3010
3011
3012
3013
3014 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
3015 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
3016 /*============================= VARIABLE INITIALIZING PART =============================*/
3017 LDV_IN_INTERRUPT=1;
3018
3019
3020
3021
3022 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
3023 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
3024 /*============================= FUNCTION CALL SECTION =============================*/
3025 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
3026 ldv_initialize();
3027
3028
3029
3030
3031
3032
3033 int ldv_s_udc_driver_platform_driver = 0;
3034
3035
3036
3037
3038 while( nondet_int()
3039 || !(ldv_s_udc_driver_platform_driver == 0)
3040 ) {
3041
3042 switch(nondet_int()) {
3043
3044 case 0: {
3045
3046 /** STRUCT: struct type: usb_ep_ops, struct name: mv_ep_ops **/
3047
3048
3049 /* content: static int mv_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)*/
3050 /* LDV_COMMENT_BEGIN_PREP */
3051 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3052 #define DRIVER_VERSION "8 Nov 2010"
3053 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3054 ((ep)->udc->ep0_dir) : ((ep)->direction))
3055 #define RESET_TIMEOUT 10000
3056 #define FLUSH_TIMEOUT 10000
3057 #define EPSTATUS_TIMEOUT 10000
3058 #define PRIME_TIMEOUT 10000
3059 #define READSAFE_TIMEOUT 1000
3060 #define LOOPS_USEC_SHIFT 1
3061 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3062 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3063 /* LDV_COMMENT_END_PREP */
3064 /* LDV_COMMENT_FUNCTION_CALL Function from field "enable" from driver structure with callbacks "mv_ep_ops" */
3065 ldv_handler_precall();
3066 mv_ep_enable( var_group1, var_mv_ep_enable_6_p1);
3067 /* LDV_COMMENT_BEGIN_PREP */
3068 #ifdef CONFIG_PM
3069 #endif
3070 #ifdef CONFIG_PM
3071 #endif
3072 /* LDV_COMMENT_END_PREP */
3073
3074
3075
3076
3077 }
3078
3079 break;
3080 case 1: {
3081
3082 /** STRUCT: struct type: usb_ep_ops, struct name: mv_ep_ops **/
3083
3084
3085 /* content: static int mv_ep_disable(struct usb_ep *_ep)*/
3086 /* LDV_COMMENT_BEGIN_PREP */
3087 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3088 #define DRIVER_VERSION "8 Nov 2010"
3089 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3090 ((ep)->udc->ep0_dir) : ((ep)->direction))
3091 #define RESET_TIMEOUT 10000
3092 #define FLUSH_TIMEOUT 10000
3093 #define EPSTATUS_TIMEOUT 10000
3094 #define PRIME_TIMEOUT 10000
3095 #define READSAFE_TIMEOUT 1000
3096 #define LOOPS_USEC_SHIFT 1
3097 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3098 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3099 /* LDV_COMMENT_END_PREP */
3100 /* LDV_COMMENT_FUNCTION_CALL Function from field "disable" from driver structure with callbacks "mv_ep_ops" */
3101 ldv_handler_precall();
3102 mv_ep_disable( var_group1);
3103 /* LDV_COMMENT_BEGIN_PREP */
3104 #ifdef CONFIG_PM
3105 #endif
3106 #ifdef CONFIG_PM
3107 #endif
3108 /* LDV_COMMENT_END_PREP */
3109
3110
3111
3112
3113 }
3114
3115 break;
3116 case 2: {
3117
3118 /** STRUCT: struct type: usb_ep_ops, struct name: mv_ep_ops **/
3119
3120
3121 /* content: static struct usb_request * mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)*/
3122 /* LDV_COMMENT_BEGIN_PREP */
3123 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3124 #define DRIVER_VERSION "8 Nov 2010"
3125 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3126 ((ep)->udc->ep0_dir) : ((ep)->direction))
3127 #define RESET_TIMEOUT 10000
3128 #define FLUSH_TIMEOUT 10000
3129 #define EPSTATUS_TIMEOUT 10000
3130 #define PRIME_TIMEOUT 10000
3131 #define READSAFE_TIMEOUT 1000
3132 #define LOOPS_USEC_SHIFT 1
3133 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3134 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3135 /* LDV_COMMENT_END_PREP */
3136 /* LDV_COMMENT_FUNCTION_CALL Function from field "alloc_request" from driver structure with callbacks "mv_ep_ops" */
3137 ldv_handler_precall();
3138 mv_alloc_request( var_group1, var_mv_alloc_request_8_p1);
3139 /* LDV_COMMENT_BEGIN_PREP */
3140 #ifdef CONFIG_PM
3141 #endif
3142 #ifdef CONFIG_PM
3143 #endif
3144 /* LDV_COMMENT_END_PREP */
3145
3146
3147
3148
3149 }
3150
3151 break;
3152 case 3: {
3153
3154 /** STRUCT: struct type: usb_ep_ops, struct name: mv_ep_ops **/
3155
3156
3157 /* content: static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)*/
3158 /* LDV_COMMENT_BEGIN_PREP */
3159 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3160 #define DRIVER_VERSION "8 Nov 2010"
3161 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3162 ((ep)->udc->ep0_dir) : ((ep)->direction))
3163 #define RESET_TIMEOUT 10000
3164 #define FLUSH_TIMEOUT 10000
3165 #define EPSTATUS_TIMEOUT 10000
3166 #define PRIME_TIMEOUT 10000
3167 #define READSAFE_TIMEOUT 1000
3168 #define LOOPS_USEC_SHIFT 1
3169 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3170 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3171 /* LDV_COMMENT_END_PREP */
3172 /* LDV_COMMENT_FUNCTION_CALL Function from field "free_request" from driver structure with callbacks "mv_ep_ops" */
3173 ldv_handler_precall();
3174 mv_free_request( var_group1, var_group2);
3175 /* LDV_COMMENT_BEGIN_PREP */
3176 #ifdef CONFIG_PM
3177 #endif
3178 #ifdef CONFIG_PM
3179 #endif
3180 /* LDV_COMMENT_END_PREP */
3181
3182
3183
3184
3185 }
3186
3187 break;
3188 case 4: {
3189
3190 /** STRUCT: struct type: usb_ep_ops, struct name: mv_ep_ops **/
3191
3192
3193 /* content: static int mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)*/
3194 /* LDV_COMMENT_BEGIN_PREP */
3195 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3196 #define DRIVER_VERSION "8 Nov 2010"
3197 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3198 ((ep)->udc->ep0_dir) : ((ep)->direction))
3199 #define RESET_TIMEOUT 10000
3200 #define FLUSH_TIMEOUT 10000
3201 #define EPSTATUS_TIMEOUT 10000
3202 #define PRIME_TIMEOUT 10000
3203 #define READSAFE_TIMEOUT 1000
3204 #define LOOPS_USEC_SHIFT 1
3205 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3206 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3207 /* LDV_COMMENT_END_PREP */
3208 /* LDV_COMMENT_FUNCTION_CALL Function from field "queue" from driver structure with callbacks "mv_ep_ops" */
3209 ldv_handler_precall();
3210 mv_ep_queue( var_group1, var_group2, var_mv_ep_queue_11_p2);
3211 /* LDV_COMMENT_BEGIN_PREP */
3212 #ifdef CONFIG_PM
3213 #endif
3214 #ifdef CONFIG_PM
3215 #endif
3216 /* LDV_COMMENT_END_PREP */
3217
3218
3219
3220
3221 }
3222
3223 break;
3224 case 5: {
3225
3226 /** STRUCT: struct type: usb_ep_ops, struct name: mv_ep_ops **/
3227
3228
3229 /* content: static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)*/
3230 /* LDV_COMMENT_BEGIN_PREP */
3231 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3232 #define DRIVER_VERSION "8 Nov 2010"
3233 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3234 ((ep)->udc->ep0_dir) : ((ep)->direction))
3235 #define RESET_TIMEOUT 10000
3236 #define FLUSH_TIMEOUT 10000
3237 #define EPSTATUS_TIMEOUT 10000
3238 #define PRIME_TIMEOUT 10000
3239 #define READSAFE_TIMEOUT 1000
3240 #define LOOPS_USEC_SHIFT 1
3241 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3242 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3243 /* LDV_COMMENT_END_PREP */
3244 /* LDV_COMMENT_FUNCTION_CALL Function from field "dequeue" from driver structure with callbacks "mv_ep_ops" */
3245 ldv_handler_precall();
3246 mv_ep_dequeue( var_group1, var_group2);
3247 /* LDV_COMMENT_BEGIN_PREP */
3248 #ifdef CONFIG_PM
3249 #endif
3250 #ifdef CONFIG_PM
3251 #endif
3252 /* LDV_COMMENT_END_PREP */
3253
3254
3255
3256
3257 }
3258
3259 break;
3260 case 6: {
3261
3262 /** STRUCT: struct type: usb_ep_ops, struct name: mv_ep_ops **/
3263
3264
3265 /* content: static int mv_ep_set_wedge(struct usb_ep *_ep)*/
3266 /* LDV_COMMENT_BEGIN_PREP */
3267 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3268 #define DRIVER_VERSION "8 Nov 2010"
3269 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3270 ((ep)->udc->ep0_dir) : ((ep)->direction))
3271 #define RESET_TIMEOUT 10000
3272 #define FLUSH_TIMEOUT 10000
3273 #define EPSTATUS_TIMEOUT 10000
3274 #define PRIME_TIMEOUT 10000
3275 #define READSAFE_TIMEOUT 1000
3276 #define LOOPS_USEC_SHIFT 1
3277 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3278 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3279 /* LDV_COMMENT_END_PREP */
3280 /* LDV_COMMENT_FUNCTION_CALL Function from field "set_wedge" from driver structure with callbacks "mv_ep_ops" */
3281 ldv_handler_precall();
3282 mv_ep_set_wedge( var_group1);
3283 /* LDV_COMMENT_BEGIN_PREP */
3284 #ifdef CONFIG_PM
3285 #endif
3286 #ifdef CONFIG_PM
3287 #endif
3288 /* LDV_COMMENT_END_PREP */
3289
3290
3291
3292
3293 }
3294
3295 break;
3296 case 7: {
3297
3298 /** STRUCT: struct type: usb_ep_ops, struct name: mv_ep_ops **/
3299
3300
3301 /* content: static int mv_ep_set_halt(struct usb_ep *_ep, int halt)*/
3302 /* LDV_COMMENT_BEGIN_PREP */
3303 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3304 #define DRIVER_VERSION "8 Nov 2010"
3305 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3306 ((ep)->udc->ep0_dir) : ((ep)->direction))
3307 #define RESET_TIMEOUT 10000
3308 #define FLUSH_TIMEOUT 10000
3309 #define EPSTATUS_TIMEOUT 10000
3310 #define PRIME_TIMEOUT 10000
3311 #define READSAFE_TIMEOUT 1000
3312 #define LOOPS_USEC_SHIFT 1
3313 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3314 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3315 /* LDV_COMMENT_END_PREP */
3316 /* LDV_COMMENT_FUNCTION_CALL Function from field "set_halt" from driver structure with callbacks "mv_ep_ops" */
3317 ldv_handler_precall();
3318 mv_ep_set_halt( var_group1, var_mv_ep_set_halt_17_p1);
3319 /* LDV_COMMENT_BEGIN_PREP */
3320 #ifdef CONFIG_PM
3321 #endif
3322 #ifdef CONFIG_PM
3323 #endif
3324 /* LDV_COMMENT_END_PREP */
3325
3326
3327
3328
3329 }
3330
3331 break;
3332 case 8: {
3333
3334 /** STRUCT: struct type: usb_ep_ops, struct name: mv_ep_ops **/
3335
3336
3337 /* content: static void mv_ep_fifo_flush(struct usb_ep *_ep)*/
3338 /* LDV_COMMENT_BEGIN_PREP */
3339 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3340 #define DRIVER_VERSION "8 Nov 2010"
3341 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3342 ((ep)->udc->ep0_dir) : ((ep)->direction))
3343 #define RESET_TIMEOUT 10000
3344 #define FLUSH_TIMEOUT 10000
3345 #define EPSTATUS_TIMEOUT 10000
3346 #define PRIME_TIMEOUT 10000
3347 #define READSAFE_TIMEOUT 1000
3348 #define LOOPS_USEC_SHIFT 1
3349 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3350 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3351 /* LDV_COMMENT_END_PREP */
3352 /* LDV_COMMENT_FUNCTION_CALL Function from field "fifo_flush" from driver structure with callbacks "mv_ep_ops" */
3353 ldv_handler_precall();
3354 mv_ep_fifo_flush( var_group1);
3355 /* LDV_COMMENT_BEGIN_PREP */
3356 #ifdef CONFIG_PM
3357 #endif
3358 #ifdef CONFIG_PM
3359 #endif
3360 /* LDV_COMMENT_END_PREP */
3361
3362
3363
3364
3365 }
3366
3367 break;
3368 case 9: {
3369
3370 /** STRUCT: struct type: usb_gadget_ops, struct name: mv_ops **/
3371
3372
3373 /* content: static int mv_udc_get_frame(struct usb_gadget *gadget)*/
3374 /* LDV_COMMENT_BEGIN_PREP */
3375 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3376 #define DRIVER_VERSION "8 Nov 2010"
3377 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3378 ((ep)->udc->ep0_dir) : ((ep)->direction))
3379 #define RESET_TIMEOUT 10000
3380 #define FLUSH_TIMEOUT 10000
3381 #define EPSTATUS_TIMEOUT 10000
3382 #define PRIME_TIMEOUT 10000
3383 #define READSAFE_TIMEOUT 1000
3384 #define LOOPS_USEC_SHIFT 1
3385 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3386 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3387 /* LDV_COMMENT_END_PREP */
3388 /* LDV_COMMENT_FUNCTION_CALL Function from field "get_frame" from driver structure with callbacks "mv_ops" */
3389 ldv_handler_precall();
3390 mv_udc_get_frame( var_group3);
3391 /* LDV_COMMENT_BEGIN_PREP */
3392 #ifdef CONFIG_PM
3393 #endif
3394 #ifdef CONFIG_PM
3395 #endif
3396 /* LDV_COMMENT_END_PREP */
3397
3398
3399
3400
3401 }
3402
3403 break;
3404 case 10: {
3405
3406 /** STRUCT: struct type: usb_gadget_ops, struct name: mv_ops **/
3407
3408
3409 /* content: static int mv_udc_wakeup(struct usb_gadget *gadget)*/
3410 /* LDV_COMMENT_BEGIN_PREP */
3411 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3412 #define DRIVER_VERSION "8 Nov 2010"
3413 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3414 ((ep)->udc->ep0_dir) : ((ep)->direction))
3415 #define RESET_TIMEOUT 10000
3416 #define FLUSH_TIMEOUT 10000
3417 #define EPSTATUS_TIMEOUT 10000
3418 #define PRIME_TIMEOUT 10000
3419 #define READSAFE_TIMEOUT 1000
3420 #define LOOPS_USEC_SHIFT 1
3421 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3422 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3423 /* LDV_COMMENT_END_PREP */
3424 /* LDV_COMMENT_FUNCTION_CALL Function from field "wakeup" from driver structure with callbacks "mv_ops" */
3425 ldv_handler_precall();
3426 mv_udc_wakeup( var_group3);
3427 /* LDV_COMMENT_BEGIN_PREP */
3428 #ifdef CONFIG_PM
3429 #endif
3430 #ifdef CONFIG_PM
3431 #endif
3432 /* LDV_COMMENT_END_PREP */
3433
3434
3435
3436
3437 }
3438
3439 break;
3440 case 11: {
3441
3442 /** STRUCT: struct type: usb_gadget_ops, struct name: mv_ops **/
3443
3444
3445 /* content: static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)*/
3446 /* LDV_COMMENT_BEGIN_PREP */
3447 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3448 #define DRIVER_VERSION "8 Nov 2010"
3449 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3450 ((ep)->udc->ep0_dir) : ((ep)->direction))
3451 #define RESET_TIMEOUT 10000
3452 #define FLUSH_TIMEOUT 10000
3453 #define EPSTATUS_TIMEOUT 10000
3454 #define PRIME_TIMEOUT 10000
3455 #define READSAFE_TIMEOUT 1000
3456 #define LOOPS_USEC_SHIFT 1
3457 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3458 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3459 /* LDV_COMMENT_END_PREP */
3460 /* LDV_COMMENT_FUNCTION_CALL Function from field "vbus_session" from driver structure with callbacks "mv_ops" */
3461 ldv_handler_precall();
3462 mv_udc_vbus_session( var_group3, var_mv_udc_vbus_session_30_p1);
3463 /* LDV_COMMENT_BEGIN_PREP */
3464 #ifdef CONFIG_PM
3465 #endif
3466 #ifdef CONFIG_PM
3467 #endif
3468 /* LDV_COMMENT_END_PREP */
3469
3470
3471
3472
3473 }
3474
3475 break;
3476 case 12: {
3477
3478 /** STRUCT: struct type: usb_gadget_ops, struct name: mv_ops **/
3479
3480
3481 /* content: static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)*/
3482 /* LDV_COMMENT_BEGIN_PREP */
3483 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3484 #define DRIVER_VERSION "8 Nov 2010"
3485 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3486 ((ep)->udc->ep0_dir) : ((ep)->direction))
3487 #define RESET_TIMEOUT 10000
3488 #define FLUSH_TIMEOUT 10000
3489 #define EPSTATUS_TIMEOUT 10000
3490 #define PRIME_TIMEOUT 10000
3491 #define READSAFE_TIMEOUT 1000
3492 #define LOOPS_USEC_SHIFT 1
3493 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3494 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3495 /* LDV_COMMENT_END_PREP */
3496 /* LDV_COMMENT_FUNCTION_CALL Function from field "pullup" from driver structure with callbacks "mv_ops" */
3497 ldv_handler_precall();
3498 mv_udc_pullup( var_group3, var_mv_udc_pullup_31_p1);
3499 /* LDV_COMMENT_BEGIN_PREP */
3500 #ifdef CONFIG_PM
3501 #endif
3502 #ifdef CONFIG_PM
3503 #endif
3504 /* LDV_COMMENT_END_PREP */
3505
3506
3507
3508
3509 }
3510
3511 break;
3512 case 13: {
3513
3514 /** STRUCT: struct type: usb_gadget_ops, struct name: mv_ops **/
3515
3516
3517 /* content: static int mv_udc_start(struct usb_gadget *gadget, struct usb_gadget_driver *driver)*/
3518 /* LDV_COMMENT_BEGIN_PREP */
3519 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3520 #define DRIVER_VERSION "8 Nov 2010"
3521 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3522 ((ep)->udc->ep0_dir) : ((ep)->direction))
3523 #define RESET_TIMEOUT 10000
3524 #define FLUSH_TIMEOUT 10000
3525 #define EPSTATUS_TIMEOUT 10000
3526 #define PRIME_TIMEOUT 10000
3527 #define READSAFE_TIMEOUT 1000
3528 #define LOOPS_USEC_SHIFT 1
3529 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3530 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3531 /* LDV_COMMENT_END_PREP */
3532 /* LDV_COMMENT_FUNCTION_CALL Function from field "udc_start" from driver structure with callbacks "mv_ops" */
3533 ldv_handler_precall();
3534 mv_udc_start( var_group3, var_group4);
3535 /* LDV_COMMENT_BEGIN_PREP */
3536 #ifdef CONFIG_PM
3537 #endif
3538 #ifdef CONFIG_PM
3539 #endif
3540 /* LDV_COMMENT_END_PREP */
3541
3542
3543
3544
3545 }
3546
3547 break;
3548 case 14: {
3549
3550 /** STRUCT: struct type: usb_gadget_ops, struct name: mv_ops **/
3551
3552
3553 /* content: static int mv_udc_stop(struct usb_gadget *gadget)*/
3554 /* LDV_COMMENT_BEGIN_PREP */
3555 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3556 #define DRIVER_VERSION "8 Nov 2010"
3557 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3558 ((ep)->udc->ep0_dir) : ((ep)->direction))
3559 #define RESET_TIMEOUT 10000
3560 #define FLUSH_TIMEOUT 10000
3561 #define EPSTATUS_TIMEOUT 10000
3562 #define PRIME_TIMEOUT 10000
3563 #define READSAFE_TIMEOUT 1000
3564 #define LOOPS_USEC_SHIFT 1
3565 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3566 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3567 /* LDV_COMMENT_END_PREP */
3568 /* LDV_COMMENT_FUNCTION_CALL Function from field "udc_stop" from driver structure with callbacks "mv_ops" */
3569 ldv_handler_precall();
3570 mv_udc_stop( var_group3);
3571 /* LDV_COMMENT_BEGIN_PREP */
3572 #ifdef CONFIG_PM
3573 #endif
3574 #ifdef CONFIG_PM
3575 #endif
3576 /* LDV_COMMENT_END_PREP */
3577
3578
3579
3580
3581 }
3582
3583 break;
3584 case 15: {
3585
3586 /** STRUCT: struct type: dev_pm_ops, struct name: mv_udc_pm_ops **/
3587
3588
3589 /* content: static int mv_udc_suspend(struct device *dev)*/
3590 /* LDV_COMMENT_BEGIN_PREP */
3591 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3592 #define DRIVER_VERSION "8 Nov 2010"
3593 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3594 ((ep)->udc->ep0_dir) : ((ep)->direction))
3595 #define RESET_TIMEOUT 10000
3596 #define FLUSH_TIMEOUT 10000
3597 #define EPSTATUS_TIMEOUT 10000
3598 #define PRIME_TIMEOUT 10000
3599 #define READSAFE_TIMEOUT 1000
3600 #define LOOPS_USEC_SHIFT 1
3601 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3602 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3603 #ifdef CONFIG_PM
3604 /* LDV_COMMENT_END_PREP */
3605 /* LDV_COMMENT_FUNCTION_CALL Function from field "suspend" from driver structure with callbacks "mv_udc_pm_ops" */
3606 ldv_handler_precall();
3607 mv_udc_suspend( var_group5);
3608 /* LDV_COMMENT_BEGIN_PREP */
3609 #endif
3610 #ifdef CONFIG_PM
3611 #endif
3612 /* LDV_COMMENT_END_PREP */
3613
3614
3615
3616
3617 }
3618
3619 break;
3620 case 16: {
3621
3622 /** STRUCT: struct type: dev_pm_ops, struct name: mv_udc_pm_ops **/
3623
3624
3625 /* content: static int mv_udc_resume(struct device *dev)*/
3626 /* LDV_COMMENT_BEGIN_PREP */
3627 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3628 #define DRIVER_VERSION "8 Nov 2010"
3629 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3630 ((ep)->udc->ep0_dir) : ((ep)->direction))
3631 #define RESET_TIMEOUT 10000
3632 #define FLUSH_TIMEOUT 10000
3633 #define EPSTATUS_TIMEOUT 10000
3634 #define PRIME_TIMEOUT 10000
3635 #define READSAFE_TIMEOUT 1000
3636 #define LOOPS_USEC_SHIFT 1
3637 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3638 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3639 #ifdef CONFIG_PM
3640 /* LDV_COMMENT_END_PREP */
3641 /* LDV_COMMENT_FUNCTION_CALL Function from field "resume" from driver structure with callbacks "mv_udc_pm_ops" */
3642 ldv_handler_precall();
3643 mv_udc_resume( var_group5);
3644 /* LDV_COMMENT_BEGIN_PREP */
3645 #endif
3646 #ifdef CONFIG_PM
3647 #endif
3648 /* LDV_COMMENT_END_PREP */
3649
3650
3651
3652
3653 }
3654
3655 break;
3656 case 17: {
3657
3658 /** STRUCT: struct type: platform_driver, struct name: udc_driver **/
3659 if(ldv_s_udc_driver_platform_driver==0) {
3660
3661 /* content: static int mv_udc_probe(struct platform_device *pdev)*/
3662 /* LDV_COMMENT_BEGIN_PREP */
3663 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3664 #define DRIVER_VERSION "8 Nov 2010"
3665 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3666 ((ep)->udc->ep0_dir) : ((ep)->direction))
3667 #define RESET_TIMEOUT 10000
3668 #define FLUSH_TIMEOUT 10000
3669 #define EPSTATUS_TIMEOUT 10000
3670 #define PRIME_TIMEOUT 10000
3671 #define READSAFE_TIMEOUT 1000
3672 #define LOOPS_USEC_SHIFT 1
3673 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3674 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3675 /* LDV_COMMENT_END_PREP */
3676 /* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "udc_driver". Standart function test for correct return result. */
3677 res_mv_udc_probe_59 = mv_udc_probe( var_group6);
3678 ldv_check_return_value(res_mv_udc_probe_59);
3679 ldv_check_return_value_probe(res_mv_udc_probe_59);
3680 if(res_mv_udc_probe_59)
3681 goto ldv_module_exit;
3682 /* LDV_COMMENT_BEGIN_PREP */
3683 #ifdef CONFIG_PM
3684 #endif
3685 #ifdef CONFIG_PM
3686 #endif
3687 /* LDV_COMMENT_END_PREP */
3688 ldv_s_udc_driver_platform_driver++;
3689
3690 }
3691
3692 }
3693
3694 break;
3695 case 18: {
3696
3697 /** STRUCT: struct type: platform_driver, struct name: udc_driver **/
3698 if(ldv_s_udc_driver_platform_driver==1) {
3699
3700 /* content: static int mv_udc_remove(struct platform_device *pdev)*/
3701 /* LDV_COMMENT_BEGIN_PREP */
3702 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3703 #define DRIVER_VERSION "8 Nov 2010"
3704 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3705 ((ep)->udc->ep0_dir) : ((ep)->direction))
3706 #define RESET_TIMEOUT 10000
3707 #define FLUSH_TIMEOUT 10000
3708 #define EPSTATUS_TIMEOUT 10000
3709 #define PRIME_TIMEOUT 10000
3710 #define READSAFE_TIMEOUT 1000
3711 #define LOOPS_USEC_SHIFT 1
3712 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3713 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3714 /* LDV_COMMENT_END_PREP */
3715 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "udc_driver" */
3716 ldv_handler_precall();
3717 mv_udc_remove( var_group6);
3718 /* LDV_COMMENT_BEGIN_PREP */
3719 #ifdef CONFIG_PM
3720 #endif
3721 #ifdef CONFIG_PM
3722 #endif
3723 /* LDV_COMMENT_END_PREP */
3724 ldv_s_udc_driver_platform_driver++;
3725
3726 }
3727
3728 }
3729
3730 break;
3731 case 19: {
3732
3733 /** STRUCT: struct type: platform_driver, struct name: udc_driver **/
3734 if(ldv_s_udc_driver_platform_driver==2) {
3735
3736 /* content: static void mv_udc_shutdown(struct platform_device *pdev)*/
3737 /* LDV_COMMENT_BEGIN_PREP */
3738 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3739 #define DRIVER_VERSION "8 Nov 2010"
3740 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3741 ((ep)->udc->ep0_dir) : ((ep)->direction))
3742 #define RESET_TIMEOUT 10000
3743 #define FLUSH_TIMEOUT 10000
3744 #define EPSTATUS_TIMEOUT 10000
3745 #define PRIME_TIMEOUT 10000
3746 #define READSAFE_TIMEOUT 1000
3747 #define LOOPS_USEC_SHIFT 1
3748 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3749 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3750 #ifdef CONFIG_PM
3751 #endif
3752 /* LDV_COMMENT_END_PREP */
3753 /* LDV_COMMENT_FUNCTION_CALL Function from field "shutdown" from driver structure with callbacks "udc_driver" */
3754 ldv_handler_precall();
3755 mv_udc_shutdown( var_group6);
3756 /* LDV_COMMENT_BEGIN_PREP */
3757 #ifdef CONFIG_PM
3758 #endif
3759 /* LDV_COMMENT_END_PREP */
3760 ldv_s_udc_driver_platform_driver=0;
3761
3762 }
3763
3764 }
3765
3766 break;
3767 case 20: {
3768
3769 /** CALLBACK SECTION request_irq **/
3770 LDV_IN_INTERRUPT=2;
3771
3772 /* content: static irqreturn_t mv_udc_irq(int irq, void *dev)*/
3773 /* LDV_COMMENT_BEGIN_PREP */
3774 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3775 #define DRIVER_VERSION "8 Nov 2010"
3776 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3777 ((ep)->udc->ep0_dir) : ((ep)->direction))
3778 #define RESET_TIMEOUT 10000
3779 #define FLUSH_TIMEOUT 10000
3780 #define EPSTATUS_TIMEOUT 10000
3781 #define PRIME_TIMEOUT 10000
3782 #define READSAFE_TIMEOUT 1000
3783 #define LOOPS_USEC_SHIFT 1
3784 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3785 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3786 /* LDV_COMMENT_END_PREP */
3787 /* LDV_COMMENT_FUNCTION_CALL */
3788 ldv_handler_precall();
3789 mv_udc_irq( var_mv_udc_irq_54_p0, var_mv_udc_irq_54_p1);
3790 /* LDV_COMMENT_BEGIN_PREP */
3791 #ifdef CONFIG_PM
3792 #endif
3793 #ifdef CONFIG_PM
3794 #endif
3795 /* LDV_COMMENT_END_PREP */
3796 LDV_IN_INTERRUPT=1;
3797
3798
3799
3800 }
3801
3802 break;
3803 case 21: {
3804
3805 /** CALLBACK SECTION request_irq **/
3806 LDV_IN_INTERRUPT=2;
3807
3808 /* content: static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)*/
3809 /* LDV_COMMENT_BEGIN_PREP */
3810 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
3811 #define DRIVER_VERSION "8 Nov 2010"
3812 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
3813 ((ep)->udc->ep0_dir) : ((ep)->direction))
3814 #define RESET_TIMEOUT 10000
3815 #define FLUSH_TIMEOUT 10000
3816 #define EPSTATUS_TIMEOUT 10000
3817 #define PRIME_TIMEOUT 10000
3818 #define READSAFE_TIMEOUT 1000
3819 #define LOOPS_USEC_SHIFT 1
3820 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
3821 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
3822 /* LDV_COMMENT_END_PREP */
3823 /* LDV_COMMENT_FUNCTION_CALL */
3824 ldv_handler_precall();
3825 mv_udc_vbus_irq( var_mv_udc_vbus_irq_55_p0, var_mv_udc_vbus_irq_55_p1);
3826 /* LDV_COMMENT_BEGIN_PREP */
3827 #ifdef CONFIG_PM
3828 #endif
3829 #ifdef CONFIG_PM
3830 #endif
3831 /* LDV_COMMENT_END_PREP */
3832 LDV_IN_INTERRUPT=1;
3833
3834
3835
3836 }
3837
3838 break;
3839 default: break;
3840
3841 }
3842
3843 }
3844
3845 ldv_module_exit:
3846
3847 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
3848 ldv_final: ldv_check_final_state();
3849
3850 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
3851 return;
3852
3853 }
3854 #endif
3855
3856 /* LDV_COMMENT_END_MAIN */
3857
3858 #line 32 "/work/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.1-rc1.tar.xz--X--152_1a/linux-4.1-rc1.tar.xz/csd_deg_dscv/2900/dscv_tempdir/dscv/ri/152_1a/drivers/usb/gadget/udc/mv_udc_core.o.c.prepared" 1 #ifndef _LDV_RCV_H_
2 #define _LDV_RCV_H_
3
4 /* If expr evaluates to zero, ldv_assert() causes a program to reach the error
5 label like the standard assert(). */
6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
7
8 /* The error label wrapper. It is used because of some static verifiers (like
9 BLAST) don't accept multiple error labels through a program. */
10 static inline void ldv_error(void)
11 {
12 LDV_ERROR: goto LDV_ERROR;
13 }
14
15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
16 avoided by verifiers. */
17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
18
19 /* Infinite loop, that causes verifiers to skip such paths. */
20 static inline void ldv_stop(void) {
21 LDV_STOP: goto LDV_STOP;
22 }
23
24 /* Special nondeterministic functions. */
25 int ldv_undef_int(void);
26 void *ldv_undef_ptr(void);
27 unsigned long ldv_undef_ulong(void);
28 long ldv_undef_long(void);
29 /* Return nondeterministic negative integer number. */
30 static inline int ldv_undef_int_negative(void)
31 {
32 int ret = ldv_undef_int();
33
34 ldv_assume(ret < 0);
35
36 return ret;
37 }
38 /* Return nondeterministic nonpositive integer number. */
39 static inline int ldv_undef_int_nonpositive(void)
40 {
41 int ret = ldv_undef_int();
42
43 ldv_assume(ret <= 0);
44
45 return ret;
46 }
47
48 /* Add explicit model for __builin_expect GCC function. Without the model a
49 return value will be treated as nondetermined by verifiers. */
50 static inline long __builtin_expect(long exp, long c)
51 {
52 return exp;
53 }
54
55 /* This function causes the program to exit abnormally. GCC implements this
56 function by using a target-dependent mechanism (such as intentionally executing
57 an illegal instruction) or by calling abort. The mechanism used may vary from
58 release to release so you should not rely on any particular implementation.
59 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
60 static inline void __builtin_trap(void)
61 {
62 ldv_assert(0);
63 }
64
65 /* The constant is for simulating an error of ldv_undef_ptr() function. */
66 #define LDV_PTR_MAX 2012
67
68 #endif /* _LDV_RCV_H_ */ 1 /*
2 * linux/include/linux/clk.h
3 *
4 * Copyright (C) 2004 ARM Limited.
5 * Written by Deep Blue Solutions Limited.
6 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12 #ifndef __LINUX_CLK_H
13 #define __LINUX_CLK_H
14
15 #include <linux/err.h>
16 #include <linux/kernel.h>
17 #include <linux/notifier.h>
18
19 struct device;
20
21 struct clk;
22
23 #ifdef CONFIG_COMMON_CLK
24
25 /**
26 * DOC: clk notifier callback types
27 *
28 * PRE_RATE_CHANGE - called immediately before the clk rate is changed,
29 * to indicate that the rate change will proceed. Drivers must
30 * immediately terminate any operations that will be affected by the
31 * rate change. Callbacks may either return NOTIFY_DONE, NOTIFY_OK,
32 * NOTIFY_STOP or NOTIFY_BAD.
33 *
34 * ABORT_RATE_CHANGE: called if the rate change failed for some reason
35 * after PRE_RATE_CHANGE. In this case, all registered notifiers on
36 * the clk will be called with ABORT_RATE_CHANGE. Callbacks must
37 * always return NOTIFY_DONE or NOTIFY_OK.
38 *
39 * POST_RATE_CHANGE - called after the clk rate change has successfully
40 * completed. Callbacks must always return NOTIFY_DONE or NOTIFY_OK.
41 *
42 */
43 #define PRE_RATE_CHANGE BIT(0)
44 #define POST_RATE_CHANGE BIT(1)
45 #define ABORT_RATE_CHANGE BIT(2)
46
47 /**
48 * struct clk_notifier - associate a clk with a notifier
49 * @clk: struct clk * to associate the notifier with
50 * @notifier_head: a blocking_notifier_head for this clk
51 * @node: linked list pointers
52 *
53 * A list of struct clk_notifier is maintained by the notifier code.
54 * An entry is created whenever code registers the first notifier on a
55 * particular @clk. Future notifiers on that @clk are added to the
56 * @notifier_head.
57 */
58 struct clk_notifier {
59 struct clk *clk;
60 struct srcu_notifier_head notifier_head;
61 struct list_head node;
62 };
63
64 /**
65 * struct clk_notifier_data - rate data to pass to the notifier callback
66 * @clk: struct clk * being changed
67 * @old_rate: previous rate of this clk
68 * @new_rate: new rate of this clk
69 *
70 * For a pre-notifier, old_rate is the clk's rate before this rate
71 * change, and new_rate is what the rate will be in the future. For a
72 * post-notifier, old_rate and new_rate are both set to the clk's
73 * current rate (this was done to optimize the implementation).
74 */
75 struct clk_notifier_data {
76 struct clk *clk;
77 unsigned long old_rate;
78 unsigned long new_rate;
79 };
80
81 /**
82 * clk_notifier_register: register a clock rate-change notifier callback
83 * @clk: clock whose rate we are interested in
84 * @nb: notifier block with callback function pointer
85 *
86 * ProTip: debugging across notifier chains can be frustrating. Make sure that
87 * your notifier callback function prints a nice big warning in case of
88 * failure.
89 */
90 int clk_notifier_register(struct clk *clk, struct notifier_block *nb);
91
92 /**
93 * clk_notifier_unregister: unregister a clock rate-change notifier callback
94 * @clk: clock whose rate we are no longer interested in
95 * @nb: notifier block which will be unregistered
96 */
97 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);
98
99 /**
100 * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion)
101 * for a clock source.
102 * @clk: clock source
103 *
104 * This gets the clock source accuracy expressed in ppb.
105 * A perfect clock returns 0.
106 */
107 long clk_get_accuracy(struct clk *clk);
108
109 /**
110 * clk_set_phase - adjust the phase shift of a clock signal
111 * @clk: clock signal source
112 * @degrees: number of degrees the signal is shifted
113 *
114 * Shifts the phase of a clock signal by the specified degrees. Returns 0 on
115 * success, -EERROR otherwise.
116 */
117 int clk_set_phase(struct clk *clk, int degrees);
118
119 /**
120 * clk_get_phase - return the phase shift of a clock signal
121 * @clk: clock signal source
122 *
123 * Returns the phase shift of a clock node in degrees, otherwise returns
124 * -EERROR.
125 */
126 int clk_get_phase(struct clk *clk);
127
128 /**
129 * clk_is_match - check if two clk's point to the same hardware clock
130 * @p: clk compared against q
131 * @q: clk compared against p
132 *
133 * Returns true if the two struct clk pointers both point to the same hardware
134 * clock node. Put differently, returns true if struct clk *p and struct clk *q
135 * share the same struct clk_core object.
136 *
137 * Returns false otherwise. Note that two NULL clks are treated as matching.
138 */
139 bool clk_is_match(const struct clk *p, const struct clk *q);
140
141 #else
142
143 static inline long clk_get_accuracy(struct clk *clk)
144 {
145 return -ENOTSUPP;
146 }
147
148 static inline long clk_set_phase(struct clk *clk, int phase)
149 {
150 return -ENOTSUPP;
151 }
152
153 static inline long clk_get_phase(struct clk *clk)
154 {
155 return -ENOTSUPP;
156 }
157
158 static inline bool clk_is_match(const struct clk *p, const struct clk *q)
159 {
160 return p == q;
161 }
162
163 #endif
164
165 /**
166 * clk_prepare - prepare a clock source
167 * @clk: clock source
168 *
169 * This prepares the clock source for use.
170 *
171 * Must not be called from within atomic context.
172 */
173 #ifdef CONFIG_HAVE_CLK_PREPARE
174 int clk_prepare(struct clk *clk);
175 #else
176 static inline int clk_prepare(struct clk *clk)
177 {
178 might_sleep();
179 return 0;
180 }
181 #endif
182
183 /**
184 * clk_unprepare - undo preparation of a clock source
185 * @clk: clock source
186 *
187 * This undoes a previously prepared clock. The caller must balance
188 * the number of prepare and unprepare calls.
189 *
190 * Must not be called from within atomic context.
191 */
192 #ifdef CONFIG_HAVE_CLK_PREPARE
193 void clk_unprepare(struct clk *clk);
194 #else
195 static inline void clk_unprepare(struct clk *clk)
196 {
197 might_sleep();
198 }
199 #endif
200
201 #ifdef CONFIG_HAVE_CLK
202 /**
203 * clk_get - lookup and obtain a reference to a clock producer.
204 * @dev: device for clock "consumer"
205 * @id: clock consumer ID
206 *
207 * Returns a struct clk corresponding to the clock producer, or
208 * valid IS_ERR() condition containing errno. The implementation
209 * uses @dev and @id to determine the clock consumer, and thereby
210 * the clock producer. (IOW, @id may be identical strings, but
211 * clk_get may return different clock producers depending on @dev.)
212 *
213 * Drivers must assume that the clock source is not enabled.
214 *
215 * clk_get should not be called from within interrupt context.
216 */
217 struct clk *clk_get(struct device *dev, const char *id);
218
219 /**
220 * devm_clk_get - lookup and obtain a managed reference to a clock producer.
221 * @dev: device for clock "consumer"
222 * @id: clock consumer ID
223 *
224 * Returns a struct clk corresponding to the clock producer, or
225 * valid IS_ERR() condition containing errno. The implementation
226 * uses @dev and @id to determine the clock consumer, and thereby
227 * the clock producer. (IOW, @id may be identical strings, but
228 * clk_get may return different clock producers depending on @dev.)
229 *
230 * Drivers must assume that the clock source is not enabled.
231 *
232 * devm_clk_get should not be called from within interrupt context.
233 *
234 * The clock will automatically be freed when the device is unbound
235 * from the bus.
236 */
237 struct clk *devm_clk_get(struct device *dev, const char *id);
238
239 /**
240 * clk_enable - inform the system when the clock source should be running.
241 * @clk: clock source
242 *
243 * If the clock can not be enabled/disabled, this should return success.
244 *
245 * May be called from atomic contexts.
246 *
247 * Returns success (0) or negative errno.
248 */
249 int clk_enable(struct clk *clk);
250
251 /**
252 * clk_disable - inform the system when the clock source is no longer required.
253 * @clk: clock source
254 *
255 * Inform the system that a clock source is no longer required by
256 * a driver and may be shut down.
257 *
258 * May be called from atomic contexts.
259 *
260 * Implementation detail: if the clock source is shared between
261 * multiple drivers, clk_enable() calls must be balanced by the
262 * same number of clk_disable() calls for the clock source to be
263 * disabled.
264 */
265 void clk_disable(struct clk *clk);
266
267 /**
268 * clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
269 * This is only valid once the clock source has been enabled.
270 * @clk: clock source
271 */
272 unsigned long clk_get_rate(struct clk *clk);
273
274 /**
275 * clk_put - "free" the clock source
276 * @clk: clock source
277 *
278 * Note: drivers must ensure that all clk_enable calls made on this
279 * clock source are balanced by clk_disable calls prior to calling
280 * this function.
281 *
282 * clk_put should not be called from within interrupt context.
283 */
284 void clk_put(struct clk *clk);
285
286 /**
287 * devm_clk_put - "free" a managed clock source
288 * @dev: device used to acquire the clock
289 * @clk: clock source acquired with devm_clk_get()
290 *
291 * Note: drivers must ensure that all clk_enable calls made on this
292 * clock source are balanced by clk_disable calls prior to calling
293 * this function.
294 *
295 * clk_put should not be called from within interrupt context.
296 */
297 void devm_clk_put(struct device *dev, struct clk *clk);
298
299 /*
300 * The remaining APIs are optional for machine class support.
301 */
302
303
304 /**
305 * clk_round_rate - adjust a rate to the exact rate a clock can provide
306 * @clk: clock source
307 * @rate: desired clock rate in Hz
308 *
309 * Returns rounded clock rate in Hz, or negative errno.
310 */
311 long clk_round_rate(struct clk *clk, unsigned long rate);
312
313 /**
314 * clk_set_rate - set the clock rate for a clock source
315 * @clk: clock source
316 * @rate: desired clock rate in Hz
317 *
318 * Returns success (0) or negative errno.
319 */
320 int clk_set_rate(struct clk *clk, unsigned long rate);
321
322 /**
323 * clk_has_parent - check if a clock is a possible parent for another
324 * @clk: clock source
325 * @parent: parent clock source
326 *
327 * This function can be used in drivers that need to check that a clock can be
328 * the parent of another without actually changing the parent.
329 *
330 * Returns true if @parent is a possible parent for @clk, false otherwise.
331 */
332 bool clk_has_parent(struct clk *clk, struct clk *parent);
333
334 /**
335 * clk_set_rate_range - set a rate range for a clock source
336 * @clk: clock source
337 * @min: desired minimum clock rate in Hz, inclusive
338 * @max: desired maximum clock rate in Hz, inclusive
339 *
340 * Returns success (0) or negative errno.
341 */
342 int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max);
343
344 /**
345 * clk_set_min_rate - set a minimum clock rate for a clock source
346 * @clk: clock source
347 * @rate: desired minimum clock rate in Hz, inclusive
348 *
349 * Returns success (0) or negative errno.
350 */
351 int clk_set_min_rate(struct clk *clk, unsigned long rate);
352
353 /**
354 * clk_set_max_rate - set a maximum clock rate for a clock source
355 * @clk: clock source
356 * @rate: desired maximum clock rate in Hz, inclusive
357 *
358 * Returns success (0) or negative errno.
359 */
360 int clk_set_max_rate(struct clk *clk, unsigned long rate);
361
362 /**
363 * clk_set_parent - set the parent clock source for this clock
364 * @clk: clock source
365 * @parent: parent clock source
366 *
367 * Returns success (0) or negative errno.
368 */
369 int clk_set_parent(struct clk *clk, struct clk *parent);
370
371 /**
372 * clk_get_parent - get the parent clock source for this clock
373 * @clk: clock source
374 *
375 * Returns struct clk corresponding to parent clock source, or
376 * valid IS_ERR() condition containing errno.
377 */
378 struct clk *clk_get_parent(struct clk *clk);
379
380 /**
381 * clk_get_sys - get a clock based upon the device name
382 * @dev_id: device name
383 * @con_id: connection ID
384 *
385 * Returns a struct clk corresponding to the clock producer, or
386 * valid IS_ERR() condition containing errno. The implementation
387 * uses @dev_id and @con_id to determine the clock consumer, and
388 * thereby the clock producer. In contrast to clk_get() this function
389 * takes the device name instead of the device itself for identification.
390 *
391 * Drivers must assume that the clock source is not enabled.
392 *
393 * clk_get_sys should not be called from within interrupt context.
394 */
395 struct clk *clk_get_sys(const char *dev_id, const char *con_id);
396
397 #else /* !CONFIG_HAVE_CLK */
398
399 static inline struct clk *clk_get(struct device *dev, const char *id)
400 {
401 return NULL;
402 }
403
404 static inline struct clk *devm_clk_get(struct device *dev, const char *id)
405 {
406 return NULL;
407 }
408
409 static inline void clk_put(struct clk *clk) {}
410
411 static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
412
413 static inline int clk_enable(struct clk *clk)
414 {
415 return 0;
416 }
417
418 static inline void clk_disable(struct clk *clk) {}
419
420 static inline unsigned long clk_get_rate(struct clk *clk)
421 {
422 return 0;
423 }
424
425 static inline int clk_set_rate(struct clk *clk, unsigned long rate)
426 {
427 return 0;
428 }
429
430 static inline long clk_round_rate(struct clk *clk, unsigned long rate)
431 {
432 return 0;
433 }
434
435 static inline bool clk_has_parent(struct clk *clk, struct clk *parent)
436 {
437 return true;
438 }
439
440 static inline int clk_set_parent(struct clk *clk, struct clk *parent)
441 {
442 return 0;
443 }
444
445 static inline struct clk *clk_get_parent(struct clk *clk)
446 {
447 return NULL;
448 }
449
450 #endif
451
452 /* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
453 static inline int clk_prepare_enable(struct clk *clk)
454 {
455 int ret;
456
457 ret = clk_prepare(clk);
458 if (ret)
459 return ret;
460 ret = clk_enable(clk);
461 if (ret)
462 clk_unprepare(clk);
463
464 return ret;
465 }
466
467 /* clk_disable_unprepare helps cases using clk_disable in non-atomic context. */
468 static inline void clk_disable_unprepare(struct clk *clk)
469 {
470 clk_disable(clk);
471 clk_unprepare(clk);
472 }
473
474 /**
475 * clk_add_alias - add a new clock alias
476 * @alias: name for clock alias
477 * @alias_dev_name: device name
478 * @id: platform specific clock name
479 * @dev: device
480 *
481 * Allows using generic clock names for drivers by adding a new alias.
482 * Assumes clkdev, see clkdev.h for more info.
483 */
484 int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
485 struct device *dev);
486
487 struct device_node;
488 struct of_phandle_args;
489
490 #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
491 struct clk *of_clk_get(struct device_node *np, int index);
492 struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
493 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec);
494 #else
495 static inline struct clk *of_clk_get(struct device_node *np, int index)
496 {
497 return ERR_PTR(-ENOENT);
498 }
499 static inline struct clk *of_clk_get_by_name(struct device_node *np,
500 const char *name)
501 {
502 return ERR_PTR(-ENOENT);
503 }
504 #endif
505
506 #endif 1 /*
2 * device.h - generic, centralized driver model
3 *
4 * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
5 * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
6 * Copyright (c) 2008-2009 Novell Inc.
7 *
8 * This file is released under the GPLv2
9 *
10 * See Documentation/driver-model/ for more information.
11 */
12
13 #ifndef _DEVICE_H_
14 #define _DEVICE_H_
15
16 #include <linux/ioport.h>
17 #include <linux/kobject.h>
18 #include <linux/klist.h>
19 #include <linux/list.h>
20 #include <linux/lockdep.h>
21 #include <linux/compiler.h>
22 #include <linux/types.h>
23 #include <linux/mutex.h>
24 #include <linux/pinctrl/devinfo.h>
25 #include <linux/pm.h>
26 #include <linux/atomic.h>
27 #include <linux/ratelimit.h>
28 #include <linux/uidgid.h>
29 #include <linux/gfp.h>
30 #include <asm/device.h>
31
32 struct device;
33 struct device_private;
34 struct device_driver;
35 struct driver_private;
36 struct module;
37 struct class;
38 struct subsys_private;
39 struct bus_type;
40 struct device_node;
41 struct fwnode_handle;
42 struct iommu_ops;
43 struct iommu_group;
44
45 struct bus_attribute {
46 struct attribute attr;
47 ssize_t (*show)(struct bus_type *bus, char *buf);
48 ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
49 };
50
51 #define BUS_ATTR(_name, _mode, _show, _store) \
52 struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store)
53 #define BUS_ATTR_RW(_name) \
54 struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
55 #define BUS_ATTR_RO(_name) \
56 struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
57
58 extern int __must_check bus_create_file(struct bus_type *,
59 struct bus_attribute *);
60 extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
61
62 /**
63 * struct bus_type - The bus type of the device
64 *
65 * @name: The name of the bus.
66 * @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id).
67 * @dev_root: Default device to use as the parent.
68 * @dev_attrs: Default attributes of the devices on the bus.
69 * @bus_groups: Default attributes of the bus.
70 * @dev_groups: Default attributes of the devices on the bus.
71 * @drv_groups: Default attributes of the device drivers on the bus.
72 * @match: Called, perhaps multiple times, whenever a new device or driver
73 * is added for this bus. It should return a nonzero value if the
74 * given device can be handled by the given driver.
75 * @uevent: Called when a device is added, removed, or a few other things
76 * that generate uevents to add the environment variables.
77 * @probe: Called when a new device or driver add to this bus, and callback
78 * the specific driver's probe to initial the matched device.
79 * @remove: Called when a device removed from this bus.
80 * @shutdown: Called at shut-down time to quiesce the device.
81 *
82 * @online: Called to put the device back online (after offlining it).
83 * @offline: Called to put the device offline for hot-removal. May fail.
84 *
85 * @suspend: Called when a device on this bus wants to go to sleep mode.
86 * @resume: Called to bring a device on this bus out of sleep mode.
87 * @pm: Power management operations of this bus, callback the specific
88 * device driver's pm-ops.
89 * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
90 * driver implementations to a bus and allow the driver to do
91 * bus-specific setup
92 * @p: The private data of the driver core, only the driver core can
93 * touch this.
94 * @lock_key: Lock class key for use by the lock validator
95 *
96 * A bus is a channel between the processor and one or more devices. For the
97 * purposes of the device model, all devices are connected via a bus, even if
98 * it is an internal, virtual, "platform" bus. Buses can plug into each other.
99 * A USB controller is usually a PCI device, for example. The device model
100 * represents the actual connections between buses and the devices they control.
101 * A bus is represented by the bus_type structure. It contains the name, the
102 * default attributes, the bus' methods, PM operations, and the driver core's
103 * private data.
104 */
105 struct bus_type {
106 const char *name;
107 const char *dev_name;
108 struct device *dev_root;
109 struct device_attribute *dev_attrs; /* use dev_groups instead */
110 const struct attribute_group **bus_groups;
111 const struct attribute_group **dev_groups;
112 const struct attribute_group **drv_groups;
113
114 int (*match)(struct device *dev, struct device_driver *drv);
115 int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
116 int (*probe)(struct device *dev);
117 int (*remove)(struct device *dev);
118 void (*shutdown)(struct device *dev);
119
120 int (*online)(struct device *dev);
121 int (*offline)(struct device *dev);
122
123 int (*suspend)(struct device *dev, pm_message_t state);
124 int (*resume)(struct device *dev);
125
126 const struct dev_pm_ops *pm;
127
128 const struct iommu_ops *iommu_ops;
129
130 struct subsys_private *p;
131 struct lock_class_key lock_key;
132 };
133
134 extern int __must_check bus_register(struct bus_type *bus);
135
136 extern void bus_unregister(struct bus_type *bus);
137
138 extern int __must_check bus_rescan_devices(struct bus_type *bus);
139
140 /* iterator helpers for buses */
141 struct subsys_dev_iter {
142 struct klist_iter ki;
143 const struct device_type *type;
144 };
145 void subsys_dev_iter_init(struct subsys_dev_iter *iter,
146 struct bus_type *subsys,
147 struct device *start,
148 const struct device_type *type);
149 struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
150 void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
151
152 int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
153 int (*fn)(struct device *dev, void *data));
154 struct device *bus_find_device(struct bus_type *bus, struct device *start,
155 void *data,
156 int (*match)(struct device *dev, void *data));
157 struct device *bus_find_device_by_name(struct bus_type *bus,
158 struct device *start,
159 const char *name);
160 struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id,
161 struct device *hint);
162 int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
163 void *data, int (*fn)(struct device_driver *, void *));
164 void bus_sort_breadthfirst(struct bus_type *bus,
165 int (*compare)(const struct device *a,
166 const struct device *b));
167 /*
168 * Bus notifiers: Get notified of addition/removal of devices
169 * and binding/unbinding of drivers to devices.
170 * In the long run, it should be a replacement for the platform
171 * notify hooks.
172 */
173 struct notifier_block;
174
175 extern int bus_register_notifier(struct bus_type *bus,
176 struct notifier_block *nb);
177 extern int bus_unregister_notifier(struct bus_type *bus,
178 struct notifier_block *nb);
179
/* All notifiers below get called with the target struct device *
181 * as an argument. Note that those functions are likely to be called
182 * with the device lock held in the core, so be careful.
183 */
184 #define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */
185 #define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device to be removed */
186 #define BUS_NOTIFY_REMOVED_DEVICE 0x00000003 /* device removed */
187 #define BUS_NOTIFY_BIND_DRIVER 0x00000004 /* driver about to be
188 bound */
189 #define BUS_NOTIFY_BOUND_DRIVER 0x00000005 /* driver bound to device */
190 #define BUS_NOTIFY_UNBIND_DRIVER 0x00000006 /* driver about to be
191 unbound */
192 #define BUS_NOTIFY_UNBOUND_DRIVER 0x00000007 /* driver is unbound
193 from the device */
194
195 extern struct kset *bus_get_kset(struct bus_type *bus);
196 extern struct klist *bus_get_device_klist(struct bus_type *bus);
197
198 /**
199 * struct device_driver - The basic device driver structure
200 * @name: Name of the device driver.
201 * @bus: The bus which the device of this driver belongs to.
202 * @owner: The module owner.
203 * @mod_name: Used for built-in modules.
204 * @suppress_bind_attrs: Disables bind/unbind via sysfs.
205 * @of_match_table: The open firmware table.
206 * @acpi_match_table: The ACPI match table.
207 * @probe: Called to query the existence of a specific device,
208 * whether this driver can work with it, and bind the driver
209 * to a specific device.
210 * @remove: Called when the device is removed from the system to
211 * unbind a device from this driver.
212 * @shutdown: Called at shut-down time to quiesce the device.
213 * @suspend: Called to put the device to sleep mode. Usually to a
214 * low power state.
215 * @resume: Called to bring a device from sleep mode.
216 * @groups: Default attributes that get created by the driver core
217 * automatically.
218 * @pm: Power management operations of the device which matched
219 * this driver.
220 * @p: Driver core's private data, no one other than the driver
221 * core can touch this.
222 *
223 * The device driver-model tracks all of the drivers known to the system.
224 * The main reason for this tracking is to enable the driver core to match
225 * up drivers with new devices. Once drivers are known objects within the
226 * system, however, a number of other things become possible. Device drivers
227 * can export information and configuration variables that are independent
228 * of any specific device.
229 */
230 struct device_driver {
231 const char *name;
232 struct bus_type *bus;
233
234 struct module *owner;
235 const char *mod_name; /* used for built-in modules */
236
237 bool suppress_bind_attrs; /* disables bind/unbind via sysfs */
238
239 const struct of_device_id *of_match_table;
240 const struct acpi_device_id *acpi_match_table;
241
242 int (*probe) (struct device *dev);
243 int (*remove) (struct device *dev);
244 void (*shutdown) (struct device *dev);
245 int (*suspend) (struct device *dev, pm_message_t state);
246 int (*resume) (struct device *dev);
247 const struct attribute_group **groups;
248
249 const struct dev_pm_ops *pm;
250
251 struct driver_private *p;
252 };
253
254
255 extern int __must_check driver_register(struct device_driver *drv);
256 extern void driver_unregister(struct device_driver *drv);
257
258 extern struct device_driver *driver_find(const char *name,
259 struct bus_type *bus);
260 extern int driver_probe_done(void);
261 extern void wait_for_device_probe(void);
262
263
264 /* sysfs interface for exporting driver attributes */
265
266 struct driver_attribute {
267 struct attribute attr;
268 ssize_t (*show)(struct device_driver *driver, char *buf);
269 ssize_t (*store)(struct device_driver *driver, const char *buf,
270 size_t count);
271 };
272
273 #define DRIVER_ATTR(_name, _mode, _show, _store) \
274 struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store)
275 #define DRIVER_ATTR_RW(_name) \
276 struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
277 #define DRIVER_ATTR_RO(_name) \
278 struct driver_attribute driver_attr_##_name = __ATTR_RO(_name)
279 #define DRIVER_ATTR_WO(_name) \
280 struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)
281
282 extern int __must_check driver_create_file(struct device_driver *driver,
283 const struct driver_attribute *attr);
284 extern void driver_remove_file(struct device_driver *driver,
285 const struct driver_attribute *attr);
286
287 extern int __must_check driver_for_each_device(struct device_driver *drv,
288 struct device *start,
289 void *data,
290 int (*fn)(struct device *dev,
291 void *));
292 struct device *driver_find_device(struct device_driver *drv,
293 struct device *start, void *data,
294 int (*match)(struct device *dev, void *data));
295
296 /**
297 * struct subsys_interface - interfaces to device functions
298 * @name: name of the device function
 * @subsys: subsystem of the devices to attach to
300 * @node: the list of functions registered at the subsystem
301 * @add_dev: device hookup to device function handler
302 * @remove_dev: device hookup to device function handler
303 *
304 * Simple interfaces attached to a subsystem. Multiple interfaces can
305 * attach to a subsystem and its devices. Unlike drivers, they do not
306 * exclusively claim or control devices. Interfaces usually represent
307 * a specific functionality of a subsystem/class of devices.
308 */
309 struct subsys_interface {
310 const char *name;
311 struct bus_type *subsys;
312 struct list_head node;
313 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
314 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
315 };
316
317 int subsys_interface_register(struct subsys_interface *sif);
318 void subsys_interface_unregister(struct subsys_interface *sif);
319
320 int subsys_system_register(struct bus_type *subsys,
321 const struct attribute_group **groups);
322 int subsys_virtual_register(struct bus_type *subsys,
323 const struct attribute_group **groups);
324
325 /**
326 * struct class - device classes
327 * @name: Name of the class.
328 * @owner: The module owner.
329 * @class_attrs: Default attributes of this class.
330 * @dev_groups: Default attributes of the devices that belong to the class.
331 * @dev_kobj: The kobject that represents this class and links it into the hierarchy.
332 * @dev_uevent: Called when a device is added, removed from this class, or a
333 * few other things that generate uevents to add the environment
334 * variables.
335 * @devnode: Callback to provide the devtmpfs.
336 * @class_release: Called to release this class.
337 * @dev_release: Called to release the device.
338 * @suspend: Used to put the device to sleep mode, usually to a low power
339 * state.
340 * @resume: Used to bring the device from the sleep mode.
 * @ns_type:	Callbacks so sysfs can determine namespaces.
 * @namespace:	Namespace of the device that belongs to this class.
343 * @pm: The default device power management operations of this class.
344 * @p: The private data of the driver core, no one other than the
345 * driver core can touch this.
346 *
347 * A class is a higher-level view of a device that abstracts out low-level
348 * implementation details. Drivers may see a SCSI disk or an ATA disk, but,
349 * at the class level, they are all simply disks. Classes allow user space
350 * to work with devices based on what they do, rather than how they are
351 * connected or how they work.
352 */
353 struct class {
354 const char *name;
355 struct module *owner;
356
357 struct class_attribute *class_attrs;
358 const struct attribute_group **dev_groups;
359 struct kobject *dev_kobj;
360
361 int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
362 char *(*devnode)(struct device *dev, umode_t *mode);
363
364 void (*class_release)(struct class *class);
365 void (*dev_release)(struct device *dev);
366
367 int (*suspend)(struct device *dev, pm_message_t state);
368 int (*resume)(struct device *dev);
369
370 const struct kobj_ns_type_operations *ns_type;
371 const void *(*namespace)(struct device *dev);
372
373 const struct dev_pm_ops *pm;
374
375 struct subsys_private *p;
376 };
377
378 struct class_dev_iter {
379 struct klist_iter ki;
380 const struct device_type *type;
381 };
382
383 extern struct kobject *sysfs_dev_block_kobj;
384 extern struct kobject *sysfs_dev_char_kobj;
385 extern int __must_check __class_register(struct class *class,
386 struct lock_class_key *key);
387 extern void class_unregister(struct class *class);
388
389 /* This is a #define to keep the compiler from merging different
390 * instances of the __key variable */
391 #define class_register(class) \
392 ({ \
393 static struct lock_class_key __key; \
394 __class_register(class, &__key); \
395 })
396
397 struct class_compat;
398 struct class_compat *class_compat_register(const char *name);
399 void class_compat_unregister(struct class_compat *cls);
400 int class_compat_create_link(struct class_compat *cls, struct device *dev,
401 struct device *device_link);
402 void class_compat_remove_link(struct class_compat *cls, struct device *dev,
403 struct device *device_link);
404
405 extern void class_dev_iter_init(struct class_dev_iter *iter,
406 struct class *class,
407 struct device *start,
408 const struct device_type *type);
409 extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
410 extern void class_dev_iter_exit(struct class_dev_iter *iter);
411
412 extern int class_for_each_device(struct class *class, struct device *start,
413 void *data,
414 int (*fn)(struct device *dev, void *data));
415 extern struct device *class_find_device(struct class *class,
416 struct device *start, const void *data,
417 int (*match)(struct device *, const void *));
418
419 struct class_attribute {
420 struct attribute attr;
421 ssize_t (*show)(struct class *class, struct class_attribute *attr,
422 char *buf);
423 ssize_t (*store)(struct class *class, struct class_attribute *attr,
424 const char *buf, size_t count);
425 };
426
427 #define CLASS_ATTR(_name, _mode, _show, _store) \
428 struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store)
429 #define CLASS_ATTR_RW(_name) \
430 struct class_attribute class_attr_##_name = __ATTR_RW(_name)
431 #define CLASS_ATTR_RO(_name) \
432 struct class_attribute class_attr_##_name = __ATTR_RO(_name)
433
434 extern int __must_check class_create_file_ns(struct class *class,
435 const struct class_attribute *attr,
436 const void *ns);
437 extern void class_remove_file_ns(struct class *class,
438 const struct class_attribute *attr,
439 const void *ns);
440
441 static inline int __must_check class_create_file(struct class *class,
442 const struct class_attribute *attr)
443 {
444 return class_create_file_ns(class, attr, NULL);
445 }
446
447 static inline void class_remove_file(struct class *class,
448 const struct class_attribute *attr)
449 {
450 return class_remove_file_ns(class, attr, NULL);
451 }
452
453 /* Simple class attribute that is just a static string */
454 struct class_attribute_string {
455 struct class_attribute attr;
456 char *str;
457 };
458
459 /* Currently read-only only */
460 #define _CLASS_ATTR_STRING(_name, _mode, _str) \
461 { __ATTR(_name, _mode, show_class_attr_string, NULL), _str }
462 #define CLASS_ATTR_STRING(_name, _mode, _str) \
463 struct class_attribute_string class_attr_##_name = \
464 _CLASS_ATTR_STRING(_name, _mode, _str)
465
466 extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr,
467 char *buf);
468
469 struct class_interface {
470 struct list_head node;
471 struct class *class;
472
473 int (*add_dev) (struct device *, struct class_interface *);
474 void (*remove_dev) (struct device *, struct class_interface *);
475 };
476
477 extern int __must_check class_interface_register(struct class_interface *);
478 extern void class_interface_unregister(struct class_interface *);
479
480 extern struct class * __must_check __class_create(struct module *owner,
481 const char *name,
482 struct lock_class_key *key);
483 extern void class_destroy(struct class *cls);
484
485 /* This is a #define to keep the compiler from merging different
486 * instances of the __key variable */
487 #define class_create(owner, name) \
488 ({ \
489 static struct lock_class_key __key; \
490 __class_create(owner, name, &__key); \
491 })
492
493 /*
494 * The type of device, "struct device" is embedded in. A class
495 * or bus can contain devices of different types
496 * like "partitions" and "disks", "mouse" and "event".
497 * This identifies the device type and carries type-specific
498 * information, equivalent to the kobj_type of a kobject.
499 * If "name" is specified, the uevent will contain it in
500 * the DEVTYPE variable.
501 */
502 struct device_type {
503 const char *name;
504 const struct attribute_group **groups;
505 int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
506 char *(*devnode)(struct device *dev, umode_t *mode,
507 kuid_t *uid, kgid_t *gid);
508 void (*release)(struct device *dev);
509
510 const struct dev_pm_ops *pm;
511 };
512
513 /* interface for exporting device attributes */
514 struct device_attribute {
515 struct attribute attr;
516 ssize_t (*show)(struct device *dev, struct device_attribute *attr,
517 char *buf);
518 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
519 const char *buf, size_t count);
520 };
521
522 struct dev_ext_attribute {
523 struct device_attribute attr;
524 void *var;
525 };
526
527 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
528 char *buf);
529 ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr,
530 const char *buf, size_t count);
531 ssize_t device_show_int(struct device *dev, struct device_attribute *attr,
532 char *buf);
533 ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
534 const char *buf, size_t count);
535 ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
536 char *buf);
537 ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
538 const char *buf, size_t count);
539
540 #define DEVICE_ATTR(_name, _mode, _show, _store) \
541 struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
542 #define DEVICE_ATTR_RW(_name) \
543 struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
544 #define DEVICE_ATTR_RO(_name) \
545 struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
546 #define DEVICE_ATTR_WO(_name) \
547 struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
548 #define DEVICE_ULONG_ATTR(_name, _mode, _var) \
549 struct dev_ext_attribute dev_attr_##_name = \
550 { __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
551 #define DEVICE_INT_ATTR(_name, _mode, _var) \
552 struct dev_ext_attribute dev_attr_##_name = \
553 { __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }
554 #define DEVICE_BOOL_ATTR(_name, _mode, _var) \
555 struct dev_ext_attribute dev_attr_##_name = \
556 { __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }
557 #define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
558 struct device_attribute dev_attr_##_name = \
559 __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
560
561 extern int device_create_file(struct device *device,
562 const struct device_attribute *entry);
563 extern void device_remove_file(struct device *dev,
564 const struct device_attribute *attr);
565 extern bool device_remove_file_self(struct device *dev,
566 const struct device_attribute *attr);
567 extern int __must_check device_create_bin_file(struct device *dev,
568 const struct bin_attribute *attr);
569 extern void device_remove_bin_file(struct device *dev,
570 const struct bin_attribute *attr);
571
572 /* device resource management */
573 typedef void (*dr_release_t)(struct device *dev, void *res);
574 typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
575
576 #ifdef CONFIG_DEBUG_DEVRES
577 extern void *__devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
578 const char *name);
579 #define devres_alloc(release, size, gfp) \
580 __devres_alloc(release, size, gfp, #release)
581 #else
582 extern void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp);
583 #endif
584 extern void devres_for_each_res(struct device *dev, dr_release_t release,
585 dr_match_t match, void *match_data,
586 void (*fn)(struct device *, void *, void *),
587 void *data);
588 extern void devres_free(void *res);
589 extern void devres_add(struct device *dev, void *res);
590 extern void *devres_find(struct device *dev, dr_release_t release,
591 dr_match_t match, void *match_data);
592 extern void *devres_get(struct device *dev, void *new_res,
593 dr_match_t match, void *match_data);
594 extern void *devres_remove(struct device *dev, dr_release_t release,
595 dr_match_t match, void *match_data);
596 extern int devres_destroy(struct device *dev, dr_release_t release,
597 dr_match_t match, void *match_data);
598 extern int devres_release(struct device *dev, dr_release_t release,
599 dr_match_t match, void *match_data);
600
601 /* devres group */
602 extern void * __must_check devres_open_group(struct device *dev, void *id,
603 gfp_t gfp);
604 extern void devres_close_group(struct device *dev, void *id);
605 extern void devres_remove_group(struct device *dev, void *id);
606 extern int devres_release_group(struct device *dev, void *id);
607
608 /* managed devm_k.alloc/kfree for device drivers */
609 extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
610 extern char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
611 va_list ap);
612 extern __printf(3, 4)
613 char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...);
614 static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
615 {
616 return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
617 }
618 static inline void *devm_kmalloc_array(struct device *dev,
619 size_t n, size_t size, gfp_t flags)
620 {
621 if (size != 0 && n > SIZE_MAX / size)
622 return NULL;
623 return devm_kmalloc(dev, n * size, flags);
624 }
625 static inline void *devm_kcalloc(struct device *dev,
626 size_t n, size_t size, gfp_t flags)
627 {
628 return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
629 }
630 extern void devm_kfree(struct device *dev, void *p);
631 extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp);
632 extern void *devm_kmemdup(struct device *dev, const void *src, size_t len,
633 gfp_t gfp);
634
635 extern unsigned long devm_get_free_pages(struct device *dev,
636 gfp_t gfp_mask, unsigned int order);
637 extern void devm_free_pages(struct device *dev, unsigned long addr);
638
639 void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
640
641 /* allows to add/remove a custom action to devres stack */
642 int devm_add_action(struct device *dev, void (*action)(void *), void *data);
643 void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
644
645 struct device_dma_parameters {
646 /*
647 * a low level driver may set these to teach IOMMU code about
648 * sg limitations.
649 */
650 unsigned int max_segment_size;
651 unsigned long segment_boundary_mask;
652 };
653
654 /**
655 * struct device - The basic device structure
656 * @parent: The device's "parent" device, the device to which it is attached.
657 * In most cases, a parent device is some sort of bus or host
 *		controller. If parent is NULL, the device is a top-level device,
659 * which is not usually what you want.
660 * @p: Holds the private data of the driver core portions of the device.
661 * See the comment of the struct device_private for detail.
662 * @kobj: A top-level, abstract class from which other classes are derived.
663 * @init_name: Initial name of the device.
664 * @type: The type of device.
665 * This identifies the device type and carries type-specific
666 * information.
667 * @mutex: Mutex to synchronize calls to its driver.
668 * @bus: Type of bus device is on.
669 * @driver: Which driver has allocated this
670 * @platform_data: Platform data specific to the device.
671 * Example: For devices on custom boards, as typical of embedded
672 * and SOC based hardware, Linux often uses platform_data to point
673 * to board-specific structures describing devices and how they
674 * are wired. That can include what ports are available, chip
675 * variants, which GPIO pins act in what additional roles, and so
676 * on. This shrinks the "Board Support Packages" (BSPs) and
677 * minimizes board-specific #ifdefs in drivers.
678 * @driver_data: Private pointer for driver specific info.
679 * @power: For device power management.
680 * See Documentation/power/devices.txt for details.
681 * @pm_domain: Provide callbacks that are executed during system suspend,
682 * hibernation, system resume and during runtime PM transitions
683 * along with subsystem-level and driver-level callbacks.
684 * @pins: For device pin management.
685 * See Documentation/pinctrl.txt for details.
686 * @numa_node: NUMA node this device is close to.
687 * @dma_mask: Dma mask (if dma'ble device).
688 * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
689 * hardware supports 64-bit addresses for consistent allocations
690 * such descriptors.
691 * @dma_pfn_offset: offset of DMA memory range relatively of RAM
692 * @dma_parms: A low level driver may set these to teach IOMMU code about
693 * segment limitations.
694 * @dma_pools: Dma pools (if dma'ble device).
695 * @dma_mem: Internal for coherent mem override.
696 * @cma_area: Contiguous memory area for dma allocations
697 * @archdata: For arch-specific additions.
698 * @of_node: Associated device tree node.
699 * @fwnode: Associated device node supplied by platform firmware.
700 * @devt: For creating the sysfs "dev".
701 * @id: device instance
702 * @devres_lock: Spinlock to protect the resource of the device.
703 * @devres_head: The resources list of the device.
704 * @knode_class: The node used to add the device to the class list.
705 * @class: The class of the device.
706 * @groups: Optional attribute groups.
707 * @release: Callback to free the device after all references have
708 * gone away. This should be set by the allocator of the
709 * device (i.e. the bus driver that discovered the device).
710 * @iommu_group: IOMMU group the device belongs to.
711 *
712 * @offline_disabled: If set, the device is permanently online.
713 * @offline: Set after successful invocation of bus type's .offline().
714 *
715 * At the lowest level, every device in a Linux system is represented by an
716 * instance of struct device. The device structure contains the information
717 * that the device model core needs to model the system. Most subsystems,
718 * however, track additional information about the devices they host. As a
719 * result, it is rare for devices to be represented by bare device structures;
720 * instead, that structure, like kobject structures, is usually embedded within
721 * a higher-level representation of the device.
722 */
723 struct device {
724 struct device *parent;
725
726 struct device_private *p;
727
728 struct kobject kobj;
729 const char *init_name; /* initial name of the device */
730 const struct device_type *type;
731
732 struct mutex mutex; /* mutex to synchronize calls to
733 * its driver.
734 */
735
736 struct bus_type *bus; /* type of bus device is on */
737 struct device_driver *driver; /* which driver has allocated this
738 device */
739 void *platform_data; /* Platform specific data, device
740 core doesn't touch it */
741 void *driver_data; /* Driver data, set and get with
742 dev_set/get_drvdata */
743 struct dev_pm_info power;
744 struct dev_pm_domain *pm_domain;
745
746 #ifdef CONFIG_PINCTRL
747 struct dev_pin_info *pins;
748 #endif
749
750 #ifdef CONFIG_NUMA
751 int numa_node; /* NUMA node this device is close to */
752 #endif
753 u64 *dma_mask; /* dma mask (if dma'able device) */
754 u64 coherent_dma_mask;/* Like dma_mask, but for
755 alloc_coherent mappings as
756 not all hardware supports
757 64 bit addresses for consistent
758 allocations such descriptors. */
759 unsigned long dma_pfn_offset;
760
761 struct device_dma_parameters *dma_parms;
762
763 struct list_head dma_pools; /* dma pools (if dma'ble) */
764
765 struct dma_coherent_mem *dma_mem; /* internal for coherent mem
766 override */
767 #ifdef CONFIG_DMA_CMA
768 struct cma *cma_area; /* contiguous memory area for dma
769 allocations */
770 #endif
771 /* arch specific additions */
772 struct dev_archdata archdata;
773
774 struct device_node *of_node; /* associated device tree node */
775 struct fwnode_handle *fwnode; /* firmware device node */
776
777 dev_t devt; /* dev_t, creates the sysfs "dev" */
778 u32 id; /* device instance */
779
780 spinlock_t devres_lock;
781 struct list_head devres_head;
782
783 struct klist_node knode_class;
784 struct class *class;
785 const struct attribute_group **groups; /* optional groups */
786
787 void (*release)(struct device *dev);
788 struct iommu_group *iommu_group;
789
790 bool offline_disabled:1;
791 bool offline:1;
792 };
793
794 static inline struct device *kobj_to_dev(struct kobject *kobj)
795 {
796 return container_of(kobj, struct device, kobj);
797 }
798
799 /* Get the wakeup routines, which depend on struct device */
800 #include <linux/pm_wakeup.h>
801
802 static inline const char *dev_name(const struct device *dev)
803 {
804 /* Use the init name until the kobject becomes available */
805 if (dev->init_name)
806 return dev->init_name;
807
808 return kobject_name(&dev->kobj);
809 }
810
811 extern __printf(2, 3)
812 int dev_set_name(struct device *dev, const char *name, ...);
813
814 #ifdef CONFIG_NUMA
815 static inline int dev_to_node(struct device *dev)
816 {
817 return dev->numa_node;
818 }
819 static inline void set_dev_node(struct device *dev, int node)
820 {
821 dev->numa_node = node;
822 }
823 #else
824 static inline int dev_to_node(struct device *dev)
825 {
826 return -1;
827 }
828 static inline void set_dev_node(struct device *dev, int node)
829 {
830 }
831 #endif
832
833 static inline void *dev_get_drvdata(const struct device *dev)
834 {
835 return dev->driver_data;
836 }
837
838 static inline void dev_set_drvdata(struct device *dev, void *data)
839 {
840 dev->driver_data = data;
841 }
842
843 static inline struct pm_subsys_data *dev_to_psd(struct device *dev)
844 {
845 return dev ? dev->power.subsys_data : NULL;
846 }
847
848 static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
849 {
850 return dev->kobj.uevent_suppress;
851 }
852
853 static inline void dev_set_uevent_suppress(struct device *dev, int val)
854 {
855 dev->kobj.uevent_suppress = val;
856 }
857
858 static inline int device_is_registered(struct device *dev)
859 {
860 return dev->kobj.state_in_sysfs;
861 }
862
863 static inline void device_enable_async_suspend(struct device *dev)
864 {
865 if (!dev->power.is_prepared)
866 dev->power.async_suspend = true;
867 }
868
869 static inline void device_disable_async_suspend(struct device *dev)
870 {
871 if (!dev->power.is_prepared)
872 dev->power.async_suspend = false;
873 }
874
875 static inline bool device_async_suspend_enabled(struct device *dev)
876 {
877 return !!dev->power.async_suspend;
878 }
879
880 static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
881 {
882 dev->power.ignore_children = enable;
883 }
884
885 static inline void dev_pm_syscore_device(struct device *dev, bool val)
886 {
887 #ifdef CONFIG_PM_SLEEP
888 dev->power.syscore = val;
889 #endif
890 }
891
892 static inline void device_lock(struct device *dev)
893 {
894 mutex_lock(&dev->mutex);
895 }
896
897 static inline int device_trylock(struct device *dev)
898 {
899 return mutex_trylock(&dev->mutex);
900 }
901
902 static inline void device_unlock(struct device *dev)
903 {
904 mutex_unlock(&dev->mutex);
905 }
906
907 static inline void device_lock_assert(struct device *dev)
908 {
909 lockdep_assert_held(&dev->mutex);
910 }
911
912 static inline struct device_node *dev_of_node(struct device *dev)
913 {
914 if (!IS_ENABLED(CONFIG_OF))
915 return NULL;
916 return dev->of_node;
917 }
918
919 void driver_init(void);
920
921 /*
922 * High level routines for use by the bus drivers
923 */
924 extern int __must_check device_register(struct device *dev);
925 extern void device_unregister(struct device *dev);
926 extern void device_initialize(struct device *dev);
927 extern int __must_check device_add(struct device *dev);
928 extern void device_del(struct device *dev);
929 extern int device_for_each_child(struct device *dev, void *data,
930 int (*fn)(struct device *dev, void *data));
931 extern struct device *device_find_child(struct device *dev, void *data,
932 int (*match)(struct device *dev, void *data));
933 extern int device_rename(struct device *dev, const char *new_name);
934 extern int device_move(struct device *dev, struct device *new_parent,
935 enum dpm_order dpm_order);
936 extern const char *device_get_devnode(struct device *dev,
937 umode_t *mode, kuid_t *uid, kgid_t *gid,
938 const char **tmp);
939
940 static inline bool device_supports_offline(struct device *dev)
941 {
942 return dev->bus && dev->bus->offline && dev->bus->online;
943 }
944
945 extern void lock_device_hotplug(void);
946 extern void unlock_device_hotplug(void);
947 extern int lock_device_hotplug_sysfs(void);
948 extern int device_offline(struct device *dev);
949 extern int device_online(struct device *dev);
950 extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
951 extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
952
953 /*
954 * Root device objects for grouping under /sys/devices
955 */
956 extern struct device *__root_device_register(const char *name,
957 struct module *owner);
958
959 /* This is a macro to avoid include problems with THIS_MODULE */
960 #define root_device_register(name) \
961 __root_device_register(name, THIS_MODULE)
962
963 extern void root_device_unregister(struct device *root);
964
965 static inline void *dev_get_platdata(const struct device *dev)
966 {
967 return dev->platform_data;
968 }
969
970 /*
971 * Manual binding of a device to driver. See drivers/base/bus.c
972 * for information on use.
973 */
974 extern int __must_check device_bind_driver(struct device *dev);
975 extern void device_release_driver(struct device *dev);
976 extern int __must_check device_attach(struct device *dev);
977 extern int __must_check driver_attach(struct device_driver *drv);
978 extern int __must_check device_reprobe(struct device *dev);
979
980 /*
981 * Easy functions for dynamically creating devices on the fly
982 */
983 extern struct device *device_create_vargs(struct class *cls,
984 struct device *parent,
985 dev_t devt,
986 void *drvdata,
987 const char *fmt,
988 va_list vargs);
989 extern __printf(5, 6)
990 struct device *device_create(struct class *cls, struct device *parent,
991 dev_t devt, void *drvdata,
992 const char *fmt, ...);
993 extern __printf(6, 7)
994 struct device *device_create_with_groups(struct class *cls,
995 struct device *parent, dev_t devt, void *drvdata,
996 const struct attribute_group **groups,
997 const char *fmt, ...);
998 extern void device_destroy(struct class *cls, dev_t devt);
999
1000 /*
1001 * Platform "fixup" functions - allow the platform to have their say
1002 * about devices and actions that the general device layer doesn't
1003 * know about.
1004 */
1005 /* Notify platform of device discovery */
1006 extern int (*platform_notify)(struct device *dev);
1007
1008 extern int (*platform_notify_remove)(struct device *dev);
1009
1010
1011 /*
1012 * get_device - atomically increment the reference count for the device.
1013 *
1014 */
1015 extern struct device *get_device(struct device *dev);
1016 extern void put_device(struct device *dev);
1017
#ifdef CONFIG_DEVTMPFS
extern int devtmpfs_create_node(struct device *dev);
extern int devtmpfs_delete_node(struct device *dev);
extern int devtmpfs_mount(const char *mntdir);
#else
/* devtmpfs compiled out: every helper is a successful no-op. */
static inline int devtmpfs_create_node(struct device *dev) { return 0; }
static inline int devtmpfs_delete_node(struct device *dev) { return 0; }
static inline int devtmpfs_mount(const char *mountpoint) { return 0; }
#endif
1027
1028 /* drivers/base/power/shutdown.c */
1029 extern void device_shutdown(void);
1030
1031 /* debugging and troubleshooting/diagnostic helpers. */
1032 extern const char *dev_driver_string(const struct device *dev);
1033
1034
1035 #ifdef CONFIG_PRINTK
1036
1037 extern __printf(3, 0)
1038 int dev_vprintk_emit(int level, const struct device *dev,
1039 const char *fmt, va_list args);
1040 extern __printf(3, 4)
1041 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
1042
1043 extern __printf(3, 4)
1044 void dev_printk(const char *level, const struct device *dev,
1045 const char *fmt, ...);
1046 extern __printf(2, 3)
1047 void dev_emerg(const struct device *dev, const char *fmt, ...);
1048 extern __printf(2, 3)
1049 void dev_alert(const struct device *dev, const char *fmt, ...);
1050 extern __printf(2, 3)
1051 void dev_crit(const struct device *dev, const char *fmt, ...);
1052 extern __printf(2, 3)
1053 void dev_err(const struct device *dev, const char *fmt, ...);
1054 extern __printf(2, 3)
1055 void dev_warn(const struct device *dev, const char *fmt, ...);
1056 extern __printf(2, 3)
1057 void dev_notice(const struct device *dev, const char *fmt, ...);
1058 extern __printf(2, 3)
1059 void _dev_info(const struct device *dev, const char *fmt, ...);
1060
1061 #else
1062
/*
 * !CONFIG_PRINTK: all device logging helpers become empty inlines (or
 * return 0), so call sites compile away without #ifdefs.  The __printf()
 * annotations are kept so format strings are still checked.
 */
static inline __printf(3, 0)
int dev_vprintk_emit(int level, const struct device *dev,
		     const char *fmt, va_list args)
{ return 0; }
static inline __printf(3, 4)
int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
{ return 0; }

static inline void __dev_printk(const char *level, const struct device *dev,
				struct va_format *vaf)
{}
static inline __printf(3, 4)
void dev_printk(const char *level, const struct device *dev,
		const char *fmt, ...)
{}

static inline __printf(2, 3)
void dev_emerg(const struct device *dev, const char *fmt, ...)
{}
static inline __printf(2, 3)
void dev_crit(const struct device *dev, const char *fmt, ...)
{}
static inline __printf(2, 3)
void dev_alert(const struct device *dev, const char *fmt, ...)
{}
static inline __printf(2, 3)
void dev_err(const struct device *dev, const char *fmt, ...)
{}
static inline __printf(2, 3)
void dev_warn(const struct device *dev, const char *fmt, ...)
{}
static inline __printf(2, 3)
void dev_notice(const struct device *dev, const char *fmt, ...)
{}
static inline __printf(2, 3)
void _dev_info(const struct device *dev, const char *fmt, ...)
{}
1100
1101 #endif
1102
1103 /*
1104  * Stupid hackaround for existing non-printk uses of dev_info
1105 *
1106 * Note that the definition of dev_info below is actually _dev_info
1107 * and a macro is used to avoid redefining dev_info
1108 */
1109
1110 #define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg)
1111
1112 #if defined(CONFIG_DYNAMIC_DEBUG)
1113 #define dev_dbg(dev, format, ...) \
1114 do { \
1115 dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
1116 } while (0)
1117 #elif defined(DEBUG)
1118 #define dev_dbg(dev, format, arg...) \
1119 dev_printk(KERN_DEBUG, dev, format, ##arg)
1120 #else
1121 #define dev_dbg(dev, format, arg...) \
1122 ({ \
1123 if (0) \
1124 dev_printk(KERN_DEBUG, dev, format, ##arg); \
1125 })
1126 #endif
1127
1128 #ifdef CONFIG_PRINTK
1129 #define dev_level_once(dev_level, dev, fmt, ...) \
1130 do { \
1131 static bool __print_once __read_mostly; \
1132 \
1133 if (!__print_once) { \
1134 __print_once = true; \
1135 dev_level(dev, fmt, ##__VA_ARGS__); \
1136 } \
1137 } while (0)
1138 #else
1139 #define dev_level_once(dev_level, dev, fmt, ...) \
1140 do { \
1141 if (0) \
1142 dev_level(dev, fmt, ##__VA_ARGS__); \
1143 } while (0)
1144 #endif
1145
1146 #define dev_emerg_once(dev, fmt, ...) \
1147 dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__)
1148 #define dev_alert_once(dev, fmt, ...) \
1149 dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__)
1150 #define dev_crit_once(dev, fmt, ...) \
1151 dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__)
1152 #define dev_err_once(dev, fmt, ...) \
1153 dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__)
1154 #define dev_warn_once(dev, fmt, ...) \
1155 dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__)
1156 #define dev_notice_once(dev, fmt, ...) \
1157 dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__)
1158 #define dev_info_once(dev, fmt, ...) \
1159 dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
1160 #define dev_dbg_once(dev, fmt, ...) \
1161 dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__)
1162
1163 #define dev_level_ratelimited(dev_level, dev, fmt, ...) \
1164 do { \
1165 static DEFINE_RATELIMIT_STATE(_rs, \
1166 DEFAULT_RATELIMIT_INTERVAL, \
1167 DEFAULT_RATELIMIT_BURST); \
1168 if (__ratelimit(&_rs)) \
1169 dev_level(dev, fmt, ##__VA_ARGS__); \
1170 } while (0)
1171
1172 #define dev_emerg_ratelimited(dev, fmt, ...) \
1173 dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__)
1174 #define dev_alert_ratelimited(dev, fmt, ...) \
1175 dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__)
1176 #define dev_crit_ratelimited(dev, fmt, ...) \
1177 dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__)
1178 #define dev_err_ratelimited(dev, fmt, ...) \
1179 dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__)
1180 #define dev_warn_ratelimited(dev, fmt, ...) \
1181 dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__)
1182 #define dev_notice_ratelimited(dev, fmt, ...) \
1183 dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
1184 #define dev_info_ratelimited(dev, fmt, ...) \
1185 dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
1186 #if defined(CONFIG_DYNAMIC_DEBUG)
1187 /* descriptor check is first to prevent flooding with "callbacks suppressed" */
1188 #define dev_dbg_ratelimited(dev, fmt, ...) \
1189 do { \
1190 static DEFINE_RATELIMIT_STATE(_rs, \
1191 DEFAULT_RATELIMIT_INTERVAL, \
1192 DEFAULT_RATELIMIT_BURST); \
1193 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
1194 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
1195 __ratelimit(&_rs)) \
1196 __dynamic_dev_dbg(&descriptor, dev, fmt, \
1197 ##__VA_ARGS__); \
1198 } while (0)
1199 #elif defined(DEBUG)
1200 #define dev_dbg_ratelimited(dev, fmt, ...) \
1201 do { \
1202 static DEFINE_RATELIMIT_STATE(_rs, \
1203 DEFAULT_RATELIMIT_INTERVAL, \
1204 DEFAULT_RATELIMIT_BURST); \
1205 if (__ratelimit(&_rs)) \
1206 dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
1207 } while (0)
1208 #else
1209 #define dev_dbg_ratelimited(dev, fmt, ...) \
1210 no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
1211 #endif
1212
1213 #ifdef VERBOSE_DEBUG
1214 #define dev_vdbg dev_dbg
1215 #else
1216 #define dev_vdbg(dev, format, arg...) \
1217 ({ \
1218 if (0) \
1219 dev_printk(KERN_DEBUG, dev, format, ##arg); \
1220 })
1221 #endif
1222
1223 /*
1224 * dev_WARN*() acts like dev_printk(), but with the key difference of
1225 * using WARN/WARN_ONCE to include file/line information and a backtrace.
1226 */
/*
 * dev_WARN() expands to a WARN() tagged with the driver name and device
 * name.  NOTE: the expansion must NOT end in a semicolon -- the previous
 * trailing ';' made "if (cond) dev_WARN(...); else ..." a syntax error
 * and silently emitted an empty statement elsewhere.
 */
#define dev_WARN(dev, format, arg...) \
	WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg)
1229
1230 #define dev_WARN_ONCE(dev, condition, format, arg...) \
1231 WARN_ONCE(condition, "%s %s: " format, \
1232 dev_driver_string(dev), dev_name(dev), ## arg)
1233
1234 /* Create alias, so I can be autoloaded. */
1235 #define MODULE_ALIAS_CHARDEV(major,minor) \
1236 MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
1237 #define MODULE_ALIAS_CHARDEV_MAJOR(major) \
1238 MODULE_ALIAS("char-major-" __stringify(major) "-*")
1239
1240 #ifdef CONFIG_SYSFS_DEPRECATED
1241 extern long sysfs_deprecated;
1242 #else
1243 #define sysfs_deprecated 0
1244 #endif
1245
1246 /**
1247 * module_driver() - Helper macro for drivers that don't do anything
1248 * special in module init/exit. This eliminates a lot of boilerplate.
1249 * Each module may only use this macro once, and calling it replaces
1250 * module_init() and module_exit().
1251 *
1252 * @__driver: driver name
1253 * @__register: register function for this driver type
1254 * @__unregister: unregister function for this driver type
1255 * @...: Additional arguments to be passed to __register and __unregister.
1256 *
1257 * Use this macro to construct bus specific macros for registering
1258 * drivers, and do not use it on its own.
1259 */
1260 #define module_driver(__driver, __register, __unregister, ...) \
1261 static int __init __driver##_init(void) \
1262 { \
1263 return __register(&(__driver) , ##__VA_ARGS__); \
1264 } \
1265 module_init(__driver##_init); \
1266 static void __exit __driver##_exit(void) \
1267 { \
1268 __unregister(&(__driver) , ##__VA_ARGS__); \
1269 } \
1270 module_exit(__driver##_exit);
1271
1272 #endif /* _DEVICE_H_ */ 1 #ifndef _LINUX_ERR_H
2 #define _LINUX_ERR_H
3
4 #include <linux/compiler.h>
5 #include <linux/types.h>
6
7 #include <asm/errno.h>
8
9 /*
10 * Kernel pointers have redundant information, so we can use a
11 * scheme where we can return either an error code or a normal
12 * pointer with the same return value.
13 *
14 * This should be a per-architecture thing, to allow different
15 * error and pointer decisions.
16 */
17 #define MAX_ERRNO 4095
18
19 #ifndef __ASSEMBLY__
20
21 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
22
23 static inline void * __must_check ERR_PTR(long error)
24 {
25 return (void *) error;
26 }
27
28 static inline long __must_check PTR_ERR(__force const void *ptr)
29 {
30 return (long) ptr;
31 }
32
33 static inline bool __must_check IS_ERR(__force const void *ptr)
34 {
35 return IS_ERR_VALUE((unsigned long)ptr);
36 }
37
38 static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr)
39 {
40 return !ptr || IS_ERR_VALUE((unsigned long)ptr);
41 }
42
43 /**
44 * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
45 * @ptr: The pointer to cast.
46 *
47 * Explicitly cast an error-valued pointer to another pointer type in such a
48 * way as to make it clear that's what's going on.
49 */
50 static inline void * __must_check ERR_CAST(__force const void *ptr)
51 {
52 /* cast away the const */
53 return (void *) ptr;
54 }
55
56 static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
57 {
58 if (IS_ERR(ptr))
59 return PTR_ERR(ptr);
60 else
61 return 0;
62 }
63
64 /* Deprecated */
65 #define PTR_RET(p) PTR_ERR_OR_ZERO(p)
66
67 #endif
68
69 #endif /* _LINUX_ERR_H */ 1 /*
2 * ioport.h Definitions of routines for detecting, reserving and
3 * allocating system resources.
4 *
5 * Authors: Linus Torvalds
6 */
7
8 #ifndef _LINUX_IOPORT_H
9 #define _LINUX_IOPORT_H
10
11 #ifndef __ASSEMBLY__
12 #include <linux/compiler.h>
13 #include <linux/types.h>
14 /*
15 * Resources are tree-like, allowing
16 * nesting etc..
17 */
18 struct resource {
19 resource_size_t start;
20 resource_size_t end;
21 const char *name;
22 unsigned long flags;
23 struct resource *parent, *sibling, *child;
24 };
25
26 /*
27 * IO resources have these defined flags.
28 */
29 #define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */
30
31 #define IORESOURCE_TYPE_BITS 0x00001f00 /* Resource type */
32 #define IORESOURCE_IO 0x00000100 /* PCI/ISA I/O ports */
33 #define IORESOURCE_MEM 0x00000200
34 #define IORESOURCE_REG 0x00000300 /* Register offsets */
35 #define IORESOURCE_IRQ 0x00000400
36 #define IORESOURCE_DMA 0x00000800
37 #define IORESOURCE_BUS 0x00001000
38
39 #define IORESOURCE_PREFETCH 0x00002000 /* No side effects */
40 #define IORESOURCE_READONLY 0x00004000
41 #define IORESOURCE_CACHEABLE 0x00008000
42 #define IORESOURCE_RANGELENGTH 0x00010000
43 #define IORESOURCE_SHADOWABLE 0x00020000
44
45 #define IORESOURCE_SIZEALIGN 0x00040000 /* size indicates alignment */
46 #define IORESOURCE_STARTALIGN 0x00080000 /* start field is alignment */
47
48 #define IORESOURCE_MEM_64 0x00100000
49 #define IORESOURCE_WINDOW 0x00200000 /* forwarded by bridge */
50 #define IORESOURCE_MUXED 0x00400000 /* Resource is software muxed */
51
52 #define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */
53 #define IORESOURCE_DISABLED 0x10000000
54 #define IORESOURCE_UNSET 0x20000000 /* No address assigned yet */
55 #define IORESOURCE_AUTO 0x40000000
56 #define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */
57
58 /* PnP IRQ specific bits (IORESOURCE_BITS) */
59 #define IORESOURCE_IRQ_HIGHEDGE (1<<0)
60 #define IORESOURCE_IRQ_LOWEDGE (1<<1)
61 #define IORESOURCE_IRQ_HIGHLEVEL (1<<2)
62 #define IORESOURCE_IRQ_LOWLEVEL (1<<3)
63 #define IORESOURCE_IRQ_SHAREABLE (1<<4)
64 #define IORESOURCE_IRQ_OPTIONAL (1<<5)
65
66 /* PnP DMA specific bits (IORESOURCE_BITS) */
67 #define IORESOURCE_DMA_TYPE_MASK (3<<0)
68 #define IORESOURCE_DMA_8BIT (0<<0)
69 #define IORESOURCE_DMA_8AND16BIT (1<<0)
70 #define IORESOURCE_DMA_16BIT (2<<0)
71
72 #define IORESOURCE_DMA_MASTER (1<<2)
73 #define IORESOURCE_DMA_BYTE (1<<3)
74 #define IORESOURCE_DMA_WORD (1<<4)
75
76 #define IORESOURCE_DMA_SPEED_MASK (3<<6)
77 #define IORESOURCE_DMA_COMPATIBLE (0<<6)
78 #define IORESOURCE_DMA_TYPEA (1<<6)
79 #define IORESOURCE_DMA_TYPEB (2<<6)
80 #define IORESOURCE_DMA_TYPEF (3<<6)
81
82 /* PnP memory I/O specific bits (IORESOURCE_BITS) */
83 #define IORESOURCE_MEM_WRITEABLE (1<<0) /* dup: IORESOURCE_READONLY */
84 #define IORESOURCE_MEM_CACHEABLE (1<<1) /* dup: IORESOURCE_CACHEABLE */
85 #define IORESOURCE_MEM_RANGELENGTH (1<<2) /* dup: IORESOURCE_RANGELENGTH */
86 #define IORESOURCE_MEM_TYPE_MASK (3<<3)
87 #define IORESOURCE_MEM_8BIT (0<<3)
88 #define IORESOURCE_MEM_16BIT (1<<3)
89 #define IORESOURCE_MEM_8AND16BIT (2<<3)
90 #define IORESOURCE_MEM_32BIT (3<<3)
91 #define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */
92 #define IORESOURCE_MEM_EXPANSIONROM (1<<6)
93
94 /* PnP I/O specific bits (IORESOURCE_BITS) */
95 #define IORESOURCE_IO_16BIT_ADDR (1<<0)
96 #define IORESOURCE_IO_FIXED (1<<1)
97
98 /* PCI ROM control bits (IORESOURCE_BITS) */
99 #define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */
100 #define IORESOURCE_ROM_SHADOW (1<<1) /* ROM is copy at C000:0 */
101 #define IORESOURCE_ROM_COPY (1<<2) /* ROM is alloc'd copy, resource field overlaid */
102 #define IORESOURCE_ROM_BIOS_COPY (1<<3) /* ROM is BIOS copy, resource field overlaid */
103
104 /* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
105 #define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */
106
107
108 /* helpers to define resources */
109 #define DEFINE_RES_NAMED(_start, _size, _name, _flags) \
110 { \
111 .start = (_start), \
112 .end = (_start) + (_size) - 1, \
113 .name = (_name), \
114 .flags = (_flags), \
115 }
116
117 #define DEFINE_RES_IO_NAMED(_start, _size, _name) \
118 DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_IO)
119 #define DEFINE_RES_IO(_start, _size) \
120 DEFINE_RES_IO_NAMED((_start), (_size), NULL)
121
122 #define DEFINE_RES_MEM_NAMED(_start, _size, _name) \
123 DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_MEM)
124 #define DEFINE_RES_MEM(_start, _size) \
125 DEFINE_RES_MEM_NAMED((_start), (_size), NULL)
126
127 #define DEFINE_RES_IRQ_NAMED(_irq, _name) \
128 DEFINE_RES_NAMED((_irq), 1, (_name), IORESOURCE_IRQ)
129 #define DEFINE_RES_IRQ(_irq) \
130 DEFINE_RES_IRQ_NAMED((_irq), NULL)
131
132 #define DEFINE_RES_DMA_NAMED(_dma, _name) \
133 DEFINE_RES_NAMED((_dma), 1, (_name), IORESOURCE_DMA)
134 #define DEFINE_RES_DMA(_dma) \
135 DEFINE_RES_DMA_NAMED((_dma), NULL)
136
137 /* PC/ISA/whatever - the normal PC address spaces: IO and memory */
138 extern struct resource ioport_resource;
139 extern struct resource iomem_resource;
140
141 extern struct resource *request_resource_conflict(struct resource *root, struct resource *new);
142 extern int request_resource(struct resource *root, struct resource *new);
143 extern int release_resource(struct resource *new);
144 void release_child_resources(struct resource *new);
145 extern void reserve_region_with_split(struct resource *root,
146 resource_size_t start, resource_size_t end,
147 const char *name);
148 extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new);
149 extern int insert_resource(struct resource *parent, struct resource *new);
150 extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
151 extern void arch_remove_reservations(struct resource *avail);
152 extern int allocate_resource(struct resource *root, struct resource *new,
153 resource_size_t size, resource_size_t min,
154 resource_size_t max, resource_size_t align,
155 resource_size_t (*alignf)(void *,
156 const struct resource *,
157 resource_size_t,
158 resource_size_t),
159 void *alignf_data);
160 struct resource *lookup_resource(struct resource *root, resource_size_t start);
161 int adjust_resource(struct resource *res, resource_size_t start,
162 resource_size_t size);
163 resource_size_t resource_alignment(struct resource *res);
164 static inline resource_size_t resource_size(const struct resource *res)
165 {
166 return res->end - res->start + 1;
167 }
168 static inline unsigned long resource_type(const struct resource *res)
169 {
170 return res->flags & IORESOURCE_TYPE_BITS;
171 }
172 /* True iff r1 completely contains r2 */
173 static inline bool resource_contains(struct resource *r1, struct resource *r2)
174 {
175 if (resource_type(r1) != resource_type(r2))
176 return false;
177 if (r1->flags & IORESOURCE_UNSET || r2->flags & IORESOURCE_UNSET)
178 return false;
179 return r1->start <= r2->start && r1->end >= r2->end;
180 }
181
182
183 /* Convenience shorthand with allocation */
184 #define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0)
185 #define request_muxed_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), IORESOURCE_MUXED)
186 #define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl)
187 #define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0)
188 #define request_mem_region_exclusive(start,n,name) \
189 __request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE)
190 #define rename_region(region, newname) do { (region)->name = (newname); } while (0)
191
192 extern struct resource * __request_region(struct resource *,
193 resource_size_t start,
194 resource_size_t n,
195 const char *name, int flags);
196
197 /* Compatibility cruft */
198 #define release_region(start,n) __release_region(&ioport_resource, (start), (n))
199 #define release_mem_region(start,n) __release_region(&iomem_resource, (start), (n))
200
201 extern void __release_region(struct resource *, resource_size_t,
202 resource_size_t);
203 #ifdef CONFIG_MEMORY_HOTREMOVE
204 extern int release_mem_region_adjustable(struct resource *, resource_size_t,
205 resource_size_t);
206 #endif
207
208 /* Wrappers for managed devices */
209 struct device;
210
211 extern int devm_request_resource(struct device *dev, struct resource *root,
212 struct resource *new);
213 extern void devm_release_resource(struct device *dev, struct resource *new);
214
215 #define devm_request_region(dev,start,n,name) \
216 __devm_request_region(dev, &ioport_resource, (start), (n), (name))
217 #define devm_request_mem_region(dev,start,n,name) \
218 __devm_request_region(dev, &iomem_resource, (start), (n), (name))
219
220 extern struct resource * __devm_request_region(struct device *dev,
221 struct resource *parent, resource_size_t start,
222 resource_size_t n, const char *name);
223
224 #define devm_release_region(dev, start, n) \
225 __devm_release_region(dev, &ioport_resource, (start), (n))
226 #define devm_release_mem_region(dev, start, n) \
227 __devm_release_region(dev, &iomem_resource, (start), (n))
228
229 extern void __devm_release_region(struct device *dev, struct resource *parent,
230 resource_size_t start, resource_size_t n);
231 extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
232 extern int iomem_is_exclusive(u64 addr);
233
234 extern int
235 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
236 void *arg, int (*func)(unsigned long, unsigned long, void *));
237 extern int
238 walk_system_ram_res(u64 start, u64 end, void *arg,
239 int (*func)(u64, u64, void *));
240 extern int
241 walk_iomem_res(char *name, unsigned long flags, u64 start, u64 end, void *arg,
242 int (*func)(u64, u64, void *));
243
244 /* True if any part of r1 overlaps r2 */
245 static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
246 {
247 return (r1->start <= r2->end && r1->end >= r2->start);
248 }
249
250
251 #endif /* __ASSEMBLY__ */
252 #endif /* _LINUX_IOPORT_H */ 1 #ifndef __LINUX_SPINLOCK_H
2 #define __LINUX_SPINLOCK_H
3
4 /*
5 * include/linux/spinlock.h - generic spinlock/rwlock declarations
6 *
7 * here's the role of the various spinlock/rwlock related include files:
8 *
9 * on SMP builds:
10 *
11 * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
12 * initializers
13 *
14 * linux/spinlock_types.h:
15 * defines the generic type and initializers
16 *
17 * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel
18 * implementations, mostly inline assembly code
19 *
20 * (also included on UP-debug builds:)
21 *
22 * linux/spinlock_api_smp.h:
23 * contains the prototypes for the _spin_*() APIs.
24 *
25 * linux/spinlock.h: builds the final spin_*() APIs.
26 *
27 * on UP builds:
28 *
29 * linux/spinlock_type_up.h:
30 * contains the generic, simplified UP spinlock type.
31 * (which is an empty structure on non-debug builds)
32 *
33 * linux/spinlock_types.h:
34 * defines the generic type and initializers
35 *
36 * linux/spinlock_up.h:
37 * contains the arch_spin_*()/etc. version of UP
38 * builds. (which are NOPs on non-debug, non-preempt
39 * builds)
40 *
41 * (included on UP-non-debug builds:)
42 *
43 * linux/spinlock_api_up.h:
44 * builds the _spin_*() APIs.
45 *
46 * linux/spinlock.h: builds the final spin_*() APIs.
47 */
48
49 #include <linux/typecheck.h>
50 #include <linux/preempt.h>
51 #include <linux/linkage.h>
52 #include <linux/compiler.h>
53 #include <linux/irqflags.h>
54 #include <linux/thread_info.h>
55 #include <linux/kernel.h>
56 #include <linux/stringify.h>
57 #include <linux/bottom_half.h>
58 #include <asm/barrier.h>
59
60
61 /*
62 * Must define these before including other files, inline functions need them
63 */
64 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
65
66 #define LOCK_SECTION_START(extra) \
67 ".subsection 1\n\t" \
68 extra \
69 ".ifndef " LOCK_SECTION_NAME "\n\t" \
70 LOCK_SECTION_NAME ":\n\t" \
71 ".endif\n"
72
73 #define LOCK_SECTION_END \
74 ".previous\n\t"
75
76 #define __lockfunc __attribute__((section(".spinlock.text")))
77
78 /*
79 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
80 */
81 #include <linux/spinlock_types.h>
82
83 /*
84 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
85 */
86 #ifdef CONFIG_SMP
87 # include <asm/spinlock.h>
88 #else
89 # include <linux/spinlock_up.h>
90 #endif
91
92 #ifdef CONFIG_DEBUG_SPINLOCK
93 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
94 struct lock_class_key *key);
95 # define raw_spin_lock_init(lock) \
96 do { \
97 static struct lock_class_key __key; \
98 \
99 __raw_spin_lock_init((lock), #lock, &__key); \
100 } while (0)
101
102 #else
103 # define raw_spin_lock_init(lock) \
104 do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
105 #endif
106
107 #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
108
109 #ifdef CONFIG_GENERIC_LOCKBREAK
110 #define raw_spin_is_contended(lock) ((lock)->break_lock)
111 #else
112
113 #ifdef arch_spin_is_contended
114 #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
115 #else
116 #define raw_spin_is_contended(lock) (((void)(lock), 0))
117 #endif /*arch_spin_is_contended*/
118 #endif
119
120 /*
121  * Despite its name it doesn't necessarily have to be a full barrier.
122 * It should only guarantee that a STORE before the critical section
123 * can not be reordered with a LOAD inside this section.
124 * spin_lock() is the one-way barrier, this LOAD can not escape out
125 * of the region. So the default implementation simply ensures that
126 * a STORE can not move into the critical section, smp_wmb() should
127 * serialize it with another STORE done by spin_lock().
128 */
129 #ifndef smp_mb__before_spinlock
130 #define smp_mb__before_spinlock() smp_wmb()
131 #endif
132
133 /*
134 * Place this after a lock-acquisition primitive to guarantee that
135 * an UNLOCK+LOCK pair act as a full barrier. This guarantee applies
136 * if the UNLOCK and LOCK are executed by the same CPU or if the
137 * UNLOCK and LOCK operate on the same lock variable.
138 */
139 #ifndef smp_mb__after_unlock_lock
140 #define smp_mb__after_unlock_lock() do { } while (0)
141 #endif
142
143 /**
144 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
145 * @lock: the spinlock in question.
146 */
147 #define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
148
149 #ifdef CONFIG_DEBUG_SPINLOCK
150 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
151 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
152 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
153 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
154 #else
155 static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
156 {
157 __acquire(lock);
158 arch_spin_lock(&lock->raw_lock);
159 }
160
161 static inline void
162 do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
163 {
164 __acquire(lock);
165 arch_spin_lock_flags(&lock->raw_lock, *flags);
166 }
167
168 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
169 {
170 return arch_spin_trylock(&(lock)->raw_lock);
171 }
172
173 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
174 {
175 arch_spin_unlock(&lock->raw_lock);
176 __release(lock);
177 }
178 #endif
179
180 /*
181 * Define the various spin_lock methods. Note we define these
182 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
183 * various methods are defined as nops in the case they are not
184 * required.
185 */
186 #define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
187
188 #define raw_spin_lock(lock) _raw_spin_lock(lock)
189
190 #ifdef CONFIG_DEBUG_LOCK_ALLOC
191 # define raw_spin_lock_nested(lock, subclass) \
192 _raw_spin_lock_nested(lock, subclass)
193 # define raw_spin_lock_bh_nested(lock, subclass) \
194 _raw_spin_lock_bh_nested(lock, subclass)
195
196 # define raw_spin_lock_nest_lock(lock, nest_lock) \
197 do { \
198 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
199 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
200 } while (0)
201 #else
202 /*
203 * Always evaluate the 'subclass' argument to avoid that the compiler
204 * warns about set-but-not-used variables when building with
205 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
206 */
207 # define raw_spin_lock_nested(lock, subclass) \
208 _raw_spin_lock(((void)(subclass), (lock)))
209 # define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
210 # define raw_spin_lock_bh_nested(lock, subclass) _raw_spin_lock_bh(lock)
211 #endif
212
213 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
214
215 #define raw_spin_lock_irqsave(lock, flags) \
216 do { \
217 typecheck(unsigned long, flags); \
218 flags = _raw_spin_lock_irqsave(lock); \
219 } while (0)
220
221 #ifdef CONFIG_DEBUG_LOCK_ALLOC
222 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
223 do { \
224 typecheck(unsigned long, flags); \
225 flags = _raw_spin_lock_irqsave_nested(lock, subclass); \
226 } while (0)
227 #else
228 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
229 do { \
230 typecheck(unsigned long, flags); \
231 flags = _raw_spin_lock_irqsave(lock); \
232 } while (0)
233 #endif
234
235 #else
236
237 #define raw_spin_lock_irqsave(lock, flags) \
238 do { \
239 typecheck(unsigned long, flags); \
240 _raw_spin_lock_irqsave(lock, flags); \
241 } while (0)
242
243 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
244 raw_spin_lock_irqsave(lock, flags)
245
246 #endif
247
/*
 * Thin raw_spin_* wrappers around the _raw_spin_* implementations for
 * the irq/bh lock, unlock, and trylock operations.
 */
248 #define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
249 #define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)
250 #define raw_spin_unlock(lock) _raw_spin_unlock(lock)
251 #define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
252
253 #define raw_spin_unlock_irqrestore(lock, flags) \
254 do { \
255 typecheck(unsigned long, flags); \
256 _raw_spin_unlock_irqrestore(lock, flags); \
257 } while (0)
258 #define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
259
/* __cond_lock() is sparse annotation: lock is held iff the trylock returned nonzero. */
260 #define raw_spin_trylock_bh(lock) \
261 __cond_lock(lock, _raw_spin_trylock_bh(lock))
262
/*
 * Trylock with interrupts disabled: irqs are unconditionally disabled
 * first and re-enabled (or restored, for the irqsave form) only when the
 * trylock fails, so a successful return leaves irqs off.
 */
263 #define raw_spin_trylock_irq(lock) \
264 ({ \
265 local_irq_disable(); \
266 raw_spin_trylock(lock) ? \
267 1 : ({ local_irq_enable(); 0; }); \
268 })
269
270 #define raw_spin_trylock_irqsave(lock, flags) \
271 ({ \
272 local_irq_save(flags); \
273 raw_spin_trylock(lock) ? \
274 1 : ({ local_irq_restore(flags); 0; }); \
275 })
276
277 /**
278 * raw_spin_can_lock - would raw_spin_trylock() succeed?
279 * @lock: the spinlock in question.
280 */
/* NOTE(review): a non-atomic hint only — the answer may be stale by the time it is used. */
281 #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
282
283 /* Include rwlock functions */
284 #include <linux/rwlock.h>
285
286 /*
287 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
288 */
289 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
290 # include <linux/spinlock_api_smp.h>
291 #else
292 # include <linux/spinlock_api_up.h>
293 #endif
294
295 /*
296 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
297 */
298
/*
 * spinlock_check() - return the embedded raw lock of a spinlock_t.
 * Used inside the spin_* macros purely so the compiler type-checks that
 * the argument really is a spinlock_t * (the return value is otherwise
 * what gets locked).
 */
299 static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
300 {
301 return &lock->rlock;
302 }
303
/* Initialize the wrapped raw spinlock; spinlock_check() only type-checks _lock. */
304 #define spin_lock_init(_lock) \
305 do { \
306 spinlock_check(_lock); \
307 raw_spin_lock_init(&(_lock)->rlock); \
308 } while (0)
309
/*
 * spin_* front-ends: with PREEMPT_RT=n a spinlock_t is just a wrapper
 * around a raw_spinlock_t, so each function/macro below simply forwards
 * to the matching raw_spin_* operation on &lock->rlock.
 */
310 static inline void spin_lock(spinlock_t *lock)
311 {
312 raw_spin_lock(&lock->rlock);
313 }
314
315 static inline void spin_lock_bh(spinlock_t *lock)
316 {
317 raw_spin_lock_bh(&lock->rlock);
318 }
319
/* Returns nonzero when the lock was acquired, 0 when it was already held. */
320 static inline int spin_trylock(spinlock_t *lock)
321 {
322 return raw_spin_trylock(&lock->rlock);
323 }
324
/* Nested variants: spinlock_check(lock) yields &lock->rlock after type-checking. */
325 #define spin_lock_nested(lock, subclass) \
326 do { \
327 raw_spin_lock_nested(spinlock_check(lock), subclass); \
328 } while (0)
329
330 #define spin_lock_bh_nested(lock, subclass) \
331 do { \
332 raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
333 } while (0)
334
335 #define spin_lock_nest_lock(lock, nest_lock) \
336 do { \
337 raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
338 } while (0)
339
/* Lock with interrupts disabled; forwards to the raw variant. */
340 static inline void spin_lock_irq(spinlock_t *lock)
341 {
342 raw_spin_lock_irq(&lock->rlock);
343 }
344
/* Must be macros (not functions) because 'flags' is written by name. */
345 #define spin_lock_irqsave(lock, flags) \
346 do { \
347 raw_spin_lock_irqsave(spinlock_check(lock), flags); \
348 } while (0)
349
350 #define spin_lock_irqsave_nested(lock, flags, subclass) \
351 do { \
352 raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
353 } while (0)
354
/* Unlock family: each forwards to the matching raw_spin_unlock* on &lock->rlock. */
355 static inline void spin_unlock(spinlock_t *lock)
356 {
357 raw_spin_unlock(&lock->rlock);
358 }
359
360 static inline void spin_unlock_bh(spinlock_t *lock)
361 {
362 raw_spin_unlock_bh(&lock->rlock);
363 }
364
365 static inline void spin_unlock_irq(spinlock_t *lock)
366 {
367 raw_spin_unlock_irq(&lock->rlock);
368 }
369
370 static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
371 {
372 raw_spin_unlock_irqrestore(&lock->rlock, flags);
373 }
374
/* Trylock family: nonzero on success, 0 when the lock is contended. */
375 static inline int spin_trylock_bh(spinlock_t *lock)
376 {
377 return raw_spin_trylock_bh(&lock->rlock);
378 }
379
380 static inline int spin_trylock_irq(spinlock_t *lock)
381 {
382 return raw_spin_trylock_irq(&lock->rlock);
383 }
384
/* Macro (not function) because 'flags' is written by name on success/failure. */
385 #define spin_trylock_irqsave(lock, flags) \
386 ({ \
387 raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
388 })
389
/* Spin until the lock is released, without acquiring it. */
390 static inline void spin_unlock_wait(spinlock_t *lock)
391 {
392 raw_spin_unlock_wait(&lock->rlock);
393 }
394
/* Status queries — forwarded to the raw lock; results can be stale immediately. */
395 static inline int spin_is_locked(spinlock_t *lock)
396 {
397 return raw_spin_is_locked(&lock->rlock);
398 }
399
400 static inline int spin_is_contended(spinlock_t *lock)
401 {
402 return raw_spin_is_contended(&lock->rlock);
403 }
404
405 static inline int spin_can_lock(spinlock_t *lock)
406 {
407 return raw_spin_can_lock(&lock->rlock);
408 }
409
/* Debug assertion that the given spinlock is currently held. */
410 #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
411
412 /*
413 * Pull the atomic_t declaration:
414 * (asm-mips/atomic.h needs above definitions)
415 */
416 #include <linux/atomic.h>
417 /**
418 * atomic_dec_and_lock - lock on reaching reference count zero
419 * @atomic: the atomic counter
420 * @lock: the spinlock in question
421 *
422 * Decrements @atomic by 1. If the result is 0, returns true and locks
423 * @lock. Returns false for all other cases.
424 */
425 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
/* __cond_lock(): sparse annotation — lock is held iff the call returned true. */
426 #define atomic_dec_and_lock(atomic, lock) \
427 __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
428
429 #endif /* __LINUX_SPINLOCK_H */ |
Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to determine whether there is an error in your driver.
The Error trace column contains a path on which the given rule is violated. You can expand/collapse some entity classes by clicking on the corresponding checkboxes in the main menu or in the advanced Others menu. You can also expand/collapse each particular entity by clicking on +/-. By hovering over some entities you can see tips. The error trace is also bound to the related source code. Line numbers may be shown as links on the left; you can click on them to open the corresponding lines in the source code.
The Source code column contains the content of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers, and the rule model. Tabs show the currently opened file and the other available files. By hovering over them you can see the full file names; clicking a tab shows the corresponding file's content.
Kernel | Module | Rule | Verifier | Verdict | Status | Timestamp | Bug report |
linux-4.1-rc1.tar.xz | drivers/usb/gadget/udc/mv_udc.ko | 152_1a | BLAST | Bug | Fixed | 2015-07-19 19:21:24 | L0197 |
Comment
Reported: 19 Jul 2015
[Home]