Error Trace
        
                          [Home]
Bug # 10
Show/hide error trace|            Error trace     
         {    95     struct kernel_symbol {   unsigned long value;   const char *name; } ;    33     struct module ;    19     typedef signed char __s8;    20     typedef unsigned char __u8;    22     typedef short __s16;    23     typedef unsigned short __u16;    25     typedef int __s32;    26     typedef unsigned int __u32;    30     typedef unsigned long long __u64;    16     typedef unsigned char u8;    18     typedef short s16;    19     typedef unsigned short u16;    21     typedef int s32;    22     typedef unsigned int u32;    24     typedef long long s64;    25     typedef unsigned long long u64;    14     typedef long __kernel_long_t;    15     typedef unsigned long __kernel_ulong_t;    27     typedef int __kernel_pid_t;    48     typedef unsigned int __kernel_uid32_t;    49     typedef unsigned int __kernel_gid32_t;    71     typedef __kernel_ulong_t __kernel_size_t;    72     typedef __kernel_long_t __kernel_ssize_t;    87     typedef long long __kernel_loff_t;    88     typedef __kernel_long_t __kernel_time_t;    89     typedef __kernel_long_t __kernel_clock_t;    90     typedef int __kernel_timer_t;    91     typedef int __kernel_clockid_t;    32     typedef __u16 __le16;    34     typedef __u32 __le32;    12     typedef __u32 __kernel_dev_t;    15     typedef __kernel_dev_t dev_t;    18     typedef unsigned short umode_t;    21     typedef __kernel_pid_t pid_t;    26     typedef __kernel_clockid_t clockid_t;    29     typedef _Bool bool;    31     typedef __kernel_uid32_t uid_t;    32     typedef __kernel_gid32_t gid_t;    45     typedef __kernel_loff_t loff_t;    54     typedef __kernel_size_t size_t;    59     typedef __kernel_ssize_t ssize_t;    69     typedef __kernel_time_t time_t;   102     typedef __s32 int32_t;   108     typedef __u32 uint32_t;   133     typedef unsigned long sector_t;   134     typedef unsigned long blkcnt_t;   147     typedef u64 dma_addr_t;   158     typedef unsigned int gfp_t;   159     typedef unsigned int fmode_t;   
160     typedef unsigned int oom_flags_t;   163     typedef u64 phys_addr_t;   168     typedef phys_addr_t resource_size_t;   178     struct __anonstruct_atomic_t_6 {   int counter; } ;   178     typedef struct __anonstruct_atomic_t_6 atomic_t;   183     struct __anonstruct_atomic64_t_7 {   long counter; } ;   183     typedef struct __anonstruct_atomic64_t_7 atomic64_t;   184     struct list_head {   struct list_head *next;   struct list_head *prev; } ;   189     struct hlist_node ;   189     struct hlist_head {   struct hlist_node *first; } ;   193     struct hlist_node {   struct hlist_node *next;   struct hlist_node **pprev; } ;   204     struct callback_head {   struct callback_head *next;   void (*func)(struct callback_head *); } ;    65     struct pt_regs {   unsigned long r15;   unsigned long r14;   unsigned long r13;   unsigned long r12;   unsigned long bp;   unsigned long bx;   unsigned long r11;   unsigned long r10;   unsigned long r9;   unsigned long r8;   unsigned long ax;   unsigned long cx;   unsigned long dx;   unsigned long si;   unsigned long di;   unsigned long orig_ax;   unsigned long ip;   unsigned long cs;   unsigned long flags;   unsigned long sp;   unsigned long ss; } ;    59     struct __anonstruct_ldv_1022_9 {   unsigned int a;   unsigned int b; } ;    59     struct __anonstruct_ldv_1037_10 {   u16 limit0;   u16 base0;   unsigned char base1;   unsigned char type;   unsigned char s;   unsigned char dpl;   unsigned char p;   unsigned char limit;   unsigned char avl;   unsigned char l;   unsigned char d;   unsigned char g;   unsigned char base2; } ;    59     union __anonunion_ldv_1038_8 {   struct __anonstruct_ldv_1022_9 ldv_1022;   struct __anonstruct_ldv_1037_10 ldv_1037; } ;    59     struct desc_struct {   union __anonunion_ldv_1038_8 ldv_1038; } ;    12     typedef unsigned long pteval_t;    15     typedef unsigned long pgdval_t;    16     typedef unsigned long pgprotval_t;    18     struct __anonstruct_pte_t_11 {   pteval_t pte; } ;    
18     typedef struct __anonstruct_pte_t_11 pte_t;    20     struct pgprot {   pgprotval_t pgprot; } ;   242     typedef struct pgprot pgprot_t;   244     struct __anonstruct_pgd_t_12 {   pgdval_t pgd; } ;   244     typedef struct __anonstruct_pgd_t_12 pgd_t;   332     struct page ;   332     typedef struct page *pgtable_t;   340     struct file ;   353     struct seq_file ;   390     struct thread_struct ;   392     struct mm_struct ;   393     struct task_struct ;   394     struct cpumask ;   395     struct paravirt_callee_save {   void *func; } ;   196     struct pv_irq_ops {   struct paravirt_callee_save save_fl;   struct paravirt_callee_save restore_fl;   struct paravirt_callee_save irq_disable;   struct paravirt_callee_save irq_enable;   void (*safe_halt)();   void (*halt)();   void (*adjust_exception_frame)(); } ;   327     struct arch_spinlock ;    18     typedef u16 __ticket_t;    19     typedef u32 __ticketpair_t;    20     struct __raw_tickets {   __ticket_t head;   __ticket_t tail; } ;    32     union __anonunion_ldv_1458_15 {   __ticketpair_t head_tail;   struct __raw_tickets tickets; } ;    32     struct arch_spinlock {   union __anonunion_ldv_1458_15 ldv_1458; } ;    33     typedef struct arch_spinlock arch_spinlock_t;    34     struct qrwlock {   atomic_t cnts;   arch_spinlock_t lock; } ;    14     typedef struct qrwlock arch_rwlock_t;   142     typedef void (*ctor_fn_t)();   219     struct _ddebug {   const char *modname;   const char *function;   const char *filename;   const char *format;   unsigned int lineno;   unsigned char flags; } ;    48     struct device ;   400     struct file_operations ;   412     struct completion ;   235     struct atomic_notifier_head ;   416     struct pid ;   527     struct bug_entry {   int bug_addr_disp;   int file_disp;   unsigned short line;   unsigned short flags; } ;   102     struct timespec ;   127     struct kernel_vm86_regs {   struct pt_regs pt;   unsigned short es;   unsigned short __esh;   unsigned 
short ds;   unsigned short __dsh;   unsigned short fs;   unsigned short __fsh;   unsigned short gs;   unsigned short __gsh; } ;    79     union __anonunion_ldv_2998_20 {   struct pt_regs *regs;   struct kernel_vm86_regs *vm86; } ;    79     struct math_emu_info {   long ___orig_eip;   union __anonunion_ldv_2998_20 ldv_2998; } ;   306     struct cpumask {   unsigned long bits[128U]; } ;    14     typedef struct cpumask cpumask_t;   671     typedef struct cpumask *cpumask_var_t;   162     struct seq_operations ;   294     struct i387_fsave_struct {   u32 cwd;   u32 swd;   u32 twd;   u32 fip;   u32 fcs;   u32 foo;   u32 fos;   u32 st_space[20U];   u32 status; } ;   312     struct __anonstruct_ldv_5289_25 {   u64 rip;   u64 rdp; } ;   312     struct __anonstruct_ldv_5295_26 {   u32 fip;   u32 fcs;   u32 foo;   u32 fos; } ;   312     union __anonunion_ldv_5296_24 {   struct __anonstruct_ldv_5289_25 ldv_5289;   struct __anonstruct_ldv_5295_26 ldv_5295; } ;   312     union __anonunion_ldv_5305_27 {   u32 padding1[12U];   u32 sw_reserved[12U]; } ;   312     struct i387_fxsave_struct {   u16 cwd;   u16 swd;   u16 twd;   u16 fop;   union __anonunion_ldv_5296_24 ldv_5296;   u32 mxcsr;   u32 mxcsr_mask;   u32 st_space[32U];   u32 xmm_space[64U];   u32 padding[12U];   union __anonunion_ldv_5305_27 ldv_5305; } ;   346     struct i387_soft_struct {   u32 cwd;   u32 swd;   u32 twd;   u32 fip;   u32 fcs;   u32 foo;   u32 fos;   u32 st_space[20U];   u8 ftop;   u8 changed;   u8 lookahead;   u8 no_update;   u8 rm;   u8 alimit;   struct math_emu_info *info;   u32 entry_eip; } ;   367     struct ymmh_struct {   u32 ymmh_space[64U]; } ;   372     struct lwp_struct {   u8 reserved[128U]; } ;   377     struct bndregs_struct {   u64 bndregs[8U]; } ;   381     struct bndcsr_struct {   u64 cfg_reg_u;   u64 status_reg; } ;   386     struct xsave_hdr_struct {   u64 xstate_bv;   u64 reserved1[2U];   u64 reserved2[5U]; } ;   392     struct xsave_struct {   struct i387_fxsave_struct i387;   struct 
xsave_hdr_struct xsave_hdr;   struct ymmh_struct ymmh;   struct lwp_struct lwp;   struct bndregs_struct bndregs;   struct bndcsr_struct bndcsr; } ;   401     union thread_xstate {   struct i387_fsave_struct fsave;   struct i387_fxsave_struct fxsave;   struct i387_soft_struct soft;   struct xsave_struct xsave; } ;   409     struct fpu {   unsigned int last_cpu;   unsigned int has_fpu;   union thread_xstate *state; } ;   465     struct kmem_cache ;   466     struct perf_event ;   467     struct thread_struct {   struct desc_struct tls_array[3U];   unsigned long sp0;   unsigned long sp;   unsigned long usersp;   unsigned short es;   unsigned short ds;   unsigned short fsindex;   unsigned short gsindex;   unsigned long fs;   unsigned long gs;   struct perf_event *ptrace_bps[4U];   unsigned long debugreg6;   unsigned long ptrace_dr7;   unsigned long cr2;   unsigned long trap_nr;   unsigned long error_code;   struct fpu fpu;   unsigned long *io_bitmap_ptr;   unsigned long iopl;   unsigned int io_bitmap_max;   unsigned char fpu_counter; } ;    23     typedef atomic64_t atomic_long_t;    35     struct lockdep_map ;    55     struct stack_trace {   unsigned int nr_entries;   unsigned int max_entries;   unsigned long *entries;   int skip; } ;    26     struct lockdep_subclass_key {   char __one_byte; } ;    53     struct lock_class_key {   struct lockdep_subclass_key subkeys[8U]; } ;    59     struct lock_class {   struct list_head hash_entry;   struct list_head lock_entry;   struct lockdep_subclass_key *key;   unsigned int subclass;   unsigned int dep_gen_id;   unsigned long usage_mask;   struct stack_trace usage_traces[13U];   struct list_head locks_after;   struct list_head locks_before;   unsigned int version;   unsigned long ops;   const char *name;   int name_version;   unsigned long contention_point[4U];   unsigned long contending_point[4U]; } ;   144     struct lockdep_map {   struct lock_class_key *key;   struct lock_class *class_cache[2U];   const char *name;   int 
cpu;   unsigned long ip; } ;   205     struct held_lock {   u64 prev_chain_key;   unsigned long acquire_ip;   struct lockdep_map *instance;   struct lockdep_map *nest_lock;   u64 waittime_stamp;   u64 holdtime_stamp;   unsigned short class_idx;   unsigned char irq_context;   unsigned char trylock;   unsigned char read;   unsigned char check;   unsigned char hardirqs_off;   unsigned short references; } ;   530     struct raw_spinlock {   arch_spinlock_t raw_lock;   unsigned int magic;   unsigned int owner_cpu;   void *owner;   struct lockdep_map dep_map; } ;    32     typedef struct raw_spinlock raw_spinlock_t;    33     struct __anonstruct_ldv_6346_31 {   u8 __padding[24U];   struct lockdep_map dep_map; } ;    33     union __anonunion_ldv_6347_30 {   struct raw_spinlock rlock;   struct __anonstruct_ldv_6346_31 ldv_6346; } ;    33     struct spinlock {   union __anonunion_ldv_6347_30 ldv_6347; } ;    76     typedef struct spinlock spinlock_t;    23     struct __anonstruct_rwlock_t_32 {   arch_rwlock_t raw_lock;   unsigned int magic;   unsigned int owner_cpu;   void *owner;   struct lockdep_map dep_map; } ;    23     typedef struct __anonstruct_rwlock_t_32 rwlock_t;   412     struct seqcount {   unsigned int sequence;   struct lockdep_map dep_map; } ;    51     typedef struct seqcount seqcount_t;   433     struct timespec {   __kernel_time_t tv_sec;   long tv_nsec; } ;    83     struct user_namespace ;    22     struct __anonstruct_kuid_t_34 {   uid_t val; } ;    22     typedef struct __anonstruct_kuid_t_34 kuid_t;    27     struct __anonstruct_kgid_t_35 {   gid_t val; } ;    27     typedef struct __anonstruct_kgid_t_35 kgid_t;   127     struct kstat {   u64 ino;   dev_t dev;   umode_t mode;   unsigned int nlink;   kuid_t uid;   kgid_t gid;   dev_t rdev;   loff_t size;   struct timespec atime;   struct timespec mtime;   struct timespec ctime;   unsigned long blksize;   unsigned long long blocks; } ;    34     struct __wait_queue_head {   spinlock_t lock;   struct 
list_head task_list; } ;    39     typedef struct __wait_queue_head wait_queue_head_t;    98     struct __anonstruct_nodemask_t_36 {   unsigned long bits[16U]; } ;    98     typedef struct __anonstruct_nodemask_t_36 nodemask_t;   814     struct optimistic_spin_queue ;   815     struct mutex {   atomic_t count;   spinlock_t wait_lock;   struct list_head wait_list;   struct task_struct *owner;   const char *name;   void *magic;   struct lockdep_map dep_map; } ;    68     struct mutex_waiter {   struct list_head list;   struct task_struct *task;   void *magic; } ;   178     struct rw_semaphore ;   179     struct rw_semaphore {   long count;   raw_spinlock_t wait_lock;   struct list_head wait_list;   struct task_struct *owner;   struct optimistic_spin_queue *osq;   struct lockdep_map dep_map; } ;   174     struct completion {   unsigned int done;   wait_queue_head_t wait; } ;   105     struct llist_node ;    64     struct llist_node {   struct llist_node *next; } ;    72     struct resource {   resource_size_t start;   resource_size_t end;   const char *name;   unsigned long flags;   struct resource *parent;   struct resource *sibling;   struct resource *child; } ;   323     union ktime {   s64 tv64; } ;    59     typedef union ktime ktime_t;   412     struct tvec_base ;   413     struct timer_list {   struct list_head entry;   unsigned long expires;   struct tvec_base *base;   void (*function)(unsigned long);   unsigned long data;   int slack;   int start_pid;   void *start_site;   char start_comm[16U];   struct lockdep_map lockdep_map; } ;   254     struct hrtimer ;   255     enum hrtimer_restart ;   266     struct workqueue_struct ;   267     struct work_struct ;    53     struct work_struct {   atomic_long_t data;   struct list_head entry;   void (*func)(struct work_struct *);   struct lockdep_map lockdep_map; } ;   106     struct delayed_work {   struct work_struct work;   struct timer_list timer;   struct workqueue_struct *wq;   int cpu; } ;    58     struct 
pm_message {   int event; } ;    64     typedef struct pm_message pm_message_t;    65     struct dev_pm_ops {   int (*prepare)(struct device *);   void (*complete)(struct device *);   int (*suspend)(struct device *);   int (*resume)(struct device *);   int (*freeze)(struct device *);   int (*thaw)(struct device *);   int (*poweroff)(struct device *);   int (*restore)(struct device *);   int (*suspend_late)(struct device *);   int (*resume_early)(struct device *);   int (*freeze_late)(struct device *);   int (*thaw_early)(struct device *);   int (*poweroff_late)(struct device *);   int (*restore_early)(struct device *);   int (*suspend_noirq)(struct device *);   int (*resume_noirq)(struct device *);   int (*freeze_noirq)(struct device *);   int (*thaw_noirq)(struct device *);   int (*poweroff_noirq)(struct device *);   int (*restore_noirq)(struct device *);   int (*runtime_suspend)(struct device *);   int (*runtime_resume)(struct device *);   int (*runtime_idle)(struct device *); } ;   320     enum rpm_status {   RPM_ACTIVE = 0,   RPM_RESUMING = 1,   RPM_SUSPENDED = 2,   RPM_SUSPENDING = 3 } ;   327     enum rpm_request {   RPM_REQ_NONE = 0,   RPM_REQ_IDLE = 1,   RPM_REQ_SUSPEND = 2,   RPM_REQ_AUTOSUSPEND = 3,   RPM_REQ_RESUME = 4 } ;   335     struct wakeup_source ;   546     struct pm_subsys_data {   spinlock_t lock;   unsigned int refcount;   struct list_head clock_list; } ;   553     struct dev_pm_qos ;   553     struct dev_pm_info {   pm_message_t power_state;   unsigned char can_wakeup;   unsigned char async_suspend;   bool is_prepared;   bool is_suspended;   bool is_noirq_suspended;   bool is_late_suspended;   bool ignore_children;   bool early_init;   bool direct_complete;   spinlock_t lock;   struct list_head entry;   struct completion completion;   struct wakeup_source *wakeup;   bool wakeup_path;   bool syscore;   struct timer_list suspend_timer;   unsigned long timer_expires;   struct work_struct work;   wait_queue_head_t wait_queue;   atomic_t 
usage_count;   atomic_t child_count;   unsigned char disable_depth;   unsigned char idle_notification;   unsigned char request_pending;   unsigned char deferred_resume;   unsigned char run_wake;   unsigned char runtime_auto;   unsigned char no_callbacks;   unsigned char irq_safe;   unsigned char use_autosuspend;   unsigned char timer_autosuspends;   unsigned char memalloc_noio;   enum rpm_request request;   enum rpm_status runtime_status;   int runtime_error;   int autosuspend_delay;   unsigned long last_busy;   unsigned long active_jiffies;   unsigned long suspended_jiffies;   unsigned long accounting_timestamp;   struct pm_subsys_data *subsys_data;   void (*set_latency_tolerance)(struct device *, s32 );   struct dev_pm_qos *qos; } ;   614     struct dev_pm_domain {   struct dev_pm_ops ops; } ;    22     struct __anonstruct_mm_context_t_101 {   void *ldt;   int size;   unsigned short ia32_compat;   struct mutex lock;   void *vdso; } ;    22     typedef struct __anonstruct_mm_context_t_101 mm_context_t;    18     struct rb_node {   unsigned long __rb_parent_color;   struct rb_node *rb_right;   struct rb_node *rb_left; } ;    40     struct rb_root {   struct rb_node *rb_node; } ;    87     struct vm_area_struct ;   167     struct notifier_block ;    51     struct notifier_block {   int (*notifier_call)(struct notifier_block *, unsigned long, void *);   struct notifier_block *next;   int priority; } ;    58     struct atomic_notifier_head {   spinlock_t lock;   struct notifier_block *head; } ;   835     struct nsproxy ;    37     struct cred ;    24     struct inode ;    58     struct arch_uprobe_task {   unsigned long saved_scratch_register;   unsigned int saved_trap_nr;   unsigned int saved_tf; } ;    66     enum uprobe_task_state {   UTASK_RUNNING = 0,   UTASK_SSTEP = 1,   UTASK_SSTEP_ACK = 2,   UTASK_SSTEP_TRAPPED = 3 } ;    73     struct __anonstruct_ldv_14006_136 {   struct arch_uprobe_task autask;   unsigned long vaddr; } ;    73     struct 
__anonstruct_ldv_14010_137 {   struct callback_head dup_xol_work;   unsigned long dup_xol_addr; } ;    73     union __anonunion_ldv_14011_135 {   struct __anonstruct_ldv_14006_136 ldv_14006;   struct __anonstruct_ldv_14010_137 ldv_14010; } ;    73     struct uprobe ;    73     struct return_instance ;    73     struct uprobe_task {   enum uprobe_task_state state;   union __anonunion_ldv_14011_135 ldv_14011;   struct uprobe *active_uprobe;   unsigned long xol_vaddr;   struct return_instance *return_instances;   unsigned int depth; } ;    94     struct xol_area ;    95     struct uprobes_state {   struct xol_area *xol_area; } ;   133     struct address_space ;   134     union __anonunion_ldv_14120_138 {   struct address_space *mapping;   void *s_mem; } ;   134     union __anonunion_ldv_14126_140 {   unsigned long index;   void *freelist;   bool pfmemalloc; } ;   134     struct __anonstruct_ldv_14136_144 {   unsigned short inuse;   unsigned short objects;   unsigned char frozen; } ;   134     union __anonunion_ldv_14138_143 {   atomic_t _mapcount;   struct __anonstruct_ldv_14136_144 ldv_14136;   int units; } ;   134     struct __anonstruct_ldv_14140_142 {   union __anonunion_ldv_14138_143 ldv_14138;   atomic_t _count; } ;   134     union __anonunion_ldv_14142_141 {   unsigned long counters;   struct __anonstruct_ldv_14140_142 ldv_14140;   unsigned int active; } ;   134     struct __anonstruct_ldv_14143_139 {   union __anonunion_ldv_14126_140 ldv_14126;   union __anonunion_ldv_14142_141 ldv_14142; } ;   134     struct __anonstruct_ldv_14150_146 {   struct page *next;   int pages;   int pobjects; } ;   134     struct slab ;   134     union __anonunion_ldv_14155_145 {   struct list_head lru;   struct __anonstruct_ldv_14150_146 ldv_14150;   struct slab *slab_page;   struct callback_head callback_head;   pgtable_t pmd_huge_pte; } ;   134     union __anonunion_ldv_14161_147 {   unsigned long private;   spinlock_t *ptl;   struct kmem_cache *slab_cache;   struct page 
*first_page; } ;   134     struct page {   unsigned long flags;   union __anonunion_ldv_14120_138 ldv_14120;   struct __anonstruct_ldv_14143_139 ldv_14143;   union __anonunion_ldv_14155_145 ldv_14155;   union __anonunion_ldv_14161_147 ldv_14161;   unsigned long debug_flags; } ;   187     struct page_frag {   struct page *page;   __u32 offset;   __u32 size; } ;   239     struct __anonstruct_linear_149 {   struct rb_node rb;   unsigned long rb_subtree_last; } ;   239     union __anonunion_shared_148 {   struct __anonstruct_linear_149 linear;   struct list_head nonlinear; } ;   239     struct anon_vma ;   239     struct vm_operations_struct ;   239     struct mempolicy ;   239     struct vm_area_struct {   unsigned long vm_start;   unsigned long vm_end;   struct vm_area_struct *vm_next;   struct vm_area_struct *vm_prev;   struct rb_node vm_rb;   unsigned long rb_subtree_gap;   struct mm_struct *vm_mm;   pgprot_t vm_page_prot;   unsigned long vm_flags;   union __anonunion_shared_148 shared;   struct list_head anon_vma_chain;   struct anon_vma *anon_vma;   const struct vm_operations_struct *vm_ops;   unsigned long vm_pgoff;   struct file *vm_file;   void *vm_private_data;   struct mempolicy *vm_policy; } ;   311     struct core_thread {   struct task_struct *task;   struct core_thread *next; } ;   317     struct core_state {   atomic_t nr_threads;   struct core_thread dumper;   struct completion startup; } ;   330     struct task_rss_stat {   int events;   int count[3U]; } ;   338     struct mm_rss_stat {   atomic_long_t count[3U]; } ;   343     struct kioctx_table ;   344     struct linux_binfmt ;   344     struct mmu_notifier_mm ;   344     struct mm_struct {   struct vm_area_struct *mmap;   struct rb_root mm_rb;   u32 vmacache_seqnum;   unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);   unsigned long mmap_base;   unsigned long mmap_legacy_base;   unsigned long task_size;   unsigned long highest_vm_end; 
  pgd_t *pgd;   atomic_t mm_users;   atomic_t mm_count;   atomic_long_t nr_ptes;   int map_count;   spinlock_t page_table_lock;   struct rw_semaphore mmap_sem;   struct list_head mmlist;   unsigned long hiwater_rss;   unsigned long hiwater_vm;   unsigned long total_vm;   unsigned long locked_vm;   unsigned long pinned_vm;   unsigned long shared_vm;   unsigned long exec_vm;   unsigned long stack_vm;   unsigned long def_flags;   unsigned long start_code;   unsigned long end_code;   unsigned long start_data;   unsigned long end_data;   unsigned long start_brk;   unsigned long brk;   unsigned long start_stack;   unsigned long arg_start;   unsigned long arg_end;   unsigned long env_start;   unsigned long env_end;   unsigned long saved_auxv[46U];   struct mm_rss_stat rss_stat;   struct linux_binfmt *binfmt;   cpumask_var_t cpu_vm_mask_var;   mm_context_t context;   unsigned long flags;   struct core_state *core_state;   spinlock_t ioctx_lock;   struct kioctx_table *ioctx_table;   struct task_struct *owner;   struct file *exe_file;   struct mmu_notifier_mm *mmu_notifier_mm;   struct cpumask cpumask_allocation;   unsigned long numa_next_scan;   unsigned long numa_scan_offset;   int numa_scan_seq;   bool tlb_flush_pending;   struct uprobes_state uprobes_state; } ;    15     typedef __u64 Elf64_Addr;    16     typedef __u16 Elf64_Half;    20     typedef __u32 Elf64_Word;    21     typedef __u64 Elf64_Xword;   190     struct elf64_sym {   Elf64_Word st_name;   unsigned char st_info;   unsigned char st_other;   Elf64_Half st_shndx;   Elf64_Addr st_value;   Elf64_Xword st_size; } ;   198     typedef struct elf64_sym Elf64_Sym;    48     union __anonunion_ldv_14524_153 {   unsigned long bitmap[4U];   struct callback_head callback_head; } ;    48     struct idr_layer {   int prefix;   int layer;   struct idr_layer *ary[256U];   int count;   union __anonunion_ldv_14524_153 ldv_14524; } ;    41     struct idr {   struct idr_layer *hint;   struct idr_layer *top;   int layers;   int 
cur;   spinlock_t lock;   int id_free_cnt;   struct idr_layer *id_free; } ;   124     struct ida_bitmap {   long nr_busy;   unsigned long bitmap[15U]; } ;   153     struct ida {   struct idr idr;   struct ida_bitmap *free_bitmap; } ;   185     struct dentry ;   186     struct iattr ;   187     struct super_block ;   188     struct file_system_type ;   189     struct kernfs_open_node ;   190     struct kernfs_iattrs ;   213     struct kernfs_root ;   213     struct kernfs_elem_dir {   unsigned long subdirs;   struct rb_root children;   struct kernfs_root *root; } ;    85     struct kernfs_node ;    85     struct kernfs_elem_symlink {   struct kernfs_node *target_kn; } ;    89     struct kernfs_ops ;    89     struct kernfs_elem_attr {   const struct kernfs_ops *ops;   struct kernfs_open_node *open;   loff_t size; } ;    95     union __anonunion_ldv_14668_154 {   struct kernfs_elem_dir dir;   struct kernfs_elem_symlink symlink;   struct kernfs_elem_attr attr; } ;    95     struct kernfs_node {   atomic_t count;   atomic_t active;   struct lockdep_map dep_map;   struct kernfs_node *parent;   const char *name;   struct rb_node rb;   const void *ns;   unsigned int hash;   union __anonunion_ldv_14668_154 ldv_14668;   void *priv;   unsigned short flags;   umode_t mode;   unsigned int ino;   struct kernfs_iattrs *iattr; } ;   137     struct kernfs_syscall_ops {   int (*remount_fs)(struct kernfs_root *, int *, char *);   int (*show_options)(struct seq_file *, struct kernfs_root *);   int (*mkdir)(struct kernfs_node *, const char *, umode_t );   int (*rmdir)(struct kernfs_node *);   int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); } ;   154     struct kernfs_root {   struct kernfs_node *kn;   unsigned int flags;   struct ida ino_ida;   struct kernfs_syscall_ops *syscall_ops;   struct list_head supers;   wait_queue_head_t deactivate_waitq; } ;   170     struct kernfs_open_file {   struct kernfs_node *kn;   struct file *file;   void *priv;   struct 
mutex mutex;   int event;   struct list_head list;   size_t atomic_write_len;   bool mmapped;   const struct vm_operations_struct *vm_ops; } ;   186     struct kernfs_ops {   int (*seq_show)(struct seq_file *, void *);   void * (*seq_start)(struct seq_file *, loff_t *);   void * (*seq_next)(struct seq_file *, void *, loff_t *);   void (*seq_stop)(struct seq_file *, void *);   ssize_t  (*read)(struct kernfs_open_file *, char *, size_t , loff_t );   size_t atomic_write_len;   ssize_t  (*write)(struct kernfs_open_file *, char *, size_t , loff_t );   int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *);   struct lock_class_key lockdep_key; } ;   462     struct sock ;   463     struct kobject ;   464     enum kobj_ns_type {   KOBJ_NS_TYPE_NONE = 0,   KOBJ_NS_TYPE_NET = 1,   KOBJ_NS_TYPES = 2 } ;   470     struct kobj_ns_type_operations {   enum kobj_ns_type type;   bool  (*current_may_mount)();   void * (*grab_current_ns)();   const void * (*netlink_ns)(struct sock *);   const void * (*initial_ns)();   void (*drop_ns)(void *); } ;    59     struct bin_attribute ;    60     struct attribute {   const char *name;   umode_t mode;   bool ignore_lockdep;   struct lock_class_key *key;   struct lock_class_key skey; } ;    37     struct attribute_group {   const char *name;   umode_t  (*is_visible)(struct kobject *, struct attribute *, int);   struct attribute **attrs;   struct bin_attribute **bin_attrs; } ;    67     struct bin_attribute {   struct attribute attr;   size_t size;   void *private;   ssize_t  (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t );   ssize_t  (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t );   int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ;   131     struct sysfs_ops {   ssize_t  (*show)(struct kobject *, struct attribute *, char *);   ssize_t  (*store)(struct kobject *, struct attribute *, const char *, 
size_t ); } ;   470     struct kref {   atomic_t refcount; } ;    52     struct kset ;    52     struct kobj_type ;    52     struct kobject {   const char *name;   struct list_head entry;   struct kobject *parent;   struct kset *kset;   struct kobj_type *ktype;   struct kernfs_node *sd;   struct kref kref;   struct delayed_work release;   unsigned char state_initialized;   unsigned char state_in_sysfs;   unsigned char state_add_uevent_sent;   unsigned char state_remove_uevent_sent;   unsigned char uevent_suppress; } ;   114     struct kobj_type {   void (*release)(struct kobject *);   const struct sysfs_ops *sysfs_ops;   struct attribute **default_attrs;   const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *);   const void * (*namespace)(struct kobject *); } ;   122     struct kobj_uevent_env {   char *argv[3U];   char *envp[32U];   int envp_idx;   char buf[2048U];   int buflen; } ;   130     struct kset_uevent_ops {   const int (*filter)(struct kset *, struct kobject *);   const const char * (*name)(struct kset *, struct kobject *);   const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ;   147     struct kset {   struct list_head list;   spinlock_t list_lock;   struct kobject kobj;   const struct kset_uevent_ops *uevent_ops; } ;   222     struct kernel_param ;   227     struct kernel_param_ops {   unsigned int flags;   int (*set)(const char *, const struct kernel_param *);   int (*get)(char *, const struct kernel_param *);   void (*free)(void *); } ;    58     struct kparam_string ;    58     struct kparam_array ;    58     union __anonunion_ldv_15343_155 {   void *arg;   const struct kparam_string *str;   const struct kparam_array *arr; } ;    58     struct kernel_param {   const char *name;   const struct kernel_param_ops *ops;   u16 perm;   s16 level;   union __anonunion_ldv_15343_155 ldv_15343; } ;    70     struct kparam_string {   unsigned int maxlen;   char *string; } ;    76     struct kparam_array {   
unsigned int max;   unsigned int elemsize;   unsigned int *num;   const struct kernel_param_ops *ops;   void *elem; } ;   461     struct mod_arch_specific { } ;    36     struct module_param_attrs ;    36     struct module_kobject {   struct kobject kobj;   struct module *mod;   struct kobject *drivers_dir;   struct module_param_attrs *mp;   struct completion *kobj_completion; } ;    46     struct module_attribute {   struct attribute attr;   ssize_t  (*show)(struct module_attribute *, struct module_kobject *, char *);   ssize_t  (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t );   void (*setup)(struct module *, const char *);   int (*test)(struct module *);   void (*free)(struct module *); } ;    72     struct exception_table_entry ;   205     enum module_state {   MODULE_STATE_LIVE = 0,   MODULE_STATE_COMING = 1,   MODULE_STATE_GOING = 2,   MODULE_STATE_UNFORMED = 3 } ;   212     struct module_ref {   unsigned long incs;   unsigned long decs; } ;   226     struct module_sect_attrs ;   226     struct module_notes_attrs ;   226     struct tracepoint ;   226     struct ftrace_event_call ;   226     struct module {   enum module_state state;   struct list_head list;   char name[56U];   struct module_kobject mkobj;   struct module_attribute *modinfo_attrs;   const char *version;   const char *srcversion;   struct kobject *holders_dir;   const struct kernel_symbol *syms;   const unsigned long *crcs;   unsigned int num_syms;   struct kernel_param *kp;   unsigned int num_kp;   unsigned int num_gpl_syms;   const struct kernel_symbol *gpl_syms;   const unsigned long *gpl_crcs;   const struct kernel_symbol *unused_syms;   const unsigned long *unused_crcs;   unsigned int num_unused_syms;   unsigned int num_unused_gpl_syms;   const struct kernel_symbol *unused_gpl_syms;   const unsigned long *unused_gpl_crcs;   bool sig_ok;   const struct kernel_symbol *gpl_future_syms;   const unsigned long *gpl_future_crcs;   unsigned int 
num_gpl_future_syms;   unsigned int num_exentries;   struct exception_table_entry *extable;   int (*init)();   void *module_init;   void *module_core;   unsigned int init_size;   unsigned int core_size;   unsigned int init_text_size;   unsigned int core_text_size;   unsigned int init_ro_size;   unsigned int core_ro_size;   struct mod_arch_specific arch;   unsigned int taints;   unsigned int num_bugs;   struct list_head bug_list;   struct bug_entry *bug_table;   Elf64_Sym *symtab;   Elf64_Sym *core_symtab;   unsigned int num_symtab;   unsigned int core_num_syms;   char *strtab;   char *core_strtab;   struct module_sect_attrs *sect_attrs;   struct module_notes_attrs *notes_attrs;   char *args;   void *percpu;   unsigned int percpu_size;   unsigned int num_tracepoints;   const struct tracepoint **tracepoints_ptrs;   unsigned int num_trace_bprintk_fmt;   const char **trace_bprintk_fmt_start;   struct ftrace_event_call **trace_events;   unsigned int num_trace_events;   unsigned int num_ftrace_callsites;   unsigned long *ftrace_callsites;   struct list_head source_list;   struct list_head target_list;   void (*exit)();   struct module_ref *refptr;   ctor_fn_t  (**ctors)();   unsigned int num_ctors; } ;    57     struct mem_cgroup ;   368     struct kmem_cache_cpu {   void **freelist;   unsigned long tid;   struct page *page;   struct page *partial;   unsigned int stat[26U]; } ;    48     struct kmem_cache_order_objects {   unsigned long x; } ;    58     struct memcg_cache_params ;    58     struct kmem_cache_node ;    58     struct kmem_cache {   struct kmem_cache_cpu *cpu_slab;   unsigned long flags;   unsigned long min_partial;   int size;   int object_size;   int offset;   int cpu_partial;   struct kmem_cache_order_objects oo;   struct kmem_cache_order_objects max;   struct kmem_cache_order_objects min;   gfp_t allocflags;   int refcount;   void (*ctor)(void *);   int inuse;   int align;   int reserved;   const char *name;   struct list_head list;   struct kobject 
kobj;   struct memcg_cache_params *memcg_params;   int max_attr_size;   struct kset *memcg_kset;   int remote_node_defrag_ratio;   struct kmem_cache_node *node[1024U]; } ;   490     struct __anonstruct_ldv_15963_157 {   struct callback_head callback_head;   struct kmem_cache *memcg_caches[0U]; } ;   490     struct __anonstruct_ldv_15969_158 {   struct mem_cgroup *memcg;   struct list_head list;   struct kmem_cache *root_cache;   atomic_t nr_pages; } ;   490     union __anonunion_ldv_15970_156 {   struct __anonstruct_ldv_15963_157 ldv_15963;   struct __anonstruct_ldv_15969_158 ldv_15969; } ;   490     struct memcg_cache_params {   bool is_root_cache;   union __anonunion_ldv_15970_156 ldv_15970; } ;    15     struct klist_node ;    37     struct klist_node {   void *n_klist;   struct list_head n_node;   struct kref n_ref; } ;    67     struct path ;    68     struct seq_file {   char *buf;   size_t size;   size_t from;   size_t count;   size_t pad_until;   loff_t index;   loff_t read_pos;   u64 version;   struct mutex lock;   const struct seq_operations *op;   int poll_event;   struct user_namespace *user_ns;   void *private; } ;    35     struct seq_operations {   void * (*start)(struct seq_file *, loff_t *);   void (*stop)(struct seq_file *, void *);   void * (*next)(struct seq_file *, void *, loff_t *);   int (*show)(struct seq_file *, void *); } ;   196     struct pinctrl ;   197     struct pinctrl_state ;   194     struct dev_pin_info {   struct pinctrl *p;   struct pinctrl_state *default_state;   struct pinctrl_state *sleep_state;   struct pinctrl_state *idle_state; } ;    42     struct dma_map_ops ;    42     struct dev_archdata {   struct dma_map_ops *dma_ops;   void *iommu; } ;    11     struct pdev_archdata { } ;    14     struct device_private ;    15     struct device_driver ;    16     struct driver_private ;    17     struct class ;    18     struct subsys_private ;    19     struct bus_type ;    20     struct device_node ;    21     struct iommu_ops ;  
  22     struct iommu_group ;    60     struct device_attribute ;    60     struct bus_type {   const char *name;   const char *dev_name;   struct device *dev_root;   struct device_attribute *dev_attrs;   const struct attribute_group **bus_groups;   const struct attribute_group **dev_groups;   const struct attribute_group **drv_groups;   int (*match)(struct device *, struct device_driver *);   int (*uevent)(struct device *, struct kobj_uevent_env *);   int (*probe)(struct device *);   int (*remove)(struct device *);   void (*shutdown)(struct device *);   int (*online)(struct device *);   int (*offline)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int (*resume)(struct device *);   const struct dev_pm_ops *pm;   struct iommu_ops *iommu_ops;   struct subsys_private *p;   struct lock_class_key lock_key; } ;   138     struct device_type ;   195     struct of_device_id ;   195     struct acpi_device_id ;   195     struct device_driver {   const char *name;   struct bus_type *bus;   struct module *owner;   const char *mod_name;   bool suppress_bind_attrs;   const struct of_device_id *of_match_table;   const struct acpi_device_id *acpi_match_table;   int (*probe)(struct device *);   int (*remove)(struct device *);   void (*shutdown)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int (*resume)(struct device *);   const struct attribute_group **groups;   const struct dev_pm_ops *pm;   struct driver_private *p; } ;   321     struct class_attribute ;   321     struct class {   const char *name;   struct module *owner;   struct class_attribute *class_attrs;   const struct attribute_group **dev_groups;   struct kobject *dev_kobj;   int (*dev_uevent)(struct device *, struct kobj_uevent_env *);   char * (*devnode)(struct device *, umode_t *);   void (*class_release)(struct class *);   void (*dev_release)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int (*resume)(struct device *);   const struct 
kobj_ns_type_operations *ns_type;   const void * (*namespace)(struct device *);   const struct dev_pm_ops *pm;   struct subsys_private *p; } ;   414     struct class_attribute {   struct attribute attr;   ssize_t  (*show)(struct class *, struct class_attribute *, char *);   ssize_t  (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ;   482     struct device_type {   const char *name;   const struct attribute_group **groups;   int (*uevent)(struct device *, struct kobj_uevent_env *);   char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *);   void (*release)(struct device *);   const struct dev_pm_ops *pm; } ;   510     struct device_attribute {   struct attribute attr;   ssize_t  (*show)(struct device *, struct device_attribute *, char *);   ssize_t  (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ;   640     struct device_dma_parameters {   unsigned int max_segment_size;   unsigned long segment_boundary_mask; } ;   649     struct acpi_device ;   650     struct acpi_dev_node {   struct acpi_device *companion; } ;   656     struct dma_coherent_mem ;   656     struct cma ;   656     struct device {   struct device *parent;   struct device_private *p;   struct kobject kobj;   const char *init_name;   const struct device_type *type;   struct mutex mutex;   struct bus_type *bus;   struct device_driver *driver;   void *platform_data;   void *driver_data;   struct dev_pm_info power;   struct dev_pm_domain *pm_domain;   struct dev_pin_info *pins;   int numa_node;   u64 *dma_mask;   u64 coherent_dma_mask;   unsigned long dma_pfn_offset;   struct device_dma_parameters *dma_parms;   struct list_head dma_pools;   struct dma_coherent_mem *dma_mem;   struct cma *cma_area;   struct dev_archdata archdata;   struct device_node *of_node;   struct acpi_dev_node acpi_node;   dev_t devt;   u32 id;   spinlock_t devres_lock;   struct list_head devres_head;   struct klist_node knode_class;   struct class *class;   const 
struct attribute_group **groups;   void (*release)(struct device *);   struct iommu_group *iommu_group;   bool offline_disabled;   bool offline; } ;   803     struct wakeup_source {   const char *name;   struct list_head entry;   spinlock_t lock;   struct timer_list timer;   unsigned long timer_expires;   ktime_t total_time;   ktime_t max_time;   ktime_t last_time;   ktime_t start_prevent_time;   ktime_t prevent_sleep_time;   unsigned long event_count;   unsigned long active_count;   unsigned long relax_count;   unsigned long expire_count;   unsigned long wakeup_count;   bool active;   bool autosleep_enabled; } ;    13     typedef unsigned long kernel_ulong_t;   186     struct acpi_device_id {   __u8 id[9U];   kernel_ulong_t driver_data; } ;   219     struct of_device_id {   char name[32U];   char type[32U];   char compatible[128U];   const void *data; } ;   479     struct platform_device_id {   char name[20U];   kernel_ulong_t driver_data; } ;   628     struct mfd_cell ;   629     struct platform_device {   const char *name;   int id;   bool id_auto;   struct device dev;   u32 num_resources;   struct resource *resource;   const struct platform_device_id *id_entry;   struct mfd_cell *mfd_cell;   struct pdev_archdata archdata; } ;    62     struct exception_table_entry {   int insn;   int fixup; } ;    61     struct timerqueue_node {   struct rb_node node;   ktime_t expires; } ;    12     struct timerqueue_head {   struct rb_root head;   struct timerqueue_node *next; } ;    50     struct hrtimer_clock_base ;    51     struct hrtimer_cpu_base ;    60     enum hrtimer_restart {   HRTIMER_NORESTART = 0,   HRTIMER_RESTART = 1 } ;    65     struct hrtimer {   struct timerqueue_node node;   ktime_t _softexpires;   enum hrtimer_restart  (*function)(struct hrtimer *);   struct hrtimer_clock_base *base;   unsigned long state;   int start_pid;   void *start_site;   char start_comm[16U]; } ;   132     struct hrtimer_clock_base {   struct hrtimer_cpu_base *cpu_base;   int 
index;   clockid_t clockid;   struct timerqueue_head active;   ktime_t resolution;   ktime_t  (*get_time)();   ktime_t softirq_time;   ktime_t offset; } ;   163     struct hrtimer_cpu_base {   raw_spinlock_t lock;   unsigned int active_bases;   unsigned int clock_was_set;   ktime_t expires_next;   int hres_active;   int hang_detected;   unsigned long nr_events;   unsigned long nr_retries;   unsigned long nr_hangs;   ktime_t max_hang_time;   struct hrtimer_clock_base clock_base[4U]; } ;    65     struct dma_attrs {   unsigned long flags[1U]; } ;    70     enum dma_data_direction {   DMA_BIDIRECTIONAL = 0,   DMA_TO_DEVICE = 1,   DMA_FROM_DEVICE = 2,   DMA_NONE = 3 } ;    93     struct shrink_control {   gfp_t gfp_mask;   unsigned long nr_to_scan;   nodemask_t nodes_to_scan;   int nid; } ;    26     struct shrinker {   unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *);   unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *);   int seeks;   long batch;   unsigned long flags;   struct list_head list;   atomic_long_t *nr_deferred; } ;    71     struct file_ra_state ;    72     struct user_struct ;    73     struct writeback_control ;   188     struct vm_fault {   unsigned int flags;   unsigned long pgoff;   void *virtual_address;   struct page *page;   unsigned long max_pgoff;   pte_t *pte; } ;   221     struct vm_operations_struct {   void (*open)(struct vm_area_struct *);   void (*close)(struct vm_area_struct *);   int (*fault)(struct vm_area_struct *, struct vm_fault *);   void (*map_pages)(struct vm_area_struct *, struct vm_fault *);   int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *);   int (*access)(struct vm_area_struct *, unsigned long, void *, int, int);   const char * (*name)(struct vm_area_struct *);   int (*set_policy)(struct vm_area_struct *, struct mempolicy *);   struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long);   int (*migrate)(struct vm_area_struct *, const 
nodemask_t *, const nodemask_t *, unsigned long);   int (*remap_pages)(struct vm_area_struct *, unsigned long, unsigned long, unsigned long); } ;  2112     struct scatterlist {   unsigned long sg_magic;   unsigned long page_link;   unsigned int offset;   unsigned int length;   dma_addr_t dma_address;   unsigned int dma_length; } ;    17     struct sg_table {   struct scatterlist *sgl;   unsigned int nents;   unsigned int orig_nents; } ;   351     struct dma_map_ops {   void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , struct dma_attrs *);   void (*free)(struct device *, size_t , void *, dma_addr_t , struct dma_attrs *);   int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , struct dma_attrs *);   int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , struct dma_attrs *);   dma_addr_t  (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , struct dma_attrs *);   void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs *);   int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *);   void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *);   void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction );   void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction );   void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction );   void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction );   int (*mapping_error)(struct device *, dma_addr_t );   int (*dma_supported)(struct device *, u64 );   int (*set_dma_mask)(struct device *, u64 );   int is_phys; } ;    30     typedef u32 phandle;    32     struct property {   char *name;   int length;   void *value;   struct property *next;   
unsigned long _flags;   unsigned int unique_id;   struct bin_attribute attr; } ;    42     struct device_node {   const char *name;   const char *type;   phandle phandle;   const char *full_name;   struct property *properties;   struct property *deadprops;   struct device_node *parent;   struct device_node *child;   struct device_node *sibling;   struct device_node *next;   struct device_node *allnext;   struct kobject kobj;   unsigned long _flags;   void *data; } ;   783     struct usb_ctrlrequest {   __u8 bRequestType;   __u8 bRequest;   __le16 wValue;   __le16 wIndex;   __le16 wLength; } ;   253     struct usb_device_descriptor {   __u8 bLength;   __u8 bDescriptorType;   __le16 bcdUSB;   __u8 bDeviceClass;   __u8 bDeviceSubClass;   __u8 bDeviceProtocol;   __u8 bMaxPacketSize0;   __le16 idVendor;   __le16 idProduct;   __le16 bcdDevice;   __u8 iManufacturer;   __u8 iProduct;   __u8 iSerialNumber;   __u8 bNumConfigurations; } ;   275     struct usb_config_descriptor {   __u8 bLength;   __u8 bDescriptorType;   __le16 wTotalLength;   __u8 bNumInterfaces;   __u8 bConfigurationValue;   __u8 iConfiguration;   __u8 bmAttributes;   __u8 bMaxPower; } ;   343     struct usb_interface_descriptor {   __u8 bLength;   __u8 bDescriptorType;   __u8 bInterfaceNumber;   __u8 bAlternateSetting;   __u8 bNumEndpoints;   __u8 bInterfaceClass;   __u8 bInterfaceSubClass;   __u8 bInterfaceProtocol;   __u8 iInterface; } ;   363     struct usb_endpoint_descriptor {   __u8 bLength;   __u8 bDescriptorType;   __u8 bEndpointAddress;   __u8 bmAttributes;   __le16 wMaxPacketSize;   __u8 bInterval;   __u8 bRefresh;   __u8 bSynchAddress; } ;   613     struct usb_ss_ep_comp_descriptor {   __u8 bLength;   __u8 bDescriptorType;   __u8 bMaxBurst;   __u8 bmAttributes;   __le16 wBytesPerInterval; } ;   692     struct usb_interface_assoc_descriptor {   __u8 bLength;   __u8 bDescriptorType;   __u8 bFirstInterface;   __u8 bInterfaceCount;   __u8 bFunctionClass;   __u8 bFunctionSubClass;   __u8 
bFunctionProtocol;   __u8 iFunction; } ;   751     struct usb_bos_descriptor {   __u8 bLength;   __u8 bDescriptorType;   __le16 wTotalLength;   __u8 bNumDeviceCaps; } ;   801     struct usb_ext_cap_descriptor {   __u8 bLength;   __u8 bDescriptorType;   __u8 bDevCapabilityType;   __le32 bmAttributes; } ;   811     struct usb_ss_cap_descriptor {   __u8 bLength;   __u8 bDescriptorType;   __u8 bDevCapabilityType;   __u8 bmAttributes;   __le16 wSpeedSupported;   __u8 bFunctionalitySupport;   __u8 bU1devExitLat;   __le16 bU2DevExitLat; } ;   840     struct usb_ss_container_id_descriptor {   __u8 bLength;   __u8 bDescriptorType;   __u8 bDevCapabilityType;   __u8 bReserved;   __u8 ContainerID[16U]; } ;   905     enum usb_device_speed {   USB_SPEED_UNKNOWN = 0,   USB_SPEED_LOW = 1,   USB_SPEED_FULL = 2,   USB_SPEED_HIGH = 3,   USB_SPEED_WIRELESS = 4,   USB_SPEED_SUPER = 5 } ;   914     enum usb_device_state {   USB_STATE_NOTATTACHED = 0,   USB_STATE_ATTACHED = 1,   USB_STATE_POWERED = 2,   USB_STATE_RECONNECTING = 3,   USB_STATE_UNAUTHENTICATED = 4,   USB_STATE_DEFAULT = 5,   USB_STATE_ADDRESS = 6,   USB_STATE_CONFIGURED = 7,   USB_STATE_SUSPENDED = 8 } ;    54     struct usb_ep ;    55     struct usb_request {   void *buf;   unsigned int length;   dma_addr_t dma;   struct scatterlist *sg;   unsigned int num_sgs;   unsigned int num_mapped_sgs;   unsigned short stream_id;   unsigned char no_interrupt;   unsigned char zero;   unsigned char short_not_ok;   void (*complete)(struct usb_ep *, struct usb_request *);   void *context;   struct list_head list;   int status;   unsigned int actual; } ;   113     struct usb_ep_ops {   int (*enable)(struct usb_ep *, const struct usb_endpoint_descriptor *);   int (*disable)(struct usb_ep *);   struct usb_request * (*alloc_request)(struct usb_ep *, gfp_t );   void (*free_request)(struct usb_ep *, struct usb_request *);   int (*queue)(struct usb_ep *, struct usb_request *, gfp_t );   int (*dequeue)(struct usb_ep *, struct usb_request *);   
int (*set_halt)(struct usb_ep *, int);   int (*set_wedge)(struct usb_ep *);   int (*fifo_status)(struct usb_ep *);   void (*fifo_flush)(struct usb_ep *); } ;   142     struct usb_ep {   void *driver_data;   const char *name;   const struct usb_ep_ops *ops;   struct list_head ep_list;   unsigned short maxpacket;   unsigned short maxpacket_limit;   unsigned short max_streams;   unsigned char mult;   unsigned char maxburst;   u8 address;   const struct usb_endpoint_descriptor *desc;   const struct usb_ss_ep_comp_descriptor *comp_desc; } ;   462     struct usb_dcd_config_params {   __u8 bU1devExitLat;   __le16 bU2DevExitLat; } ;   471     struct usb_gadget ;   472     struct usb_gadget_driver ;   473     struct usb_gadget_ops {   int (*get_frame)(struct usb_gadget *);   int (*wakeup)(struct usb_gadget *);   int (*set_selfpowered)(struct usb_gadget *, int);   int (*vbus_session)(struct usb_gadget *, int);   int (*vbus_draw)(struct usb_gadget *, unsigned int);   int (*pullup)(struct usb_gadget *, int);   int (*ioctl)(struct usb_gadget *, unsigned int, unsigned long);   void (*get_config_params)(struct usb_dcd_config_params *);   int (*udc_start)(struct usb_gadget *, struct usb_gadget_driver *);   int (*udc_stop)(struct usb_gadget *, struct usb_gadget_driver *); } ;   494     struct usb_gadget {   struct work_struct work;   const struct usb_gadget_ops *ops;   struct usb_ep *ep0;   struct list_head ep_list;   enum usb_device_speed speed;   enum usb_device_speed max_speed;   enum usb_device_state state;   const char *name;   struct device dev;   unsigned int out_epnum;   unsigned int in_epnum;   unsigned char sg_supported;   unsigned char is_otg;   unsigned char is_a_peripheral;   unsigned char b_hnp_enable;   unsigned char a_hnp_support;   unsigned char a_alt_hnp_support;   unsigned char quirk_ep_out_aligned_size; } ;   795     struct usb_gadget_driver {   char *function;   enum usb_device_speed max_speed;   int (*bind)(struct usb_gadget *, struct usb_gadget_driver *);   
void (*unbind)(struct usb_gadget *);   int (*setup)(struct usb_gadget *, const struct usb_ctrlrequest *);   void (*disconnect)(struct usb_gadget *);   void (*suspend)(struct usb_gadget *);   void (*resume)(struct usb_gadget *);   struct device_driver driver; } ;   461     struct hlist_bl_node ;   461     struct hlist_bl_head {   struct hlist_bl_node *first; } ;    36     struct hlist_bl_node {   struct hlist_bl_node *next;   struct hlist_bl_node **pprev; } ;   114     struct __anonstruct_ldv_24561_165 {   spinlock_t lock;   unsigned int count; } ;   114     union __anonunion_ldv_24562_164 {   struct __anonstruct_ldv_24561_165 ldv_24561; } ;   114     struct lockref {   union __anonunion_ldv_24562_164 ldv_24562; } ;    49     struct nameidata ;    50     struct vfsmount ;    51     struct __anonstruct_ldv_24585_167 {   u32 hash;   u32 len; } ;    51     union __anonunion_ldv_24587_166 {   struct __anonstruct_ldv_24585_167 ldv_24585;   u64 hash_len; } ;    51     struct qstr {   union __anonunion_ldv_24587_166 ldv_24587;   const unsigned char *name; } ;    90     struct dentry_operations ;    90     union __anonunion_d_u_168 {   struct list_head d_child;   struct callback_head d_rcu; } ;    90     struct dentry {   unsigned int d_flags;   seqcount_t d_seq;   struct hlist_bl_node d_hash;   struct dentry *d_parent;   struct qstr d_name;   struct inode *d_inode;   unsigned char d_iname[32U];   struct lockref d_lockref;   const struct dentry_operations *d_op;   struct super_block *d_sb;   unsigned long d_time;   void *d_fsdata;   struct list_head d_lru;   union __anonunion_d_u_168 d_u;   struct list_head d_subdirs;   struct hlist_node d_alias; } ;   142     struct dentry_operations {   int (*d_revalidate)(struct dentry *, unsigned int);   int (*d_weak_revalidate)(struct dentry *, unsigned int);   int (*d_hash)(const struct dentry *, struct qstr *);   int (*d_compare)(const struct dentry *, const struct dentry *, unsigned int, const char *, const struct qstr *);   int 
(*d_delete)(const struct dentry *);   void (*d_release)(struct dentry *);   void (*d_prune)(struct dentry *);   void (*d_iput)(struct dentry *, struct inode *);   char * (*d_dname)(struct dentry *, char *, int);   struct vfsmount * (*d_automount)(struct path *);   int (*d_manage)(struct dentry *, bool ); } ;   477     struct path {   struct vfsmount *mnt;   struct dentry *dentry; } ;    27     struct list_lru_node {   spinlock_t lock;   struct list_head list;   long nr_items; } ;    30     struct list_lru {   struct list_lru_node *node;   nodemask_t active_nodes; } ;    58     struct __anonstruct_ldv_24948_170 {   struct radix_tree_node *parent;   void *private_data; } ;    58     union __anonunion_ldv_24950_169 {   struct __anonstruct_ldv_24948_170 ldv_24948;   struct callback_head callback_head; } ;    58     struct radix_tree_node {   unsigned int path;   unsigned int count;   union __anonunion_ldv_24950_169 ldv_24950;   struct list_head private_list;   void *slots[64U];   unsigned long tags[3U][1U]; } ;   105     struct radix_tree_root {   unsigned int height;   gfp_t gfp_mask;   struct radix_tree_node *rnode; } ;   428     enum pid_type {   PIDTYPE_PID = 0,   PIDTYPE_PGID = 1,   PIDTYPE_SID = 2,   PIDTYPE_MAX = 3 } ;   435     struct pid_namespace ;   435     struct upid {   int nr;   struct pid_namespace *ns;   struct hlist_node pid_chain; } ;    56     struct pid {   atomic_t count;   unsigned int level;   struct hlist_head tasks[3U];   struct callback_head rcu;   struct upid numbers[1U]; } ;    68     struct pid_link {   struct hlist_node node;   struct pid *pid; } ;    22     struct kernel_cap_struct {   __u32 cap[2U]; } ;    25     typedef struct kernel_cap_struct kernel_cap_t;    45     struct fiemap_extent {   __u64 fe_logical;   __u64 fe_physical;   __u64 fe_length;   __u64 fe_reserved64[2U];   __u32 fe_flags;   __u32 fe_reserved[3U]; } ;    38     enum migrate_mode {   MIGRATE_ASYNC = 0,   MIGRATE_SYNC_LIGHT = 1,   MIGRATE_SYNC = 2 } ;    30     
struct block_device ;    31     struct io_context ;    59     struct export_operations ;    61     struct iovec ;    62     struct kiocb ;    63     struct pipe_inode_info ;    64     struct poll_table_struct ;    65     struct kstatfs ;    66     struct swap_info_struct ;    67     struct iov_iter ;    69     struct iattr {   unsigned int ia_valid;   umode_t ia_mode;   kuid_t ia_uid;   kgid_t ia_gid;   loff_t ia_size;   struct timespec ia_atime;   struct timespec ia_mtime;   struct timespec ia_ctime;   struct file *ia_file; } ;   253     struct percpu_counter {   raw_spinlock_t lock;   s64 count;   struct list_head list;   s32 *counters; } ;   176     struct fs_disk_quota {   __s8 d_version;   __s8 d_flags;   __u16 d_fieldmask;   __u32 d_id;   __u64 d_blk_hardlimit;   __u64 d_blk_softlimit;   __u64 d_ino_hardlimit;   __u64 d_ino_softlimit;   __u64 d_bcount;   __u64 d_icount;   __s32 d_itimer;   __s32 d_btimer;   __u16 d_iwarns;   __u16 d_bwarns;   __s32 d_padding2;   __u64 d_rtb_hardlimit;   __u64 d_rtb_softlimit;   __u64 d_rtbcount;   __s32 d_rtbtimer;   __u16 d_rtbwarns;   __s16 d_padding3;   char d_padding4[8U]; } ;    76     struct fs_qfilestat {   __u64 qfs_ino;   __u64 qfs_nblks;   __u32 qfs_nextents; } ;   151     typedef struct fs_qfilestat fs_qfilestat_t;   152     struct fs_quota_stat {   __s8 qs_version;   __u16 qs_flags;   __s8 qs_pad;   fs_qfilestat_t qs_uquota;   fs_qfilestat_t qs_gquota;   __u32 qs_incoredqs;   __s32 qs_btimelimit;   __s32 qs_itimelimit;   __s32 qs_rtbtimelimit;   __u16 qs_bwarnlimit;   __u16 qs_iwarnlimit; } ;   166     struct fs_qfilestatv {   __u64 qfs_ino;   __u64 qfs_nblks;   __u32 qfs_nextents;   __u32 qfs_pad; } ;   196     struct fs_quota_statv {   __s8 qs_version;   __u8 qs_pad1;   __u16 qs_flags;   __u32 qs_incoredqs;   struct fs_qfilestatv qs_uquota;   struct fs_qfilestatv qs_gquota;   struct fs_qfilestatv qs_pquota;   __s32 qs_btimelimit;   __s32 qs_itimelimit;   __s32 qs_rtbtimelimit;   __u16 qs_bwarnlimit;   __u16 
qs_iwarnlimit;   __u64 qs_pad2[8U]; } ;   212     struct dquot ;    19     typedef __kernel_uid32_t projid_t;    23     struct __anonstruct_kprojid_t_172 {   projid_t val; } ;    23     typedef struct __anonstruct_kprojid_t_172 kprojid_t;   119     struct if_dqinfo {   __u64 dqi_bgrace;   __u64 dqi_igrace;   __u32 dqi_flags;   __u32 dqi_valid; } ;   152     enum quota_type {   USRQUOTA = 0,   GRPQUOTA = 1,   PRJQUOTA = 2 } ;    60     typedef long long qsize_t;    61     union __anonunion_ldv_25749_173 {   kuid_t uid;   kgid_t gid;   kprojid_t projid; } ;    61     struct kqid {   union __anonunion_ldv_25749_173 ldv_25749;   enum quota_type type; } ;   178     struct mem_dqblk {   qsize_t dqb_bhardlimit;   qsize_t dqb_bsoftlimit;   qsize_t dqb_curspace;   qsize_t dqb_rsvspace;   qsize_t dqb_ihardlimit;   qsize_t dqb_isoftlimit;   qsize_t dqb_curinodes;   time_t dqb_btime;   time_t dqb_itime; } ;   200     struct quota_format_type ;   201     struct mem_dqinfo {   struct quota_format_type *dqi_format;   int dqi_fmt_id;   struct list_head dqi_dirty_list;   unsigned long dqi_flags;   unsigned int dqi_bgrace;   unsigned int dqi_igrace;   qsize_t dqi_maxblimit;   qsize_t dqi_maxilimit;   void *dqi_priv; } ;   264     struct dquot {   struct hlist_node dq_hash;   struct list_head dq_inuse;   struct list_head dq_free;   struct list_head dq_dirty;   struct mutex dq_lock;   atomic_t dq_count;   wait_queue_head_t dq_wait_unused;   struct super_block *dq_sb;   struct kqid dq_id;   loff_t dq_off;   unsigned long dq_flags;   struct mem_dqblk dq_dqb; } ;   291     struct quota_format_ops {   int (*check_quota_file)(struct super_block *, int);   int (*read_file_info)(struct super_block *, int);   int (*write_file_info)(struct super_block *, int);   int (*free_file_info)(struct super_block *, int);   int (*read_dqblk)(struct dquot *);   int (*commit_dqblk)(struct dquot *);   int (*release_dqblk)(struct dquot *); } ;   302     struct dquot_operations {   int (*write_dquot)(struct 
dquot *);   struct dquot * (*alloc_dquot)(struct super_block *, int);   void (*destroy_dquot)(struct dquot *);   int (*acquire_dquot)(struct dquot *);   int (*release_dquot)(struct dquot *);   int (*mark_dirty)(struct dquot *);   int (*write_info)(struct super_block *, int);   qsize_t * (*get_reserved_space)(struct inode *); } ;   316     struct quotactl_ops {   int (*quota_on)(struct super_block *, int, int, struct path *);   int (*quota_on_meta)(struct super_block *, int, int);   int (*quota_off)(struct super_block *, int);   int (*quota_sync)(struct super_block *, int);   int (*get_info)(struct super_block *, int, struct if_dqinfo *);   int (*set_info)(struct super_block *, int, struct if_dqinfo *);   int (*get_dqblk)(struct super_block *, struct kqid , struct fs_disk_quota *);   int (*set_dqblk)(struct super_block *, struct kqid , struct fs_disk_quota *);   int (*get_xstate)(struct super_block *, struct fs_quota_stat *);   int (*set_xstate)(struct super_block *, unsigned int, int);   int (*get_xstatev)(struct super_block *, struct fs_quota_statv *);   int (*rm_xquota)(struct super_block *, unsigned int); } ;   334     struct quota_format_type {   int qf_fmt_id;   const struct quota_format_ops *qf_ops;   struct module *qf_owner;   struct quota_format_type *qf_next; } ;   380     struct quota_info {   unsigned int flags;   struct mutex dqio_mutex;   struct mutex dqonoff_mutex;   struct rw_semaphore dqptr_sem;   struct inode *files[2U];   struct mem_dqinfo info[2U];   const struct quota_format_ops *ops[2U]; } ;   323     struct address_space_operations {   int (*writepage)(struct page *, struct writeback_control *);   int (*readpage)(struct file *, struct page *);   int (*writepages)(struct address_space *, struct writeback_control *);   int (*set_page_dirty)(struct page *);   int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int);   int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, 
struct page **, void **);   int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *);   sector_t  (*bmap)(struct address_space *, sector_t );   void (*invalidatepage)(struct page *, unsigned int, unsigned int);   int (*releasepage)(struct page *, gfp_t );   void (*freepage)(struct page *);   ssize_t  (*direct_IO)(int, struct kiocb *, struct iov_iter *, loff_t );   int (*get_xip_mem)(struct address_space *, unsigned long, int, void **, unsigned long *);   int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode );   int (*launder_page)(struct page *);   int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);   void (*is_dirty_writeback)(struct page *, bool *, bool *);   int (*error_remove_page)(struct address_space *, struct page *);   int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *);   void (*swap_deactivate)(struct file *); } ;   382     struct backing_dev_info ;   383     struct address_space {   struct inode *host;   struct radix_tree_root page_tree;   spinlock_t tree_lock;   unsigned int i_mmap_writable;   struct rb_root i_mmap;   struct list_head i_mmap_nonlinear;   struct mutex i_mmap_mutex;   unsigned long nrpages;   unsigned long nrshadows;   unsigned long writeback_index;   const struct address_space_operations *a_ops;   unsigned long flags;   struct backing_dev_info *backing_dev_info;   spinlock_t private_lock;   struct list_head private_list;   void *private_data; } ;   405     struct request_queue ;   406     struct hd_struct ;   406     struct gendisk ;   406     struct block_device {   dev_t bd_dev;   int bd_openers;   struct inode *bd_inode;   struct super_block *bd_super;   struct mutex bd_mutex;   struct list_head bd_inodes;   void *bd_claiming;   void *bd_holder;   int bd_holders;   bool bd_write_holder;   struct list_head bd_holder_disks;   struct block_device *bd_contains;   unsigned int bd_block_size;   
struct hd_struct *bd_part;   unsigned int bd_part_count;   int bd_invalidated;   struct gendisk *bd_disk;   struct request_queue *bd_queue;   struct list_head bd_list;   unsigned long bd_private;   int bd_fsfreeze_count;   struct mutex bd_fsfreeze_mutex; } ;   478     struct posix_acl ;   479     struct inode_operations ;   479     union __anonunion_ldv_26164_176 {   const unsigned int i_nlink;   unsigned int __i_nlink; } ;   479     union __anonunion_ldv_26184_177 {   struct hlist_head i_dentry;   struct callback_head i_rcu; } ;   479     struct file_lock ;   479     struct cdev ;   479     union __anonunion_ldv_26201_178 {   struct pipe_inode_info *i_pipe;   struct block_device *i_bdev;   struct cdev *i_cdev; } ;   479     struct inode {   umode_t i_mode;   unsigned short i_opflags;   kuid_t i_uid;   kgid_t i_gid;   unsigned int i_flags;   struct posix_acl *i_acl;   struct posix_acl *i_default_acl;   const struct inode_operations *i_op;   struct super_block *i_sb;   struct address_space *i_mapping;   void *i_security;   unsigned long i_ino;   union __anonunion_ldv_26164_176 ldv_26164;   dev_t i_rdev;   loff_t i_size;   struct timespec i_atime;   struct timespec i_mtime;   struct timespec i_ctime;   spinlock_t i_lock;   unsigned short i_bytes;   unsigned int i_blkbits;   blkcnt_t i_blocks;   unsigned long i_state;   struct mutex i_mutex;   unsigned long dirtied_when;   struct hlist_node i_hash;   struct list_head i_wb_list;   struct list_head i_lru;   struct list_head i_sb_list;   union __anonunion_ldv_26184_177 ldv_26184;   u64 i_version;   atomic_t i_count;   atomic_t i_dio_count;   atomic_t i_writecount;   atomic_t i_readcount;   const struct file_operations *i_fop;   struct file_lock *i_flock;   struct address_space i_data;   struct dquot *i_dquot[2U];   struct list_head i_devices;   union __anonunion_ldv_26201_178 ldv_26201;   __u32 i_generation;   __u32 i_fsnotify_mask;   struct hlist_head i_fsnotify_marks;   void *i_private; } ;   715     struct fown_struct 
{   rwlock_t lock;   struct pid *pid;   enum pid_type pid_type;   kuid_t uid;   kuid_t euid;   int signum; } ;   723     struct file_ra_state {   unsigned long start;   unsigned int size;   unsigned int async_size;   unsigned int ra_pages;   unsigned int mmap_miss;   loff_t prev_pos; } ;   746     union __anonunion_f_u_179 {   struct llist_node fu_llist;   struct callback_head fu_rcuhead; } ;   746     struct file {   union __anonunion_f_u_179 f_u;   struct path f_path;   struct inode *f_inode;   const struct file_operations *f_op;   spinlock_t f_lock;   atomic_long_t f_count;   unsigned int f_flags;   fmode_t f_mode;   struct mutex f_pos_lock;   loff_t f_pos;   struct fown_struct f_owner;   const struct cred *f_cred;   struct file_ra_state f_ra;   u64 f_version;   void *f_security;   void *private_data;   struct list_head f_ep_links;   struct list_head f_tfile_llink;   struct address_space *f_mapping; } ;   836     struct files_struct ;   836     typedef struct files_struct *fl_owner_t;   837     struct file_lock_operations {   void (*fl_copy_lock)(struct file_lock *, struct file_lock *);   void (*fl_release_private)(struct file_lock *); } ;   842     struct lock_manager_operations {   int (*lm_compare_owner)(struct file_lock *, struct file_lock *);   unsigned long int (*lm_owner_key)(struct file_lock *);   void (*lm_notify)(struct file_lock *);   int (*lm_grant)(struct file_lock *, struct file_lock *, int);   void (*lm_break)(struct file_lock *);   int (*lm_change)(struct file_lock **, int); } ;   860     struct nlm_lockowner ;   861     struct nfs_lock_info {   u32 state;   struct nlm_lockowner *owner;   struct list_head list; } ;    14     struct nfs4_lock_state ;    15     struct nfs4_lock_info {   struct nfs4_lock_state *owner; } ;    19     struct fasync_struct ;    19     struct __anonstruct_afs_181 {   struct list_head link;   int state; } ;    19     union __anonunion_fl_u_180 {   struct nfs_lock_info nfs_fl;   struct nfs4_lock_info nfs4_fl;   struct 
__anonstruct_afs_181 afs; } ;    19     struct file_lock {   struct file_lock *fl_next;   struct hlist_node fl_link;   struct list_head fl_block;   fl_owner_t fl_owner;   unsigned int fl_flags;   unsigned char fl_type;   unsigned int fl_pid;   int fl_link_cpu;   struct pid *fl_nspid;   wait_queue_head_t fl_wait;   struct file *fl_file;   loff_t fl_start;   loff_t fl_end;   struct fasync_struct *fl_fasync;   unsigned long fl_break_time;   unsigned long fl_downgrade_time;   const struct file_lock_operations *fl_ops;   const struct lock_manager_operations *fl_lmops;   union __anonunion_fl_u_180 fl_u; } ;   963     struct fasync_struct {   spinlock_t fa_lock;   int magic;   int fa_fd;   struct fasync_struct *fa_next;   struct file *fa_file;   struct callback_head fa_rcu; } ;  1157     struct sb_writers {   struct percpu_counter counter[3U];   wait_queue_head_t wait;   int frozen;   wait_queue_head_t wait_unfrozen;   struct lockdep_map lock_map[3U]; } ;  1173     struct super_operations ;  1173     struct xattr_handler ;  1173     struct mtd_info ;  1173     struct super_block {   struct list_head s_list;   dev_t s_dev;   unsigned char s_blocksize_bits;   unsigned long s_blocksize;   loff_t s_maxbytes;   struct file_system_type *s_type;   const struct super_operations *s_op;   const struct dquot_operations *dq_op;   const struct quotactl_ops *s_qcop;   const struct export_operations *s_export_op;   unsigned long s_flags;   unsigned long s_magic;   struct dentry *s_root;   struct rw_semaphore s_umount;   int s_count;   atomic_t s_active;   void *s_security;   const struct xattr_handler **s_xattr;   struct list_head s_inodes;   struct hlist_bl_head s_anon;   struct list_head s_mounts;   struct block_device *s_bdev;   struct backing_dev_info *s_bdi;   struct mtd_info *s_mtd;   struct hlist_node s_instances;   struct quota_info s_dquot;   struct sb_writers s_writers;   char s_id[32U];   u8 s_uuid[16U];   void *s_fs_info;   unsigned int s_max_links;   fmode_t s_mode;   u32 
s_time_gran;   struct mutex s_vfs_rename_mutex;   char *s_subtype;   char *s_options;   const struct dentry_operations *s_d_op;   int cleancache_poolid;   struct shrinker s_shrink;   atomic_long_t s_remove_count;   int s_readonly_remount;   struct workqueue_struct *s_dio_done_wq;   struct list_lru s_dentry_lru;   struct list_lru s_inode_lru;   struct callback_head rcu; } ;  1403     struct fiemap_extent_info {   unsigned int fi_flags;   unsigned int fi_extents_mapped;   unsigned int fi_extents_max;   struct fiemap_extent *fi_extents_start; } ;  1441     struct dir_context {   int (*actor)(void *, const char *, int, loff_t , u64 , unsigned int);   loff_t pos; } ;  1446     struct file_operations {   struct module *owner;   loff_t  (*llseek)(struct file *, loff_t , int);   ssize_t  (*read)(struct file *, char *, size_t , loff_t *);   ssize_t  (*write)(struct file *, const char *, size_t , loff_t *);   ssize_t  (*aio_read)(struct kiocb *, const struct iovec *, unsigned long, loff_t );   ssize_t  (*aio_write)(struct kiocb *, const struct iovec *, unsigned long, loff_t );   ssize_t  (*read_iter)(struct kiocb *, struct iov_iter *);   ssize_t  (*write_iter)(struct kiocb *, struct iov_iter *);   int (*iterate)(struct file *, struct dir_context *);   unsigned int (*poll)(struct file *, struct poll_table_struct *);   long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long);   long int (*compat_ioctl)(struct file *, unsigned int, unsigned long);   int (*mmap)(struct file *, struct vm_area_struct *);   int (*open)(struct inode *, struct file *);   int (*flush)(struct file *, fl_owner_t );   int (*release)(struct inode *, struct file *);   int (*fsync)(struct file *, loff_t , loff_t , int);   int (*aio_fsync)(struct kiocb *, int);   int (*fasync)(int, struct file *, int);   int (*lock)(struct file *, int, struct file_lock *);   ssize_t  (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int);   unsigned long int (*get_unmapped_area)(struct file *, 
unsigned long, unsigned long, unsigned long, unsigned long);   int (*check_flags)(int);   int (*flock)(struct file *, int, struct file_lock *);   ssize_t  (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int);   ssize_t  (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int);   int (*setlease)(struct file *, long, struct file_lock **);   long int (*fallocate)(struct file *, int, loff_t , loff_t );   int (*show_fdinfo)(struct seq_file *, struct file *); } ;  1488     struct inode_operations {   struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int);   void * (*follow_link)(struct dentry *, struct nameidata *);   int (*permission)(struct inode *, int);   struct posix_acl * (*get_acl)(struct inode *, int);   int (*readlink)(struct dentry *, char *, int);   void (*put_link)(struct dentry *, struct nameidata *, void *);   int (*create)(struct inode *, struct dentry *, umode_t , bool );   int (*link)(struct dentry *, struct inode *, struct dentry *);   int (*unlink)(struct inode *, struct dentry *);   int (*symlink)(struct inode *, struct dentry *, const char *);   int (*mkdir)(struct inode *, struct dentry *, umode_t );   int (*rmdir)(struct inode *, struct dentry *);   int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t );   int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *);   int (*rename2)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int);   int (*setattr)(struct dentry *, struct iattr *);   int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *);   int (*setxattr)(struct dentry *, const char *, const void *, size_t , int);   ssize_t  (*getxattr)(struct dentry *, const char *, void *, size_t );   ssize_t  (*listxattr)(struct dentry *, char *, size_t );   int (*removexattr)(struct dentry *, const char *);   int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 );   int 
(*update_time)(struct inode *, struct timespec *, int);   int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *);   int (*tmpfile)(struct inode *, struct dentry *, umode_t );   int (*set_acl)(struct inode *, struct posix_acl *, int); } ;  1535     struct super_operations {   struct inode * (*alloc_inode)(struct super_block *);   void (*destroy_inode)(struct inode *);   void (*dirty_inode)(struct inode *, int);   int (*write_inode)(struct inode *, struct writeback_control *);   int (*drop_inode)(struct inode *);   void (*evict_inode)(struct inode *);   void (*put_super)(struct super_block *);   int (*sync_fs)(struct super_block *, int);   int (*freeze_fs)(struct super_block *);   int (*unfreeze_fs)(struct super_block *);   int (*statfs)(struct dentry *, struct kstatfs *);   int (*remount_fs)(struct super_block *, int *, char *);   void (*umount_begin)(struct super_block *);   int (*show_options)(struct seq_file *, struct dentry *);   int (*show_devname)(struct seq_file *, struct dentry *);   int (*show_path)(struct seq_file *, struct dentry *);   int (*show_stats)(struct seq_file *, struct dentry *);   ssize_t  (*quota_read)(struct super_block *, int, char *, size_t , loff_t );   ssize_t  (*quota_write)(struct super_block *, int, const char *, size_t , loff_t );   int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t );   long int (*nr_cached_objects)(struct super_block *, int);   long int (*free_cached_objects)(struct super_block *, long, int); } ;  1749     struct file_system_type {   const char *name;   int fs_flags;   struct dentry * (*mount)(struct file_system_type *, int, const char *, void *);   void (*kill_sb)(struct super_block *);   struct module *owner;   struct file_system_type *next;   struct hlist_head fs_supers;   struct lock_class_key s_lock_key;   struct lock_class_key s_umount_key;   struct lock_class_key s_vfs_rename_key;   struct lock_class_key s_writers_key[3U];   struct lock_class_key 
i_lock_key;   struct lock_class_key i_mutex_key;   struct lock_class_key i_mutex_dir_key; } ;    84     struct plist_node {   int prio;   struct list_head prio_list;   struct list_head node_list; } ;     4     typedef unsigned long cputime_t;    25     struct sem_undo_list ;    25     struct sysv_sem {   struct sem_undo_list *undo_list; } ;    24     struct __anonstruct_sigset_t_182 {   unsigned long sig[1U]; } ;    24     typedef struct __anonstruct_sigset_t_182 sigset_t;    25     struct siginfo ;    17     typedef void __signalfn_t(int);    18     typedef __signalfn_t *__sighandler_t;    20     typedef void __restorefn_t();    21     typedef __restorefn_t *__sigrestore_t;    34     union sigval {   int sival_int;   void *sival_ptr; } ;    10     typedef union sigval sigval_t;    11     struct __anonstruct__kill_184 {   __kernel_pid_t _pid;   __kernel_uid32_t _uid; } ;    11     struct __anonstruct__timer_185 {   __kernel_timer_t _tid;   int _overrun;   char _pad[0U];   sigval_t _sigval;   int _sys_private; } ;    11     struct __anonstruct__rt_186 {   __kernel_pid_t _pid;   __kernel_uid32_t _uid;   sigval_t _sigval; } ;    11     struct __anonstruct__sigchld_187 {   __kernel_pid_t _pid;   __kernel_uid32_t _uid;   int _status;   __kernel_clock_t _utime;   __kernel_clock_t _stime; } ;    11     struct __anonstruct__sigfault_188 {   void *_addr;   short _addr_lsb; } ;    11     struct __anonstruct__sigpoll_189 {   long _band;   int _fd; } ;    11     struct __anonstruct__sigsys_190 {   void *_call_addr;   int _syscall;   unsigned int _arch; } ;    11     union __anonunion__sifields_183 {   int _pad[28U];   struct __anonstruct__kill_184 _kill;   struct __anonstruct__timer_185 _timer;   struct __anonstruct__rt_186 _rt;   struct __anonstruct__sigchld_187 _sigchld;   struct __anonstruct__sigfault_188 _sigfault;   struct __anonstruct__sigpoll_189 _sigpoll;   struct __anonstruct__sigsys_190 _sigsys; } ;    11     struct siginfo {   int si_signo;   int si_errno;   int 
si_code;   union __anonunion__sifields_183 _sifields; } ;   109     typedef struct siginfo siginfo_t;    21     struct sigpending {   struct list_head list;   sigset_t signal; } ;   246     struct sigaction {   __sighandler_t sa_handler;   unsigned long sa_flags;   __sigrestore_t sa_restorer;   sigset_t sa_mask; } ;   260     struct k_sigaction {   struct sigaction sa; } ;    46     struct seccomp_filter ;    47     struct seccomp {   int mode;   struct seccomp_filter *filter; } ;    40     struct rt_mutex_waiter ;    41     struct rlimit {   __kernel_ulong_t rlim_cur;   __kernel_ulong_t rlim_max; } ;    11     struct task_io_accounting {   u64 rchar;   u64 wchar;   u64 syscr;   u64 syscw;   u64 read_bytes;   u64 write_bytes;   u64 cancelled_write_bytes; } ;    45     struct latency_record {   unsigned long backtrace[12U];   unsigned int count;   unsigned long time;   unsigned long max; } ;    39     struct assoc_array_ptr ;    39     struct assoc_array {   struct assoc_array_ptr *root;   unsigned long nr_leaves_on_tree; } ;    31     typedef int32_t key_serial_t;    34     typedef uint32_t key_perm_t;    35     struct key ;    36     struct signal_struct ;    37     struct key_type ;    41     struct keyring_index_key {   struct key_type *type;   const char *description;   size_t desc_len; } ;   123     union __anonunion_ldv_29020_193 {   struct list_head graveyard_link;   struct rb_node serial_node; } ;   123     struct key_user ;   123     union __anonunion_ldv_29028_194 {   time_t expiry;   time_t revoked_at; } ;   123     struct __anonstruct_ldv_29041_196 {   struct key_type *type;   char *description; } ;   123     union __anonunion_ldv_29042_195 {   struct keyring_index_key index_key;   struct __anonstruct_ldv_29041_196 ldv_29041; } ;   123     union __anonunion_type_data_197 {   struct list_head link;   unsigned long x[2U];   void *p[2U];   int reject_error; } ;   123     union __anonunion_payload_199 {   unsigned long value;   void *rcudata;   void *data;  
 void *data2[2U]; } ;   123     union __anonunion_ldv_29057_198 {   union __anonunion_payload_199 payload;   struct assoc_array keys; } ;   123     struct key {   atomic_t usage;   key_serial_t serial;   union __anonunion_ldv_29020_193 ldv_29020;   struct rw_semaphore sem;   struct key_user *user;   void *security;   union __anonunion_ldv_29028_194 ldv_29028;   time_t last_used_at;   kuid_t uid;   kgid_t gid;   key_perm_t perm;   unsigned short quotalen;   unsigned short datalen;   unsigned long flags;   union __anonunion_ldv_29042_195 ldv_29042;   union __anonunion_type_data_197 type_data;   union __anonunion_ldv_29057_198 ldv_29057; } ;   356     struct audit_context ;    27     struct group_info {   atomic_t usage;   int ngroups;   int nblocks;   kgid_t small_block[32U];   kgid_t *blocks[0U]; } ;    78     struct cred {   atomic_t usage;   atomic_t subscribers;   void *put_addr;   unsigned int magic;   kuid_t uid;   kgid_t gid;   kuid_t suid;   kgid_t sgid;   kuid_t euid;   kgid_t egid;   kuid_t fsuid;   kgid_t fsgid;   unsigned int securebits;   kernel_cap_t cap_inheritable;   kernel_cap_t cap_permitted;   kernel_cap_t cap_effective;   kernel_cap_t cap_bset;   unsigned char jit_keyring;   struct key *session_keyring;   struct key *process_keyring;   struct key *thread_keyring;   struct key *request_key_auth;   void *security;   struct user_struct *user;   struct user_namespace *user_ns;   struct group_info *group_info;   struct callback_head rcu; } ;   125     struct futex_pi_state ;   126     struct robust_list_head ;   127     struct bio_list ;   128     struct fs_struct ;   129     struct perf_event_context ;   130     struct blk_plug ;   180     struct cfs_rq ;   181     struct task_group ;   426     struct sighand_struct {   atomic_t count;   struct k_sigaction action[64U];   spinlock_t siglock;   wait_queue_head_t signalfd_wqh; } ;   465     struct pacct_struct {   int ac_flag;   long ac_exitcode;   unsigned long ac_mem;   cputime_t ac_utime;   cputime_t 
ac_stime;   unsigned long ac_minflt;   unsigned long ac_majflt; } ;   473     struct cpu_itimer {   cputime_t expires;   cputime_t incr;   u32 error;   u32 incr_error; } ;   480     struct cputime {   cputime_t utime;   cputime_t stime; } ;   492     struct task_cputime {   cputime_t utime;   cputime_t stime;   unsigned long long sum_exec_runtime; } ;   512     struct thread_group_cputimer {   struct task_cputime cputime;   int running;   raw_spinlock_t lock; } ;   554     struct autogroup ;   555     struct tty_struct ;   555     struct taskstats ;   555     struct tty_audit_buf ;   555     struct signal_struct {   atomic_t sigcnt;   atomic_t live;   int nr_threads;   struct list_head thread_head;   wait_queue_head_t wait_chldexit;   struct task_struct *curr_target;   struct sigpending shared_pending;   int group_exit_code;   int notify_count;   struct task_struct *group_exit_task;   int group_stop_count;   unsigned int flags;   unsigned char is_child_subreaper;   unsigned char has_child_subreaper;   int posix_timer_id;   struct list_head posix_timers;   struct hrtimer real_timer;   struct pid *leader_pid;   ktime_t it_real_incr;   struct cpu_itimer it[2U];   struct thread_group_cputimer cputimer;   struct task_cputime cputime_expires;   struct list_head cpu_timers[3U];   struct pid *tty_old_pgrp;   int leader;   struct tty_struct *tty;   struct autogroup *autogroup;   cputime_t utime;   cputime_t stime;   cputime_t cutime;   cputime_t cstime;   cputime_t gtime;   cputime_t cgtime;   struct cputime prev_cputime;   unsigned long nvcsw;   unsigned long nivcsw;   unsigned long cnvcsw;   unsigned long cnivcsw;   unsigned long min_flt;   unsigned long maj_flt;   unsigned long cmin_flt;   unsigned long cmaj_flt;   unsigned long inblock;   unsigned long oublock;   unsigned long cinblock;   unsigned long coublock;   unsigned long maxrss;   unsigned long cmaxrss;   struct task_io_accounting ioac;   unsigned long long sum_sched_runtime;   struct rlimit rlim[16U];   struct 
pacct_struct pacct;   struct taskstats *stats;   unsigned int audit_tty;   unsigned int audit_tty_log_passwd;   struct tty_audit_buf *tty_audit_buf;   struct rw_semaphore group_rwsem;   oom_flags_t oom_flags;   short oom_score_adj;   short oom_score_adj_min;   struct mutex cred_guard_mutex; } ;   735     struct user_struct {   atomic_t __count;   atomic_t processes;   atomic_t sigpending;   atomic_t inotify_watches;   atomic_t inotify_devs;   atomic_t fanotify_listeners;   atomic_long_t epoll_watches;   unsigned long mq_bytes;   unsigned long locked_shm;   struct key *uid_keyring;   struct key *session_keyring;   struct hlist_node uidhash_node;   kuid_t uid;   atomic_long_t locked_vm; } ;   778     struct reclaim_state ;   779     struct sched_info {   unsigned long pcount;   unsigned long long run_delay;   unsigned long long last_arrival;   unsigned long long last_queued; } ;   794     struct task_delay_info {   spinlock_t lock;   unsigned int flags;   struct timespec blkio_start;   struct timespec blkio_end;   u64 blkio_delay;   u64 swapin_delay;   u32 blkio_count;   u32 swapin_count;   struct timespec freepages_start;   struct timespec freepages_end;   u64 freepages_delay;   u32 freepages_count; } ;  1061     struct load_weight {   unsigned long weight;   u32 inv_weight; } ;  1069     struct sched_avg {   u32 runnable_avg_sum;   u32 runnable_avg_period;   u64 last_runnable_update;   s64 decay_count;   unsigned long load_avg_contrib; } ;  1081     struct sched_statistics {   u64 wait_start;   u64 wait_max;   u64 wait_count;   u64 wait_sum;   u64 iowait_count;   u64 iowait_sum;   u64 sleep_start;   u64 sleep_max;   s64 sum_sleep_runtime;   u64 block_start;   u64 block_max;   u64 exec_max;   u64 slice_max;   u64 nr_migrations_cold;   u64 nr_failed_migrations_affine;   u64 nr_failed_migrations_running;   u64 nr_failed_migrations_hot;   u64 nr_forced_migrations;   u64 nr_wakeups;   u64 nr_wakeups_sync;   u64 nr_wakeups_migrate;   u64 nr_wakeups_local;   u64 
nr_wakeups_remote;   u64 nr_wakeups_affine;   u64 nr_wakeups_affine_attempts;   u64 nr_wakeups_passive;   u64 nr_wakeups_idle; } ;  1116     struct sched_entity {   struct load_weight load;   struct rb_node run_node;   struct list_head group_node;   unsigned int on_rq;   u64 exec_start;   u64 sum_exec_runtime;   u64 vruntime;   u64 prev_sum_exec_runtime;   u64 nr_migrations;   struct sched_statistics statistics;   int depth;   struct sched_entity *parent;   struct cfs_rq *cfs_rq;   struct cfs_rq *my_q;   struct sched_avg avg; } ;  1148     struct rt_rq ;  1148     struct sched_rt_entity {   struct list_head run_list;   unsigned long timeout;   unsigned long watchdog_stamp;   unsigned int time_slice;   struct sched_rt_entity *back;   struct sched_rt_entity *parent;   struct rt_rq *rt_rq;   struct rt_rq *my_q; } ;  1164     struct sched_dl_entity {   struct rb_node rb_node;   u64 dl_runtime;   u64 dl_deadline;   u64 dl_period;   u64 dl_bw;   s64 runtime;   u64 deadline;   unsigned int flags;   int dl_throttled;   int dl_new;   int dl_boosted;   int dl_yielded;   struct hrtimer dl_timer; } ;  1222     struct memcg_batch_info {   int do_batch;   struct mem_cgroup *memcg;   unsigned long nr_pages;   unsigned long memsw_nr_pages; } ;  1643     struct memcg_oom_info {   struct mem_cgroup *memcg;   gfp_t gfp_mask;   int order;   unsigned char may_oom; } ;  1650     struct sched_class ;  1650     struct css_set ;  1650     struct compat_robust_list_head ;  1650     struct numa_group ;  1650     struct ftrace_ret_stack ;  1650     struct task_struct {   volatile long state;   void *stack;   atomic_t usage;   unsigned int flags;   unsigned int ptrace;   struct llist_node wake_entry;   int on_cpu;   struct task_struct *last_wakee;   unsigned long wakee_flips;   unsigned long wakee_flip_decay_ts;   int wake_cpu;   int on_rq;   int prio;   int static_prio;   int normal_prio;   unsigned int rt_priority;   const struct sched_class *sched_class;   struct sched_entity se;   struct 
sched_rt_entity rt;   struct task_group *sched_task_group;   struct sched_dl_entity dl;   struct hlist_head preempt_notifiers;   unsigned int btrace_seq;   unsigned int policy;   int nr_cpus_allowed;   cpumask_t cpus_allowed;   struct sched_info sched_info;   struct list_head tasks;   struct plist_node pushable_tasks;   struct rb_node pushable_dl_tasks;   struct mm_struct *mm;   struct mm_struct *active_mm;   unsigned char brk_randomized;   u32 vmacache_seqnum;   struct vm_area_struct *vmacache[4U];   struct task_rss_stat rss_stat;   int exit_state;   int exit_code;   int exit_signal;   int pdeath_signal;   unsigned int jobctl;   unsigned int personality;   unsigned char in_execve;   unsigned char in_iowait;   unsigned char no_new_privs;   unsigned char sched_reset_on_fork;   unsigned char sched_contributes_to_load;   pid_t pid;   pid_t tgid;   struct task_struct *real_parent;   struct task_struct *parent;   struct list_head children;   struct list_head sibling;   struct task_struct *group_leader;   struct list_head ptraced;   struct list_head ptrace_entry;   struct pid_link pids[3U];   struct list_head thread_group;   struct list_head thread_node;   struct completion *vfork_done;   int *set_child_tid;   int *clear_child_tid;   cputime_t utime;   cputime_t stime;   cputime_t utimescaled;   cputime_t stimescaled;   cputime_t gtime;   struct cputime prev_cputime;   unsigned long nvcsw;   unsigned long nivcsw;   struct timespec start_time;   struct timespec real_start_time;   unsigned long min_flt;   unsigned long maj_flt;   struct task_cputime cputime_expires;   struct list_head cpu_timers[3U];   const struct cred *real_cred;   const struct cred *cred;   char comm[16U];   int link_count;   int total_link_count;   struct sysv_sem sysvsem;   unsigned long last_switch_count;   struct thread_struct thread;   struct fs_struct *fs;   struct files_struct *files;   struct nsproxy *nsproxy;   struct signal_struct *signal;   struct sighand_struct *sighand;   sigset_t blocked;  
 sigset_t real_blocked;   sigset_t saved_sigmask;   struct sigpending pending;   unsigned long sas_ss_sp;   size_t sas_ss_size;   int (*notifier)(void *);   void *notifier_data;   sigset_t *notifier_mask;   struct callback_head *task_works;   struct audit_context *audit_context;   kuid_t loginuid;   unsigned int sessionid;   struct seccomp seccomp;   u32 parent_exec_id;   u32 self_exec_id;   spinlock_t alloc_lock;   raw_spinlock_t pi_lock;   struct rb_root pi_waiters;   struct rb_node *pi_waiters_leftmost;   struct rt_mutex_waiter *pi_blocked_on;   struct task_struct *pi_top_task;   struct mutex_waiter *blocked_on;   unsigned int irq_events;   unsigned long hardirq_enable_ip;   unsigned long hardirq_disable_ip;   unsigned int hardirq_enable_event;   unsigned int hardirq_disable_event;   int hardirqs_enabled;   int hardirq_context;   unsigned long softirq_disable_ip;   unsigned long softirq_enable_ip;   unsigned int softirq_disable_event;   unsigned int softirq_enable_event;   int softirqs_enabled;   int softirq_context;   u64 curr_chain_key;   int lockdep_depth;   unsigned int lockdep_recursion;   struct held_lock held_locks[48U];   gfp_t lockdep_reclaim_gfp;   void *journal_info;   struct bio_list *bio_list;   struct blk_plug *plug;   struct reclaim_state *reclaim_state;   struct backing_dev_info *backing_dev_info;   struct io_context *io_context;   unsigned long ptrace_message;   siginfo_t *last_siginfo;   struct task_io_accounting ioac;   u64 acct_rss_mem1;   u64 acct_vm_mem1;   cputime_t acct_timexpd;   nodemask_t mems_allowed;   seqcount_t mems_allowed_seq;   int cpuset_mem_spread_rotor;   int cpuset_slab_spread_rotor;   struct css_set *cgroups;   struct list_head cg_list;   struct robust_list_head *robust_list;   struct compat_robust_list_head *compat_robust_list;   struct list_head pi_state_list;   struct futex_pi_state *pi_state_cache;   struct perf_event_context *perf_event_ctxp[2U];   struct mutex perf_event_mutex;   struct list_head perf_event_list;   
struct mempolicy *mempolicy;   short il_next;   short pref_node_fork;   int numa_scan_seq;   unsigned int numa_scan_period;   unsigned int numa_scan_period_max;   int numa_preferred_nid;   unsigned long numa_migrate_retry;   u64 node_stamp;   u64 last_task_numa_placement;   u64 last_sum_exec_runtime;   struct callback_head numa_work;   struct list_head numa_entry;   struct numa_group *numa_group;   unsigned long *numa_faults_memory;   unsigned long total_numa_faults;   unsigned long *numa_faults_buffer_memory;   unsigned long *numa_faults_cpu;   unsigned long *numa_faults_buffer_cpu;   unsigned long numa_faults_locality[2U];   unsigned long numa_pages_migrated;   struct callback_head rcu;   struct pipe_inode_info *splice_pipe;   struct page_frag task_frag;   struct task_delay_info *delays;   int make_it_fail;   int nr_dirtied;   int nr_dirtied_pause;   unsigned long dirty_paused_when;   int latency_record_count;   struct latency_record latency_record[32U];   unsigned long timer_slack_ns;   unsigned long default_timer_slack_ns;   int curr_ret_stack;   struct ftrace_ret_stack *ret_stack;   unsigned long long ftrace_timestamp;   atomic_t trace_overrun;   atomic_t tracing_graph_pause;   unsigned long trace;   unsigned long trace_recursion;   struct memcg_batch_info memcg_batch;   unsigned int memcg_kmem_skip_account;   struct memcg_oom_info memcg_oom;   struct uprobe_task *utask;   unsigned int sequential_io;   unsigned int sequential_io_avg; } ;  2998     struct usb_device ;  3000     struct wusb_dev ;  3001     struct ep_device ;  3002     struct usb_host_endpoint {   struct usb_endpoint_descriptor desc;   struct usb_ss_ep_comp_descriptor ss_ep_comp;   struct list_head urb_list;   void *hcpriv;   struct ep_device *ep_dev;   unsigned char *extra;   int extralen;   int enabled;   int streams; } ;    77     struct usb_host_interface {   struct usb_interface_descriptor desc;   int extralen;   unsigned char *extra;   struct usb_host_endpoint *endpoint;   char *string; } ; 
   92     enum usb_interface_condition {   USB_INTERFACE_UNBOUND = 0,   USB_INTERFACE_BINDING = 1,   USB_INTERFACE_BOUND = 2,   USB_INTERFACE_UNBINDING = 3 } ;    99     struct usb_interface {   struct usb_host_interface *altsetting;   struct usb_host_interface *cur_altsetting;   unsigned int num_altsetting;   struct usb_interface_assoc_descriptor *intf_assoc;   int minor;   enum usb_interface_condition condition;   unsigned char sysfs_files_created;   unsigned char ep_devs_created;   unsigned char unregistering;   unsigned char needs_remote_wakeup;   unsigned char needs_altsetting0;   unsigned char needs_binding;   unsigned char reset_running;   unsigned char resetting_device;   struct device dev;   struct device *usb_dev;   atomic_t pm_usage_cnt;   struct work_struct reset_ws; } ;   206     struct usb_interface_cache {   unsigned int num_altsetting;   struct kref ref;   struct usb_host_interface altsetting[0U]; } ;   235     struct usb_host_config {   struct usb_config_descriptor desc;   char *string;   struct usb_interface_assoc_descriptor *intf_assoc[16U];   struct usb_interface *interface[32U];   struct usb_interface_cache *intf_cache[32U];   unsigned char *extra;   int extralen; } ;   299     struct usb_host_bos {   struct usb_bos_descriptor *desc;   struct usb_ext_cap_descriptor *ext_cap;   struct usb_ss_cap_descriptor *ss_cap;   struct usb_ss_container_id_descriptor *ss_id; } ;   311     struct usb_devmap {   unsigned long devicemap[2U]; } ;   323     struct mon_bus ;   323     struct usb_bus {   struct device *controller;   int busnum;   const char *bus_name;   u8 uses_dma;   u8 uses_pio_for_control;   u8 otg_port;   unsigned char is_b_host;   unsigned char b_hnp_enable;   unsigned char no_stop_on_short;   unsigned char no_sg_constraint;   unsigned int sg_tablesize;   int devnum_next;   struct usb_devmap devmap;   struct usb_device *root_hub;   struct usb_bus *hs_companion;   struct list_head bus_list;   struct mutex usb_address0_mutex;   int 
bandwidth_allocated;   int bandwidth_int_reqs;   int bandwidth_isoc_reqs;   unsigned int resuming_ports;   struct mon_bus *mon_bus;   int monitored; } ;   374     struct usb_tt ;   375     enum usb_device_removable {   USB_DEVICE_REMOVABLE_UNKNOWN = 0,   USB_DEVICE_REMOVABLE = 1,   USB_DEVICE_FIXED = 2 } ;   388     struct usb2_lpm_parameters {   unsigned int besl;   int timeout; } ;   409     struct usb3_lpm_parameters {   unsigned int mel;   unsigned int pel;   unsigned int sel;   int timeout; } ;   448     struct usb_device {   int devnum;   char devpath[16U];   u32 route;   enum usb_device_state state;   enum usb_device_speed speed;   struct usb_tt *tt;   int ttport;   unsigned int toggle[2U];   struct usb_device *parent;   struct usb_bus *bus;   struct usb_host_endpoint ep0;   struct device dev;   struct usb_device_descriptor descriptor;   struct usb_host_bos *bos;   struct usb_host_config *config;   struct usb_host_config *actconfig;   struct usb_host_endpoint *ep_in[16U];   struct usb_host_endpoint *ep_out[16U];   char **rawdescriptors;   unsigned short bus_mA;   u8 portnum;   u8 level;   unsigned char can_submit;   unsigned char persist_enabled;   unsigned char have_langid;   unsigned char authorized;   unsigned char authenticated;   unsigned char wusb;   unsigned char lpm_capable;   unsigned char usb2_hw_lpm_capable;   unsigned char usb2_hw_lpm_besl_capable;   unsigned char usb2_hw_lpm_enabled;   unsigned char usb2_hw_lpm_allowed;   unsigned char usb3_lpm_enabled;   int string_langid;   char *product;   char *manufacturer;   char *serial;   struct list_head filelist;   int maxchild;   u32 quirks;   atomic_t urbnum;   unsigned long active_duration;   unsigned long connect_time;   unsigned char do_remote_wakeup;   unsigned char reset_resume;   unsigned char port_is_suspended;   struct wusb_dev *wusb_dev;   int slot_id;   enum usb_device_removable removable;   struct usb2_lpm_parameters l1_params;   struct usb3_lpm_parameters u1_params;   struct 
usb3_lpm_parameters u2_params;   unsigned int lpm_disable_count; } ;  1873     enum usb_phy_events {   USB_EVENT_NONE = 0,   USB_EVENT_VBUS = 1,   USB_EVENT_ID = 2,   USB_EVENT_CHARGER = 3,   USB_EVENT_ENUMERATED = 4 } ;  1881     enum usb_phy_type {   USB_PHY_TYPE_UNDEFINED = 0,   USB_PHY_TYPE_USB2 = 1,   USB_PHY_TYPE_USB3 = 2 } ;  1887     enum usb_otg_state {   OTG_STATE_UNDEFINED = 0,   OTG_STATE_B_IDLE = 1,   OTG_STATE_B_SRP_INIT = 2,   OTG_STATE_B_PERIPHERAL = 3,   OTG_STATE_B_WAIT_ACON = 4,   OTG_STATE_B_HOST = 5,   OTG_STATE_A_IDLE = 6,   OTG_STATE_A_WAIT_VRISE = 7,   OTG_STATE_A_WAIT_BCON = 8,   OTG_STATE_A_HOST = 9,   OTG_STATE_A_SUSPEND = 10,   OTG_STATE_A_PERIPHERAL = 11,   OTG_STATE_A_WAIT_VFALL = 12,   OTG_STATE_A_VBUS_ERR = 13 } ;  1904     struct usb_phy ;  1905     struct usb_otg ;  1906     struct usb_phy_io_ops {   int (*read)(struct usb_phy *, u32 );   int (*write)(struct usb_phy *, u32 , u32 ); } ;    73     struct usb_phy {   struct device *dev;   const char *label;   unsigned int flags;   enum usb_phy_type type;   enum usb_otg_state state;   enum usb_phy_events last_event;   struct usb_otg *otg;   struct device *io_dev;   struct usb_phy_io_ops *io_ops;   void *io_priv;   struct atomic_notifier_head notifier;   u16 port_status;   u16 port_change;   struct list_head head;   int (*init)(struct usb_phy *);   void (*shutdown)(struct usb_phy *);   int (*set_vbus)(struct usb_phy *, int);   int (*set_power)(struct usb_phy *, unsigned int);   int (*set_suspend)(struct usb_phy *, int);   int (*set_wakeup)(struct usb_phy *, bool );   int (*notify_connect)(struct usb_phy *, enum usb_device_speed );   int (*notify_disconnect)(struct usb_phy *, enum usb_device_speed ); } ;   325     struct usb_otg {   u8 default_a;   struct usb_phy *phy;   struct usb_bus *host;   struct usb_gadget *gadget;   int (*set_host)(struct usb_otg *, struct usb_bus *);   int (*set_peripheral)(struct usb_otg *, struct usb_gadget *);   int (*set_vbus)(struct usb_otg *, bool );   int 
(*start_srp)(struct usb_otg *);   int (*start_hnp)(struct usb_otg *); } ;    94     enum usb_dr_mode {   USB_DR_MODE_UNKNOWN = 0,   USB_DR_MODE_HOST = 1,   USB_DR_MODE_PERIPHERAL = 2,   USB_DR_MODE_OTG = 3 } ;    37     struct dwc3_platform_data {   enum usb_device_speed maximum_speed;   enum usb_dr_mode dr_mode;   bool tx_fifo_resize; } ;    29     struct debugfs_reg32 {   char *name;   unsigned long offset; } ;    34     struct debugfs_regset32 {   const struct debugfs_reg32 *regs;   int nregs;   void *base; } ;   103     struct phy ;   104     struct phy_ops {   int (*init)(struct phy *);   int (*exit)(struct phy *);   int (*power_on)(struct phy *);   int (*power_off)(struct phy *);   struct module *owner; } ;    39     struct phy_attrs {   u32 bus_width; } ;    47     struct phy_init_data ;    47     struct phy {   struct device dev;   int id;   const struct phy_ops *ops;   struct phy_init_data *init_data;   struct mutex mutex;   int init_count;   int power_count;   struct phy_attrs attrs; } ;    83     struct phy_consumer {   const char *dev_name;   const char *port; } ;    94     struct phy_init_data {   unsigned int num_consumers;   struct phy_consumer *consumers; } ;   173     struct dwc3_trb ;   174     struct dwc3 ;   174     struct dwc3_event_buffer {   void *buf;   unsigned int length;   unsigned int lpos;   unsigned int count;   unsigned int flags;   dma_addr_t dma;   struct dwc3 *dwc; } ;   379     struct dwc3_ep {   struct usb_ep endpoint;   struct list_head request_list;   struct list_head req_queued;   struct dwc3_trb *trb_pool;   dma_addr_t trb_pool_dma;   u32 free_slot;   u32 busy_slot;   const struct usb_ss_ep_comp_descriptor *comp_desc;   struct dwc3 *dwc;   u32 saved_state;   unsigned int flags;   unsigned int current_trb;   u8 number;   u8 type;   u8 resource_index;   u32 interval;   char name[20U];   unsigned char direction;   unsigned char stream_capable; } ;   453     enum dwc3_ep0_next {   DWC3_EP0_UNKNOWN = 0,   DWC3_EP0_COMPLETE = 1,   
DWC3_EP0_NRDY_DATA = 2,   DWC3_EP0_NRDY_STATUS = 3 } ;   460     enum dwc3_ep0_state {   EP0_UNCONNECTED = 0,   EP0_SETUP_PHASE = 1,   EP0_DATA_PHASE = 2,   EP0_STATUS_PHASE = 3 } ;   467     enum dwc3_link_state {   DWC3_LINK_STATE_U0 = 0,   DWC3_LINK_STATE_U1 = 1,   DWC3_LINK_STATE_U2 = 2,   DWC3_LINK_STATE_U3 = 3,   DWC3_LINK_STATE_SS_DIS = 4,   DWC3_LINK_STATE_RX_DET = 5,   DWC3_LINK_STATE_SS_INACT = 6,   DWC3_LINK_STATE_POLL = 7,   DWC3_LINK_STATE_RECOV = 8,   DWC3_LINK_STATE_HRESET = 9,   DWC3_LINK_STATE_CMPLY = 10,   DWC3_LINK_STATE_LPBK = 11,   DWC3_LINK_STATE_RESET = 14,   DWC3_LINK_STATE_RESUME = 15,   DWC3_LINK_STATE_MASK = 15 } ;   485     struct dwc3_trb {   u32 bpl;   u32 bph;   u32 size;   u32 ctrl; } ;   530     struct dwc3_hwparams {   u32 hwparams0;   u32 hwparams1;   u32 hwparams2;   u32 hwparams3;   u32 hwparams4;   u32 hwparams5;   u32 hwparams6;   u32 hwparams7;   u32 hwparams8; } ;   554     struct dwc3_request {   struct usb_request request;   struct list_head list;   struct dwc3_ep *dep;   u32 start_slot;   u8 epnum;   struct dwc3_trb *trb;   dma_addr_t trb_dma;   unsigned char direction;   unsigned char mapped;   unsigned char queued; } ;   596     struct dwc3 {   struct usb_ctrlrequest *ctrl_req;   struct dwc3_trb *ep0_trb;   void *ep0_bounce;   void *scratchbuf;   u8 *setup_buf;   dma_addr_t ctrl_req_addr;   dma_addr_t ep0_trb_addr;   dma_addr_t ep0_bounce_addr;   dma_addr_t scratch_addr;   struct dwc3_request ep0_usb_req;   spinlock_t lock;   struct device *dev;   struct platform_device *xhci;   struct resource xhci_resources[2U];   struct dwc3_event_buffer **ev_buffs;   struct dwc3_ep *eps[32U];   struct usb_gadget gadget;   struct usb_gadget_driver *gadget_driver;   struct usb_phy *usb2_phy;   struct usb_phy *usb3_phy;   struct phy *usb2_generic_phy;   struct phy *usb3_generic_phy;   void *regs;   size_t regs_size;   enum usb_dr_mode dr_mode;   u32 dcfg;   u32 gctl;   u32 nr_scratch;   u32 num_event_buffers;   u32 u1u2;   u32 
maximum_speed;   u32 revision;   enum dwc3_ep0_next ep0_next_event;   enum dwc3_ep0_state ep0state;   enum dwc3_link_state link_state;   u16 isoch_delay;   u16 u2sel;   u16 u2pel;   u8 u1sel;   u8 u1pel;   u8 speed;   u8 num_out_eps;   u8 num_in_eps;   void *mem;   struct dwc3_hwparams hwparams;   struct dentry *root;   struct debugfs_regset32 *regset;   u8 test_mode;   u8 test_mode_nr;   unsigned char delayed_status;   unsigned char ep0_bounced;   unsigned char ep0_expect_in;   unsigned char has_hibernation;   unsigned char is_selfpowered;   unsigned char needs_fifo_resize;   unsigned char pullups_connected;   unsigned char resize_fifos;   unsigned char setup_packet_pending;   unsigned char start_config_issued;   unsigned char three_stage_setup; } ;   272     enum irqreturn {   IRQ_NONE = 0,   IRQ_HANDLED = 1,   IRQ_WAKE_THREAD = 2 } ;    16     typedef enum irqreturn irqreturn_t;   760     struct dwc3_event_type {   unsigned char is_devspec;   unsigned char type;   unsigned int reserved8_31; } ;   770     struct dwc3_event_depevt {   unsigned char one_bit;   unsigned char endpoint_number;   unsigned char endpoint_event;   unsigned char reserved11_10;   unsigned char status;   unsigned short parameters; } ;   823     struct dwc3_event_devt {   unsigned char one_bit;   unsigned char device_event;   unsigned char type;   unsigned char reserved15_12;   unsigned short event_info;   unsigned char reserved31_25; } ;   854     struct dwc3_event_gevt {   unsigned char one_bit;   unsigned char device_event;   unsigned char phy_port_number;   unsigned int reserved31_12; } ;   868     union dwc3_event {   u32 raw;   struct dwc3_event_type type;   struct dwc3_event_depevt depevt;   struct dwc3_event_devt devt;   struct dwc3_event_gevt gevt; } ;   884     struct dwc3_gadget_ep_cmd_params {   u32 param2;   u32 param1;   u32 param0; } ;   596     struct timing {   u8 u1sel;   u8 u1pel;   u16 u2sel;   u16 u2pel; } ;  1061     struct uts_namespace ;  2112     struct mnt_namespace 
;  2113     struct ipc_namespace ;  2114     struct net ;  2114     struct nsproxy {   atomic_t count;   struct uts_namespace *uts_ns;   struct ipc_namespace *ipc_ns;   struct mnt_namespace *mnt_ns;   struct pid_namespace *pid_ns_for_children;   struct net *net_ns; } ;    88     struct pidmap {   atomic_t nr_free;   void *page; } ;    16     struct bsd_acct_struct ;    17     struct pid_namespace {   struct kref kref;   struct pidmap pidmap[128U];   struct callback_head rcu;   int last_pid;   unsigned int nr_hashed;   struct task_struct *child_reaper;   struct kmem_cache *pid_cachep;   unsigned int level;   struct pid_namespace *parent;   struct vfsmount *proc_mnt;   struct dentry *proc_self;   struct bsd_acct_struct *bacct;   struct user_namespace *user_ns;   struct work_struct proc_work;   kgid_t pid_gid;   int hide_pid;   int reboot;   unsigned int proc_inum; } ;   332     struct page___0 ;   332     typedef struct page___0 *pgtable_t___0;    44     struct __anonstruct____missing_field_name_211 {   unsigned int inuse;   unsigned int objects;   unsigned int frozen; } ;    44     union __anonunion____missing_field_name_210 {   atomic_t _mapcount;   struct __anonstruct____missing_field_name_211 __annonCompField39;   int units; } ;    44     struct __anonstruct____missing_field_name_209 {   union __anonunion____missing_field_name_210 __annonCompField40;   atomic_t _count; } ;    44     union __anonunion____missing_field_name_208 {   unsigned long counters;   struct __anonstruct____missing_field_name_209 __annonCompField41;   unsigned int active; } ;    44     struct __anonstruct____missing_field_name_206 {   union __anonunion_ldv_14126_140 __annonCompField38;   union __anonunion____missing_field_name_208 __annonCompField42; } ;    44     struct __anonstruct____missing_field_name_213 {   struct page___0 *next;   int pages;   int pobjects; } ;    44     union __anonunion____missing_field_name_212 {   struct list_head lru;   struct 
__anonstruct____missing_field_name_213 __annonCompField44;   struct slab *slab_page;   struct callback_head callback_head;   pgtable_t___0 pmd_huge_pte; } ;    44     union __anonunion____missing_field_name_214 {   unsigned long private;   spinlock_t *ptl;   struct kmem_cache___0 *slab_cache;   struct page___0 *first_page; } ;    44     struct page___0 {   unsigned long flags;   union __anonunion_ldv_14120_138 __annonCompField37;   struct __anonstruct____missing_field_name_206 __annonCompField43;   union __anonunion____missing_field_name_212 __annonCompField45;   union __anonunion____missing_field_name_214 __annonCompField46;   unsigned long debug_flags; } ;    29     struct attribute___0 {   const char *name;   umode_t mode;   bool ignore_lockdep;   struct lock_class_key *key;   struct lock_class_key skey; } ;   175     struct sysfs_ops___0 {   ssize_t  (*show)(struct kobject___0 *, struct attribute___0 *, char *);   ssize_t  (*store)(struct kobject___0 *, struct attribute___0 *, const char *, size_t ); } ;    63     struct kobject___0 {   const char *name;   struct list_head entry;   struct kobject___0 *parent;   struct kset *kset;   struct kobj_type___0 *ktype;   struct kernfs_node *sd;   struct kref kref;   struct delayed_work release;   unsigned int state_initialized;   unsigned int state_in_sysfs;   unsigned int state_add_uevent_sent;   unsigned int state_remove_uevent_sent;   unsigned int uevent_suppress; } ;   115     struct kobj_type___0 {   void (*release)(struct kobject___0 *kobj);   const struct sysfs_ops___0 *sysfs_ops;   struct attribute___0 **default_attrs;   const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject___0 *kobj);   const void * (*namespace)(struct kobject___0 *kobj); } ;    40     struct kmem_cache_cpu___0 {   void **freelist;   unsigned long tid;   struct page___0 *page;   struct page___0 *partial;   unsigned int stat[26]; } ;    62     struct kmem_cache___0 {   struct kmem_cache_cpu___0 *cpu_slab;   unsigned long flags;  
 unsigned long min_partial;   int size;   int object_size;   int offset;   int cpu_partial;   struct kmem_cache_order_objects oo;   struct kmem_cache_order_objects max;   struct kmem_cache_order_objects min;   gfp_t allocflags;   int refcount;   void (*ctor)(void *);   int inuse;   int align;   int reserved;   const char *name;   struct list_head list;   struct kobject___0 kobj;   struct memcg_cache_params___0 *memcg_params;   int max_attr_size;   struct kset *memcg_kset;   int remote_node_defrag_ratio;   struct kmem_cache_node *node[1024]; } ;   531     struct __anonstruct____missing_field_name_227 {   struct callback_head callback_head;   struct kmem_cache___0 *memcg_caches[0]; } ;   531     struct __anonstruct____missing_field_name_228 {   struct mem_cgroup *memcg;   struct list_head list;   struct kmem_cache___0 *root_cache;   atomic_t nr_pages; } ;   531     union __anonunion____missing_field_name_226 {   struct __anonstruct____missing_field_name_227 __annonCompField50;   struct __anonstruct____missing_field_name_228 __annonCompField51; } ;   531     struct memcg_cache_params___0 {   bool is_root_cache;   union __anonunion____missing_field_name_226 __annonCompField52; } ;     1     long int __builtin_expect(long exp, long c);     2     void ldv_spin_lock();     3     void ldv_spin_unlock();   358     extern struct pv_irq_ops pv_irq_ops;    53     int __dynamic_dev_dbg(struct _ddebug *, const struct device *, const char *, ...);    71     void warn_slowpath_null(const char *, const int);    23     unsigned long int __phys_addr(unsigned long);   802     unsigned long int arch_local_save_flags();    23     void * ERR_PTR(long error);    28     long int PTR_ERR(const void *ptr);    33     bool  IS_ERR(const void *ptr);   155     int arch_irqs_disabled_flags(unsigned long flags);   689     void rep_nop();   694     void cpu_relax();    93     void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *);    43     void 
_raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long);   290     raw_spinlock_t * spinlock_check(spinlock_t *lock);   372     void ldv_spin_unlock_irqrestore_8(spinlock_t *lock, unsigned long flags);   372     void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);   164     resource_size_t  resource_size(const struct resource *res);    77     extern volatile unsigned long jiffies;   303     unsigned long int msecs_to_jiffies(const unsigned int);    56     unsigned int readl(const volatile void *addr);    64     void writel(unsigned int val, volatile void *addr);   144     void kfree(const void *);   315     void * __kmalloc(size_t , gfp_t );   316     void * kmem_cache_alloc(struct kmem_cache *, gfp_t );   319     void * ldv_kmem_cache_alloc_16(struct kmem_cache *ldv_func_arg1, gfp_t flags);   563     void * kmalloc_array(size_t n, size_t size, gfp_t flags);    11     void ldv_check_alloc_flags(gfp_t flags);   607     void * devm_kmalloc(struct device *, size_t , gfp_t );   608     void * devm_kzalloc(struct device *dev, size_t size, gfp_t gfp);   633     void * devm_ioremap_resource(struct device *, struct resource *);   837     void * dev_get_drvdata(const struct device *dev);   842     void dev_set_drvdata(struct device *dev, void *data);   954     void * dev_get_platdata(const struct device *dev);  1042     int dev_err(const struct device *, const char *, ...);    50     struct resource * platform_get_resource(struct platform_device *, unsigned int, unsigned int);   202     void * platform_get_drvdata(const struct platform_device *pdev);   207     void platform_set_drvdata(struct platform_device *pdev, void *data);    42     int __pm_runtime_idle(struct device *, int);    44     int __pm_runtime_resume(struct device *, int);    48     void pm_runtime_enable(struct device *);    49     void __pm_runtime_disable(struct device *, bool );    50     void pm_runtime_allow(struct device *);    51     void pm_runtime_forbid(struct device *);   217   
  int pm_runtime_get_sync(struct device *dev);   233     int pm_runtime_put_sync(struct device *dev);   258     void pm_runtime_disable(struct device *dev);    10     void __const_udelay(unsigned long);    69     int valid_dma_direction(int dma_direction);    76     int is_device_dma_capable(struct device *dev);   131     void kmemcheck_mark_initialized(void *address, unsigned int n);    37     void debug_dma_map_page(struct device *, struct page *, size_t , size_t , int, dma_addr_t , bool );    42     void debug_dma_mapping_error(struct device *, dma_addr_t );    44     void debug_dma_unmap_page(struct device *, dma_addr_t , size_t , int, bool );    53     void debug_dma_alloc_coherent(struct device *, size_t , dma_addr_t , void *);    56     void debug_dma_free_coherent(struct device *, size_t , void *, dma_addr_t );    27     extern struct device x86_dma_fallback_dev;    30     extern struct dma_map_ops *dma_ops;    32     struct dma_map_ops * get_dma_ops(struct device *dev);    10     dma_addr_t  dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);    29     void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);    47     int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);    60     int dma_supported(struct device *, u64 );   103     unsigned long int dma_alloc_coherent_mask(struct device *dev, gfp_t gfp);   115     gfp_t  dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp);   131     void * dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs);   160     void dma_free_attrs(struct device *dev, size_t size, void *vaddr, dma_addr_t bus, struct dma_attrs *attrs);    97     int dma_set_coherent_mask(struct device *dev, u64 mask);   435     struct property * of_find_property(const struct device_node *np, const char *name, int *lenp);   677     bool  
of_property_read_bool(const struct device_node *np, const char *propname);   167     int usb_phy_init(struct usb_phy *x);   176     void usb_phy_shutdown(struct usb_phy *x);   203     struct usb_phy * devm_usb_get_phy(struct device *, enum usb_phy_type );   207     struct usb_phy * devm_usb_get_phy_by_phandle(struct device *, const char *, u8 );   266     int usb_phy_set_suspend(struct usb_phy *x, int suspend);    18     enum usb_dr_mode  of_usb_get_dr_mode(struct device_node *np);    24     enum usb_device_speed  of_usb_get_maximum_speed(struct device_node *np);   136     int phy_init(struct phy *);   137     int phy_exit(struct phy *);   138     int phy_power_on(struct phy *);   139     int phy_power_off(struct phy *);   150     struct phy * devm_phy_get(struct device *, const char *);   907     void dwc3_set_mode(struct dwc3 *dwc, u32 mode);   911     int dwc3_host_init(struct dwc3 *dwc);   912     void dwc3_host_exit(struct dwc3 *dwc);   921     int dwc3_gadget_init(struct dwc3 *dwc);   922     void dwc3_gadget_exit(struct dwc3 *dwc);   928     int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param);   952     int dwc3_gadget_prepare(struct dwc3 *dwc);   953     void dwc3_gadget_complete(struct dwc3 *dwc);    26     u32  dwc3_readl(void *base, u32 offset);    36     void dwc3_writel(void *base, u32 offset, u32 value);    22     int dwc3_debugfs_init(struct dwc3 *dwc);    23     void dwc3_debugfs_exit(struct dwc3 *dwc);    79     int dwc3_core_soft_reset(struct dwc3 *dwc);   137     void dwc3_free_one_event_buffer(struct dwc3 *dwc, struct dwc3_event_buffer *evt);   151     struct dwc3_event_buffer * dwc3_alloc_one_event_buffer(struct dwc3 *dwc, unsigned int length);   174     void dwc3_free_event_buffers(struct dwc3 *dwc);   194     int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned int length);   229     int dwc3_event_buffers_setup(struct dwc3 *dwc);   254     void dwc3_event_buffers_cleanup(struct dwc3 *dwc);   272     int 
dwc3_alloc_scratch_buffers(struct dwc3 *dwc);   288     int dwc3_setup_scratch_buffers(struct dwc3 *dwc);   339     void dwc3_free_scratch_buffers(struct dwc3 *dwc);   356     void dwc3_core_num_eps(struct dwc3 *dwc);   367     void dwc3_cache_hwparams(struct dwc3 *dwc);   388     int dwc3_core_init(struct dwc3 *dwc);   495     void dwc3_core_exit(struct dwc3 *dwc);   504     int dwc3_core_get_phy(struct dwc3 *dwc);   571     int dwc3_core_init_mode(struct dwc3 *dwc);   615     void dwc3_core_exit_mode(struct dwc3 *dwc);   636     int dwc3_probe(struct platform_device *pdev);   800     int dwc3_remove(struct platform_device *pdev);   822     int dwc3_prepare(struct device *dev);   845     void dwc3_complete(struct device *dev);   998     void ldv_check_final_state();  1001     void ldv_check_return_value(int);  1004     void ldv_check_return_value_probe(int);  1007     void ldv_initialize();  1010     void ldv_handler_precall();  1013     int nondet_int();  1016     int LDV_IN_INTERRUPT = 0;  1019     void ldv_main0_sequence_infinite_withcheck_stateful();   319     void * ldv_kmem_cache_alloc_36(struct kmem_cache *ldv_func_arg1, gfp_t flags);    44     void platform_device_unregister(struct platform_device *);   163     struct platform_device * platform_device_alloc(const char *, int);   164     int platform_device_add_resources(struct platform_device *, const struct resource *, unsigned int);   169     int platform_device_add(struct platform_device *);   171     void platform_device_put(struct platform_device *);    45     int __dynamic_pr_debug(struct _ddebug *, const char *, ...);   391     int snprintf(char *, size_t , const char *, ...);    24     void INIT_LIST_HEAD(struct list_head *list);    47     void __list_add(struct list_head *, struct list_head *, struct list_head *);    74     void list_add_tail(struct list_head *new, struct list_head *head);   111     void __list_del_entry(struct list_head *);   112     void list_del(struct list_head *);   164     
void list_move_tail(struct list_head *list, struct list_head *head);   176     int list_is_last(const struct list_head *list, const struct list_head *head);   186     int list_empty(const struct list_head *head);    66     void warn_slowpath_fmt(const char *, const int, const char *, ...);    55     void * memset(void *, int, size_t );    35     size_t  strlcat(char *, const char *, __kernel_size_t );    22     void _raw_spin_lock(raw_spinlock_t *);    39     void _raw_spin_unlock(raw_spinlock_t *);   301     void ldv_spin_lock_41(spinlock_t *lock);   301     void spin_lock(spinlock_t *lock);   345     void ldv_spin_unlock_45(spinlock_t *lock);   345     void spin_unlock(spinlock_t *lock);    86     const char * kobject_name(const struct kobject *kobj);   319     void * ldv_kmem_cache_alloc_56(struct kmem_cache *ldv_func_arg1, gfp_t flags);   641     void * kzalloc(size_t size, gfp_t flags);   806     const char * dev_name(const struct device *dev);  1021     const char * dev_driver_string(const struct device *);    52     int platform_get_irq(struct platform_device *, unsigned int);   123     int request_threaded_irq(unsigned int, irqreturn_t  (*)(int, void *), irqreturn_t  (*)(int, void *), unsigned long, const char *, void *);   142     void free_irq(unsigned int, void *);   224     struct scatterlist * sg_next(struct scatterlist *);   437     int usb_endpoint_type(const struct usb_endpoint_descriptor *epd);   471     int usb_endpoint_xfer_bulk(const struct usb_endpoint_descriptor *epd);   512     int usb_endpoint_xfer_isoc(const struct usb_endpoint_descriptor *epd);   603     int usb_endpoint_maxp(const struct usb_endpoint_descriptor *epd);   630     int usb_ss_max_streams(const struct usb_ss_ep_comp_descriptor *comp);   196     void usb_ep_set_maxpacket_limit(struct usb_ep *ep, unsigned int maxpacket_limit);   922     int usb_add_gadget_udc(struct device *, struct usb_gadget *);   923     void usb_del_gadget_udc(struct usb_gadget *);  1001     int 
usb_gadget_map_request(struct usb_gadget *, struct usb_request *, int);  1004     void usb_gadget_unmap_request(struct usb_gadget *, struct usb_request *, int);  1011     void usb_gadget_set_state(struct usb_gadget *, enum usb_device_state );   908     int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc);   923     int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode);   924     int dwc3_gadget_get_link_state(struct dwc3 *dwc);   925     int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state);   926     int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned int ep, unsigned int cmd, struct dwc3_gadget_ep_cmd_params *params);   954     int dwc3_gadget_suspend(struct dwc3 *dwc);   955     int dwc3_gadget_resume(struct dwc3 *dwc);    63     struct dwc3_request * next_request(struct list_head *list);    71     void dwc3_gadget_move_request_queued(struct dwc3_request *req);    79     void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, int status);    84     void dwc3_ep0_out_start(struct dwc3 *dwc);    85     int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);    86     int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request, gfp_t gfp_flags);    88     int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value);    97     u32  dwc3_gadget_ep_get_transfer_index(struct dwc3 *dwc, u8 number);   290     const char * dwc3_gadget_ep_cmd_string(u8 cmd);   316     const char * dwc3_gadget_generic_cmd_string(u8 cmd);   342     const char * dwc3_gadget_link_string(enum dwc3_link_state link_state);   445     dma_addr_t  dwc3_trb_dma_offset(struct dwc3_ep *dep, struct dwc3_trb *trb);   453     int dwc3_alloc_trb_pool(struct dwc3_ep *dep);   475     void dwc3_free_trb_pool(struct dwc3_ep *dep);   486     int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep);   509     int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep, const struct usb_endpoint_descriptor *desc, const struct 
usb_ss_ep_comp_descriptor *comp_desc, bool ignore, bool restore);   572     int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);   591     int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, const struct usb_endpoint_descriptor *desc, const struct usb_ss_ep_comp_descriptor *comp_desc, bool ignore, bool restore);   649     void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);   650     void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep);   680     int __dwc3_gadget_ep_disable(struct dwc3_ep *dep);   706     int dwc3_gadget_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc);   712     int dwc3_gadget_ep0_disable(struct usb_ep *ep);   719     int dwc3_gadget_ep_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc);   770     int dwc3_gadget_ep_disable(struct usb_ep *ep);   802     struct usb_request * dwc3_gadget_ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags);   821     void dwc3_gadget_ep_free_request(struct usb_ep *ep, struct usb_request *request);   834     void dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_request *req, dma_addr_t dma, unsigned int length, unsigned int last, unsigned int chain, unsigned int node);   918     void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting);  1029     int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param, int start_new);  1133     int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req);  1232     int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, gfp_t gfp_flags);  1259     int dwc3_gadget_ep_dequeue(struct usb_ep *ep, struct usb_request *request);  1333     int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value);  1357     int dwc3_gadget_ep_set_wedge(struct usb_ep *ep);  1375     struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = { 7U, 5U, 0U, 0U, 0U, 0U, 0U, 0U };  1381     const struct usb_ep_ops dwc3_gadget_ep0_ops = { &dwc3_gadget_ep0_enable, 
&dwc3_gadget_ep0_disable, &dwc3_gadget_ep_alloc_request, &dwc3_gadget_ep_free_request, &dwc3_gadget_ep0_queue, &dwc3_gadget_ep_dequeue, &dwc3_gadget_ep0_set_halt, &dwc3_gadget_ep_set_wedge, 0, 0 };  1392     const struct usb_ep_ops dwc3_gadget_ep_ops = { &dwc3_gadget_ep_enable, &dwc3_gadget_ep_disable, &dwc3_gadget_ep_alloc_request, &dwc3_gadget_ep_free_request, &dwc3_gadget_ep_queue, &dwc3_gadget_ep_dequeue, &dwc3_gadget_ep_set_halt, &dwc3_gadget_ep_set_wedge, 0, 0 };  1405     int dwc3_gadget_get_frame(struct usb_gadget *g);  1414     int dwc3_gadget_wakeup(struct usb_gadget *g);  1494     int dwc3_gadget_set_selfpowered(struct usb_gadget *g, int is_selfpowered);  1507     int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend);  1561     int dwc3_gadget_pullup(struct usb_gadget *g, int is_on);  1576     void dwc3_gadget_enable_irq(struct dwc3 *dwc);  1594     void dwc3_gadget_disable_irq(struct dwc3 *dwc);  1600     irqreturn_t  dwc3_interrupt(int irq, void *_dwc);  1601     irqreturn_t  dwc3_thread_interrupt(int irq, void *_dwc);  1603     int dwc3_gadget_start(struct usb_gadget *g, struct usb_gadget_driver *driver);  1717     int dwc3_gadget_stop(struct usb_gadget *g, struct usb_gadget_driver *driver);  1740     const struct usb_gadget_ops dwc3_gadget_ops = { &dwc3_gadget_get_frame, &dwc3_gadget_wakeup, &dwc3_gadget_set_selfpowered, 0, 0, &dwc3_gadget_pullup, 0, 0, &dwc3_gadget_start, &dwc3_gadget_stop };  1751     int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc, u8 num, u32 direction);  1806     int dwc3_gadget_init_endpoints(struct dwc3 *dwc);  1827     void dwc3_gadget_free_endpoints(struct dwc3 *dwc);  2117     void dwc3_disconnect_gadget(struct dwc3 *dwc);  2126     void dwc3_suspend_gadget(struct dwc3 *dwc);  2135     void dwc3_resume_gadget(struct dwc3 *dwc);  2187     void dwc3_stop_active_transfers(struct dwc3 *dwc);  2205     void dwc3_clear_stall_all_ep(struct dwc3 *dwc);  2230     void dwc3_gadget_disconnect_interrupt(struct dwc3 
*dwc);  2250     void dwc3_gadget_reset_interrupt(struct dwc3 *dwc);  2308     void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed);  2333     void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc);  2437     void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc);  2449     void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, unsigned int evtinfo);  2552     void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc, unsigned int evtinfo);  2576     void dwc3_gadget_interrupt(struct dwc3 *dwc, const struct dwc3_event_devt *event);  2622     void dwc3_process_event_entry(struct dwc3 *dwc, const union dwc3_event *event);  2641     irqreturn_t  dwc3_process_event_buf(struct dwc3 *dwc, u32 buf);  2705     irqreturn_t  dwc3_check_event_buf(struct dwc3 *dwc, u32 buf);  2961     void ldv_main2_sequence_infinite_withcheck_stateful();     1     void * __builtin_memcpy(void *, const void *, unsigned long);    34     void * __memcpy(void *, const void *, size_t );   319     void * ldv_kmem_cache_alloc_76(struct kmem_cache *ldv_func_arg1, gfp_t flags);    82     void dwc3_ep0_interrupt(struct dwc3 *dwc, const struct dwc3_event_depevt *event);   142     const char * dwc3_ep_event_string(u8 event);    51     void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep);    52     void __dwc3_ep0_do_control_data(struct dwc3 *dwc, struct dwc3_ep *dep, struct dwc3_request *req);    55     const char * dwc3_ep0_state_string(enum dwc3_ep0_state state);    71     int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma, u32 len, u32 type);   118     int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep, struct dwc3_request *req);   258     void dwc3_ep0_stall_and_restart(struct dwc3 *dwc);   302     struct dwc3_ep * dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le);   319     void dwc3_ep0_status_cmpl(struct usb_ep *ep, struct usb_request *req);   325     int dwc3_ep0_handle_status(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl);   383     int 
dwc3_ep0_handle_feature(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl, int set);   494     int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl);   524     int dwc3_ep0_delegate_req(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl);   534     int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl);   589     void dwc3_ep0_set_sel_cmpl(struct usb_ep *ep, struct usb_request *req);   633     int dwc3_ep0_set_sel(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl);   669     int dwc3_ep0_set_isoch_delay(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl);   691     int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl);   733     void dwc3_ep0_inspect_setup(struct dwc3 *dwc, const struct dwc3_event_depevt *event);   767     void dwc3_ep0_complete_data(struct dwc3 *dwc, const struct dwc3_event_depevt *event);   829     void dwc3_ep0_complete_status(struct dwc3 *dwc, const struct dwc3_event_depevt *event);   866     void dwc3_ep0_xfer_complete(struct dwc3 *dwc, const struct dwc3_event_depevt *event);   948     int dwc3_ep0_start_control_status(struct dwc3_ep *dep);   971     void dwc3_ep0_do_control_status(struct dwc3 *dwc, const struct dwc3_event_depevt *event);   979     void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep);   997     void dwc3_ep0_xfernotready(struct dwc3 *dwc, const struct dwc3_event_depevt *event);     1     unsigned long int __builtin_object_size(void *, int);   230     void might_fault();    41     int strncmp(const char *, const char *, __kernel_size_t );   319     void * ldv_kmem_cache_alloc_96(struct kmem_cache *ldv_func_arg1, gfp_t flags);    99     ssize_t  seq_read(struct file *, char *, size_t , loff_t *);   100     loff_t  seq_lseek(struct file *, loff_t , int);   107     int seq_printf(struct seq_file *, const char *, ...);   140     int single_open(struct file *, int (*)(struct seq_file *, void *), void *);   142     int single_release(struct inode *, struct file *);    49     
struct dentry * debugfs_create_file(const char *, umode_t , struct dentry *, void *, const struct file_operations *);    53     struct dentry * debugfs_create_dir(const char *, struct dentry *);    59     void debugfs_remove_recursive(struct dentry *);    91     struct dentry * debugfs_create_regset32(const char *, umode_t , struct dentry *, struct debugfs_regset32 *);   645     unsigned long int _copy_from_user(void *, const void *, unsigned int);   667     void __copy_from_user_overflow();   688     unsigned long int copy_from_user(void *to, const void *from, unsigned long n);    57     const struct debugfs_reg32 dwc3_regs[302U] = { { (char *)"GSBUSCFG0", 0UL }, { (char *)"GSBUSCFG1", 4UL }, { (char *)"GTXTHRCFG", 8UL }, { (char *)"GRXTHRCFG", 12UL }, { (char *)"GCTL", 16UL }, { (char *)"GEVTEN", 20UL }, { (char *)"GSTS", 24UL }, { (char *)"GSNPSID", 32UL }, { (char *)"GGPIO", 36UL }, { (char *)"GUID", 40UL }, { (char *)"GUCTL", 44UL }, { (char *)"GBUSERRADDR0", 48UL }, { (char *)"GBUSERRADDR1", 52UL }, { (char *)"GPRTBIMAP0", 56UL }, { (char *)"GPRTBIMAP1", 60UL }, { (char *)"GHWPARAMS0", 64UL }, { (char *)"GHWPARAMS1", 68UL }, { (char *)"GHWPARAMS2", 72UL }, { (char *)"GHWPARAMS3", 76UL }, { (char *)"GHWPARAMS4", 80UL }, { (char *)"GHWPARAMS5", 84UL }, { (char *)"GHWPARAMS6", 88UL }, { (char *)"GHWPARAMS7", 92UL }, { (char *)"GDBGFIFOSPACE", 96UL }, { (char *)"GDBGLTSSM", 100UL }, { (char *)"GPRTBIMAP_HS0", 128UL }, { (char *)"GPRTBIMAP_HS1", 132UL }, { (char *)"GPRTBIMAP_FS0", 136UL }, { (char *)"GPRTBIMAP_FS1", 140UL }, { (char *)"GUSB2PHYCFG(0)", 256UL }, { (char *)"GUSB2PHYCFG(1)", 260UL }, { (char *)"GUSB2PHYCFG(2)", 264UL }, { (char *)"GUSB2PHYCFG(3)", 268UL }, { (char *)"GUSB2PHYCFG(4)", 272UL }, { (char *)"GUSB2PHYCFG(5)", 276UL }, { (char *)"GUSB2PHYCFG(6)", 280UL }, { (char *)"GUSB2PHYCFG(7)", 284UL }, { (char *)"GUSB2PHYCFG(8)", 288UL }, { (char *)"GUSB2PHYCFG(9)", 292UL }, { (char *)"GUSB2PHYCFG(10)", 296UL }, { (char *)"GUSB2PHYCFG(11)", 300UL }, { 
(char *)"GUSB2PHYCFG(12)", 304UL }, { (char *)"GUSB2PHYCFG(13)", 308UL }, { (char *)"GUSB2PHYCFG(14)", 312UL }, { (char *)"GUSB2PHYCFG(15)", 316UL }, { (char *)"GUSB2I2CCTL(0)", 320UL }, { (char *)"GUSB2I2CCTL(1)", 324UL }, { (char *)"GUSB2I2CCTL(2)", 328UL }, { (char *)"GUSB2I2CCTL(3)", 332UL }, { (char *)"GUSB2I2CCTL(4)", 336UL }, { (char *)"GUSB2I2CCTL(5)", 340UL }, { (char *)"GUSB2I2CCTL(6)", 344UL }, { (char *)"GUSB2I2CCTL(7)", 348UL }, { (char *)"GUSB2I2CCTL(8)", 352UL }, { (char *)"GUSB2I2CCTL(9)", 356UL }, { (char *)"GUSB2I2CCTL(10)", 360UL }, { (char *)"GUSB2I2CCTL(11)", 364UL }, { (char *)"GUSB2I2CCTL(12)", 368UL }, { (char *)"GUSB2I2CCTL(13)", 372UL }, { (char *)"GUSB2I2CCTL(14)", 376UL }, { (char *)"GUSB2I2CCTL(15)", 380UL }, { (char *)"GUSB2PHYACC(0)", 384UL }, { (char *)"GUSB2PHYACC(1)", 388UL }, { (char *)"GUSB2PHYACC(2)", 392UL }, { (char *)"GUSB2PHYACC(3)", 396UL }, { (char *)"GUSB2PHYACC(4)", 400UL }, { (char *)"GUSB2PHYACC(5)", 404UL }, { (char *)"GUSB2PHYACC(6)", 408UL }, { (char *)"GUSB2PHYACC(7)", 412UL }, { (char *)"GUSB2PHYACC(8)", 416UL }, { (char *)"GUSB2PHYACC(9)", 420UL }, { (char *)"GUSB2PHYACC(10)", 424UL }, { (char *)"GUSB2PHYACC(11)", 428UL }, { (char *)"GUSB2PHYACC(12)", 432UL }, { (char *)"GUSB2PHYACC(13)", 436UL }, { (char *)"GUSB2PHYACC(14)", 440UL }, { (char *)"GUSB2PHYACC(15)", 444UL }, { (char *)"GUSB3PIPECTL(0)", 448UL }, { (char *)"GUSB3PIPECTL(1)", 452UL }, { (char *)"GUSB3PIPECTL(2)", 456UL }, { (char *)"GUSB3PIPECTL(3)", 460UL }, { (char *)"GUSB3PIPECTL(4)", 464UL }, { (char *)"GUSB3PIPECTL(5)", 468UL }, { (char *)"GUSB3PIPECTL(6)", 472UL }, { (char *)"GUSB3PIPECTL(7)", 476UL }, { (char *)"GUSB3PIPECTL(8)", 480UL }, { (char *)"GUSB3PIPECTL(9)", 484UL }, { (char *)"GUSB3PIPECTL(10)", 488UL }, { (char *)"GUSB3PIPECTL(11)", 492UL }, { (char *)"GUSB3PIPECTL(12)", 496UL }, { (char *)"GUSB3PIPECTL(13)", 500UL }, { (char *)"GUSB3PIPECTL(14)", 504UL }, { (char *)"GUSB3PIPECTL(15)", 508UL }, { (char *)"GTXFIFOSIZ(0)", 512UL }, { 
(char *)"GTXFIFOSIZ(1)", 516UL }, { (char *)"GTXFIFOSIZ(2)", 520UL }, { (char *)"GTXFIFOSIZ(3)", 524UL }, { (char *)"GTXFIFOSIZ(4)", 528UL }, { (char *)"GTXFIFOSIZ(5)", 532UL }, { (char *)"GTXFIFOSIZ(6)", 536UL }, { (char *)"GTXFIFOSIZ(7)", 540UL }, { (char *)"GTXFIFOSIZ(8)", 544UL }, { (char *)"GTXFIFOSIZ(9)", 548UL }, { (char *)"GTXFIFOSIZ(10)", 552UL }, { (char *)"GTXFIFOSIZ(11)", 556UL }, { (char *)"GTXFIFOSIZ(12)", 560UL }, { (char *)"GTXFIFOSIZ(13)", 564UL }, { (char *)"GTXFIFOSIZ(14)", 568UL }, { (char *)"GTXFIFOSIZ(15)", 572UL }, { (char *)"GTXFIFOSIZ(16)", 576UL }, { (char *)"GTXFIFOSIZ(17)", 580UL }, { (char *)"GTXFIFOSIZ(18)", 584UL }, { (char *)"GTXFIFOSIZ(19)", 588UL }, { (char *)"GTXFIFOSIZ(20)", 592UL }, { (char *)"GTXFIFOSIZ(21)", 596UL }, { (char *)"GTXFIFOSIZ(22)", 600UL }, { (char *)"GTXFIFOSIZ(23)", 604UL }, { (char *)"GTXFIFOSIZ(24)", 608UL }, { (char *)"GTXFIFOSIZ(25)", 612UL }, { (char *)"GTXFIFOSIZ(26)", 616UL }, { (char *)"GTXFIFOSIZ(27)", 620UL }, { (char *)"GTXFIFOSIZ(28)", 624UL }, { (char *)"GTXFIFOSIZ(29)", 628UL }, { (char *)"GTXFIFOSIZ(30)", 632UL }, { (char *)"GTXFIFOSIZ(31)", 636UL }, { (char *)"GRXFIFOSIZ(0)", 640UL }, { (char *)"GRXFIFOSIZ(1)", 644UL }, { (char *)"GRXFIFOSIZ(2)", 648UL }, { (char *)"GRXFIFOSIZ(3)", 652UL }, { (char *)"GRXFIFOSIZ(4)", 656UL }, { (char *)"GRXFIFOSIZ(5)", 660UL }, { (char *)"GRXFIFOSIZ(6)", 664UL }, { (char *)"GRXFIFOSIZ(7)", 668UL }, { (char *)"GRXFIFOSIZ(8)", 672UL }, { (char *)"GRXFIFOSIZ(9)", 676UL }, { (char *)"GRXFIFOSIZ(10)", 680UL }, { (char *)"GRXFIFOSIZ(11)", 684UL }, { (char *)"GRXFIFOSIZ(12)", 688UL }, { (char *)"GRXFIFOSIZ(13)", 692UL }, { (char *)"GRXFIFOSIZ(14)", 696UL }, { (char *)"GRXFIFOSIZ(15)", 700UL }, { (char *)"GRXFIFOSIZ(16)", 704UL }, { (char *)"GRXFIFOSIZ(17)", 708UL }, { (char *)"GRXFIFOSIZ(18)", 712UL }, { (char *)"GRXFIFOSIZ(19)", 716UL }, { (char *)"GRXFIFOSIZ(20)", 720UL }, { (char *)"GRXFIFOSIZ(21)", 724UL }, { (char *)"GRXFIFOSIZ(22)", 728UL }, { (char 
*)"GRXFIFOSIZ(23)", 732UL }, { (char *)"GRXFIFOSIZ(24)", 736UL }, { (char *)"GRXFIFOSIZ(25)", 740UL }, { (char *)"GRXFIFOSIZ(26)", 744UL }, { (char *)"GRXFIFOSIZ(27)", 748UL }, { (char *)"GRXFIFOSIZ(28)", 752UL }, { (char *)"GRXFIFOSIZ(29)", 756UL }, { (char *)"GRXFIFOSIZ(30)", 760UL }, { (char *)"GRXFIFOSIZ(31)", 764UL }, { (char *)"GEVNTADRLO(0)", 768UL }, { (char *)"GEVNTADRHI(0)", 772UL }, { (char *)"GEVNTSIZ(0)", 776UL }, { (char *)"GEVNTCOUNT(0)", 780UL }, { (char *)"GHWPARAMS8", 1280UL }, { (char *)"DCFG", 1536UL }, { (char *)"DCTL", 1540UL }, { (char *)"DEVTEN", 1544UL }, { (char *)"DSTS", 1548UL }, { (char *)"DGCMDPAR", 1552UL }, { (char *)"DGCMD", 1556UL }, { (char *)"DALEPENA", 1568UL }, { (char *)"DEPCMDPAR2(0)", 1792UL }, { (char *)"DEPCMDPAR2(1)", 1808UL }, { (char *)"DEPCMDPAR2(2)", 1824UL }, { (char *)"DEPCMDPAR2(3)", 1840UL }, { (char *)"DEPCMDPAR2(4)", 1856UL }, { (char *)"DEPCMDPAR2(5)", 1872UL }, { (char *)"DEPCMDPAR2(6)", 1888UL }, { (char *)"DEPCMDPAR2(7)", 1904UL }, { (char *)"DEPCMDPAR2(8)", 1920UL }, { (char *)"DEPCMDPAR2(9)", 1936UL }, { (char *)"DEPCMDPAR2(10)", 1952UL }, { (char *)"DEPCMDPAR2(11)", 1968UL }, { (char *)"DEPCMDPAR2(12)", 1984UL }, { (char *)"DEPCMDPAR2(13)", 2000UL }, { (char *)"DEPCMDPAR2(14)", 2016UL }, { (char *)"DEPCMDPAR2(15)", 2032UL }, { (char *)"DEPCMDPAR2(16)", 2048UL }, { (char *)"DEPCMDPAR2(17)", 2064UL }, { (char *)"DEPCMDPAR2(18)", 2080UL }, { (char *)"DEPCMDPAR2(19)", 2096UL }, { (char *)"DEPCMDPAR2(20)", 2112UL }, { (char *)"DEPCMDPAR2(21)", 2128UL }, { (char *)"DEPCMDPAR2(22)", 2144UL }, { (char *)"DEPCMDPAR2(23)", 2160UL }, { (char *)"DEPCMDPAR2(24)", 2176UL }, { (char *)"DEPCMDPAR2(25)", 2192UL }, { (char *)"DEPCMDPAR2(26)", 2208UL }, { (char *)"DEPCMDPAR2(27)", 2224UL }, { (char *)"DEPCMDPAR2(28)", 2240UL }, { (char *)"DEPCMDPAR2(29)", 2256UL }, { (char *)"DEPCMDPAR2(30)", 2272UL }, { (char *)"DEPCMDPAR2(31)", 2288UL }, { (char *)"DEPCMDPAR1(0)", 1796UL }, { (char *)"DEPCMDPAR1(1)", 1812UL }, { (char 
*)"DEPCMDPAR1(2)", 1828UL }, { (char *)"DEPCMDPAR1(3)", 1844UL }, { (char *)"DEPCMDPAR1(4)", 1860UL }, { (char *)"DEPCMDPAR1(5)", 1876UL }, { (char *)"DEPCMDPAR1(6)", 1892UL }, { (char *)"DEPCMDPAR1(7)", 1908UL }, { (char *)"DEPCMDPAR1(8)", 1924UL }, { (char *)"DEPCMDPAR1(9)", 1940UL }, { (char *)"DEPCMDPAR1(10)", 1956UL }, { (char *)"DEPCMDPAR1(11)", 1972UL }, { (char *)"DEPCMDPAR1(12)", 1988UL }, { (char *)"DEPCMDPAR1(13)", 2004UL }, { (char *)"DEPCMDPAR1(14)", 2020UL }, { (char *)"DEPCMDPAR1(15)", 2036UL }, { (char *)"DEPCMDPAR1(16)", 2052UL }, { (char *)"DEPCMDPAR1(17)", 2068UL }, { (char *)"DEPCMDPAR1(18)", 2084UL }, { (char *)"DEPCMDPAR1(19)", 2100UL }, { (char *)"DEPCMDPAR1(20)", 2116UL }, { (char *)"DEPCMDPAR1(21)", 2132UL }, { (char *)"DEPCMDPAR1(22)", 2148UL }, { (char *)"DEPCMDPAR1(23)", 2164UL }, { (char *)"DEPCMDPAR1(24)", 2180UL }, { (char *)"DEPCMDPAR1(25)", 2196UL }, { (char *)"DEPCMDPAR1(26)", 2212UL }, { (char *)"DEPCMDPAR1(27)", 2228UL }, { (char *)"DEPCMDPAR1(28)", 2244UL }, { (char *)"DEPCMDPAR1(29)", 2260UL }, { (char *)"DEPCMDPAR1(30)", 2276UL }, { (char *)"DEPCMDPAR1(31)", 2292UL }, { (char *)"DEPCMDPAR0(0)", 1800UL }, { (char *)"DEPCMDPAR0(1)", 1816UL }, { (char *)"DEPCMDPAR0(2)", 1832UL }, { (char *)"DEPCMDPAR0(3)", 1848UL }, { (char *)"DEPCMDPAR0(4)", 1864UL }, { (char *)"DEPCMDPAR0(5)", 1880UL }, { (char *)"DEPCMDPAR0(6)", 1896UL }, { (char *)"DEPCMDPAR0(7)", 1912UL }, { (char *)"DEPCMDPAR0(8)", 1928UL }, { (char *)"DEPCMDPAR0(9)", 1944UL }, { (char *)"DEPCMDPAR0(10)", 1960UL }, { (char *)"DEPCMDPAR0(11)", 1976UL }, { (char *)"DEPCMDPAR0(12)", 1992UL }, { (char *)"DEPCMDPAR0(13)", 2008UL }, { (char *)"DEPCMDPAR0(14)", 2024UL }, { (char *)"DEPCMDPAR0(15)", 2040UL }, { (char *)"DEPCMDPAR0(16)", 2056UL }, { (char *)"DEPCMDPAR0(17)", 2072UL }, { (char *)"DEPCMDPAR0(18)", 2088UL }, { (char *)"DEPCMDPAR0(19)", 2104UL }, { (char *)"DEPCMDPAR0(20)", 2120UL }, { (char *)"DEPCMDPAR0(21)", 2136UL }, { (char *)"DEPCMDPAR0(22)", 2152UL }, { (char 
*)"DEPCMDPAR0(23)", 2168UL }, { (char *)"DEPCMDPAR0(24)", 2184UL }, { (char *)"DEPCMDPAR0(25)", 2200UL }, { (char *)"DEPCMDPAR0(26)", 2216UL }, { (char *)"DEPCMDPAR0(27)", 2232UL }, { (char *)"DEPCMDPAR0(28)", 2248UL }, { (char *)"DEPCMDPAR0(29)", 2264UL }, { (char *)"DEPCMDPAR0(30)", 2280UL }, { (char *)"DEPCMDPAR0(31)", 2296UL }, { (char *)"DEPCMD(0)", 1804UL }, { (char *)"DEPCMD(1)", 1820UL }, { (char *)"DEPCMD(2)", 1836UL }, { (char *)"DEPCMD(3)", 1852UL }, { (char *)"DEPCMD(4)", 1868UL }, { (char *)"DEPCMD(5)", 1884UL }, { (char *)"DEPCMD(6)", 1900UL }, { (char *)"DEPCMD(7)", 1916UL }, { (char *)"DEPCMD(8)", 1932UL }, { (char *)"DEPCMD(9)", 1948UL }, { (char *)"DEPCMD(10)", 1964UL }, { (char *)"DEPCMD(11)", 1980UL }, { (char *)"DEPCMD(12)", 1996UL }, { (char *)"DEPCMD(13)", 2012UL }, { (char *)"DEPCMD(14)", 2028UL }, { (char *)"DEPCMD(15)", 2044UL }, { (char *)"DEPCMD(16)", 2060UL }, { (char *)"DEPCMD(17)", 2076UL }, { (char *)"DEPCMD(18)", 2092UL }, { (char *)"DEPCMD(19)", 2108UL }, { (char *)"DEPCMD(20)", 2124UL }, { (char *)"DEPCMD(21)", 2140UL }, { (char *)"DEPCMD(22)", 2156UL }, { (char *)"DEPCMD(23)", 2172UL }, { (char *)"DEPCMD(24)", 2188UL }, { (char *)"DEPCMD(25)", 2204UL }, { (char *)"DEPCMD(26)", 2220UL }, { (char *)"DEPCMD(27)", 2236UL }, { (char *)"DEPCMD(28)", 2252UL }, { (char *)"DEPCMD(29)", 2268UL }, { (char *)"DEPCMD(30)", 2284UL }, { (char *)"DEPCMD(31)", 2300UL }, { (char *)"OCFG", 2816UL }, { (char *)"OCTL", 2820UL }, { (char *)"OEVT", 2824UL }, { (char *)"OEVTEN", 2828UL }, { (char *)"OSTS", 2832UL } };   375     int dwc3_mode_show(struct seq_file *s, void *unused);   402     int dwc3_mode_open(struct inode *inode, struct file *file);   407     ssize_t  dwc3_mode_write(struct file *file, const char *ubuf, size_t count, loff_t *ppos);   436     const struct file_operations dwc3_mode_fops = { 0, &seq_lseek, &seq_read, &dwc3_mode_write, 0, 0, 0, 0, 0, 0, 0, 0, 0, &dwc3_mode_open, 0, &single_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };   
444     int dwc3_testmode_show(struct seq_file *s, void *unused);   482     int dwc3_testmode_open(struct inode *inode, struct file *file);   487     ssize_t  dwc3_testmode_write(struct file *file, const char *ubuf, size_t count, loff_t *ppos);   519     const struct file_operations dwc3_testmode_fops = { 0, &seq_lseek, &seq_read, &dwc3_testmode_write, 0, 0, 0, 0, 0, 0, 0, 0, 0, &dwc3_testmode_open, 0, &single_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };   527     int dwc3_link_state_show(struct seq_file *s, void *unused);   589     int dwc3_link_state_open(struct inode *inode, struct file *file);   594     ssize_t  dwc3_link_state_write(struct file *file, const char *ubuf, size_t count, loff_t *ppos);   628     const struct file_operations dwc3_link_state_fops = { 0, &seq_lseek, &seq_read, &dwc3_link_state_write, 0, 0, 0, 0, 0, 0, 0, 0, 0, &dwc3_link_state_open, 0, &single_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };   744     void ldv_main4_sequence_infinite_withcheck_stateful();    10     void ldv_error();    25     int ldv_undef_int();    59     void __builtin_trap();    20     int ldv_spin = 0;    30     struct page___0 * ldv_some_page();    33     struct page___0 * ldv_check_alloc_flags_and_return_some_page(gfp_t flags);    42     void ldv_check_alloc_nonatomic();    63     int ldv_spin_trylock();           return ;         }        {      2963     struct usb_ep *var_group1;  2964     const struct usb_endpoint_descriptor *var_dwc3_gadget_ep0_enable_19_p1;  2965     gfp_t var_dwc3_gadget_ep_alloc_request_23_p1;  2966     struct usb_request *var_group2;  2967     const struct usb_endpoint_descriptor *var_dwc3_gadget_ep_enable_21_p1;  2968     gfp_t var_dwc3_gadget_ep_queue_31_p2;  2969     int var_dwc3_gadget_ep_set_halt_34_p1;  2970     struct usb_gadget *var_group3;  2971     int var_dwc3_gadget_set_selfpowered_38_p1;  2972     int var_dwc3_gadget_pullup_40_p1;  2973     struct usb_gadget_driver *var_group4;  2974     int var_dwc3_interrupt_70_p0;  
2975     void *var_dwc3_interrupt_70_p1;  2976     int var_dwc3_thread_interrupt_68_p0;  2977     void *var_dwc3_thread_interrupt_68_p1;  2978     int tmp;  2979     int tmp___0;  3055     LDV_IN_INTERRUPT = 1;  3064     ldv_initialize() { /* Function call is skipped due to function is undefined */}  3074     goto ldv_33014;  3074     tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}  3076     goto ldv_33013;  3075     ldv_33013:;  3077     tmp = nondet_int() { /* Function call is skipped due to function is undefined */}  3077     switch (tmp) 3172     ldv_handler_precall() { /* Function call is skipped due to function is undefined */}           {  1359       struct dwc3_ep *dep;  1360       const struct usb_ep *__mptr;  1361       struct dwc3 *dwc;  1362       unsigned long flags;  1363       int tmp;  1364       int tmp___0;  1359       __mptr = (const struct usb_ep *)ep;  1359       dep = (struct dwc3_ep *)__mptr;  1360       dwc = dep->dwc;  1364       dep->flags = (dep->flags) | 4U;             {               {             }  374           _raw_spin_unlock_irqrestore(&(lock->ldv_6347.rlock), flags) { /* Function call is skipped due to function is undefined */}               } 1367       unsigned int __CPAchecker_TMP_0 = (unsigned int)(dep->number);  1367       unsigned int __CPAchecker_TMP_1 = (unsigned int)(dep->number);             {           }  285         struct dwc3_ep *dep;   286         const struct usb_ep *__mptr;   287         struct dwc3 *dwc;   285         __mptr = (const struct usb_ep *)ep;   285         dep = (struct dwc3_ep *)__mptr;   286         dwc = dep->dwc;               {             }  260           struct dwc3_ep *dep;   261           struct dwc3_request *req;   262           int tmp;   263           dep = (dwc->eps)[1];   264           dep->flags = 1U;   267           dep = (dwc->eps)[0];                 {  1306             struct dwc3_gadget_ep_cmd_params params;  1307             struct dwc3 *dwc; 
 1308             int ret;  1307             dwc = dep->dwc;  1310             memset((void *)(¶ms), 0, 12UL) { /* Function call is skipped due to function is undefined */}  1313             unsigned int __CPAchecker_TMP_0 = (unsigned int)(dep->number);                   {   411               struct dwc3_ep *dep;   412               u32 timeout;   413               u32 reg;   414               struct _ddebug descriptor;   415               const char *tmp;   416               long tmp___0;   417               struct _ddebug descriptor___0;   418               long tmp___1;   411               dep = (dwc->eps)[ep];   412               timeout = 500U;   415               descriptor.modname = "dwc3";   415               descriptor.function = "dwc3_send_gadget_ep_cmd";   415               descriptor.filename = "/work/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-3.16-rc1.tar.xz--X--39_7a--X--43_1a--X--cpachecker/linux-3.16-rc1.tar.xz/csd_deg_dscv/2620/dscv_tempdir/dscv/ri/43_1a/drivers/usb/dwc3/gadget.o.c.prepared";   415               descriptor.format = "%s: cmd \'%s\' [%d] params %08x %08x %08x\n";   415               descriptor.lineno = 418U;   415               descriptor.flags = 1U;   415               const struct device *__CPAchecker_TMP_0 = (const struct device *)(dwc->dev);   415               __dynamic_dev_dbg(&descriptor, __CPAchecker_TMP_0, "%s: cmd \'%s\' [%d] params %08x %08x %08x\n", (char *)(&(dep->name)), tmp, cmd, params->param0, params->param1, params->param2) { /* Function call is skipped due to function is undefined */}                     {                       { 64 Ignored inline assembler code    65                   return ;;                       }   44                 return ;;                     }                    {                       { 64 Ignored inline assembler code    65                   return ;;                       }   44                 return ;;                     }                    {                      
 { 64 Ignored inline assembler code    65                   return ;;                       }   44                 return ;;                     }                    {                       { 64 Ignored inline assembler code    65                   return ;;                       }   44                 return ;;                     }  425               ldv_32216:;                     {    28                 unsigned int tmp;                       {    58                   unsigned int ret;    56                   Ignored inline assembler code   56                   return ret;;                       }   33                 return tmp;;                     }  428               descriptor___0.modname = "dwc3";   428               descriptor___0.function = "dwc3_send_gadget_ep_cmd";   428               descriptor___0.filename = "/work/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-3.16-rc1.tar.xz--X--39_7a--X--43_1a--X--cpachecker/linux-3.16-rc1.tar.xz/csd_deg_dscv/2620/dscv_tempdir/dscv/ri/43_1a/drivers/usb/dwc3/gadget.o.c.prepared";   428               descriptor___0.format = "Command Complete --> %d\n";   428               descriptor___0.lineno = 429U;   428               descriptor___0.flags = 1U;   428               const struct device *__CPAchecker_TMP_1 = (const struct device *)(dwc->dev);   428               __dynamic_dev_dbg(&descriptor___0, __CPAchecker_TMP_1, "Command Complete --> %d\n", (reg >> 15) & 1U) { /* Function call is skipped due to function is undefined */}                   } 1319             dep->flags = (dep->flags) | 2U;                 }  269           dep->flags = 1U;   270           dwc->delayed_status = 0U;                 {    65             int tmp;    66             const struct list_head *__mptr;    65             assume(!(tmp != 0));    68             const struct list_head *__CPAchecker_TMP_0 = (const struct list_head *)(list->next);    68             __mptr = __CPAchecker_TMP_0;    68             return ((struct 
dwc3_request *)__mptr) + 18446744073709551528UL;;                 }                {   249             struct dwc3 *dwc;   250             int i;   251             int tmp;   252             struct _ddebug descriptor;   253             long tmp___0;   250             dwc = dep->dwc;   269             list_del(&(req->list)) { /* Function call is skipped due to function is undefined */}   270             req->trb = (struct dwc3_trb *)0;   273             req->request.status = status;   278             int __CPAchecker_TMP_1 = (int)(req->direction);   278             usb_gadget_unmap_request(&(dwc->gadget), &(req->request), __CPAchecker_TMP_1) { /* Function call is skipped due to function is undefined */}   281             descriptor.modname = "dwc3";   281             descriptor.function = "dwc3_gadget_giveback";   281             descriptor.filename = "/work/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-3.16-rc1.tar.xz--X--39_7a--X--43_1a--X--cpachecker/linux-3.16-rc1.tar.xz/csd_deg_dscv/2620/dscv_tempdir/dscv/ri/43_1a/drivers/usb/dwc3/gadget.o.c.prepared";   281             descriptor.format = "request %p from %s completed %d/%d ===> %d\n";   281             descriptor.lineno = 283U;   281             descriptor.flags = 1U;   281             const struct device *__CPAchecker_TMP_2 = (const struct device *)(dwc->dev);   281             __dynamic_dev_dbg(&descriptor, __CPAchecker_TMP_2, "request %p from %s completed %d/%d ===> %d\n", req, (char *)(&(dep->name)), req->request.actual, req->request.length, status) { /* Function call is skipped due to function is undefined */}                   {                     {                   }  347                 _raw_spin_unlock(&(lock->ldv_6347.rlock)) { /* Function call is skipped due to function is undefined */}                     }  286             (*(req->request.complete))(&(dep->endpoint), &(req->request));                   {                 }                    {                   }  303              
   _raw_spin_lock(&(lock->ldv_6347.rlock)) { /* Function call is skipped due to function is undefined */}                     }  279           dwc->ep0state = 1;                 {               }  295             int ret;   296             int __ret_warn_on;   297             long tmp;                   {    74               struct dwc3_gadget_ep_cmd_params params;    75               struct dwc3_trb *trb;    76               struct dwc3_ep *dep;    77               int ret;    78               struct _ddebug descriptor;    79               long tmp;    80               struct _ddebug descriptor___0;    81               long tmp___0;    82               u32 tmp___1;    80               dep = (dwc->eps)[(int)epnum];    86               trb = dwc->ep0_trb;    88               trb->bpl = (unsigned int)buf_dma;    89               trb->bph = (unsigned int)(buf_dma >> 32ULL);    90               trb->size = len;    91               trb->ctrl = type;    93               trb->ctrl = (trb->ctrl) | 3075U;    98               memset((void *)(¶ms), 0, 12UL) { /* Function call is skipped due to function is undefined */}    99               params.param0 = (unsigned int)((dwc->ep0_trb_addr) >> 32ULL);   100               unsigned int __CPAchecker_TMP_1 = (unsigned int)(dwc->ep0_trb_addr);   100               params.param1 = __CPAchecker_TMP_1;   102               unsigned int __CPAchecker_TMP_2 = (unsigned int)(dep->number);                     {   411                 struct dwc3_ep *dep;   412                 u32 timeout;   413                 u32 reg;   414                 struct _ddebug descriptor;   415                 const char *tmp;   416                 long tmp___0;   417                 struct _ddebug descriptor___0;   418                 long tmp___1;   411                 dep = (dwc->eps)[ep];   412                 timeout = 500U;   415                 descriptor.modname = "dwc3";   415                 descriptor.function = "dwc3_send_gadget_ep_cmd";   415           
      descriptor.filename = "/work/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-3.16-rc1.tar.xz--X--39_7a--X--43_1a--X--cpachecker/linux-3.16-rc1.tar.xz/csd_deg_dscv/2620/dscv_tempdir/dscv/ri/43_1a/drivers/usb/dwc3/gadget.o.c.prepared";   415                 descriptor.format = "%s: cmd \'%s\' [%d] params %08x %08x %08x\n";   415                 descriptor.lineno = 418U;   415                 descriptor.flags = 1U;   415                 const struct device *__CPAchecker_TMP_0 = (const struct device *)(dwc->dev);   415                 __dynamic_dev_dbg(&descriptor, __CPAchecker_TMP_0, "%s: cmd \'%s\' [%d] params %08x %08x %08x\n", (char *)(&(dep->name)), tmp, cmd, params->param0, params->param1, params->param2) { /* Function call is skipped due to function is undefined */}                       {                         { 64 Ignored inline assembler code    65                     return ;;                         }   44                   return ;;                       }                      {                         { 64 Ignored inline assembler code    65                     return ;;                         }   44                   return ;;                       }                      {                         { 64 Ignored inline assembler code    65                     return ;;                         }   44                   return ;;                       }                      {                         { 64 Ignored inline assembler code    65                     return ;;                         }   44                   return ;;                       }  425                 ldv_32216:;                       {    28                   unsigned int tmp;                         {    58                     unsigned int ret;    56                     Ignored inline assembler code   56                     return ret;;                         }   33                   return tmp;;                       }  428                 descriptor___0.modname = 
"dwc3";   428                 descriptor___0.function = "dwc3_send_gadget_ep_cmd";   428                 descriptor___0.filename = "/work/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-3.16-rc1.tar.xz--X--39_7a--X--43_1a--X--cpachecker/linux-3.16-rc1.tar.xz/csd_deg_dscv/2620/dscv_tempdir/dscv/ri/43_1a/drivers/usb/dwc3/gadget.o.c.prepared";   428                 descriptor___0.format = "Command Complete --> %d\n";   428                 descriptor___0.lineno = 429U;   428                 descriptor___0.flags = 1U;   428                 const struct device *__CPAchecker_TMP_1 = (const struct device *)(dwc->dev);   428                 __dynamic_dev_dbg(&descriptor___0, __CPAchecker_TMP_1, "Command Complete --> %d\n", (reg >> 15) & 1U) { /* Function call is skipped due to function is undefined */}                     }  109               dep->flags = (dep->flags) | 16U;   110               int __CPAchecker_TMP_4 = (int)(dep->number);                     {    99                 u32 res_id;                       {    28                   unsigned int tmp;                         {    58                     unsigned int ret;    56                     Ignored inline assembler code   56                     return ret;;                         }   33                   return tmp;;                       }  103                 return (res_id >> 16) & 127U;;                     }  110               dep->resource_index = (u8 )tmp___1;   113               dwc->ep0_next_event = 1;                   }  299             __ret_warn_on = ret < 0;                 } 3180     goto ldv_32990;  3455     ldv_32990:;  3456     ldv_33014:;  3074     tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}  3076     goto ldv_33013;  3075     ldv_33013:;  3077     tmp = nondet_int() { /* Function call is skipped due to function is undefined */}  3077     switch (tmp) 3223     ldv_handler_precall() { /* Function call is skipped due to function is 
undefined */}           {         }  804       struct dwc3_request *req;   805       struct dwc3_ep *dep;   806       const struct usb_ep *__mptr;   807       struct dwc3 *dwc;   808       void *tmp;   806       __mptr = (const struct usb_ep *)ep;   806       dep = (struct dwc3_ep *)__mptr;   807       dwc = dep->dwc;             {           }              {             }}  |              Source code         
     1 #ifndef _ASM_X86_IO_H
    2 #define _ASM_X86_IO_H
    3 
    4 /*
    5  * This file contains the definitions for the x86 IO instructions
    6  * inb/inw/inl/outb/outw/outl and the "string versions" of the same
    7  * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
    8  * versions of the single-IO instructions (inb_p/inw_p/..).
    9  *
   10  * This file is not meant to be obfuscating: it's just complicated
   11  * to (a) handle it all in a way that makes gcc able to optimize it
   12  * as well as possible and (b) trying to avoid writing the same thing
   13  * over and over again with slight variations and possibly making a
   14  * mistake somewhere.
   15  */
   16 
   17 /*
   18  * Thanks to James van Artsdalen for a better timing-fix than
   19  * the two short jumps: using outb's to a nonexistent port seems
   20  * to guarantee better timings even on fast machines.
   21  *
   22  * On the other hand, I'd like to be sure of a non-existent port:
   23  * I feel a bit unsafe about using 0x80 (should be safe, though)
   24  *
   25  *		Linus
   26  */
   27 
   28  /*
   29   *  Bit simplified and optimized by Jan Hubicka
   30   *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
   31   *
   32   *  isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
   33   *  isa_read[wl] and isa_write[wl] fixed
   34   *  - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
   35   */
   36 
   37 #define ARCH_HAS_IOREMAP_WC
   38 
   39 #include <linux/string.h>
   40 #include <linux/compiler.h>
   41 #include <asm/page.h>
   42 #include <asm/early_ioremap.h>
   43 
   /*
    * build_mmio_read()/build_mmio_write() expand to the static inline MMIO
    * accessor functions instantiated below (readb/readw/readl and
    * writeb/writew/writel plus their double-underscore variants).
    * The final "barrier" argument, when passed as :"memory", adds a
    * compiler memory clobber to the asm so the access cannot be reordered
    * against surrounding code; when left empty, no clobber is emitted.
    */
   44 #define build_mmio_read(name, size, type, reg, barrier) \
   45 static inline type name(const volatile void __iomem *addr) \
   46 { type ret; asm volatile("mov" size " %1,%0":reg (ret) \
   47 :"m" (*(volatile type __force *)addr) barrier); return ret; }
   48 
   49 #define build_mmio_write(name, size, type, reg, barrier) \
   50 static inline void name(type val, volatile void __iomem *addr) \
   51 { asm volatile("mov" size " %0,%1": :reg (val), \
   52 "m" (*(volatile type __force *)addr) barrier); }
   53 
   /* Ordered accessors: "memory" clobber prevents compiler reordering. */
   54 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
   55 build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
   56 build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
   57 
   /*
    * Clobber-free variants: these back the readX_relaxed()/__raw_readX()
    * and __raw_writeX() aliases defined further down.
    */
   58 build_mmio_read(__readb, "b", unsigned char, "=q", )
   59 build_mmio_read(__readw, "w", unsigned short, "=r", )
   60 build_mmio_read(__readl, "l", unsigned int, "=r", )
   61 
   62 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
   63 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
   64 build_mmio_write(writel, "l", unsigned int, "r", :"memory")
   65 
   66 build_mmio_write(__writeb, "b", unsigned char, "q", )
   67 build_mmio_write(__writew, "w", unsigned short, "r", )
   68 build_mmio_write(__writel, "l", unsigned int, "r", )
   69 
   /*
    * The _relaxed() and __raw_*() forms alias the clobber-free
    * double-underscore accessors generated above (no compiler barrier).
    */
   70 #define readb_relaxed(a) __readb(a)
   71 #define readw_relaxed(a) __readw(a)
   72 #define readl_relaxed(a) __readl(a)
   73 #define __raw_readb __readb
   74 #define __raw_readw __readw
   75 #define __raw_readl __readl
   76 
   77 #define __raw_writeb __writeb
   78 #define __raw_writew __writew
   79 #define __raw_writel __writel
   80 
   /* On x86, mmiowb() reduces to a plain compiler barrier. */
   81 #define mmiowb() barrier()
   82 
   83 #ifdef CONFIG_X86_64
   84 
   /* 64-bit MMIO accessors, available only on x86_64. */
   85 build_mmio_read(readq, "q", unsigned long, "=r", :"memory")
   86 build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
   87 
   /*
    * No separate clobber-free 64-bit variants are generated: the relaxed
    * and raw forms all alias the ordered readq/writeq above.
    */
   88 #define readq_relaxed(a)	readq(a)
   89 
   90 #define __raw_readq(a)		readq(a)
   91 #define __raw_writeq(val, addr)	writeq(val, addr)
   92 
   93 /* Let people know that we have them */
   94 #define readq			readq
   95 #define writeq			writeq
   96 
   97 #endif
   98 
/**
 *	virt_to_phys	-	map virtual addresses to physical
 *	@address: address to remap
 *
 *	The returned physical address is the physical (CPU) mapping for
 *	the memory address given. It is only valid to use this function on
 *	addresses directly mapped or allocated via kmalloc.
 *
 *	This function does not give bus mappings for DMA transfers. In
 *	almost all conceivable cases a device driver should not be using
 *	this function.
 */

static inline phys_addr_t virt_to_phys(volatile void *address)
{
	/* Simple offset arithmetic via __pa(); no page-table walk. */
	return __pa(address);
}
  116 
/**
 *	phys_to_virt	-	map physical address to virtual
 *	@address: address to remap
 *
 *	The returned virtual address is a current CPU mapping for
 *	the memory address given. It is only valid to use this function on
 *	addresses that have a kernel mapping.
 *
 *	This function does not handle bus mappings for DMA transfers. In
 *	almost all conceivable cases a device driver should not be using
 *	this function.
 */

static inline void *phys_to_virt(phys_addr_t address)
{
	/* Inverse of virt_to_phys(): plain offset arithmetic via __va(). */
	return __va(address);
}
  134 
/*
 * Change "struct page" to physical address.  The cast to dma_addr_t
 * happens before the shift so PFNs above 4G don't overflow on 32-bit.
 */
#define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
  139 
/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 * However, we truncate the address to unsigned int to avoid undesirable
 * promotions in legacy drivers.
 */
static inline unsigned int isa_virt_to_bus(volatile void *address)
{
	/* ISA devices can only address the low 32 bits anyway. */
	return (unsigned int)virt_to_phys(address);
}
#define isa_page_to_bus(page)	((unsigned int)page_to_phys(page))
#define isa_bus_to_virt		phys_to_virt
  151 
/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them on x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
  160 
/**
 * ioremap     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * If the area you are trying to map is a PCI BAR you should have a
 * look at pci_iomap().
 */
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
				unsigned long prot_val);

/*
 * The default ioremap() behavior is non-cached:
 */
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
	return ioremap_nocache(offset, size);
}
  187 
  188 extern void iounmap(volatile void __iomem *addr);
  189 
  190 extern void set_iounmap_nonlazy(void);
  191 
  192 #ifdef __KERNEL__
  193 
  194 #include <asm-generic/iomap.h>
  195 
  196 #include <linux/vmalloc.h>
  197 
/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

/*
 * memset_io - fill an MMIO region with a byte value.
 * On x86 MMIO is mapped like normal memory, so plain memset() is safe;
 * __force silences the sparse address-space warning.
 */
static inline void
memset_io(volatile void __iomem *addr, unsigned char val, size_t count)
{
	memset((void __force *)addr, val, count);
}
  208 
/*
 * memcpy_fromio - copy @count bytes from an MMIO region into RAM.
 * Relies on x86's ability to access MMIO with ordinary loads.
 */
static inline void
memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count)
{
	memcpy(dst, (const void __force *)src, count);
}
  214 
/*
 * memcpy_toio - copy @count bytes from RAM into an MMIO region.
 * Relies on x86's ability to access MMIO with ordinary stores.
 */
static inline void
memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
{
	memcpy((void __force *)dst, src, count);
}
  220 
/*
 * ISA space is 'always mapped' on a typical x86 system, no need to
 * explicitly ioremap() it. The fact that the ISA IO space is mapped
 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
 * are physical addresses. The following constant pointer can be
 * used as the IO-area pointer (it can be iounmapped as well, so the
 * analogy with PCI is quite large):
 */
#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
  230 
/*
 *	Cache management
 *
 *	This needed for two cases
 *	1. Out of order aware processors
 *	2. Accidentally out of order processors (PPro errata #51)
 */

static inline void flush_write_buffers(void)
{
#if defined(CONFIG_X86_PPRO_FENCE)
	/* A locked RMW to the stack acts as a full store fence on PPro. */
	asm volatile("lock; addl $0,0(%%esp)": : :"memory");
#endif
}
  245 
  246 #endif /* __KERNEL__ */
  247 
  248 extern void native_io_delay(void);
  249 
  250 extern int io_delay_type;
  251 extern void io_delay_init(void);
  252 
  253 #if defined(CONFIG_PARAVIRT)
  254 #include <asm/paravirt.h>
  255 #else
  256 
/*
 * slow_down_io - add a small delay after a port access for slow
 * legacy devices.  With REALLY_SLOW_IO the delay is quadrupled.
 */
static inline void slow_down_io(void)
{
	native_io_delay();
#ifdef REALLY_SLOW_IO
	native_io_delay();
	native_io_delay();
	native_io_delay();
#endif
}
  266 
  267 #endif
  268 
/*
 * BUILDIO - generate the full family of port-I/O accessors for one width.
 * @bwl:  instruction suffix and name suffix ("b"/"w"/"l")
 * @bw:   operand-size modifier for the register operand ("b"/"w"/empty)
 * @type: C type ("char"/"short"/"int"), always used unsigned
 *
 * For each width this emits: out##bwl / in##bwl (plain IN/OUT),
 * the _p variants (same plus slow_down_io() pause), and the string
 * forms outs##bwl / ins##bwl (REP OUTS/INS over a buffer).
 */
#define BUILDIO(bwl, bw, type)						\
static inline void out##bwl(unsigned type value, int port)		\
{									\
	asm volatile("out" #bwl " %" #bw "0, %w1"			\
		     : : "a"(value), "Nd"(port));			\
}									\
									\
static inline unsigned type in##bwl(int port)				\
{									\
	unsigned type value;						\
	asm volatile("in" #bwl " %w1, %" #bw "0"			\
		     : "=a"(value) : "Nd"(port));			\
	return value;							\
}									\
									\
static inline void out##bwl##_p(unsigned type value, int port)		\
{									\
	out##bwl(value, port);						\
	slow_down_io();							\
}									\
									\
static inline unsigned type in##bwl##_p(int port)			\
{									\
	unsigned type value = in##bwl(port);				\
	slow_down_io();							\
	return value;							\
}									\
									\
static inline void outs##bwl(int port, const void *addr, unsigned long count) \
{									\
	asm volatile("rep; outs" #bwl					\
		     : "+S"(addr), "+c"(count) : "d"(port));		\
}									\
									\
static inline void ins##bwl(int port, void *addr, unsigned long count)	\
{									\
	asm volatile("rep; ins" #bwl					\
		     : "+D"(addr), "+c"(count) : "d"(port));		\
}
  308 
/* Instantiate byte, word and long port accessors (inb/outb, inw/outw, inl/outl, ...). */
BUILDIO(b, b, char)
BUILDIO(w, w, short)
BUILDIO(l, , int)
  312 
  313 extern void *xlate_dev_mem_ptr(unsigned long phys);
  314 extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
  315 
  316 extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
  317 				unsigned long prot_val);
  318 extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
  319 
  320 extern bool is_early_ioremap_ptep(pte_t *ptep);
  321 
  322 #ifdef CONFIG_XEN
  323 #include <xen/xen.h>
  324 struct bio_vec;
  325 
  326 extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
  327 				      const struct bio_vec *vec2);
  328 
  329 #define BIOVEC_PHYS_MERGEABLE(vec1, vec2)				\
  330 	(__BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&				\
  331 	 (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
  332 #endif	/* CONFIG_XEN */
  333 
  334 #define IO_SPACE_LIMIT 0xffff
  335 
  336 #ifdef CONFIG_MTRR
  337 extern int __must_check arch_phys_wc_add(unsigned long base,
  338 					 unsigned long size);
  339 extern void arch_phys_wc_del(int handle);
  340 #define arch_phys_wc_add arch_phys_wc_add
  341 #endif
  342 
  343 #endif /* _ASM_X86_IO_H */                 1 
    2 extern void ldv_spin_lock(void);
    3 extern void ldv_spin_unlock(void);
    4 extern int ldv_spin_trylock(void);
    5 
    6 #include <linux/kernel.h>
    7 #include <linux/module.h>
    8 #include <linux/slab.h>
    9 
   10 extern void *ldv_undefined_pointer(void);
   11 extern void ldv_check_alloc_flags(gfp_t flags);
   12 extern void ldv_check_alloc_nonatomic(void);
   13 /* Returns an arbitrary page in addition to checking flags */
   14 extern struct page *ldv_check_alloc_flags_and_return_some_page(gfp_t flags);
   15 
   16 /**
   17  * core.c - DesignWare USB3 DRD Controller Core file
   18  *
   19  * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
   20  *
   21  * Authors: Felipe Balbi <balbi@ti.com>,
   22  *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
   23  *
   24  * This program is free software: you can redistribute it and/or modify
   25  * it under the terms of the GNU General Public License version 2  of
   26  * the License as published by the Free Software Foundation.
   27  *
   28  * This program is distributed in the hope that it will be useful,
   29  * but WITHOUT ANY WARRANTY; without even the implied warranty of
   30  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   31  * GNU General Public License for more details.
   32  *
   33  * You should have received a copy of the GNU General Public License
   34  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
   35  */
   36 
   37 #include <linux/module.h>
   38 #include <linux/kernel.h>
   39 #include <linux/slab.h>
   40 #include <linux/spinlock.h>
   41 #include <linux/platform_device.h>
   42 #include <linux/pm_runtime.h>
   43 #include <linux/interrupt.h>
   44 #include <linux/ioport.h>
   45 #include <linux/io.h>
   46 #include <linux/list.h>
   47 #include <linux/delay.h>
   48 #include <linux/dma-mapping.h>
   49 #include <linux/of.h>
   50 
   51 #include <linux/usb/ch9.h>
   52 #include <linux/usb/gadget.h>
   53 #include <linux/usb/of.h>
   54 #include <linux/usb/otg.h>
   55 
   56 #include "platform_data.h"
   57 #include "core.h"
   58 #include "gadget.h"
   59 #include "io.h"
   60 
   61 #include "debug.h"
   62 
   63 /* -------------------------------------------------------------------------- */
   64 
   65 void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
   66 {
   67 	u32 reg;
   68 
   69 	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
   70 	reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
   71 	reg |= DWC3_GCTL_PRTCAPDIR(mode);
   72 	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
   73 }
   74 
/**
 * dwc3_core_soft_reset - Issues core soft reset and PHY reset
 * @dwc: pointer to our context structure
 *
 * Sequence: put the core in soft reset, assert both PHY resets, init
 * the PHYs, then release the resets in reverse order.  The ordering
 * and the settle delays are required by the hardware; do not reorder.
 *
 * Returns 0 on success, a negative errno from phy_init() on failure.
 */
static int dwc3_core_soft_reset(struct dwc3 *dwc)
{
	u32		reg;
	int		ret;

	/* Before Resetting PHY, put Core in Reset */
	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg |= DWC3_GCTL_CORESOFTRESET;
	dwc3_writel(dwc->regs, DWC3_GCTL, reg);

	/* Assert USB3 PHY reset */
	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
	reg |= DWC3_GUSB3PIPECTL_PHYSOFTRST;
	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);

	/* Assert USB2 PHY reset */
	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
	reg |= DWC3_GUSB2PHYCFG_PHYSOFTRST;
	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);

	/* Legacy USB PHYs have void init; generic PHYs report errors. */
	usb_phy_init(dwc->usb2_phy);
	usb_phy_init(dwc->usb3_phy);
	ret = phy_init(dwc->usb2_generic_phy);
	if (ret < 0)
		return ret;

	ret = phy_init(dwc->usb3_generic_phy);
	if (ret < 0) {
		/* undo the usb2 init on partial failure */
		phy_exit(dwc->usb2_generic_phy);
		return ret;
	}
	/* let the PHYs settle before releasing their resets */
	mdelay(100);

	/* Clear USB3 PHY reset */
	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
	reg &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST;
	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);

	/* Clear USB2 PHY reset */
	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
	reg &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST;
	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);

	mdelay(100);

	/* After PHYs are stable we can take Core out of reset state */
	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg &= ~DWC3_GCTL_CORESOFTRESET;
	dwc3_writel(dwc->regs, DWC3_GCTL, reg);

	return 0;
}
  131 
/**
 * dwc3_free_one_event_buffer - Frees one event buffer
 * @dwc: Pointer to our controller context structure
 * @evt: Pointer to event buffer to be freed
 *
 * Frees only the DMA-coherent data area; the struct dwc3_event_buffer
 * itself is devm-allocated and released with the device.
 */
static void dwc3_free_one_event_buffer(struct dwc3 *dwc,
		struct dwc3_event_buffer *evt)
{
	dma_free_coherent(dwc->dev, evt->length, evt->buf, evt->dma);
}
  142 
/**
 * dwc3_alloc_one_event_buffer - Allocates one event buffer structure
 * @dwc: Pointer to our controller context structure
 * @length: size of the event buffer
 *
 * Returns a pointer to the allocated event buffer structure on success
 * otherwise ERR_PTR(-ENOMEM).  The struct is devm-managed; the data
 * area is DMA-coherent and must be released with
 * dwc3_free_one_event_buffer().
 */
static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
		unsigned length)
{
	struct dwc3_event_buffer	*evt;

	evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
	if (!evt)
		return ERR_PTR(-ENOMEM);

	evt->dwc	= dwc;
	evt->length	= length;
	/* evt->dma receives the bus address the controller will use */
	evt->buf	= dma_alloc_coherent(dwc->dev, length,
			&evt->dma, GFP_KERNEL);
	if (!evt->buf)
		return ERR_PTR(-ENOMEM);

	return evt;
}
  169 
/**
 * dwc3_free_event_buffers - frees all allocated event buffers
 * @dwc: Pointer to our controller context structure
 *
 * Safe to call on a partially-populated array: NULL slots (from a
 * failed dwc3_alloc_event_buffers()) are skipped.
 */
static void dwc3_free_event_buffers(struct dwc3 *dwc)
{
	struct dwc3_event_buffer	*evt;
	int i;

	for (i = 0; i < dwc->num_event_buffers; i++) {
		evt = dwc->ev_buffs[i];
		if (evt)
			dwc3_free_one_event_buffer(dwc, evt);
	}
}
  185 
/**
 * dwc3_alloc_event_buffers - Allocates @num event buffers of size @length
 * @dwc: pointer to our controller context structure
 * @length: size of event buffer
 *
 * Returns 0 on success otherwise negative errno. In the error case, dwc
 * may contain some buffers allocated but not all which were requested;
 * the caller is expected to clean up via dwc3_free_event_buffers().
 */
static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned length)
{
	int			num;
	int			i;

	/* one event buffer per interrupter advertised in HWPARAMS1 */
	num = DWC3_NUM_INT(dwc->hwparams.hwparams1);
	dwc->num_event_buffers = num;

	dwc->ev_buffs = devm_kzalloc(dwc->dev, sizeof(*dwc->ev_buffs) * num,
			GFP_KERNEL);
	if (!dwc->ev_buffs) {
		dev_err(dwc->dev, "can't allocate event buffers array\n");
		return -ENOMEM;
	}

	for (i = 0; i < num; i++) {
		struct dwc3_event_buffer	*evt;

		evt = dwc3_alloc_one_event_buffer(dwc, length);
		if (IS_ERR(evt)) {
			dev_err(dwc->dev, "can't allocate event buffer\n");
			return PTR_ERR(evt);
		}
		dwc->ev_buffs[i] = evt;
	}

	return 0;
}
  222 
/**
 * dwc3_event_buffers_setup - setup our allocated event buffers
 * @dwc: pointer to our controller context structure
 *
 * Programs each buffer's DMA address and size into the corresponding
 * GEVNTADR/GEVNTSIZ registers and clears the event count.
 *
 * Returns 0 on success otherwise negative errno.
 */
static int dwc3_event_buffers_setup(struct dwc3 *dwc)
{
	struct dwc3_event_buffer	*evt;
	int				n;

	for (n = 0; n < dwc->num_event_buffers; n++) {
		evt = dwc->ev_buffs[n];
		dev_dbg(dwc->dev, "Event buf %p dma %08llx length %d\n",
				evt->buf, (unsigned long long) evt->dma,
				evt->length);

		/* reset the software read position */
		evt->lpos = 0;

		dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(n),
				lower_32_bits(evt->dma));
		dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(n),
				upper_32_bits(evt->dma));
		dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(n),
				DWC3_GEVNTSIZ_SIZE(evt->length));
		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(n), 0);
	}

	return 0;
}
  253 
/*
 * dwc3_event_buffers_cleanup - disable hardware event delivery.
 * Zeroes each interrupter's address registers, masks its interrupt
 * (DWC3_GEVNTSIZ_INTMASK) and clears the count; the buffers themselves
 * stay allocated so dwc3_event_buffers_setup() can re-arm them.
 */
static void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
{
	struct dwc3_event_buffer	*evt;
	int				n;

	for (n = 0; n < dwc->num_event_buffers; n++) {
		evt = dwc->ev_buffs[n];

		evt->lpos = 0;

		dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(n), 0);
		dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(n), 0);
		dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(n), DWC3_GEVNTSIZ_INTMASK
				| DWC3_GEVNTSIZ_SIZE(0));
		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(n), 0);
	}
}
  271 
/*
 * dwc3_alloc_scratch_buffers - allocate hibernation scratch memory.
 * No-op (returns 0) unless the core supports hibernation and
 * advertises a non-zero scratch buffer count in HWPARAMS4.
 * The buffer is plain kmalloc memory; it gets DMA-mapped later in
 * dwc3_setup_scratch_buffers() and kfree()d in
 * dwc3_free_scratch_buffers().
 */
static int dwc3_alloc_scratch_buffers(struct dwc3 *dwc)
{
	if (!dwc->has_hibernation)
		return 0;

	if (!dwc->nr_scratch)
		return 0;

	dwc->scratchbuf = kmalloc_array(dwc->nr_scratch,
			DWC3_SCRATCHBUF_SIZE, GFP_KERNEL);
	if (!dwc->scratchbuf)
		return -ENOMEM;

	return 0;
}
  287 
  288 static int dwc3_setup_scratch_buffers(struct dwc3 *dwc)
  289 {
  290 	dma_addr_t scratch_addr;
  291 	u32 param;
  292 	int ret;
  293 
  294 	if (!dwc->has_hibernation)
  295 		return 0;
  296 
  297 	if (!dwc->nr_scratch)
  298 		return 0;
  299 
  300 	 /* should never fall here */
  301 	if (!WARN_ON(dwc->scratchbuf))
  302 		return 0;
  303 
  304 	scratch_addr = dma_map_single(dwc->dev, dwc->scratchbuf,
  305 			dwc->nr_scratch * DWC3_SCRATCHBUF_SIZE,
  306 			DMA_BIDIRECTIONAL);
  307 	if (dma_mapping_error(dwc->dev, scratch_addr)) {
  308 		dev_err(dwc->dev, "failed to map scratch buffer\n");
  309 		ret = -EFAULT;
  310 		goto err0;
  311 	}
  312 
  313 	dwc->scratch_addr = scratch_addr;
  314 
  315 	param = lower_32_bits(scratch_addr);
  316 
  317 	ret = dwc3_send_gadget_generic_command(dwc,
  318 			DWC3_DGCMD_SET_SCRATCHPAD_ADDR_LO, param);
  319 	if (ret < 0)
  320 		goto err1;
  321 
  322 	param = upper_32_bits(scratch_addr);
  323 
  324 	ret = dwc3_send_gadget_generic_command(dwc,
  325 			DWC3_DGCMD_SET_SCRATCHPAD_ADDR_HI, param);
  326 	if (ret < 0)
  327 		goto err1;
  328 
  329 	return 0;
  330 
  331 err1:
  332 	dma_unmap_single(dwc->dev, dwc->scratch_addr, dwc->nr_scratch *
  333 			DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);
  334 
  335 err0:
  336 	return ret;
  337 }
  338 
  339 static void dwc3_free_scratch_buffers(struct dwc3 *dwc)
  340 {
  341 	if (!dwc->has_hibernation)
  342 		return;
  343 
  344 	if (!dwc->nr_scratch)
  345 		return;
  346 
  347 	 /* should never fall here */
  348 	if (!WARN_ON(dwc->scratchbuf))
  349 		return;
  350 
  351 	dma_unmap_single(dwc->dev, dwc->scratch_addr, dwc->nr_scratch *
  352 			DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);
  353 	kfree(dwc->scratchbuf);
  354 }
  355 
/*
 * dwc3_core_num_eps - derive the IN/OUT endpoint split from HWPARAMS.
 * The hardware reports the total and the IN count; OUT is the remainder.
 */
static void dwc3_core_num_eps(struct dwc3 *dwc)
{
	struct dwc3_hwparams	*parms = &dwc->hwparams;

	dwc->num_in_eps = DWC3_NUM_IN_EPS(parms);
	dwc->num_out_eps = DWC3_NUM_EPS(parms) - dwc->num_in_eps;

	dev_vdbg(dwc->dev, "found %d IN and %d OUT endpoints\n",
			dwc->num_in_eps, dwc->num_out_eps);
}
  366 
/*
 * dwc3_cache_hwparams - snapshot the read-only GHWPARAMS registers.
 * Cached once at probe so later code can consult hardware capabilities
 * without touching the (possibly suspended) register space.
 */
static void dwc3_cache_hwparams(struct dwc3 *dwc)
{
	struct dwc3_hwparams	*parms = &dwc->hwparams;

	parms->hwparams0 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS0);
	parms->hwparams1 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS1);
	parms->hwparams2 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS2);
	parms->hwparams3 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS3);
	parms->hwparams4 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS4);
	parms->hwparams5 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS5);
	parms->hwparams6 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS6);
	parms->hwparams7 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS7);
	parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8);
}
  381 
/**
 * dwc3_core_init - Low-level initialization of DWC3 Core
 * @dwc: Pointer to our controller context structure
 *
 * Verifies the IP identity, performs device and core soft resets,
 * configures GCTL power-management/workaround bits based on the cached
 * HWPARAMS and IP revision, and sets up hibernation scratch buffers.
 *
 * Returns 0 on success otherwise negative errno.
 */
static int dwc3_core_init(struct dwc3 *dwc)
{
	unsigned long		timeout;
	u32			hwparams4 = dwc->hwparams.hwparams4;
	u32			reg;
	int			ret;

	reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
	/* This should read as U3 followed by revision number */
	if ((reg & DWC3_GSNPSID_MASK) != 0x55330000) {
		dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
		ret = -ENODEV;
		goto err0;
	}
	/* low half of GSNPSID is the revision; kept whole for comparisons */
	dwc->revision = reg;

	/* issue device SoftReset too */
	timeout = jiffies + msecs_to_jiffies(500);
	dwc3_writel(dwc->regs, DWC3_DCTL, DWC3_DCTL_CSFTRST);
	do {
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		if (!(reg & DWC3_DCTL_CSFTRST))
			break;

		/* hardware clears CSFTRST when the reset completes */
		if (time_after(jiffies, timeout)) {
			dev_err(dwc->dev, "Reset Timed Out\n");
			ret = -ETIMEDOUT;
			goto err0;
		}

		cpu_relax();
	} while (true);

	ret = dwc3_core_soft_reset(dwc);
	if (ret)
		goto err0;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
	reg &= ~DWC3_GCTL_DISSCRAMBLE;

	switch (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1)) {
	case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
		/**
		 * WORKAROUND: DWC3 revisions between 2.10a and 2.50a have an
		 * issue which would cause xHCI compliance tests to fail.
		 *
		 * Because of that we cannot enable clock gating on such
		 * configurations.
		 *
		 * Refers to:
		 *
		 * STAR#9000588375: Clock Gating, SOF Issues when ref_clk-Based
		 * SOF/ITP Mode Used
		 */
		if ((dwc->dr_mode == USB_DR_MODE_HOST ||
				dwc->dr_mode == USB_DR_MODE_OTG) &&
				(dwc->revision >= DWC3_REVISION_210A &&
				dwc->revision <= DWC3_REVISION_250A))
			reg |= DWC3_GCTL_DSBLCLKGTNG | DWC3_GCTL_SOFITPSYNC;
		else
			reg &= ~DWC3_GCTL_DSBLCLKGTNG;
		break;
	case DWC3_GHWPARAMS1_EN_PWROPT_HIB:
		/* enable hibernation here */
		dwc->nr_scratch = DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(hwparams4);
		break;
	default:
		dev_dbg(dwc->dev, "No power optimization available\n");
	}

	/*
	 * WORKAROUND: DWC3 revisions <1.90a have a bug
	 * where the device can fail to connect at SuperSpeed
	 * and falls back to high-speed mode which causes
	 * the device to enter a Connect/Disconnect loop
	 */
	if (dwc->revision < DWC3_REVISION_190A)
		reg |= DWC3_GCTL_U2RSTECN;

	dwc3_core_num_eps(dwc);

	dwc3_writel(dwc->regs, DWC3_GCTL, reg);

	ret = dwc3_alloc_scratch_buffers(dwc);
	if (ret)
		goto err1;

	ret = dwc3_setup_scratch_buffers(dwc);
	if (ret)
		goto err2;

	return 0;

err2:
	dwc3_free_scratch_buffers(dwc);

err1:
	/* the PHYs were brought up by dwc3_core_soft_reset(); undo that */
	usb_phy_shutdown(dwc->usb2_phy);
	usb_phy_shutdown(dwc->usb3_phy);
	phy_exit(dwc->usb2_generic_phy);
	phy_exit(dwc->usb3_generic_phy);

err0:
	return ret;
}
  494 
/*
 * dwc3_core_exit - undo dwc3_core_init(): release scratch buffers and
 * shut down both legacy and generic PHYs.
 */
static void dwc3_core_exit(struct dwc3 *dwc)
{
	dwc3_free_scratch_buffers(dwc);
	usb_phy_shutdown(dwc->usb2_phy);
	usb_phy_shutdown(dwc->usb3_phy);
	phy_exit(dwc->usb2_generic_phy);
	phy_exit(dwc->usb3_generic_phy);
}
  503 
/*
 * dwc3_core_get_phy - look up the USB2/USB3 PHYs for this controller.
 *
 * Tries the legacy USB-PHY framework first (by DT phandle when a node
 * exists, by type otherwise), then the generic PHY framework.  A PHY
 * that simply isn't present (-ENXIO/-ENODEV for legacy, -ENOSYS/-ENODEV
 * for generic) is treated as optional and the pointer is cleared;
 * -EPROBE_DEFER is propagated so probing can retry; anything else is a
 * hard error.
 *
 * NOTE(review): the legacy lookups check -ENXIO while the generic ones
 * check -ENOSYS - this asymmetry mirrors the two frameworks' "not
 * available" codes; confirm against the phy framework if changed.
 *
 * Returns 0 on success otherwise negative errno.
 */
static int dwc3_core_get_phy(struct dwc3 *dwc)
{
	struct device		*dev = dwc->dev;
	struct device_node	*node = dev->of_node;
	int ret;

	if (node) {
		dwc->usb2_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
		dwc->usb3_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 1);
	} else {
		dwc->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
		dwc->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3);
	}

	if (IS_ERR(dwc->usb2_phy)) {
		ret = PTR_ERR(dwc->usb2_phy);
		if (ret == -ENXIO || ret == -ENODEV) {
			dwc->usb2_phy = NULL;
		} else if (ret == -EPROBE_DEFER) {
			return ret;
		} else {
			dev_err(dev, "no usb2 phy configured\n");
			return ret;
		}
	}

	if (IS_ERR(dwc->usb3_phy)) {
		ret = PTR_ERR(dwc->usb3_phy);
		if (ret == -ENXIO || ret == -ENODEV) {
			dwc->usb3_phy = NULL;
		} else if (ret == -EPROBE_DEFER) {
			return ret;
		} else {
			dev_err(dev, "no usb3 phy configured\n");
			return ret;
		}
	}

	dwc->usb2_generic_phy = devm_phy_get(dev, "usb2-phy");
	if (IS_ERR(dwc->usb2_generic_phy)) {
		ret = PTR_ERR(dwc->usb2_generic_phy);
		if (ret == -ENOSYS || ret == -ENODEV) {
			dwc->usb2_generic_phy = NULL;
		} else if (ret == -EPROBE_DEFER) {
			return ret;
		} else {
			dev_err(dev, "no usb2 phy configured\n");
			return ret;
		}
	}

	dwc->usb3_generic_phy = devm_phy_get(dev, "usb3-phy");
	if (IS_ERR(dwc->usb3_generic_phy)) {
		ret = PTR_ERR(dwc->usb3_generic_phy);
		if (ret == -ENOSYS || ret == -ENODEV) {
			dwc->usb3_generic_phy = NULL;
		} else if (ret == -EPROBE_DEFER) {
			return ret;
		} else {
			dev_err(dev, "no usb3 phy configured\n");
			return ret;
		}
	}

	return 0;
}
  570 
/*
 * dwc3_core_init_mode - configure PRTCAPDIR and bring up the role(s)
 * selected by dwc->dr_mode: gadget for peripheral, xHCI for host, or
 * both for OTG.
 *
 * Returns 0 on success otherwise negative errno.  On OTG, a gadget
 * init failure leaves the host initialized; dwc3_core_exit_mode()
 * tears both down.
 */
static int dwc3_core_init_mode(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	int ret;

	switch (dwc->dr_mode) {
	case USB_DR_MODE_PERIPHERAL:
		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
		ret = dwc3_gadget_init(dwc);
		if (ret) {
			dev_err(dev, "failed to initialize gadget\n");
			return ret;
		}
		break;
	case USB_DR_MODE_HOST:
		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
		ret = dwc3_host_init(dwc);
		if (ret) {
			dev_err(dev, "failed to initialize host\n");
			return ret;
		}
		break;
	case USB_DR_MODE_OTG:
		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_OTG);
		ret = dwc3_host_init(dwc);
		if (ret) {
			dev_err(dev, "failed to initialize host\n");
			return ret;
		}

		ret = dwc3_gadget_init(dwc);
		if (ret) {
			dev_err(dev, "failed to initialize gadget\n");
			return ret;
		}
		break;
	default:
		dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode);
		return -EINVAL;
	}

	return 0;
}
  614 
  615 static void dwc3_core_exit_mode(struct dwc3 *dwc)
  616 {
  617 	switch (dwc->dr_mode) {
  618 	case USB_DR_MODE_PERIPHERAL:
  619 		dwc3_gadget_exit(dwc);
  620 		break;
  621 	case USB_DR_MODE_HOST:
  622 		dwc3_host_exit(dwc);
  623 		break;
  624 	case USB_DR_MODE_OTG:
  625 		dwc3_host_exit(dwc);
  626 		dwc3_gadget_exit(dwc);
  627 		break;
  628 	default:
  629 		/* do nothing */
  630 		break;
  631 	}
  632 }
  633 
/* dwc3 struct must sit on a 16-byte boundary within the devm allocation */
#define DWC3_ALIGN_MASK		(16 - 1)

/*
 * dwc3_probe - platform-bus probe: map registers, gather configuration
 * from DT or platform data, initialize the core and start the
 * configured role(s).
 *
 * Error handling uses the kernel goto-cleanup pattern; each label
 * unwinds everything set up after the one below it.
 *
 * Returns 0 on success otherwise negative errno.
 */
static int dwc3_probe(struct platform_device *pdev)
{
	struct device		*dev = &pdev->dev;
	struct dwc3_platform_data *pdata = dev_get_platdata(dev);
	struct device_node	*node = dev->of_node;
	struct resource		*res;
	struct dwc3		*dwc;

	int			ret;

	void __iomem		*regs;
	void			*mem;

	/* over-allocate so dwc can be aligned to DWC3_ALIGN_MASK + 1 */
	mem = devm_kzalloc(dev, sizeof(*dwc) + DWC3_ALIGN_MASK, GFP_KERNEL);
	if (!mem) {
		dev_err(dev, "not enough memory\n");
		return -ENOMEM;
	}
	dwc = PTR_ALIGN(mem, DWC3_ALIGN_MASK + 1);
	dwc->mem = mem;
	dwc->dev = dev;

	/* IRQ is passed through to the xhci-plat child as resource [1] */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(dev, "missing IRQ\n");
		return -ENODEV;
	}
	dwc->xhci_resources[1].start = res->start;
	dwc->xhci_resources[1].end = res->end;
	dwc->xhci_resources[1].flags = res->flags;
	dwc->xhci_resources[1].name = res->name;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "missing memory resource\n");
		return -ENODEV;
	}

	/* configuration comes from DT when present, else platform data */
	if (node) {
		dwc->maximum_speed = of_usb_get_maximum_speed(node);

		dwc->needs_fifo_resize = of_property_read_bool(node, "tx-fifo-resize");
		dwc->dr_mode = of_usb_get_dr_mode(node);
	} else if (pdata) {
		dwc->maximum_speed = pdata->maximum_speed;

		dwc->needs_fifo_resize = pdata->tx_fifo_resize;
		dwc->dr_mode = pdata->dr_mode;
	}

	/* default to superspeed if no maximum_speed passed */
	if (dwc->maximum_speed == USB_SPEED_UNKNOWN)
		dwc->maximum_speed = USB_SPEED_SUPER;

	ret = dwc3_core_get_phy(dwc);
	if (ret)
		return ret;

	/* xHCI register window for the child device: resource [0] */
	dwc->xhci_resources[0].start = res->start;
	dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
					DWC3_XHCI_REGS_END;
	dwc->xhci_resources[0].flags = res->flags;
	dwc->xhci_resources[0].name = res->name;

	res->start += DWC3_GLOBALS_REGS_START;

	/*
	 * Request memory region but exclude xHCI regs,
	 * since it will be requested by the xhci-plat driver.
	 */
	regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	spin_lock_init(&dwc->lock);
	platform_set_drvdata(pdev, dwc);

	dwc->regs	= regs;
	dwc->regs_size	= resource_size(res);

	/* inherit DMA configuration from the parent (glue) device */
	dev->dma_mask	= dev->parent->dma_mask;
	dev->dma_parms	= dev->parent->dma_parms;
	dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);
	pm_runtime_forbid(dev);

	dwc3_cache_hwparams(dwc);

	ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
	if (ret) {
		dev_err(dwc->dev, "failed to allocate event buffers\n");
		ret = -ENOMEM;
		goto err0;
	}

	/* Kconfig may force a single role regardless of DT/pdata */
	if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
		dwc->dr_mode = USB_DR_MODE_HOST;
	else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
		dwc->dr_mode = USB_DR_MODE_PERIPHERAL;

	if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
		dwc->dr_mode = USB_DR_MODE_OTG;

	ret = dwc3_core_init(dwc);
	if (ret) {
		dev_err(dev, "failed to initialize core\n");
		goto err0;
	}

	usb_phy_set_suspend(dwc->usb2_phy, 0);
	usb_phy_set_suspend(dwc->usb3_phy, 0);
	ret = phy_power_on(dwc->usb2_generic_phy);
	if (ret < 0)
		goto err1;

	ret = phy_power_on(dwc->usb3_generic_phy);
	if (ret < 0)
		goto err_usb2phy_power;

	ret = dwc3_event_buffers_setup(dwc);
	if (ret) {
		dev_err(dwc->dev, "failed to setup event buffers\n");
		goto err_usb3phy_power;
	}

	ret = dwc3_core_init_mode(dwc);
	if (ret)
		goto err2;

	ret = dwc3_debugfs_init(dwc);
	if (ret) {
		dev_err(dev, "failed to initialize debugfs\n");
		goto err3;
	}

	pm_runtime_allow(dev);

	return 0;

err3:
	dwc3_core_exit_mode(dwc);

err2:
	dwc3_event_buffers_cleanup(dwc);

err_usb3phy_power:
	phy_power_off(dwc->usb3_generic_phy);

err_usb2phy_power:
	phy_power_off(dwc->usb2_generic_phy);

err1:
	usb_phy_set_suspend(dwc->usb2_phy, 1);
	usb_phy_set_suspend(dwc->usb3_phy, 1);
	dwc3_core_exit(dwc);

err0:
	dwc3_free_event_buffers(dwc);

	return ret;
}
  799 
/*
 * dwc3_remove - undo dwc3_probe on device removal
 * @pdev: the platform device being removed
 *
 * Teardown mirrors probe in reverse: PHYs are suspended/powered off first,
 * runtime PM references are dropped, then the core, event buffers and
 * debugfs entries are released.
 *
 * Always returns 0.
 */
static int dwc3_remove(struct platform_device *pdev)
{
	struct dwc3	*dwc = platform_get_drvdata(pdev);

	/* Put both USB2/USB3 PHYs back to low power before core teardown. */
	usb_phy_set_suspend(dwc->usb2_phy, 1);
	usb_phy_set_suspend(dwc->usb3_phy, 1);
	phy_power_off(dwc->usb2_generic_phy);
	phy_power_off(dwc->usb3_generic_phy);

	/* Balance the pm_runtime_get_sync()/pm_runtime_enable() from probe. */
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	/* Reverse order of probe: debugfs, mode, event buffers, core. */
	dwc3_debugfs_exit(dwc);
	dwc3_core_exit_mode(dwc);
	dwc3_event_buffers_cleanup(dwc);
	dwc3_free_event_buffers(dwc);
	dwc3_core_exit(dwc);

	return 0;
}
  820 
  821 #ifdef CONFIG_PM_SLEEP
  822 static int dwc3_prepare(struct device *dev)
  823 {
  824 	struct dwc3	*dwc = dev_get_drvdata(dev);
  825 	unsigned long	flags;
  826 
  827 	spin_lock_irqsave(&dwc->lock, flags);
  828 
  829 	switch (dwc->dr_mode) {
  830 	case USB_DR_MODE_PERIPHERAL:
  831 	case USB_DR_MODE_OTG:
  832 		dwc3_gadget_prepare(dwc);
  833 		/* FALLTHROUGH */
  834 	case USB_DR_MODE_HOST:
  835 	default:
  836 		dwc3_event_buffers_cleanup(dwc);
  837 		break;
  838 	}
  839 
  840 	spin_unlock_irqrestore(&dwc->lock, flags);
  841 
  842 	return 0;
  843 }
  844 
  845 static void dwc3_complete(struct device *dev)
  846 {
  847 	struct dwc3	*dwc = dev_get_drvdata(dev);
  848 	unsigned long	flags;
  849 
  850 	spin_lock_irqsave(&dwc->lock, flags);
  851 
  852 	dwc3_event_buffers_setup(dwc);
  853 	switch (dwc->dr_mode) {
  854 	case USB_DR_MODE_PERIPHERAL:
  855 	case USB_DR_MODE_OTG:
  856 		dwc3_gadget_complete(dwc);
  857 		/* FALLTHROUGH */
  858 	case USB_DR_MODE_HOST:
  859 	default:
  860 		break;
  861 	}
  862 
  863 	spin_unlock_irqrestore(&dwc->lock, flags);
  864 }
  865 
/*
 * dwc3_suspend - system sleep entry
 * @dev: the dwc3 device
 *
 * Suspends the gadget side (peripheral/OTG modes), snapshots GCTL so
 * dwc3_resume() can restore it, then shuts the PHYs down.  The PHY calls
 * are made outside the spinlock because they may sleep — NOTE(review):
 * inferred from their placement after the unlock; confirm against the
 * PHY framework.
 *
 * Always returns 0.
 */
static int dwc3_suspend(struct device *dev)
{
	struct dwc3	*dwc = dev_get_drvdata(dev);
	unsigned long	flags;

	spin_lock_irqsave(&dwc->lock, flags);

	switch (dwc->dr_mode) {
	case USB_DR_MODE_PERIPHERAL:
	case USB_DR_MODE_OTG:
		dwc3_gadget_suspend(dwc);
		/* FALLTHROUGH */
	case USB_DR_MODE_HOST:
	default:
		/* do nothing */
		break;
	}

	/* Save GCTL under the lock; restored in dwc3_resume(). */
	dwc->gctl = dwc3_readl(dwc->regs, DWC3_GCTL);
	spin_unlock_irqrestore(&dwc->lock, flags);

	/* PHY teardown happens after the register snapshot. */
	usb_phy_shutdown(dwc->usb3_phy);
	usb_phy_shutdown(dwc->usb2_phy);
	phy_exit(dwc->usb2_generic_phy);
	phy_exit(dwc->usb3_generic_phy);

	return 0;
}
  894 
/*
 * dwc3_resume - system sleep exit, the inverse of dwc3_suspend
 * @dev: the dwc3 device
 *
 * Re-initializes the PHYs, restores the saved GCTL value and resumes the
 * gadget side for peripheral/OTG modes.  Finally re-arms runtime PM by
 * cycling disable/set_active/enable.
 *
 * Returns 0 on success or a negative errno from PHY initialization; on
 * the usb3 PHY failure path the already-initialized usb2 PHY is exited.
 */
static int dwc3_resume(struct device *dev)
{
	struct dwc3	*dwc = dev_get_drvdata(dev);
	unsigned long	flags;
	int		ret;

	/* Bring the PHYs up before touching controller registers. */
	usb_phy_init(dwc->usb3_phy);
	usb_phy_init(dwc->usb2_phy);
	ret = phy_init(dwc->usb2_generic_phy);
	if (ret < 0)
		return ret;

	ret = phy_init(dwc->usb3_generic_phy);
	if (ret < 0)
		goto err_usb2phy_init;

	spin_lock_irqsave(&dwc->lock, flags);

	/* Restore GCTL saved by dwc3_suspend(). */
	dwc3_writel(dwc->regs, DWC3_GCTL, dwc->gctl);

	switch (dwc->dr_mode) {
	case USB_DR_MODE_PERIPHERAL:
	case USB_DR_MODE_OTG:
		dwc3_gadget_resume(dwc);
		/* FALLTHROUGH */
	case USB_DR_MODE_HOST:
	default:
		/* do nothing */
		break;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Re-arm runtime PM with the device marked active. */
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	return 0;

err_usb2phy_init:
	phy_exit(dwc->usb2_generic_phy);

	return ret;
}
  939 
/*
 * PM callbacks: prepare/complete bracket the suspend/resume pair that
 * SET_SYSTEM_SLEEP_PM_OPS installs for all system-sleep transitions.
 */
static const struct dev_pm_ops dwc3_dev_pm_ops = {
	.prepare	= dwc3_prepare,
	.complete	= dwc3_complete,

	SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
};
  946 
  947 #define DWC3_PM_OPS	&(dwc3_dev_pm_ops)
  948 #else
  949 #define DWC3_PM_OPS	NULL
  950 #endif
  951 
  952 #ifdef CONFIG_OF
/*
 * Device-tree match table: both the modern "snps" and the legacy
 * "synopsys" vendor prefixes are accepted.
 */
static const struct of_device_id of_dwc3_match[] = {
	{
		.compatible = "snps,dwc3"
	},
	{
		.compatible = "synopsys,dwc3"
	},
	{ },	/* sentinel */
};
MODULE_DEVICE_TABLE(of, of_dwc3_match);
  963 #endif
  964 
/*
 * Platform driver glue.  DWC3_PM_OPS resolves to &dwc3_dev_pm_ops when
 * CONFIG_PM_SLEEP is set, NULL otherwise; of_match_ptr() likewise drops
 * the OF table on !CONFIG_OF builds.
 */
static struct platform_driver dwc3_driver = {
	.probe		= dwc3_probe,
	.remove		= dwc3_remove,
	.driver		= {
		.name	= "dwc3",
		.of_match_table	= of_match_ptr(of_dwc3_match),
		.pm	= DWC3_PM_OPS,
	},
};
  974 
/* Register with the platform bus; expands to module init/exit stubs. */
module_platform_driver(dwc3_driver);

MODULE_ALIAS("platform:dwc3");
MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver");
  981 
  982 
  983 
  984 
  985 
  986 /* LDV_COMMENT_BEGIN_MAIN */
  987 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
  988 
  989 /*###########################################################################*/
  990 
  991 /*############## Driver Environment Generator 0.2 output ####################*/
  992 
  993 /*###########################################################################*/
  994 
  995 
  996 
  997 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
  998 void ldv_check_final_state(void);
  999 
 1000 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
 1001 void ldv_check_return_value(int res);
 1002 
 1003 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
 1004 void ldv_check_return_value_probe(int res);
 1005 
 1006 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
 1007 void ldv_initialize(void);
 1008 
 1009 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
 1010 void ldv_handler_precall(void);
 1011 
 1012 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */
 1013 int nondet_int(void);
 1014 
 1015 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
 1016 int LDV_IN_INTERRUPT;
 1017 
 1018 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
/*
 * LDV-generated environment model.  Non-deterministically invokes the
 * driver's pm and platform_driver callbacks in an infinite loop so the
 * verifier can explore all interleavings; ldv_s_* tracks that remove()
 * is only reachable after a successful probe().  Machine-generated —
 * do not hand-edit the call sequence.
 */
void ldv_main0_sequence_infinite_withcheck_stateful(void) {



	/* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
	/*============================= VARIABLE DECLARATION PART   =============================*/
	/** STRUCT: struct type: dev_pm_ops, struct name: dwc3_dev_pm_ops **/
	/* content: static int dwc3_prepare(struct device *dev)*/
	/* LDV_COMMENT_BEGIN_PREP */
	#define DWC3_ALIGN_MASK		(16 - 1)
	#ifdef CONFIG_PM_SLEEP
	/* LDV_COMMENT_END_PREP */
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dwc3_prepare" */
	struct device * var_group1;
	/* LDV_COMMENT_BEGIN_PREP */
	#define DWC3_PM_OPS	&(dwc3_dev_pm_ops)
	#else
	#define DWC3_PM_OPS	NULL
	#endif
	#ifdef CONFIG_OF
	#endif
	/* LDV_COMMENT_END_PREP */
	/* content: static void dwc3_complete(struct device *dev)*/
	/* LDV_COMMENT_BEGIN_PREP */
	#define DWC3_ALIGN_MASK		(16 - 1)
	#ifdef CONFIG_PM_SLEEP
	/* LDV_COMMENT_END_PREP */
	/* LDV_COMMENT_BEGIN_PREP */
	#define DWC3_PM_OPS	&(dwc3_dev_pm_ops)
	#else
	#define DWC3_PM_OPS	NULL
	#endif
	#ifdef CONFIG_OF
	#endif
	/* LDV_COMMENT_END_PREP */

	/** STRUCT: struct type: platform_driver, struct name: dwc3_driver **/
	/* content: static int dwc3_probe(struct platform_device *pdev)*/
	/* LDV_COMMENT_BEGIN_PREP */
	#define DWC3_ALIGN_MASK		(16 - 1)
	/* LDV_COMMENT_END_PREP */
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dwc3_probe" */
	struct platform_device * var_group2;
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "dwc3_probe" */
	static int res_dwc3_probe_18;
	/* LDV_COMMENT_BEGIN_PREP */
	#ifdef CONFIG_PM_SLEEP
	#define DWC3_PM_OPS	&(dwc3_dev_pm_ops)
	#else
	#define DWC3_PM_OPS	NULL
	#endif
	#ifdef CONFIG_OF
	#endif
	/* LDV_COMMENT_END_PREP */
	/* content: static int dwc3_remove(struct platform_device *pdev)*/
	/* LDV_COMMENT_BEGIN_PREP */
	#define DWC3_ALIGN_MASK		(16 - 1)
	/* LDV_COMMENT_END_PREP */
	/* LDV_COMMENT_BEGIN_PREP */
	#ifdef CONFIG_PM_SLEEP
	#define DWC3_PM_OPS	&(dwc3_dev_pm_ops)
	#else
	#define DWC3_PM_OPS	NULL
	#endif
	#ifdef CONFIG_OF
	#endif
	/* LDV_COMMENT_END_PREP */




	/* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
	/* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
	/*============================= VARIABLE INITIALIZING PART  =============================*/
	LDV_IN_INTERRUPT=1;




	/* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
	/* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
	/*============================= FUNCTION CALL SECTION       =============================*/
	/* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
	ldv_initialize();


	int ldv_s_dwc3_driver_platform_driver = 0;


	while(  nondet_int()
		|| !(ldv_s_dwc3_driver_platform_driver == 0)
	) {

		switch(nondet_int()) {

			case 0: {

				/** STRUCT: struct type: dev_pm_ops, struct name: dwc3_dev_pm_ops **/


				/* content: static int dwc3_prepare(struct device *dev)*/
				/* LDV_COMMENT_BEGIN_PREP */
				#define DWC3_ALIGN_MASK		(16 - 1)
				#ifdef CONFIG_PM_SLEEP
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "prepare" from driver structure with callbacks "dwc3_dev_pm_ops" */
				ldv_handler_precall();
				dwc3_prepare( var_group1);
				/* LDV_COMMENT_BEGIN_PREP */
				#define DWC3_PM_OPS	&(dwc3_dev_pm_ops)
				#else
				#define DWC3_PM_OPS	NULL
				#endif
				#ifdef CONFIG_OF
				#endif
				/* LDV_COMMENT_END_PREP */




			}

			break;
			case 1: {

				/** STRUCT: struct type: dev_pm_ops, struct name: dwc3_dev_pm_ops **/


				/* content: static void dwc3_complete(struct device *dev)*/
				/* LDV_COMMENT_BEGIN_PREP */
				#define DWC3_ALIGN_MASK		(16 - 1)
				#ifdef CONFIG_PM_SLEEP
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "complete" from driver structure with callbacks "dwc3_dev_pm_ops" */
				ldv_handler_precall();
				dwc3_complete( var_group1);
				/* LDV_COMMENT_BEGIN_PREP */
				#define DWC3_PM_OPS	&(dwc3_dev_pm_ops)
				#else
				#define DWC3_PM_OPS	NULL
				#endif
				#ifdef CONFIG_OF
				#endif
				/* LDV_COMMENT_END_PREP */




			}

			break;
			case 2: {

				/** STRUCT: struct type: platform_driver, struct name: dwc3_driver **/
				if(ldv_s_dwc3_driver_platform_driver==0) {

				/* content: static int dwc3_probe(struct platform_device *pdev)*/
				/* LDV_COMMENT_BEGIN_PREP */
				#define DWC3_ALIGN_MASK		(16 - 1)
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "dwc3_driver". Standart function test for correct return result. */
				res_dwc3_probe_18 = dwc3_probe( var_group2);
				 ldv_check_return_value(res_dwc3_probe_18);
				 ldv_check_return_value_probe(res_dwc3_probe_18);
				 if(res_dwc3_probe_18)
					goto ldv_module_exit;
				/* LDV_COMMENT_BEGIN_PREP */
				#ifdef CONFIG_PM_SLEEP
				#define DWC3_PM_OPS	&(dwc3_dev_pm_ops)
				#else
				#define DWC3_PM_OPS	NULL
				#endif
				#ifdef CONFIG_OF
				#endif
				/* LDV_COMMENT_END_PREP */
				ldv_s_dwc3_driver_platform_driver++;

				}

			}

			break;
			case 3: {

				/** STRUCT: struct type: platform_driver, struct name: dwc3_driver **/
				if(ldv_s_dwc3_driver_platform_driver==1) {

				/* content: static int dwc3_remove(struct platform_device *pdev)*/
				/* LDV_COMMENT_BEGIN_PREP */
				#define DWC3_ALIGN_MASK		(16 - 1)
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "dwc3_driver" */
				ldv_handler_precall();
				dwc3_remove( var_group2);
				/* LDV_COMMENT_BEGIN_PREP */
				#ifdef CONFIG_PM_SLEEP
				#define DWC3_PM_OPS	&(dwc3_dev_pm_ops)
				#else
				#define DWC3_PM_OPS	NULL
				#endif
				#ifdef CONFIG_OF
				#endif
				/* LDV_COMMENT_END_PREP */
				ldv_s_dwc3_driver_platform_driver=0;

				}

			}

			break;
			default: break;

		}

	}

	ldv_module_exit:

	/* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
	ldv_final: ldv_check_final_state();

	/* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
	return;

}
 1244 #endif
 1245 
 1246 /* LDV_COMMENT_END_MAIN */                 1 
    2 extern void ldv_spin_lock(void);
    3 extern void ldv_spin_unlock(void);
    4 extern int ldv_spin_trylock(void);
    5 
    6 #include <linux/kernel.h>
    7 #include <linux/module.h>
    8 #include <linux/slab.h>
    9 
   10 extern void *ldv_undefined_pointer(void);
   11 extern void ldv_check_alloc_flags(gfp_t flags);
   12 extern void ldv_check_alloc_nonatomic(void);
   13 /* Returns an arbitrary page in addition to checking flags */
   14 extern struct page *ldv_check_alloc_flags_and_return_some_page(gfp_t flags);
   15 /**
   16  * ep0.c - DesignWare USB3 DRD Controller Endpoint 0 Handling
   17  *
   18  * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
   19  *
   20  * Authors: Felipe Balbi <balbi@ti.com>,
   21  *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
   22  *
   23  * This program is free software: you can redistribute it and/or modify
   24  * it under the terms of the GNU General Public License version 2  of
   25  * the License as published by the Free Software Foundation.
   26  *
   27  * This program is distributed in the hope that it will be useful,
   28  * but WITHOUT ANY WARRANTY; without even the implied warranty of
   29  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   30  * GNU General Public License for more details.
   31  */
   32 
   33 #include <linux/kernel.h>
   34 #include <linux/slab.h>
   35 #include <linux/spinlock.h>
   36 #include <linux/platform_device.h>
   37 #include <linux/pm_runtime.h>
   38 #include <linux/interrupt.h>
   39 #include <linux/io.h>
   40 #include <linux/list.h>
   41 #include <linux/dma-mapping.h>
   42 
   43 #include <linux/usb/ch9.h>
   44 #include <linux/usb/gadget.h>
   45 #include <linux/usb/composite.h>
   46 
   47 #include "core.h"
   48 #include "gadget.h"
   49 #include "io.h"
   50 
   51 static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep);
   52 static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
   53 		struct dwc3_ep *dep, struct dwc3_request *req);
   54 
   55 static const char *dwc3_ep0_state_string(enum dwc3_ep0_state state)
   56 {
   57 	switch (state) {
   58 	case EP0_UNCONNECTED:
   59 		return "Unconnected";
   60 	case EP0_SETUP_PHASE:
   61 		return "Setup Phase";
   62 	case EP0_DATA_PHASE:
   63 		return "Data Phase";
   64 	case EP0_STATUS_PHASE:
   65 		return "Status Phase";
   66 	default:
   67 		return "UNKNOWN";
   68 	}
   69 }
   70 
   71 static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
   72 		u32 len, u32 type)
   73 {
   74 	struct dwc3_gadget_ep_cmd_params params;
   75 	struct dwc3_trb			*trb;
   76 	struct dwc3_ep			*dep;
   77 
   78 	int				ret;
   79 
   80 	dep = dwc->eps[epnum];
   81 	if (dep->flags & DWC3_EP_BUSY) {
   82 		dev_vdbg(dwc->dev, "%s: still busy\n", dep->name);
   83 		return 0;
   84 	}
   85 
   86 	trb = dwc->ep0_trb;
   87 
   88 	trb->bpl = lower_32_bits(buf_dma);
   89 	trb->bph = upper_32_bits(buf_dma);
   90 	trb->size = len;
   91 	trb->ctrl = type;
   92 
   93 	trb->ctrl |= (DWC3_TRB_CTRL_HWO
   94 			| DWC3_TRB_CTRL_LST
   95 			| DWC3_TRB_CTRL_IOC
   96 			| DWC3_TRB_CTRL_ISP_IMI);
   97 
   98 	memset(¶ms, 0, sizeof(params));
   99 	params.param0 = upper_32_bits(dwc->ep0_trb_addr);
  100 	params.param1 = lower_32_bits(dwc->ep0_trb_addr);
  101 
  102 	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
  103 			DWC3_DEPCMD_STARTTRANSFER, ¶ms);
  104 	if (ret < 0) {
  105 		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
  106 		return ret;
  107 	}
  108 
  109 	dep->flags |= DWC3_EP_BUSY;
  110 	dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
  111 			dep->number);
  112 
  113 	dwc->ep0_next_event = DWC3_EP0_COMPLETE;
  114 
  115 	return 0;
  116 }
  117 
/*
 * __dwc3_gadget_ep0_queue - internal ep0 request queueing
 * @dep: endpoint the gadget queued the request on
 * @req: the request to queue
 *
 * Adds @req to the endpoint's request list and, depending on the current
 * control-transfer phase, may kick the DATA or STATUS phase immediately.
 * NOTE(review): all visible callers hold dwc->lock when calling this —
 * confirm for any new call site.
 *
 * Always returns 0.
 */
static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	struct dwc3		*dwc = dep->dwc;

	req->request.actual	= 0;
	req->request.status	= -EINPROGRESS;
	req->epnum		= dep->number;

	list_add_tail(&req->list, &dep->request_list);

	/*
	 * Gadget driver might not be quick enough to queue a request
	 * before we get a Transfer Not Ready event on this endpoint.
	 *
	 * In that case, we will set DWC3_EP_PENDING_REQUEST. When that
	 * flag is set, it's telling us that as soon as Gadget queues the
	 * required request, we should kick the transfer here because the
	 * IRQ we were waiting for is long gone.
	 */
	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
		unsigned	direction;

		direction = !!(dep->flags & DWC3_EP0_DIR_IN);

		if (dwc->ep0state != EP0_DATA_PHASE) {
			dev_WARN(dwc->dev, "Unexpected pending request\n");
			return 0;
		}

		__dwc3_ep0_do_control_data(dwc, dwc->eps[direction], req);

		dep->flags &= ~(DWC3_EP_PENDING_REQUEST |
				DWC3_EP0_DIR_IN);

		return 0;
	}

	/*
	 * In case gadget driver asked us to delay the STATUS phase,
	 * handle it here.
	 */
	if (dwc->delayed_status) {
		unsigned	direction;

		direction = !dwc->ep0_expect_in;
		dwc->delayed_status = false;
		usb_gadget_set_state(&dwc->gadget, USB_STATE_CONFIGURED);

		if (dwc->ep0state == EP0_STATUS_PHASE)
			__dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);
		else
			dev_dbg(dwc->dev, "too early for delayed status\n");

		return 0;
	}

	/*
	 * Unfortunately we have uncovered a limitation wrt the Data Phase.
	 *
	 * Section 9.4 says we can wait for the XferNotReady(DATA) event to
	 * come before issueing Start Transfer command, but if we do, we will
	 * miss situations where the host starts another SETUP phase instead of
	 * the DATA phase.  Such cases happen at least on TD.7.6 of the Link
	 * Layer Compliance Suite.
	 *
	 * The problem surfaces due to the fact that in case of back-to-back
	 * SETUP packets there will be no XferNotReady(DATA) generated and we
	 * will be stuck waiting for XferNotReady(DATA) forever.
	 *
	 * By looking at tables 9-13 and 9-14 of the Databook, we can see that
	 * it tells us to start Data Phase right away. It also mentions that if
	 * we receive a SETUP phase instead of the DATA phase, core will issue
	 * XferComplete for the DATA phase, before actually initiating it in
	 * the wire, with the TRB's status set to "SETUP_PENDING". Such status
	 * can only be used to print some debugging logs, as the core expects
	 * us to go through to the STATUS phase and start a CONTROL_STATUS TRB,
	 * just so it completes right away, without transferring anything and,
	 * only then, we can go back to the SETUP phase.
	 *
	 * Because of this scenario, SNPS decided to change the programming
	 * model of control transfers and support on-demand transfers only for
	 * the STATUS phase. To fix the issue we have now, we will always wait
	 * for gadget driver to queue the DATA phase's struct usb_request, then
	 * start it right away.
	 *
	 * If we're actually in a 2-stage transfer, we will wait for
	 * XferNotReady(STATUS).
	 */
	if (dwc->three_stage_setup) {
		unsigned        direction;

		direction = dwc->ep0_expect_in;
		dwc->ep0state = EP0_DATA_PHASE;

		__dwc3_ep0_do_control_data(dwc, dwc->eps[direction], req);

		dep->flags &= ~DWC3_EP0_DIR_IN;
	}

	return 0;
}
  220 
  221 int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
  222 		gfp_t gfp_flags)
  223 {
  224 	struct dwc3_request		*req = to_dwc3_request(request);
  225 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
  226 	struct dwc3			*dwc = dep->dwc;
  227 
  228 	unsigned long			flags;
  229 
  230 	int				ret;
  231 
  232 	spin_lock_irqsave(&dwc->lock, flags);
  233 	if (!dep->endpoint.desc) {
  234 		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
  235 				request, dep->name);
  236 		ret = -ESHUTDOWN;
  237 		goto out;
  238 	}
  239 
  240 	/* we share one TRB for ep0/1 */
  241 	if (!list_empty(&dep->request_list)) {
  242 		ret = -EBUSY;
  243 		goto out;
  244 	}
  245 
  246 	dev_vdbg(dwc->dev, "queueing request %p to %s length %d, state '%s'\n",
  247 			request, dep->name, request->length,
  248 			dwc3_ep0_state_string(dwc->ep0state));
  249 
  250 	ret = __dwc3_gadget_ep0_queue(dep, req);
  251 
  252 out:
  253 	spin_unlock_irqrestore(&dwc->lock, flags);
  254 
  255 	return ret;
  256 }
  257 
/*
 * dwc3_ep0_stall_and_restart - protocol-stall ep0 and re-arm for SETUP
 * @dwc: controller context
 *
 * Resets both physical ep0 endpoints, issues a stall on physical ep0,
 * completes any still-queued request with -ECONNRESET and restarts the
 * SETUP phase.
 */
static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
{
	struct dwc3_ep		*dep;

	/* reinitialize physical ep1 */
	dep = dwc->eps[1];
	dep->flags = DWC3_EP_ENABLED;

	/* stall is always issued on EP0 */
	dep = dwc->eps[0];
	__dwc3_gadget_ep_set_halt(dep, 1);
	dep->flags = DWC3_EP_ENABLED;
	dwc->delayed_status = false;

	/* Give back any request still sitting on ep0 with a reset error. */
	if (!list_empty(&dep->request_list)) {
		struct dwc3_request	*req;

		req = next_request(&dep->request_list);
		dwc3_gadget_giveback(dep, req, -ECONNRESET);
	}

	/* Re-arm for the next SETUP packet. */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);
}
  282 
  283 int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value)
  284 {
  285 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
  286 	struct dwc3			*dwc = dep->dwc;
  287 
  288 	dwc3_ep0_stall_and_restart(dwc);
  289 
  290 	return 0;
  291 }
  292 
/*
 * dwc3_ep0_out_start - arm ep0 OUT to receive the next 8-byte SETUP packet
 * @dwc: controller context
 *
 * Failure here means the control pipeline cannot make progress, hence the
 * WARN_ON rather than error propagation.
 */
void dwc3_ep0_out_start(struct dwc3 *dwc)
{
	int				ret;

	ret = dwc3_ep0_start_trans(dwc, 0, dwc->ctrl_req_addr, 8,
			DWC3_TRBCTL_CONTROL_SETUP);
	WARN_ON(ret < 0);
}
  301 
  302 static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
  303 {
  304 	struct dwc3_ep		*dep;
  305 	u32			windex = le16_to_cpu(wIndex_le);
  306 	u32			epnum;
  307 
  308 	epnum = (windex & USB_ENDPOINT_NUMBER_MASK) << 1;
  309 	if ((windex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
  310 		epnum |= 1;
  311 
  312 	dep = dwc->eps[epnum];
  313 	if (dep->flags & DWC3_EP_ENABLED)
  314 		return dep;
  315 
  316 	return NULL;
  317 }
  318 
/*
 * Completion callback for the driver-internal GET_STATUS response
 * request — intentionally a no-op; the buffer is driver-owned.
 */
static void dwc3_ep0_status_cmpl(struct usb_ep *ep, struct usb_request *req)
{
}
  322 /*
  323  * ch 9.4.5
  324  */
/*
 * dwc3_ep0_handle_status - handle GET_STATUS (USB 2.0/3.0 ch 9.4.5)
 * @dwc: controller context
 * @ctrl: the decoded SETUP packet
 *
 * Builds the two-byte status response for the device, interface or
 * endpoint recipient in dwc->setup_buf and queues it as a driver-internal
 * ep0 request.
 *
 * Returns the result of __dwc3_gadget_ep0_queue(), or -EINVAL for an
 * unknown recipient / disabled endpoint.
 */
static int dwc3_ep0_handle_status(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl)
{
	struct dwc3_ep		*dep;
	u32			recip;
	u32			reg;
	u16			usb_status = 0;
	__le16			*response_pkt;

	recip = ctrl->bRequestType & USB_RECIP_MASK;
	switch (recip) {
	case USB_RECIP_DEVICE:
		/*
		 * LTM will be set once we know how to set this in HW.
		 */
		usb_status |= dwc->is_selfpowered << USB_DEVICE_SELF_POWERED;

		/* U1/U2 status bits are SuperSpeed-only. */
		if (dwc->speed == DWC3_DSTS_SUPERSPEED) {
			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
			if (reg & DWC3_DCTL_INITU1ENA)
				usb_status |= 1 << USB_DEV_STAT_U1_ENABLED;
			if (reg & DWC3_DCTL_INITU2ENA)
				usb_status |= 1 << USB_DEV_STAT_U2_ENABLED;
		}

		break;

	case USB_RECIP_INTERFACE:
		/*
		 * Function Remote Wake Capable	D0
		 * Function Remote Wakeup	D1
		 */
		break;

	case USB_RECIP_ENDPOINT:
		dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
		if (!dep)
			return -EINVAL;

		if (dep->flags & DWC3_EP_STALL)
			usb_status = 1 << USB_ENDPOINT_HALT;
		break;
	default:
		return -EINVAL;
	}

	/* Stage the little-endian status word in the shared setup buffer. */
	response_pkt = (__le16 *) dwc->setup_buf;
	*response_pkt = cpu_to_le16(usb_status);

	/* Queue the response on ep0 using the driver-internal request. */
	dep = dwc->eps[0];
	dwc->ep0_usb_req.dep = dep;
	dwc->ep0_usb_req.request.length = sizeof(*response_pkt);
	dwc->ep0_usb_req.request.buf = dwc->setup_buf;
	dwc->ep0_usb_req.request.complete = dwc3_ep0_status_cmpl;

	return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}
  382 
  383 static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
  384 		struct usb_ctrlrequest *ctrl, int set)
  385 {
  386 	struct dwc3_ep		*dep;
  387 	u32			recip;
  388 	u32			wValue;
  389 	u32			wIndex;
  390 	u32			reg;
  391 	int			ret;
  392 	enum usb_device_state	state;
  393 
  394 	wValue = le16_to_cpu(ctrl->wValue);
  395 	wIndex = le16_to_cpu(ctrl->wIndex);
  396 	recip = ctrl->bRequestType & USB_RECIP_MASK;
  397 	state = dwc->gadget.state;
  398 
  399 	switch (recip) {
  400 	case USB_RECIP_DEVICE:
  401 
  402 		switch (wValue) {
  403 		case USB_DEVICE_REMOTE_WAKEUP:
  404 			break;
  405 		/*
  406 		 * 9.4.1 says only only for SS, in AddressState only for
  407 		 * default control pipe
  408 		 */
  409 		case USB_DEVICE_U1_ENABLE:
  410 			if (state != USB_STATE_CONFIGURED)
  411 				return -EINVAL;
  412 			if (dwc->speed != DWC3_DSTS_SUPERSPEED)
  413 				return -EINVAL;
  414 
  415 			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
  416 			if (set)
  417 				reg |= DWC3_DCTL_INITU1ENA;
  418 			else
  419 				reg &= ~DWC3_DCTL_INITU1ENA;
  420 			dwc3_writel(dwc->regs, DWC3_DCTL, reg);
  421 			break;
  422 
  423 		case USB_DEVICE_U2_ENABLE:
  424 			if (state != USB_STATE_CONFIGURED)
  425 				return -EINVAL;
  426 			if (dwc->speed != DWC3_DSTS_SUPERSPEED)
  427 				return -EINVAL;
  428 
  429 			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
  430 			if (set)
  431 				reg |= DWC3_DCTL_INITU2ENA;
  432 			else
  433 				reg &= ~DWC3_DCTL_INITU2ENA;
  434 			dwc3_writel(dwc->regs, DWC3_DCTL, reg);
  435 			break;
  436 
  437 		case USB_DEVICE_LTM_ENABLE:
  438 			return -EINVAL;
  439 			break;
  440 
  441 		case USB_DEVICE_TEST_MODE:
  442 			if ((wIndex & 0xff) != 0)
  443 				return -EINVAL;
  444 			if (!set)
  445 				return -EINVAL;
  446 
  447 			dwc->test_mode_nr = wIndex >> 8;
  448 			dwc->test_mode = true;
  449 			break;
  450 		default:
  451 			return -EINVAL;
  452 		}
  453 		break;
  454 
  455 	case USB_RECIP_INTERFACE:
  456 		switch (wValue) {
  457 		case USB_INTRF_FUNC_SUSPEND:
  458 			if (wIndex & USB_INTRF_FUNC_SUSPEND_LP)
  459 				/* XXX enable Low power suspend */
  460 				;
  461 			if (wIndex & USB_INTRF_FUNC_SUSPEND_RW)
  462 				/* XXX enable remote wakeup */
  463 				;
  464 			break;
  465 		default:
  466 			return -EINVAL;
  467 		}
  468 		break;
  469 
  470 	case USB_RECIP_ENDPOINT:
  471 		switch (wValue) {
  472 		case USB_ENDPOINT_HALT:
  473 			dep = dwc3_wIndex_to_dep(dwc, wIndex);
  474 			if (!dep)
  475 				return -EINVAL;
  476 			if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
  477 				break;
  478 			ret = __dwc3_gadget_ep_set_halt(dep, set);
  479 			if (ret)
  480 				return -EINVAL;
  481 			break;
  482 		default:
  483 			return -EINVAL;
  484 		}
  485 		break;
  486 
  487 	default:
  488 		return -EINVAL;
  489 	}
  490 
  491 	return 0;
  492 }
  493 
  494 static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
  495 {
  496 	enum usb_device_state state = dwc->gadget.state;
  497 	u32 addr;
  498 	u32 reg;
  499 
  500 	addr = le16_to_cpu(ctrl->wValue);
  501 	if (addr > 127) {
  502 		dev_dbg(dwc->dev, "invalid device address %d\n", addr);
  503 		return -EINVAL;
  504 	}
  505 
  506 	if (state == USB_STATE_CONFIGURED) {
  507 		dev_dbg(dwc->dev, "trying to set address when configured\n");
  508 		return -EINVAL;
  509 	}
  510 
  511 	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
  512 	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
  513 	reg |= DWC3_DCFG_DEVADDR(addr);
  514 	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
  515 
  516 	if (addr)
  517 		usb_gadget_set_state(&dwc->gadget, USB_STATE_ADDRESS);
  518 	else
  519 		usb_gadget_set_state(&dwc->gadget, USB_STATE_DEFAULT);
  520 
  521 	return 0;
  522 }
  523 
/*
 * dwc3_ep0_delegate_req - forward a control request to the gadget driver
 * @dwc: controller context
 * @ctrl: the decoded SETUP packet
 *
 * The lock is dropped around ->setup() because the gadget driver may
 * call back into this driver (e.g. to queue a request), which takes
 * dwc->lock again.  Returns whatever the gadget driver returns.
 */
static int dwc3_ep0_delegate_req(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	int ret;

	spin_unlock(&dwc->lock);
	ret = dwc->gadget_driver->setup(&dwc->gadget, ctrl);
	spin_lock(&dwc->lock);
	return ret;
}
  533 
  534 static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
  535 {
  536 	enum usb_device_state state = dwc->gadget.state;
  537 	u32 cfg;
  538 	int ret;
  539 	u32 reg;
  540 
  541 	dwc->start_config_issued = false;
  542 	cfg = le16_to_cpu(ctrl->wValue);
  543 
  544 	switch (state) {
  545 	case USB_STATE_DEFAULT:
  546 		return -EINVAL;
  547 		break;
  548 
  549 	case USB_STATE_ADDRESS:
  550 		ret = dwc3_ep0_delegate_req(dwc, ctrl);
  551 		/* if the cfg matches and the cfg is non zero */
  552 		if (cfg && (!ret || (ret == USB_GADGET_DELAYED_STATUS))) {
  553 
  554 			/*
  555 			 * only change state if set_config has already
  556 			 * been processed. If gadget driver returns
  557 			 * USB_GADGET_DELAYED_STATUS, we will wait
  558 			 * to change the state on the next usb_ep_queue()
  559 			 */
  560 			if (ret == 0)
  561 				usb_gadget_set_state(&dwc->gadget,
  562 						USB_STATE_CONFIGURED);
  563 
  564 			/*
  565 			 * Enable transition to U1/U2 state when
  566 			 * nothing is pending from application.
  567 			 */
  568 			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
  569 			reg |= (DWC3_DCTL_ACCEPTU1ENA | DWC3_DCTL_ACCEPTU2ENA);
  570 			dwc3_writel(dwc->regs, DWC3_DCTL, reg);
  571 
  572 			dwc->resize_fifos = true;
  573 			dev_dbg(dwc->dev, "resize fifos flag SET\n");
  574 		}
  575 		break;
  576 
  577 	case USB_STATE_CONFIGURED:
  578 		ret = dwc3_ep0_delegate_req(dwc, ctrl);
  579 		if (!cfg && !ret)
  580 			usb_gadget_set_state(&dwc->gadget,
  581 					USB_STATE_ADDRESS);
  582 		break;
  583 	default:
  584 		ret = -EINVAL;
  585 	}
  586 	return ret;
  587 }
  588 
/*
 * Completion handler for the SET_SEL data stage: parses the 6-byte
 * System Exit Latency payload from the host and programs the
 * controller's periodic parameter via a DGCMD.
 */
static void dwc3_ep0_set_sel_cmpl(struct usb_ep *ep, struct usb_request *req)
{
	struct dwc3_ep	*dep = to_dwc3_ep(ep);
	struct dwc3	*dwc = dep->dwc;

	u32		param = 0;
	u32		reg;

	/* wire layout of the 6-byte SET_SEL payload (little-endian) */
	struct timing {
		u8	u1sel;
		u8	u1pel;
		u16	u2sel;
		u16	u2pel;
	} __packed timing;

	int		ret;

	memcpy(&timing, req->buf, sizeof(timing));

	dwc->u1sel = timing.u1sel;
	dwc->u1pel = timing.u1pel;
	dwc->u2sel = le16_to_cpu(timing.u2sel);
	dwc->u2pel = le16_to_cpu(timing.u2pel);

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	/* U1 value wins over U2 when both initiations are enabled */
	if (reg & DWC3_DCTL_INITU2ENA)
		param = dwc->u2pel;
	if (reg & DWC3_DCTL_INITU1ENA)
		param = dwc->u1pel;

	/*
	 * According to Synopsys Databook, if parameter is
	 * greater than 125, a value of zero should be
	 * programmed in the register.
	 */
	if (param > 125)
		param = 0;

	/* now that we have the time, issue DGCMD Set Sel */
	ret = dwc3_send_gadget_generic_command(dwc,
			DWC3_DGCMD_SET_PERIODIC_PAR, param);
	WARN_ON(ret < 0);
}
  632 
  633 static int dwc3_ep0_set_sel(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
  634 {
  635 	struct dwc3_ep	*dep;
  636 	enum usb_device_state state = dwc->gadget.state;
  637 	u16		wLength;
  638 	u16		wValue;
  639 
  640 	if (state == USB_STATE_DEFAULT)
  641 		return -EINVAL;
  642 
  643 	wValue = le16_to_cpu(ctrl->wValue);
  644 	wLength = le16_to_cpu(ctrl->wLength);
  645 
  646 	if (wLength != 6) {
  647 		dev_err(dwc->dev, "Set SEL should be 6 bytes, got %d\n",
  648 				wLength);
  649 		return -EINVAL;
  650 	}
  651 
  652 	/*
  653 	 * To handle Set SEL we need to receive 6 bytes from Host. So let's
  654 	 * queue a usb_request for 6 bytes.
  655 	 *
  656 	 * Remember, though, this controller can't handle non-wMaxPacketSize
  657 	 * aligned transfers on the OUT direction, so we queue a request for
  658 	 * wMaxPacketSize instead.
  659 	 */
  660 	dep = dwc->eps[0];
  661 	dwc->ep0_usb_req.dep = dep;
  662 	dwc->ep0_usb_req.request.length = dep->endpoint.maxpacket;
  663 	dwc->ep0_usb_req.request.buf = dwc->setup_buf;
  664 	dwc->ep0_usb_req.request.complete = dwc3_ep0_set_sel_cmpl;
  665 
  666 	return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
  667 }
  668 
  669 static int dwc3_ep0_set_isoch_delay(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
  670 {
  671 	u16		wLength;
  672 	u16		wValue;
  673 	u16		wIndex;
  674 
  675 	wValue = le16_to_cpu(ctrl->wValue);
  676 	wLength = le16_to_cpu(ctrl->wLength);
  677 	wIndex = le16_to_cpu(ctrl->wIndex);
  678 
  679 	if (wIndex || wLength)
  680 		return -EINVAL;
  681 
  682 	/*
  683 	 * REVISIT It's unclear from Databook what to do with this
  684 	 * value. For now, just cache it.
  685 	 */
  686 	dwc->isoch_delay = wValue;
  687 
  688 	return 0;
  689 }
  690 
  691 static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
  692 {
  693 	int ret;
  694 
  695 	switch (ctrl->bRequest) {
  696 	case USB_REQ_GET_STATUS:
  697 		dev_vdbg(dwc->dev, "USB_REQ_GET_STATUS\n");
  698 		ret = dwc3_ep0_handle_status(dwc, ctrl);
  699 		break;
  700 	case USB_REQ_CLEAR_FEATURE:
  701 		dev_vdbg(dwc->dev, "USB_REQ_CLEAR_FEATURE\n");
  702 		ret = dwc3_ep0_handle_feature(dwc, ctrl, 0);
  703 		break;
  704 	case USB_REQ_SET_FEATURE:
  705 		dev_vdbg(dwc->dev, "USB_REQ_SET_FEATURE\n");
  706 		ret = dwc3_ep0_handle_feature(dwc, ctrl, 1);
  707 		break;
  708 	case USB_REQ_SET_ADDRESS:
  709 		dev_vdbg(dwc->dev, "USB_REQ_SET_ADDRESS\n");
  710 		ret = dwc3_ep0_set_address(dwc, ctrl);
  711 		break;
  712 	case USB_REQ_SET_CONFIGURATION:
  713 		dev_vdbg(dwc->dev, "USB_REQ_SET_CONFIGURATION\n");
  714 		ret = dwc3_ep0_set_config(dwc, ctrl);
  715 		break;
  716 	case USB_REQ_SET_SEL:
  717 		dev_vdbg(dwc->dev, "USB_REQ_SET_SEL\n");
  718 		ret = dwc3_ep0_set_sel(dwc, ctrl);
  719 		break;
  720 	case USB_REQ_SET_ISOCH_DELAY:
  721 		dev_vdbg(dwc->dev, "USB_REQ_SET_ISOCH_DELAY\n");
  722 		ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
  723 		break;
  724 	default:
  725 		dev_vdbg(dwc->dev, "Forwarding to gadget driver\n");
  726 		ret = dwc3_ep0_delegate_req(dwc, ctrl);
  727 		break;
  728 	}
  729 
  730 	return ret;
  731 }
  732 
/*
 * Inspect a freshly-received SETUP packet: decide between two- and
 * three-stage control transfers, route standard requests internally
 * and everything else to the gadget driver. On any failure ep0 is
 * stalled and the setup phase restarted.
 */
static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct usb_ctrlrequest *ctrl = dwc->ctrl_req;
	int ret = -EINVAL;
	u32 len;

	/* no function driver bound: nobody can service the request */
	if (!dwc->gadget_driver)
		goto out;

	len = le16_to_cpu(ctrl->wLength);
	if (!len) {
		/* no data stage: the next ep0 event is the status phase */
		dwc->three_stage_setup = false;
		dwc->ep0_expect_in = false;
		dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
	} else {
		dwc->three_stage_setup = true;
		dwc->ep0_expect_in = !!(ctrl->bRequestType & USB_DIR_IN);
		dwc->ep0_next_event = DWC3_EP0_NRDY_DATA;
	}

	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		ret = dwc3_ep0_std_request(dwc, ctrl);
	else
		ret = dwc3_ep0_delegate_req(dwc, ctrl);

	/* gadget driver will finish the status phase from usb_ep_queue() */
	if (ret == USB_GADGET_DELAYED_STATUS)
		dwc->delayed_status = true;

out:
	if (ret < 0)
		dwc3_ep0_stall_and_restart(dwc);
}
  766 
  767 static void dwc3_ep0_complete_data(struct dwc3 *dwc,
  768 		const struct dwc3_event_depevt *event)
  769 {
  770 	struct dwc3_request	*r = NULL;
  771 	struct usb_request	*ur;
  772 	struct dwc3_trb		*trb;
  773 	struct dwc3_ep		*ep0;
  774 	u32			transferred;
  775 	u32			status;
  776 	u32			length;
  777 	u8			epnum;
  778 
  779 	epnum = event->endpoint_number;
  780 	ep0 = dwc->eps[0];
  781 
  782 	dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
  783 
  784 	r = next_request(&ep0->request_list);
  785 	ur = &r->request;
  786 
  787 	trb = dwc->ep0_trb;
  788 
  789 	status = DWC3_TRB_SIZE_TRBSTS(trb->size);
  790 	if (status == DWC3_TRBSTS_SETUP_PENDING) {
  791 		dev_dbg(dwc->dev, "Setup Pending received\n");
  792 
  793 		if (r)
  794 			dwc3_gadget_giveback(ep0, r, -ECONNRESET);
  795 
  796 		return;
  797 	}
  798 
  799 	length = trb->size & DWC3_TRB_SIZE_MASK;
  800 
  801 	if (dwc->ep0_bounced) {
  802 		unsigned transfer_size = ur->length;
  803 		unsigned maxp = ep0->endpoint.maxpacket;
  804 
  805 		transfer_size += (maxp - (transfer_size % maxp));
  806 		transferred = min_t(u32, ur->length,
  807 				transfer_size - length);
  808 		memcpy(ur->buf, dwc->ep0_bounce, transferred);
  809 	} else {
  810 		transferred = ur->length - length;
  811 	}
  812 
  813 	ur->actual += transferred;
  814 
  815 	if ((epnum & 1) && ur->actual < ur->length) {
  816 		/* for some reason we did not get everything out */
  817 
  818 		dwc3_ep0_stall_and_restart(dwc);
  819 	} else {
  820 		/*
  821 		 * handle the case where we have to send a zero packet. This
  822 		 * seems to be case when req.length > maxpacket. Could it be?
  823 		 */
  824 		if (r)
  825 			dwc3_gadget_giveback(ep0, r, 0);
  826 	}
  827 }
  828 
/*
 * Handle completion of the ep0 status phase: give back any pending
 * request, enter a requested USB2 test mode, then re-arm ep0 for the
 * next SETUP packet.
 */
static void dwc3_ep0_complete_status(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_request	*r;
	struct dwc3_ep		*dep;
	struct dwc3_trb		*trb;
	u32			status;

	dep = dwc->eps[0];
	trb = dwc->ep0_trb;

	if (!list_empty(&dep->request_list)) {
		r = next_request(&dep->request_list);

		dwc3_gadget_giveback(dep, r, 0);
	}

	/* SET_FEATURE(TEST_MODE) takes effect only after the status phase */
	if (dwc->test_mode) {
		int ret;

		ret = dwc3_gadget_set_test_mode(dwc, dwc->test_mode_nr);
		if (ret < 0) {
			dev_dbg(dwc->dev, "Invalid Test #%d\n",
					dwc->test_mode_nr);
			dwc3_ep0_stall_and_restart(dwc);
			return;
		}
	}

	status = DWC3_TRB_SIZE_TRBSTS(trb->size);
	if (status == DWC3_TRBSTS_SETUP_PENDING)
		dev_dbg(dwc->dev, "Setup Pending received\n");

	/* restart the control-transfer state machine for the next SETUP */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);
}
  865 
  866 static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
  867 			const struct dwc3_event_depevt *event)
  868 {
  869 	struct dwc3_ep		*dep = dwc->eps[event->endpoint_number];
  870 
  871 	dep->flags &= ~DWC3_EP_BUSY;
  872 	dep->resource_index = 0;
  873 	dwc->setup_packet_pending = false;
  874 
  875 	switch (dwc->ep0state) {
  876 	case EP0_SETUP_PHASE:
  877 		dev_vdbg(dwc->dev, "Inspecting Setup Bytes\n");
  878 		dwc3_ep0_inspect_setup(dwc, event);
  879 		break;
  880 
  881 	case EP0_DATA_PHASE:
  882 		dev_vdbg(dwc->dev, "Data Phase\n");
  883 		dwc3_ep0_complete_data(dwc, event);
  884 		break;
  885 
  886 	case EP0_STATUS_PHASE:
  887 		dev_vdbg(dwc->dev, "Status Phase\n");
  888 		dwc3_ep0_complete_status(dwc, event);
  889 		break;
  890 	default:
  891 		WARN(true, "UNKNOWN ep0state %d\n", dwc->ep0state);
  892 	}
  893 }
  894 
/*
 * Start the ep0 data phase for @req. Three cases:
 *  - zero-length: a dummy TRB using the ctrl_req buffer;
 *  - non-maxpacket-aligned OUT: bounce buffer, since the controller
 *    cannot do unaligned OUT transfers;
 *  - everything else: the request's own DMA-mapped buffer.
 */
static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
		struct dwc3_ep *dep, struct dwc3_request *req)
{
	int			ret;

	/* physical ep 0 is OUT, ep 1 is IN */
	req->direction = !!dep->number;

	if (req->request.length == 0) {
		ret = dwc3_ep0_start_trans(dwc, dep->number,
				dwc->ctrl_req_addr, 0,
				DWC3_TRBCTL_CONTROL_DATA);
	} else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket)
			&& (dep->number == 0)) {
		u32	transfer_size;
		u32	maxpacket;

		ret = usb_gadget_map_request(&dwc->gadget, &req->request,
				dep->number);
		if (ret) {
			dev_dbg(dwc->dev, "failed to map request\n");
			return;
		}

		WARN_ON(req->request.length > DWC3_EP0_BOUNCE_SIZE);

		maxpacket = dep->endpoint.maxpacket;
		/* pad the transfer up to a wMaxPacketSize boundary */
		transfer_size = roundup(req->request.length, maxpacket);

		dwc->ep0_bounced = true;

		/*
		 * REVISIT in case request length is bigger than
		 * DWC3_EP0_BOUNCE_SIZE we will need two chained
		 * TRBs to handle the transfer.
		 */
		ret = dwc3_ep0_start_trans(dwc, dep->number,
				dwc->ep0_bounce_addr, transfer_size,
				DWC3_TRBCTL_CONTROL_DATA);
	} else {
		ret = usb_gadget_map_request(&dwc->gadget, &req->request,
				dep->number);
		if (ret) {
			dev_dbg(dwc->dev, "failed to map request\n");
			return;
		}

		ret = dwc3_ep0_start_trans(dwc, dep->number, req->request.dma,
				req->request.length, DWC3_TRBCTL_CONTROL_DATA);
	}

	WARN_ON(ret < 0);
}
  947 
  948 static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
  949 {
  950 	struct dwc3		*dwc = dep->dwc;
  951 	u32			type;
  952 
  953 	type = dwc->three_stage_setup ? DWC3_TRBCTL_CONTROL_STATUS3
  954 		: DWC3_TRBCTL_CONTROL_STATUS2;
  955 
  956 	return dwc3_ep0_start_trans(dwc, dep->number,
  957 			dwc->ctrl_req_addr, 0, type);
  958 }
  959 
  960 static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep)
  961 {
  962 	if (dwc->resize_fifos) {
  963 		dev_dbg(dwc->dev, "starting to resize fifos\n");
  964 		dwc3_gadget_resize_tx_fifos(dwc);
  965 		dwc->resize_fifos = 0;
  966 	}
  967 
  968 	WARN_ON(dwc3_ep0_start_control_status(dep));
  969 }
  970 
  971 static void dwc3_ep0_do_control_status(struct dwc3 *dwc,
  972 		const struct dwc3_event_depevt *event)
  973 {
  974 	struct dwc3_ep		*dep = dwc->eps[event->endpoint_number];
  975 
  976 	__dwc3_ep0_do_control_status(dwc, dep);
  977 }
  978 
  979 static void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
  980 {
  981 	struct dwc3_gadget_ep_cmd_params params;
  982 	u32			cmd;
  983 	int			ret;
  984 
  985 	if (!dep->resource_index)
  986 		return;
  987 
  988 	cmd = DWC3_DEPCMD_ENDTRANSFER;
  989 	cmd |= DWC3_DEPCMD_CMDIOC;
  990 	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
  991 	memset(¶ms, 0, sizeof(params));
  992 	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms);
  993 	WARN_ON_ONCE(ret);
  994 	dep->resource_index = 0;
  995 }
  996 
/*
 * XferNotReady dispatcher for ep0: validates the direction of a data
 * phase already in flight and decides when to kick the status phase.
 */
static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	dwc->setup_packet_pending = true;

	switch (event->status) {
	case DEPEVT_STATUS_CONTROL_DATA:
		dev_vdbg(dwc->dev, "Control Data\n");

		/*
		 * We already have a DATA transfer in the controller's cache,
		 * if we receive a XferNotReady(DATA) we will ignore it, unless
		 * it's for the wrong direction.
		 *
		 * In that case, we must issue END_TRANSFER command to the Data
		 * Phase we already have started and issue SetStall on the
		 * control endpoint.
		 */
		/*
		 * NOTE(review): ep0_expect_in (bool) is compared with the
		 * physical endpoint number; this works because ep0 uses
		 * endpoints 0 (OUT) and 1 (IN) only.
		 */
		if (dwc->ep0_expect_in != event->endpoint_number) {
			struct dwc3_ep	*dep = dwc->eps[dwc->ep0_expect_in];

			dev_vdbg(dwc->dev, "Wrong direction for Data phase\n");
			dwc3_ep0_end_control_data(dwc, dep);
			dwc3_ep0_stall_and_restart(dwc);
			return;
		}

		break;

	case DEPEVT_STATUS_CONTROL_STATUS:
		/* ignore a status event that arrives before we expect it */
		if (dwc->ep0_next_event != DWC3_EP0_NRDY_STATUS)
			return;

		dev_vdbg(dwc->dev, "Control Status\n");

		dwc->ep0state = EP0_STATUS_PHASE;

		/*
		 * Delayed status: the gadget driver will complete the
		 * status phase later from usb_ep_queue().
		 */
		if (dwc->delayed_status) {
			WARN_ON_ONCE(event->endpoint_number != 1);
			dev_vdbg(dwc->dev, "Mass Storage delayed status\n");
			return;
		}

		dwc3_ep0_do_control_status(dwc, event);
	}
}
 1043 
 1044 void dwc3_ep0_interrupt(struct dwc3 *dwc,
 1045 		const struct dwc3_event_depevt *event)
 1046 {
 1047 	u8			epnum = event->endpoint_number;
 1048 
 1049 	dev_dbg(dwc->dev, "%s while ep%d%s in state '%s'\n",
 1050 			dwc3_ep_event_string(event->endpoint_event),
 1051 			epnum >> 1, (epnum & 1) ? "in" : "out",
 1052 			dwc3_ep0_state_string(dwc->ep0state));
 1053 
 1054 	switch (event->endpoint_event) {
 1055 	case DWC3_DEPEVT_XFERCOMPLETE:
 1056 		dwc3_ep0_xfer_complete(dwc, event);
 1057 		break;
 1058 
 1059 	case DWC3_DEPEVT_XFERNOTREADY:
 1060 		dwc3_ep0_xfernotready(dwc, event);
 1061 		break;
 1062 
 1063 	case DWC3_DEPEVT_XFERINPROGRESS:
 1064 	case DWC3_DEPEVT_RXTXFIFOEVT:
 1065 	case DWC3_DEPEVT_STREAMEVT:
 1066 	case DWC3_DEPEVT_EPCMDCMPLT:
 1067 		break;
 1068 	}
 1069 }                 1 
    2 extern void ldv_spin_lock(void);
    3 extern void ldv_spin_unlock(void);
    4 extern int ldv_spin_trylock(void);
    5 
    6 #include <linux/kernel.h>
    7 #include <linux/module.h>
    8 #include <linux/slab.h>
    9 
   10 extern void *ldv_undefined_pointer(void);
   11 extern void ldv_check_alloc_flags(gfp_t flags);
   12 extern void ldv_check_alloc_nonatomic(void);
   13 /* Returns an arbitrary page in addition to checking flags */
   14 extern struct page *ldv_check_alloc_flags_and_return_some_page(gfp_t flags);
   15 
   16 /**
   17  * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
   18  *
   19  * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
   20  *
   21  * Authors: Felipe Balbi <balbi@ti.com>,
   22  *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
   23  *
   24  * This program is free software: you can redistribute it and/or modify
   25  * it under the terms of the GNU General Public License version 2  of
   26  * the License as published by the Free Software Foundation.
   27  *
   28  * This program is distributed in the hope that it will be useful,
   29  * but WITHOUT ANY WARRANTY; without even the implied warranty of
   30  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   31  * GNU General Public License for more details.
   32  */
   33 
   34 #include <linux/kernel.h>
   35 #include <linux/delay.h>
   36 #include <linux/slab.h>
   37 #include <linux/spinlock.h>
   38 #include <linux/platform_device.h>
   39 #include <linux/pm_runtime.h>
   40 #include <linux/interrupt.h>
   41 #include <linux/io.h>
   42 #include <linux/list.h>
   43 #include <linux/dma-mapping.h>
   44 
   45 #include <linux/usb/ch9.h>
   46 #include <linux/usb/gadget.h>
   47 
   48 #include "core.h"
   49 #include "gadget.h"
   50 #include "io.h"
   51 
   52 /**
   53  * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
   54  * @dwc: pointer to our context structure
   55  * @mode: the mode to set (J, K SE0 NAK, Force Enable)
   56  *
   57  * Caller should take care of locking. This function will
   58  * return 0 on success or -EINVAL if wrong Test Selector
   59  * is passed
   60  */
   61 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
   62 {
   63 	u32		reg;
   64 
   65 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
   66 	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
   67 
   68 	switch (mode) {
   69 	case TEST_J:
   70 	case TEST_K:
   71 	case TEST_SE0_NAK:
   72 	case TEST_PACKET:
   73 	case TEST_FORCE_EN:
   74 		reg |= mode << 1;
   75 		break;
   76 	default:
   77 		return -EINVAL;
   78 	}
   79 
   80 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
   81 
   82 	return 0;
   83 }
   84 
   85 /**
   86  * dwc3_gadget_get_link_state - Gets current state of USB Link
   87  * @dwc: pointer to our context structure
   88  *
   89  * Caller should take care of locking. This function will
   90  * return the link state on success (>= 0) or -ETIMEDOUT.
   91  */
   92 int dwc3_gadget_get_link_state(struct dwc3 *dwc)
   93 {
   94 	u32		reg;
   95 
   96 	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
   97 
   98 	return DWC3_DSTS_USBLNKST(reg);
   99 }
  100 
  101 /**
  102  * dwc3_gadget_set_link_state - Sets USB Link to a particular State
  103  * @dwc: pointer to our context structure
  104  * @state: the state to put link into
  105  *
  106  * Caller should take care of locking. This function will
  107  * return 0 on success or -ETIMEDOUT.
  108  */
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
{
	int		retries = 10000;
	u32		reg;

	/*
	 * Wait until device controller is ready. Only applies to 1.94a and
	 * later RTL.
	 */
	if (dwc->revision >= DWC3_REVISION_194A) {
		while (--retries) {
			reg = dwc3_readl(dwc->regs, DWC3_DSTS);
			if (reg & DWC3_DSTS_DCNRD)
				udelay(5);
			else
				break;
		}

		if (retries <= 0)
			return -ETIMEDOUT;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	/* clear any previously requested state change */
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;

	/* set requested state */
	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/*
	 * The following code is racy when called from dwc3_gadget_wakeup,
	 * and is not needed, at least on newer versions
	 */
	if (dwc->revision >= DWC3_REVISION_194A)
		return 0;

	/* wait for a change in DSTS */
	retries = 10000;
	while (--retries) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		if (DWC3_DSTS_USBLNKST(reg) == state)
			return 0;

		/* busy-wait; caller may hold the controller lock */
		udelay(5);
	}

	dev_vdbg(dwc->dev, "link state change request timed out\n");

	return -ETIMEDOUT;
}
  160 
  161 /**
  162  * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
  163  * @dwc: pointer to our context structure
  164  *
  165  * This function will a best effort FIFO allocation in order
  166  * to improve FIFO usage and throughput, while still allowing
  167  * us to enable as many endpoints as possible.
  168  *
  169  * Keep in mind that this operation will be highly dependent
  170  * on the configured size for RAM1 - which contains TxFifo -,
  171  * the amount of endpoints enabled on coreConsultant tool, and
  172  * the width of the Master Bus.
  173  *
  174  * In the ideal world, we would always be able to satisfy the
  175  * following equation:
  176  *
  177  * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
  178  * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
  179  *
  180  * Unfortunately, due to many variables that's not always the case.
  181  */
int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
{
	int		last_fifo_depth = 0;
	int		ram1_depth;
	int		fifo_size;
	int		mdwidth;
	int		num;

	/* resizing is an opt-in quirk */
	if (!dwc->needs_fifo_resize)
		return 0;

	/* NOTE(review): ram1_depth is computed but not used below */
	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);

	/* MDWIDTH is represented in bits, we need it in bytes */
	mdwidth >>= 3;

	/*
	 * FIXME For now we will only allocate 1 wMaxPacketSize space
	 * for each enabled endpoint, later patches will come to
	 * improve this algorithm so that we better use the internal
	 * FIFO space
	 */
	for (num = 0; num < dwc->num_in_eps; num++) {
		/* bit0 indicates direction; 1 means IN ep */
		struct dwc3_ep	*dep = dwc->eps[(num << 1) | 1];
		int		mult = 1;
		int		tmp;

		if (!(dep->flags & DWC3_EP_ENABLED))
			continue;

		if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
				|| usb_endpoint_xfer_isoc(dep->endpoint.desc))
			mult = 3;

		/*
		 * REVISIT: the following assumes we will always have enough
		 * space available on the FIFO RAM for all possible use cases.
		 * Make sure that's true somehow and change FIFO allocation
		 * accordingly.
		 *
		 * If we have Bulk or Isochronous endpoints, we want
		 * them to be able to be very, very fast. So we're giving
		 * those endpoints a fifo_size which is enough for 3 full
		 * packets
		 */
		tmp = mult * (dep->endpoint.maxpacket + mdwidth);
		tmp += mdwidth;

		/* FIFO depth is expressed in mdwidth-sized words */
		fifo_size = DIV_ROUND_UP(tmp, mdwidth);

		/* upper half of GTXFIFOSIZ holds the FIFO start address */
		fifo_size |= (last_fifo_depth << 16);

		dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
				dep->name, last_fifo_depth, fifo_size & 0xffff);

		dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size);

		last_fifo_depth += (fifo_size & 0xffff);
	}

	return 0;
}
  246 
/*
 * Complete @req with @status: retire its TRB slot(s), unmap its DMA
 * buffer (except for the ep0 bounce case) and invoke the request's
 * completion callback with the controller lock dropped.
 */
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3			*dwc = dep->dwc;
	int				i;

	if (req->queued) {
		/* advance busy_slot once per mapped sg (at least once) */
		i = 0;
		do {
			dep->busy_slot++;
			/*
			 * Skip LINK TRB. We can't use req->trb and check for
			 * DWC3_TRBCTL_LINK_TRB because it points the TRB we
			 * just completed (not the LINK TRB).
			 */
			if (((dep->busy_slot & DWC3_TRB_MASK) ==
				DWC3_TRB_NUM- 1) &&
				usb_endpoint_xfer_isoc(dep->endpoint.desc))
				dep->busy_slot++;
		} while(++i < req->request.num_mapped_sgs);
		req->queued = false;
	}
	list_del(&req->list);
	req->trb = NULL;

	/* do not overwrite a status already set by the caller */
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	/* ep0 bounce buffer was never DMA-mapped, so skip the unmap */
	if (dwc->ep0_bounced && dep->number == 0)
		dwc->ep0_bounced = false;
	else
		usb_gadget_unmap_request(&dwc->gadget, &req->request,
				req->direction);

	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
			req, dep->name, req->request.actual,
			req->request.length, status);

	/* completion callback may re-enter the UDC; drop the lock */
	spin_unlock(&dwc->lock);
	req->request.complete(&dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}
  289 
  290 static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
  291 {
  292 	switch (cmd) {
  293 	case DWC3_DEPCMD_DEPSTARTCFG:
  294 		return "Start New Configuration";
  295 	case DWC3_DEPCMD_ENDTRANSFER:
  296 		return "End Transfer";
  297 	case DWC3_DEPCMD_UPDATETRANSFER:
  298 		return "Update Transfer";
  299 	case DWC3_DEPCMD_STARTTRANSFER:
  300 		return "Start Transfer";
  301 	case DWC3_DEPCMD_CLEARSTALL:
  302 		return "Clear Stall";
  303 	case DWC3_DEPCMD_SETSTALL:
  304 		return "Set Stall";
  305 	case DWC3_DEPCMD_GETEPSTATE:
  306 		return "Get Endpoint State";
  307 	case DWC3_DEPCMD_SETTRANSFRESOURCE:
  308 		return "Set Endpoint Transfer Resource";
  309 	case DWC3_DEPCMD_SETEPCONFIG:
  310 		return "Set Endpoint Configuration";
  311 	default:
  312 		return "UNKNOWN command";
  313 	}
  314 }
  315 
  316 static const char *dwc3_gadget_generic_cmd_string(u8 cmd)
  317 {
  318 	switch (cmd) {
  319 	case DWC3_DGCMD_SET_LMP:
  320 		return "Set LMP";
  321 	case DWC3_DGCMD_SET_PERIODIC_PAR:
  322 		return "Set Periodic Parameters";
  323 	case DWC3_DGCMD_XMIT_FUNCTION:
  324 		return "Transmit Function Wake Device Notification";
  325 	case DWC3_DGCMD_SET_SCRATCHPAD_ADDR_LO:
  326 		return "Set Scratchpad Buffer Array Address Lo";
  327 	case DWC3_DGCMD_SET_SCRATCHPAD_ADDR_HI:
  328 		return "Set Scratchpad Buffer Array Address Hi";
  329 	case DWC3_DGCMD_SELECTED_FIFO_FLUSH:
  330 		return "Selected FIFO Flush";
  331 	case DWC3_DGCMD_ALL_FIFO_FLUSH:
  332 		return "All FIFO Flush";
  333 	case DWC3_DGCMD_SET_ENDPOINT_NRDY:
  334 		return "Set Endpoint NRDY";
  335 	case DWC3_DGCMD_RUN_SOC_BUS_LOOPBACK:
  336 		return "Run SoC Bus Loopback Test";
  337 	default:
  338 		return "UNKNOWN";
  339 	}
  340 }
  341 
  342 static const char *dwc3_gadget_link_string(enum dwc3_link_state link_state)
  343 {
  344 	switch (link_state) {
  345 	case DWC3_LINK_STATE_U0:
  346 		return "U0";
  347 	case DWC3_LINK_STATE_U1:
  348 		return "U1";
  349 	case DWC3_LINK_STATE_U2:
  350 		return "U2";
  351 	case DWC3_LINK_STATE_U3:
  352 		return "U3";
  353 	case DWC3_LINK_STATE_SS_DIS:
  354 		return "SS.Disabled";
  355 	case DWC3_LINK_STATE_RX_DET:
  356 		return "RX.Detect";
  357 	case DWC3_LINK_STATE_SS_INACT:
  358 		return "SS.Inactive";
  359 	case DWC3_LINK_STATE_POLL:
  360 		return "Polling";
  361 	case DWC3_LINK_STATE_RECOV:
  362 		return "Recovery";
  363 	case DWC3_LINK_STATE_HRESET:
  364 		return "Hot Reset";
  365 	case DWC3_LINK_STATE_CMPLY:
  366 		return "Compliance";
  367 	case DWC3_LINK_STATE_LPBK:
  368 		return "Loopback";
  369 	case DWC3_LINK_STATE_RESET:
  370 		return "Reset";
  371 	case DWC3_LINK_STATE_RESUME:
  372 		return "Resume";
  373 	default:
  374 		return "UNKNOWN link state\n";
  375 	}
  376 }
  377 
  378 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param)
  379 {
  380 	u32		timeout = 500;
  381 	u32		reg;
  382 
  383 	dev_vdbg(dwc->dev, "generic cmd '%s' [%d] param %08x\n",
  384 			dwc3_gadget_generic_cmd_string(cmd), cmd, param);
  385 
  386 	dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
  387 	dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
  388 
  389 	do {
  390 		reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
  391 		if (!(reg & DWC3_DGCMD_CMDACT)) {
  392 			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
  393 					DWC3_DGCMD_STATUS(reg));
  394 			return 0;
  395 		}
  396 
  397 		/*
  398 		 * We can't sleep here, because it's also called from
  399 		 * interrupt context.
  400 		 */
  401 		timeout--;
  402 		if (!timeout)
  403 			return -ETIMEDOUT;
  404 		udelay(1);
  405 	} while (1);
  406 }
  407 
/*
 * Issue an endpoint command (DEPCMD) with up to three parameters and
 * busy-wait for completion. Returns 0 on completion, -ETIMEDOUT after
 * 500 polls.
 */
int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
{
	struct dwc3_ep		*dep = dwc->eps[ep];
	u32			timeout = 500;
	u32			reg;

	dev_vdbg(dwc->dev, "%s: cmd '%s' [%d] params %08x %08x %08x\n",
			dep->name,
			dwc3_gadget_ep_cmd_string(cmd), cmd, params->param0,
			params->param1, params->param2);

	/* parameters must be latched before CMDACT is set */
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);

	dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
	do {
		reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
		if (!(reg & DWC3_DEPCMD_CMDACT)) {
			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
					DWC3_DEPCMD_STATUS(reg));
			return 0;
		}

		/*
		 * We can't sleep here, because it is also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;

		udelay(1);
	} while (1);
}
  444 
  445 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
  446 		struct dwc3_trb *trb)
  447 {
  448 	u32		offset = (char *) trb - (char *) dep->trb_pool;
  449 
  450 	return dep->trb_pool_dma + offset;
  451 }
  452 
  453 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
  454 {
  455 	struct dwc3		*dwc = dep->dwc;
  456 
  457 	if (dep->trb_pool)
  458 		return 0;
  459 
  460 	if (dep->number == 0 || dep->number == 1)
  461 		return 0;
  462 
  463 	dep->trb_pool = dma_alloc_coherent(dwc->dev,
  464 			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
  465 			&dep->trb_pool_dma, GFP_KERNEL);
  466 	if (!dep->trb_pool) {
  467 		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
  468 				dep->name);
  469 		return -ENOMEM;
  470 	}
  471 
  472 	return 0;
  473 }
  474 
  475 static void dwc3_free_trb_pool(struct dwc3_ep *dep)
  476 {
  477 	struct dwc3		*dwc = dep->dwc;
  478 
  479 	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
  480 			dep->trb_pool, dep->trb_pool_dma);
  481 
  482 	dep->trb_pool = NULL;
  483 	dep->trb_pool_dma = 0;
  484 }
  485 
  486 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
  487 {
  488 	struct dwc3_gadget_ep_cmd_params params;
  489 	u32			cmd;
  490 
  491 	memset(¶ms, 0x00, sizeof(params));
  492 
  493 	if (dep->number != 1) {
  494 		cmd = DWC3_DEPCMD_DEPSTARTCFG;
  495 		/* XferRscIdx == 0 for ep0 and 2 for the remaining */
  496 		if (dep->number > 1) {
  497 			if (dwc->start_config_issued)
  498 				return 0;
  499 			dwc->start_config_issued = true;
  500 			cmd |= DWC3_DEPCMD_PARAM(2);
  501 		}
  502 
  503 		return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, ¶ms);
  504 	}
  505 
  506 	return 0;
  507 }
  508 
  509 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
  510 		const struct usb_endpoint_descriptor *desc,
  511 		const struct usb_ss_ep_comp_descriptor *comp_desc,
  512 		bool ignore, bool restore)
  513 {
  514 	struct dwc3_gadget_ep_cmd_params params;
  515 
  516 	memset(¶ms, 0x00, sizeof(params));
  517 
  518 	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
  519 		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
  520 
  521 	/* Burst size is only needed in SuperSpeed mode */
  522 	if (dwc->gadget.speed == USB_SPEED_SUPER) {
  523 		u32 burst = dep->endpoint.maxburst - 1;
  524 
  525 		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
  526 	}
  527 
  528 	if (ignore)
  529 		params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
  530 
  531 	if (restore) {
  532 		params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
  533 		params.param2 |= dep->saved_state;
  534 	}
  535 
  536 	params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
  537 		| DWC3_DEPCFG_XFER_NOT_READY_EN;
  538 
  539 	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
  540 		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
  541 			| DWC3_DEPCFG_STREAM_EVENT_EN;
  542 		dep->stream_capable = true;
  543 	}
  544 
  545 	if (usb_endpoint_xfer_isoc(desc))
  546 		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
  547 
  548 	/*
  549 	 * We are doing 1:1 mapping for endpoints, meaning
  550 	 * Physical Endpoints 2 maps to Logical Endpoint 2 and
  551 	 * so on. We consider the direction bit as part of the physical
  552 	 * endpoint number. So USB endpoint 0x81 is 0x03.
  553 	 */
  554 	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
  555 
  556 	/*
  557 	 * We must use the lower 16 TX FIFOs even though
  558 	 * HW might have more
  559 	 */
  560 	if (dep->direction)
  561 		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
  562 
  563 	if (desc->bInterval) {
  564 		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
  565 		dep->interval = 1 << (desc->bInterval - 1);
  566 	}
  567 
  568 	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
  569 			DWC3_DEPCMD_SETEPCONFIG, ¶ms);
  570 }
  571 
  572 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
  573 {
  574 	struct dwc3_gadget_ep_cmd_params params;
  575 
  576 	memset(¶ms, 0x00, sizeof(params));
  577 
  578 	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
  579 
  580 	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
  581 			DWC3_DEPCMD_SETTRANSFRESOURCE, ¶ms);
  582 }
  583 
  584 /**
  585  * __dwc3_gadget_ep_enable - Initializes a HW endpoint
  586  * @dep: endpoint to be initialized
  587  * @desc: USB Endpoint Descriptor
  588  *
  589  * Caller should take care of locking
  590  */
  591 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
  592 		const struct usb_endpoint_descriptor *desc,
  593 		const struct usb_ss_ep_comp_descriptor *comp_desc,
  594 		bool ignore, bool restore)
  595 {
  596 	struct dwc3		*dwc = dep->dwc;
  597 	u32			reg;
  598 	int			ret;
  599 
  600 	dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
  601 
  602 	if (!(dep->flags & DWC3_EP_ENABLED)) {
  603 		ret = dwc3_gadget_start_config(dwc, dep);
  604 		if (ret)
  605 			return ret;
  606 	}
  607 
  608 	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
  609 			restore);
  610 	if (ret)
  611 		return ret;
  612 
  613 	if (!(dep->flags & DWC3_EP_ENABLED)) {
  614 		struct dwc3_trb	*trb_st_hw;
  615 		struct dwc3_trb	*trb_link;
  616 
  617 		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
  618 		if (ret)
  619 			return ret;
  620 
  621 		dep->endpoint.desc = desc;
  622 		dep->comp_desc = comp_desc;
  623 		dep->type = usb_endpoint_type(desc);
  624 		dep->flags |= DWC3_EP_ENABLED;
  625 
  626 		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
  627 		reg |= DWC3_DALEPENA_EP(dep->number);
  628 		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
  629 
  630 		if (!usb_endpoint_xfer_isoc(desc))
  631 			return 0;
  632 
  633 		memset(&trb_link, 0, sizeof(trb_link));
  634 
  635 		/* Link TRB for ISOC. The HWO bit is never reset */
  636 		trb_st_hw = &dep->trb_pool[0];
  637 
  638 		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
  639 
  640 		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
  641 		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
  642 		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
  643 		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
  644 	}
  645 
  646 	return 0;
  647 }
  648 
  649 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
/*
 * dwc3_remove_requests - fail every request pending on @dep
 * @dwc: pointer to our controller context structure
 * @dep: endpoint being drained
 *
 * Stops any transfer the hardware is currently working on, then gives all
 * requests (started or not) back to the gadget driver with -ESHUTDOWN.
 */
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_request		*req;

	/* requests already handed to the hardware */
	if (!list_empty(&dep->req_queued)) {
		dwc3_stop_active_transfer(dwc, dep->number, true);

		/* - giveback all requests to gadget driver */
		while (!list_empty(&dep->req_queued)) {
			req = next_request(&dep->req_queued);

			dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
		}
	}

	/* requests queued by the gadget driver but never started */
	while (!list_empty(&dep->request_list)) {
		req = next_request(&dep->request_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}
}
  671 
/**
 * __dwc3_gadget_ep_disable - Disables a HW endpoint
 * @dep: the endpoint to disable
 *
 * This function also removes requests which are currently processed by the
 * hardware and those which are not yet scheduled.
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;
	u32			reg;

	/* give back every pending/in-flight request with -ESHUTDOWN */
	dwc3_remove_requests(dwc, dep);

	/* make sure HW endpoint isn't stalled */
	if (dep->flags & DWC3_EP_STALL)
		__dwc3_gadget_ep_set_halt(dep, 0);

	/* clear this endpoint's bit in the endpoint-enable register */
	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	/* drop all descriptor-derived state; endpoint is now inactive */
	dep->stream_capable = false;
	dep->endpoint.desc = NULL;
	dep->comp_desc = NULL;
	dep->type = 0;
	dep->flags = 0;

	return 0;
}
  703 
  704 /* -------------------------------------------------------------------------- */
  705 
/*
 * ep0 is managed by the driver itself; gadget drivers must not enable it
 * through the usb_ep_ops interface.
 */
static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}
  711 
/* likewise, ep0 may never be disabled by a gadget driver */
static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}
  716 
  717 /* -------------------------------------------------------------------------- */
  718 
/*
 * dwc3_gadget_ep_enable - usb_ep_ops .enable callback
 *
 * Validates the descriptor, tags the endpoint name with the transfer type
 * and delegates to __dwc3_gadget_ep_enable() under the controller lock.
 */
static int dwc3_gadget_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3_ep			*dep;
	struct dwc3			*dwc;
	unsigned long			flags;
	int				ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("dwc3: missing wMaxPacketSize\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	/* double enable is a gadget-driver bug; warn but report success */
	if (dep->flags & DWC3_EP_ENABLED) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
				dep->name);
		return 0;
	}

	/*
	 * Append the transfer type to the name (e.g. "ep1in" -> "ep1in-bulk");
	 * dwc3_gadget_ep_disable() resets the name via snprintf().
	 */
	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		strlcat(dep->name, "-control", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_ISOC:
		strlcat(dep->name, "-isoc", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_BULK:
		strlcat(dep->name, "-bulk", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_INT:
		strlcat(dep->name, "-int", sizeof(dep->name));
		break;
	default:
		/*
		 * NOTE(review): an invalid type is only logged here and the
		 * endpoint is still enabled below — confirm this is intended.
		 */
		dev_err(dwc->dev, "invalid endpoint transfer type\n");
	}

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
  769 
/*
 * dwc3_gadget_ep_disable - usb_ep_ops .disable callback
 *
 * Restores the endpoint's canonical name and delegates to
 * __dwc3_gadget_ep_disable() under the controller lock.
 */
static int dwc3_gadget_ep_disable(struct usb_ep *ep)
{
	struct dwc3_ep			*dep;
	struct dwc3			*dwc;
	unsigned long			flags;
	int				ret;

	if (!ep) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	/* double disable is a gadget-driver bug; warn but report success */
	if (!(dep->flags & DWC3_EP_ENABLED)) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
				dep->name);
		return 0;
	}

	/*
	 * Restore the canonical name ("epNin"/"epNout") that
	 * dwc3_gadget_ep_enable() extended with a transfer-type suffix.
	 */
	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
			dep->number >> 1,
			(dep->number & 1) ? "in" : "out");

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_disable(dep);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
  801 
  802 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
  803 	gfp_t gfp_flags)
  804 {
  805 	struct dwc3_request		*req;
  806 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
  807 	struct dwc3			*dwc = dep->dwc;
  808 
  809 	req = kzalloc(sizeof(*req), gfp_flags);
  810 	if (!req) {
  811 		dev_err(dwc->dev, "not enough memory\n");
  812 		return NULL;
  813 	}
  814 
  815 	req->epnum	= dep->number;
  816 	req->dep	= dep;
  817 
  818 	return &req->request;
  819 }
  820 
  821 static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
  822 		struct usb_request *request)
  823 {
  824 	struct dwc3_request		*req = to_dwc3_request(request);
  825 
  826 	kfree(req);
  827 }
  828 
/**
 * dwc3_prepare_one_trb - setup one TRB from one request
 * @dep: endpoint for which this request is prepared
 * @req: dwc3_request pointer
 * @dma: DMA address of the (partial) buffer this TRB points at
 * @length: number of bytes covered by this TRB
 * @last: non-zero if this TRB ends the transfer (sets LST on non-isoc)
 * @chain: non-zero if further TRBs of the same request follow (sets CHN)
 * @node: index of this TRB within the request (0 selects ISOCHRONOUS_FIRST)
 */
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, dma_addr_t dma,
		unsigned length, unsigned last, unsigned chain, unsigned node)
{
	struct dwc3		*dwc = dep->dwc;
	struct dwc3_trb		*trb;

	dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
			dep->name, req, (unsigned long long) dma,
			length, last ? " last" : "",
			chain ? " chain" : "");

	/* Skip the LINK-TRB on ISOC */
	if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
			usb_endpoint_xfer_isoc(dep->endpoint.desc))
		dep->free_slot++;

	trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];

	/*
	 * First TRB of a request: move the request to the "queued" list and
	 * record where its TRB chain starts so completion can find it.
	 */
	if (!req->trb) {
		dwc3_gadget_move_request_queued(req);
		req->trb = trb;
		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
		req->start_slot = dep->free_slot & DWC3_TRB_MASK;
	}

	dep->free_slot++;

	trb->size = DWC3_TRB_SIZE_LENGTH(length);
	trb->bpl = lower_32_bits(dma);
	trb->bph = upper_32_bits(dma);

	/* TRB control type follows the endpoint's transfer type */
	switch (usb_endpoint_type(dep->endpoint.desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		if (!node)
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
		else
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb->ctrl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		BUG();
	}

	/* interrupt on completion only for the final TRB of a chain */
	if (!req->request.no_interrupt && !chain)
		trb->ctrl |= DWC3_TRB_CTRL_IOC;

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
		trb->ctrl |= DWC3_TRB_CTRL_CSP;
	} else if (last) {
		trb->ctrl |= DWC3_TRB_CTRL_LST;
	}

	if (chain)
		trb->ctrl |= DWC3_TRB_CTRL_CHN;

	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);

	/* set HWO last: this hands ownership of the TRB to the controller */
	trb->ctrl |= DWC3_TRB_CTRL_HWO;
}
  908 
  909 /*
  910  * dwc3_prepare_trbs - setup TRBs from requests
  911  * @dep: endpoint for which requests are being prepared
  912  * @starting: true if the endpoint is idle and no requests are queued.
  913  *
  914  * The function goes through the requests list and sets up TRBs for the
  915  * transfers. The function returns once there are no more TRBs available or
  916  * it runs out of requests.
  917  */
  918 static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
  919 {
  920 	struct dwc3_request	*req, *n;
  921 	u32			trbs_left;
  922 	u32			max;
  923 	unsigned int		last_one = 0;
  924 
  925 	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
  926 
  927 	/* the first request must not be queued */
  928 	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
  929 
  930 	/* Can't wrap around on a non-isoc EP since there's no link TRB */
  931 	if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
  932 		max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
  933 		if (trbs_left > max)
  934 			trbs_left = max;
  935 	}
  936 
  937 	/*
  938 	 * If busy & slot are equal than it is either full or empty. If we are
  939 	 * starting to process requests then we are empty. Otherwise we are
  940 	 * full and don't do anything
  941 	 */
  942 	if (!trbs_left) {
  943 		if (!starting)
  944 			return;
  945 		trbs_left = DWC3_TRB_NUM;
  946 		/*
  947 		 * In case we start from scratch, we queue the ISOC requests
  948 		 * starting from slot 1. This is done because we use ring
  949 		 * buffer and have no LST bit to stop us. Instead, we place
  950 		 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
  951 		 * after the first request so we start at slot 1 and have
  952 		 * 7 requests proceed before we hit the first IOC.
  953 		 * Other transfer types don't use the ring buffer and are
  954 		 * processed from the first TRB until the last one. Since we
  955 		 * don't wrap around we have to start at the beginning.
  956 		 */
  957 		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
  958 			dep->busy_slot = 1;
  959 			dep->free_slot = 1;
  960 		} else {
  961 			dep->busy_slot = 0;
  962 			dep->free_slot = 0;
  963 		}
  964 	}
  965 
  966 	/* The last TRB is a link TRB, not used for xfer */
  967 	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
  968 		return;
  969 
  970 	list_for_each_entry_safe(req, n, &dep->request_list, list) {
  971 		unsigned	length;
  972 		dma_addr_t	dma;
  973 		last_one = false;
  974 
  975 		if (req->request.num_mapped_sgs > 0) {
  976 			struct usb_request *request = &req->request;
  977 			struct scatterlist *sg = request->sg;
  978 			struct scatterlist *s;
  979 			int		i;
  980 
  981 			for_each_sg(sg, s, request->num_mapped_sgs, i) {
  982 				unsigned chain = true;
  983 
  984 				length = sg_dma_len(s);
  985 				dma = sg_dma_address(s);
  986 
  987 				if (i == (request->num_mapped_sgs - 1) ||
  988 						sg_is_last(s)) {
  989 					if (list_is_last(&req->list,
  990 							&dep->request_list))
  991 						last_one = true;
  992 					chain = false;
  993 				}
  994 
  995 				trbs_left--;
  996 				if (!trbs_left)
  997 					last_one = true;
  998 
  999 				if (last_one)
 1000 					chain = false;
 1001 
 1002 				dwc3_prepare_one_trb(dep, req, dma, length,
 1003 						last_one, chain, i);
 1004 
 1005 				if (last_one)
 1006 					break;
 1007 			}
 1008 		} else {
 1009 			dma = req->request.dma;
 1010 			length = req->request.length;
 1011 			trbs_left--;
 1012 
 1013 			if (!trbs_left)
 1014 				last_one = 1;
 1015 
 1016 			/* Is this the last request? */
 1017 			if (list_is_last(&req->list, &dep->request_list))
 1018 				last_one = 1;
 1019 
 1020 			dwc3_prepare_one_trb(dep, req, dma, length,
 1021 					last_one, false, 0);
 1022 
 1023 			if (last_one)
 1024 				break;
 1025 		}
 1026 	}
 1027 }
 1028 
 1029 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
 1030 		int start_new)
 1031 {
 1032 	struct dwc3_gadget_ep_cmd_params params;
 1033 	struct dwc3_request		*req;
 1034 	struct dwc3			*dwc = dep->dwc;
 1035 	int				ret;
 1036 	u32				cmd;
 1037 
 1038 	if (start_new && (dep->flags & DWC3_EP_BUSY)) {
 1039 		dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
 1040 		return -EBUSY;
 1041 	}
 1042 	dep->flags &= ~DWC3_EP_PENDING_REQUEST;
 1043 
 1044 	/*
 1045 	 * If we are getting here after a short-out-packet we don't enqueue any
 1046 	 * new requests as we try to set the IOC bit only on the last request.
 1047 	 */
 1048 	if (start_new) {
 1049 		if (list_empty(&dep->req_queued))
 1050 			dwc3_prepare_trbs(dep, start_new);
 1051 
 1052 		/* req points to the first request which will be sent */
 1053 		req = next_request(&dep->req_queued);
 1054 	} else {
 1055 		dwc3_prepare_trbs(dep, start_new);
 1056 
 1057 		/*
 1058 		 * req points to the first request where HWO changed from 0 to 1
 1059 		 */
 1060 		req = next_request(&dep->req_queued);
 1061 	}
 1062 	if (!req) {
 1063 		dep->flags |= DWC3_EP_PENDING_REQUEST;
 1064 		return 0;
 1065 	}
 1066 
 1067 	memset(¶ms, 0, sizeof(params));
 1068 
 1069 	if (start_new) {
 1070 		params.param0 = upper_32_bits(req->trb_dma);
 1071 		params.param1 = lower_32_bits(req->trb_dma);
 1072 		cmd = DWC3_DEPCMD_STARTTRANSFER;
 1073 	} else {
 1074 		cmd = DWC3_DEPCMD_UPDATETRANSFER;
 1075 	}
 1076 
 1077 	cmd |= DWC3_DEPCMD_PARAM(cmd_param);
 1078 	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms);
 1079 	if (ret < 0) {
 1080 		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
 1081 
 1082 		/*
 1083 		 * FIXME we need to iterate over the list of requests
 1084 		 * here and stop, unmap, free and del each of the linked
 1085 		 * requests instead of what we do now.
 1086 		 */
 1087 		usb_gadget_unmap_request(&dwc->gadget, &req->request,
 1088 				req->direction);
 1089 		list_del(&req->list);
 1090 		return ret;
 1091 	}
 1092 
 1093 	dep->flags |= DWC3_EP_BUSY;
 1094 
 1095 	if (start_new) {
 1096 		dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
 1097 				dep->number);
 1098 		WARN_ON_ONCE(!dep->resource_index);
 1099 	}
 1100 
 1101 	return 0;
 1102 }
 1103 
 1104 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
 1105 		struct dwc3_ep *dep, u32 cur_uf)
 1106 {
 1107 	u32 uf;
 1108 
 1109 	if (list_empty(&dep->request_list)) {
 1110 		dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n",
 1111 			dep->name);
 1112 		dep->flags |= DWC3_EP_PENDING_REQUEST;
 1113 		return;
 1114 	}
 1115 
 1116 	/* 4 micro frames in the future */
 1117 	uf = cur_uf + dep->interval * 4;
 1118 
 1119 	__dwc3_gadget_kick_transfer(dep, uf, 1);
 1120 }
 1121 
 1122 static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
 1123 		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
 1124 {
 1125 	u32 cur_uf, mask;
 1126 
 1127 	mask = ~(dep->interval - 1);
 1128 	cur_uf = event->parameters & mask;
 1129 
 1130 	__dwc3_gadget_start_isoc(dwc, dep, cur_uf);
 1131 }
 1132 
/*
 * __dwc3_gadget_ep_queue - map a request for DMA and queue (or kick) it
 *
 * Caller must hold dwc->lock.  The request is normally only appended to
 * request_list; actual TRB programming is deferred to the XferNotReady
 * interrupt, except for the special cases documented inline below.
 */
static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
	struct dwc3		*dwc = dep->dwc;
	int			ret;

	req->request.actual	= 0;
	req->request.status	= -EINPROGRESS;
	req->direction		= dep->direction;
	req->epnum		= dep->number;

	/*
	 * We only add to our list of requests now and
	 * start consuming the list once we get XferNotReady
	 * IRQ.
	 *
	 * That way, we avoid doing anything that we don't need
	 * to do now and defer it until the point we receive a
	 * particular token from the Host side.
	 *
	 * This will also avoid Host cancelling URBs due to too
	 * many NAKs.
	 */
	ret = usb_gadget_map_request(&dwc->gadget, &req->request,
			dep->direction);
	if (ret)
		return ret;

	list_add_tail(&req->list, &dep->request_list);

	/*
	 * There are a few special cases:
	 *
	 * 1. XferNotReady with empty list of requests. We need to kick the
	 *    transfer here in that situation, otherwise we will be NAKing
	 *    forever. If we get XferNotReady before gadget driver has a
	 *    chance to queue a request, we will ACK the IRQ but won't be
	 *    able to receive the data until the next request is queued.
	 *    The following code is handling exactly that.
	 *
	 */
	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
		/*
		 * If xfernotready is already elapsed and it is a case
		 * of isoc transfer, then issue END TRANSFER, so that
		 * you can receive xfernotready again and can have
		 * notion of current microframe.
		 */
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			if (list_empty(&dep->req_queued)) {
				dwc3_stop_active_transfer(dwc, dep->number, true);
				dep->flags = DWC3_EP_ENABLED;
			}
			return 0;
		}

		ret = __dwc3_gadget_kick_transfer(dep, 0, true);
		if (ret && ret != -EBUSY)
			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		return ret;
	}

	/*
	 * 2. XferInProgress on Isoc EP with an active transfer. We need to
	 *    kick the transfer here after queuing a request, otherwise the
	 *    core may not see the modified TRB(s).
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
			(dep->flags & DWC3_EP_BUSY) &&
			!(dep->flags & DWC3_EP_MISSED_ISOC)) {
		WARN_ON_ONCE(!dep->resource_index);
		ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
				false);
		if (ret && ret != -EBUSY)
			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		return ret;
	}

	/*
	 * 3. Stream Capable Bulk Endpoints. We need to start the transfer
	 * right away, otherwise host will not know we have streams to be
	 * handled.
	 */
	if (dep->stream_capable) {
		int	ret;

		ret = __dwc3_gadget_kick_transfer(dep, 0, true);
		if (ret && ret != -EBUSY) {
			struct dwc3	*dwc = dep->dwc;

			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		}
	}

	return 0;
}
 1231 
 1232 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
 1233 	gfp_t gfp_flags)
 1234 {
 1235 	struct dwc3_request		*req = to_dwc3_request(request);
 1236 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
 1237 	struct dwc3			*dwc = dep->dwc;
 1238 
 1239 	unsigned long			flags;
 1240 
 1241 	int				ret;
 1242 
 1243 	if (!dep->endpoint.desc) {
 1244 		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
 1245 				request, ep->name);
 1246 		return -ESHUTDOWN;
 1247 	}
 1248 
 1249 	dev_vdbg(dwc->dev, "queing request %p to %s length %d\n",
 1250 			request, ep->name, request->length);
 1251 
 1252 	spin_lock_irqsave(&dwc->lock, flags);
 1253 	ret = __dwc3_gadget_ep_queue(dep, req);
 1254 	spin_unlock_irqrestore(&dwc->lock, flags);
 1255 
 1256 	return ret;
 1257 }
 1258 
/*
 * dwc3_gadget_ep_dequeue - usb_ep_ops .dequeue callback
 *
 * Looks for @request first among the not-yet-started requests, then among
 * those already handed to the hardware (stopping the active transfer in
 * that case), and gives it back with -ECONNRESET when found.
 */
static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_request		*r = NULL;

	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;
	int				ret = 0;

	spin_lock_irqsave(&dwc->lock, flags);

	/* search the requests the gadget driver queued but HW hasn't seen */
	list_for_each_entry(r, &dep->request_list, list) {
		if (r == req)
			break;
	}

	if (r != req) {
		/* not pending; search the requests already started on HW */
		list_for_each_entry(r, &dep->req_queued, list) {
			if (r == req)
				break;
		}
		if (r == req) {
			/* wait until it is processed */
			dwc3_stop_active_transfer(dwc, dep->number, true);
			goto out1;
		}
		dev_err(dwc->dev, "request %p was not queued to %s\n",
				request, ep->name);
		ret = -EINVAL;
		goto out0;
	}

	/* found in request_list: fall straight through to the giveback */
out1:
	/* giveback the request */
	dwc3_gadget_giveback(dep, req, -ECONNRESET);

out0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
 1303 
 1304 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
 1305 {
 1306 	struct dwc3_gadget_ep_cmd_params	params;
 1307 	struct dwc3				*dwc = dep->dwc;
 1308 	int					ret;
 1309 
 1310 	memset(¶ms, 0x00, sizeof(params));
 1311 
 1312 	if (value) {
 1313 		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
 1314 			DWC3_DEPCMD_SETSTALL, ¶ms);
 1315 		if (ret)
 1316 			dev_err(dwc->dev, "failed to set STALL on %s\n",
 1317 					dep->name);
 1318 		else
 1319 			dep->flags |= DWC3_EP_STALL;
 1320 	} else {
 1321 		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
 1322 			DWC3_DEPCMD_CLEARSTALL, ¶ms);
 1323 		if (ret)
 1324 			dev_err(dwc->dev, "failed to clear STALL on %s\n",
 1325 					dep->name);
 1326 		else
 1327 			dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
 1328 	}
 1329 
 1330 	return ret;
 1331 }
 1332 
 1333 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
 1334 {
 1335 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
 1336 	struct dwc3			*dwc = dep->dwc;
 1337 
 1338 	unsigned long			flags;
 1339 
 1340 	int				ret;
 1341 
 1342 	spin_lock_irqsave(&dwc->lock, flags);
 1343 
 1344 	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
 1345 		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
 1346 		ret = -EINVAL;
 1347 		goto out;
 1348 	}
 1349 
 1350 	ret = __dwc3_gadget_ep_set_halt(dep, value);
 1351 out:
 1352 	spin_unlock_irqrestore(&dwc->lock, flags);
 1353 
 1354 	return ret;
 1355 }
 1356 
 1357 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
 1358 {
 1359 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
 1360 	struct dwc3			*dwc = dep->dwc;
 1361 	unsigned long			flags;
 1362 
 1363 	spin_lock_irqsave(&dwc->lock, flags);
 1364 	dep->flags |= DWC3_EP_WEDGE;
 1365 	spin_unlock_irqrestore(&dwc->lock, flags);
 1366 
 1367 	if (dep->number == 0 || dep->number == 1)
 1368 		return dwc3_gadget_ep0_set_halt(ep, 1);
 1369 	else
 1370 		return dwc3_gadget_ep_set_halt(ep, 1);
 1371 }
 1372 
 1373 /* -------------------------------------------------------------------------- */
 1374 
/*
 * Template descriptor for ep0.  wMaxPacketSize is not set here —
 * presumably filled in elsewhere once the connection speed is known;
 * TODO confirm against the rest of the file.
 */
static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
	.bLength	= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
};
 1380 
/* ep0 operations: dedicated enable/disable/queue/set_halt handlers */
static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
	.enable		= dwc3_gadget_ep0_enable,
	.disable	= dwc3_gadget_ep0_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep0_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep0_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};
 1391 
/* operations for all non-control endpoints */
static const struct usb_ep_ops dwc3_gadget_ep_ops = {
	.enable		= dwc3_gadget_ep_enable,
	.disable	= dwc3_gadget_ep_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};
 1402 
 1403 /* -------------------------------------------------------------------------- */
 1404 
 1405 static int dwc3_gadget_get_frame(struct usb_gadget *g)
 1406 {
 1407 	struct dwc3		*dwc = gadget_to_dwc(g);
 1408 	u32			reg;
 1409 
 1410 	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
 1411 	return DWC3_DSTS_SOFFN(reg);
 1412 }
 1413 
/*
 * dwc3_gadget_wakeup - usb_gadget_ops .wakeup callback
 *
 * Issues a remote-wakeup by driving the link into Recovery and polling
 * (up to 100 ms) for it to come back to U0.  Only valid while suspended
 * at non-SuperSpeed.
 */
static int dwc3_gadget_wakeup(struct usb_gadget *g)
{
	struct dwc3		*dwc = gadget_to_dwc(g);

	unsigned long		timeout;
	unsigned long		flags;

	u32			reg;

	int			ret = 0;

	u8			link_state;
	u8			speed;

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * According to the Databook Remote wakeup request should
	 * be issued only when the device is in early suspend state.
	 *
	 * We can check that via USB Link State bits in DSTS register.
	 */
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	speed = reg & DWC3_DSTS_CONNECTSPD;
	if (speed == DWC3_DSTS_SUPERSPEED) {
		dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
		ret = -EINVAL;
		goto out;
	}

	link_state = DWC3_DSTS_USBLNKST(reg);

	switch (link_state) {
	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
		break;
	default:
		dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
				link_state);
		ret = -EINVAL;
		goto out;
	}

	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
	if (ret < 0) {
		dev_err(dwc->dev, "failed to put link in Recovery\n");
		goto out;
	}

	/* Recent versions do this automatically */
	if (dwc->revision < DWC3_REVISION_194A) {
		/* write zeroes to Link Change Request */
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	/* poll until Link State changes to ON */
	timeout = jiffies + msecs_to_jiffies(100);

	while (!time_after(jiffies, timeout)) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		/* in HS, means ON */
		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
			break;
	}

	/* re-check: the poll above may have exited due to timeout */
	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
		dev_err(dwc->dev, "failed to send remote wakeup\n");
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
 1493 
 1494 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
 1495 		int is_selfpowered)
 1496 {
 1497 	struct dwc3		*dwc = gadget_to_dwc(g);
 1498 	unsigned long		flags;
 1499 
 1500 	spin_lock_irqsave(&dwc->lock, flags);
 1501 	dwc->is_selfpowered = !!is_selfpowered;
 1502 	spin_unlock_irqrestore(&dwc->lock, flags);
 1503 
 1504 	return 0;
 1505 }
 1506 
/*
 * dwc3_gadget_run_stop - set or clear the Run/Stop bit in DCTL
 * @dwc: pointer to our controller context structure
 * @is_on: non-zero to start the controller, zero to stop it
 * @suspend: when stopping with hibernation support, non-zero keeps the
 *	KEEP_CONNECT bit set
 *
 * Polls DSTS.DEVCTRLHLT (up to ~500 us) until the controller acknowledges
 * the new state.  Returns 0 on success or -ETIMEDOUT.
 */
static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
{
	u32			reg;
	u32			timeout = 500;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (is_on) {
		/* old cores need the target link state forced to RX_DET */
		if (dwc->revision <= DWC3_REVISION_187A) {
			reg &= ~DWC3_DCTL_TRGTULST_MASK;
			reg |= DWC3_DCTL_TRGTULST_RX_DET;
		}

		if (dwc->revision >= DWC3_REVISION_194A)
			reg &= ~DWC3_DCTL_KEEP_CONNECT;
		reg |= DWC3_DCTL_RUN_STOP;

		if (dwc->has_hibernation)
			reg |= DWC3_DCTL_KEEP_CONNECT;

		dwc->pullups_connected = true;
	} else {
		reg &= ~DWC3_DCTL_RUN_STOP;

		if (dwc->has_hibernation && !suspend)
			reg &= ~DWC3_DCTL_KEEP_CONNECT;

		dwc->pullups_connected = false;
	}

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/* wait for the halted bit to reflect the requested state */
	do {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		if (is_on) {
			if (!(reg & DWC3_DSTS_DEVCTRLHLT))
				break;
		} else {
			if (reg & DWC3_DSTS_DEVCTRLHLT)
				break;
		}
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;
		udelay(1);
	} while (1);

	dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
			dwc->gadget_driver
			? dwc->gadget_driver->function : "no-function",
			is_on ? "connect" : "disconnect");

	return 0;
}
 1560 
 1561 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
 1562 {
 1563 	struct dwc3		*dwc = gadget_to_dwc(g);
 1564 	unsigned long		flags;
 1565 	int			ret;
 1566 
 1567 	is_on = !!is_on;
 1568 
 1569 	spin_lock_irqsave(&dwc->lock, flags);
 1570 	ret = dwc3_gadget_run_stop(dwc, is_on, false);
 1571 	spin_unlock_irqrestore(&dwc->lock, flags);
 1572 
 1573 	return ret;
 1574 }
 1575 
/* program DEVTEN to unmask the device-level interrupts we handle */
static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
{
	u32			reg;

	/* Enable all but Start and End of Frame IRQs */
	reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
			DWC3_DEVTEN_EVNTOVERFLOWEN |
			DWC3_DEVTEN_CMDCMPLTEN |
			DWC3_DEVTEN_ERRTICERREN |
			DWC3_DEVTEN_WKUPEVTEN |
			DWC3_DEVTEN_ULSTCNGEN |
			DWC3_DEVTEN_CONNECTDONEEN |
			DWC3_DEVTEN_USBRSTEN |
			DWC3_DEVTEN_DISCONNEVTEN);

	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
}
 1593 
/* writing 0 to DEVTEN masks every device-level interrupt source */
static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
{
	/* mask all interrupts */
	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
}
 1599 
 1600 static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
 1601 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
 1602 
/*
 * dwc3_gadget_start - usb_gadget_ops .udc_start callback
 * @g: the gadget being started
 * @driver: gadget driver binding to this UDC
 *
 * Requests the shared threaded IRQ, records @driver, programs the
 * configured maximum speed into DCFG, enables both halves of physical
 * endpoint 0, primes EP0 for SETUP packets and unmasks device events.
 *
 * Returns 0 on success, -EBUSY if a driver is already bound, or a
 * negative error from IRQ request / endpoint enable. Errors unwind
 * through the goto-label chain at the bottom.
 */
static int dwc3_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	struct dwc3_ep		*dep;
	unsigned long		flags;
	int			ret = 0;
	int			irq;
	u32			reg;

	/* request the IRQ before taking dwc->lock: request may sleep */
	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
	ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
			IRQF_SHARED, "dwc3", dwc);
	if (ret) {
		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
				irq, ret);
		goto err0;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	/* only one gadget driver may be bound at a time */
	if (dwc->gadget_driver) {
		dev_err(dwc->dev, "%s is already bound to %s\n",
				dwc->gadget.name,
				dwc->gadget_driver->driver.name);
		ret = -EBUSY;
		goto err1;
	}

	dwc->gadget_driver	= driver;

	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_SPEED_MASK);

	/**
	 * WORKAROUND: DWC3 revision < 2.20a have an issue
	 * which would cause metastability state on Run/Stop
	 * bit if we try to force the IP to USB2-only mode.
	 *
	 * Because of that, we cannot configure the IP to any
	 * speed other than the SuperSpeed
	 *
	 * Refers to:
	 *
	 * STAR#9000525659: Clock Domain Crossing on DCTL in
	 * USB 2.0 Mode
	 */
	if (dwc->revision < DWC3_REVISION_220A) {
		reg |= DWC3_DCFG_SUPERSPEED;
	} else {
		/*
		 * NOTE(review): DWC3_DSTS_* speed values are ORed into
		 * the DCFG speed field here; presumably the DSTS and
		 * DCFG encodings are identical -- confirm against the
		 * databook / register header.
		 */
		switch (dwc->maximum_speed) {
		case USB_SPEED_LOW:
			reg |= DWC3_DSTS_LOWSPEED;
			break;
		case USB_SPEED_FULL:
			reg |= DWC3_DSTS_FULLSPEED1;
			break;
		case USB_SPEED_HIGH:
			reg |= DWC3_DSTS_HIGHSPEED;
			break;
		case USB_SPEED_SUPER:	/* FALLTHROUGH */
		case USB_SPEED_UNKNOWN:	/* FALLTHROUGH */
		default:
			reg |= DWC3_DSTS_SUPERSPEED;
		}
	}
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	dwc->start_config_issued = false;

	/* Start with SuperSpeed Default */
	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	/* enable both directions of physical endpoint 0 */
	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
			false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err2;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
			false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err3;
	}

	/* begin to receive SETUP packets */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);

	dwc3_gadget_enable_irq(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;

err3:
	__dwc3_gadget_ep_disable(dwc->eps[0]);

err2:
	dwc->gadget_driver = NULL;

err1:
	spin_unlock_irqrestore(&dwc->lock, flags);

	free_irq(irq, dwc);

err0:
	return ret;
}
 1716 
 1717 static int dwc3_gadget_stop(struct usb_gadget *g,
 1718 		struct usb_gadget_driver *driver)
 1719 {
 1720 	struct dwc3		*dwc = gadget_to_dwc(g);
 1721 	unsigned long		flags;
 1722 	int			irq;
 1723 
 1724 	spin_lock_irqsave(&dwc->lock, flags);
 1725 
 1726 	dwc3_gadget_disable_irq(dwc);
 1727 	__dwc3_gadget_ep_disable(dwc->eps[0]);
 1728 	__dwc3_gadget_ep_disable(dwc->eps[1]);
 1729 
 1730 	dwc->gadget_driver	= NULL;
 1731 
 1732 	spin_unlock_irqrestore(&dwc->lock, flags);
 1733 
 1734 	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
 1735 	free_irq(irq, dwc);
 1736 
 1737 	return 0;
 1738 }
 1739 
/*
 * usb_gadget_ops vtable registered with the UDC core: frame number
 * query, remote wakeup, self-powered flag, soft connect/disconnect
 * (pullup) and gadget-driver start/stop. No vbus callbacks are
 * provided here.
 */
static const struct usb_gadget_ops dwc3_gadget_ops = {
	.get_frame		= dwc3_gadget_get_frame,
	.wakeup			= dwc3_gadget_wakeup,
	.set_selfpowered	= dwc3_gadget_set_selfpowered,
	.pullup			= dwc3_gadget_pullup,
	.udc_start		= dwc3_gadget_start,
	.udc_stop		= dwc3_gadget_stop,
};
 1748 
 1749 /* -------------------------------------------------------------------------- */
 1750 
/*
 * dwc3_gadget_init_hw_endpoints - allocate dwc3_ep structs for one direction
 * @num: how many endpoints to create in this direction
 * @direction: 0 for OUT endpoints, non-zero for IN
 *
 * Physical endpoint numbers interleave directions: even numbers are
 * OUT, odd numbers are IN, hence epnum = (i << 1) | direction.
 * Physical endpoints 0 and 1 together form control ep0: they get the
 * ep0 ops and a 512-byte maxpacket limit, but no TRB pool and no entry
 * on the gadget's ep_list.
 *
 * Returns 0 or a negative errno. Endpoints allocated before a failure
 * are left in dwc->eps[]; the caller's error path is responsible for
 * freeing them (presumably via dwc3_gadget_free_endpoints() -- the
 * caller is outside this view).
 */
static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
		u8 num, u32 direction)
{
	struct dwc3_ep			*dep;
	u8				i;

	for (i = 0; i < num; i++) {
		u8 epnum = (i << 1) | (!!direction);

		dep = kzalloc(sizeof(*dep), GFP_KERNEL);
		if (!dep) {
			dev_err(dwc->dev, "can't allocate endpoint %d\n",
					epnum);
			return -ENOMEM;
		}

		dep->dwc = dwc;
		dep->number = epnum;
		dep->direction = !!direction;
		dwc->eps[epnum] = dep;

		/* e.g. "ep1in" for physical endpoint 3 */
		snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
				(epnum & 1) ? "in" : "out");

		dep->endpoint.name = dep->name;

		dev_vdbg(dwc->dev, "initializing %s\n", dep->name);

		if (epnum == 0 || epnum == 1) {
			usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
			dep->endpoint.maxburst = 1;
			dep->endpoint.ops = &dwc3_gadget_ep0_ops;
			if (!epnum)
				dwc->gadget.ep0 = &dep->endpoint;
		} else {
			int		ret;

			usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
			dep->endpoint.max_streams = 15;
			dep->endpoint.ops = &dwc3_gadget_ep_ops;
			list_add_tail(&dep->endpoint.ep_list,
					&dwc->gadget.ep_list);

			ret = dwc3_alloc_trb_pool(dep);
			if (ret)
				return ret;
		}

		INIT_LIST_HEAD(&dep->request_list);
		INIT_LIST_HEAD(&dep->req_queued);
	}

	return 0;
}
 1805 
 1806 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
 1807 {
 1808 	int				ret;
 1809 
 1810 	INIT_LIST_HEAD(&dwc->gadget.ep_list);
 1811 
 1812 	ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
 1813 	if (ret < 0) {
 1814 		dev_vdbg(dwc->dev, "failed to allocate OUT endpoints\n");
 1815 		return ret;
 1816 	}
 1817 
 1818 	ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
 1819 	if (ret < 0) {
 1820 		dev_vdbg(dwc->dev, "failed to allocate IN endpoints\n");
 1821 		return ret;
 1822 	}
 1823 
 1824 	return 0;
 1825 }
 1826 
 1827 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
 1828 {
 1829 	struct dwc3_ep			*dep;
 1830 	u8				epnum;
 1831 
 1832 	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
 1833 		dep = dwc->eps[epnum];
 1834 		if (!dep)
 1835 			continue;
 1836 		/*
 1837 		 * Physical endpoints 0 and 1 are special; they form the
 1838 		 * bi-directional USB endpoint 0.
 1839 		 *
 1840 		 * For those two physical endpoints, we don't allocate a TRB
 1841 		 * pool nor do we add them the endpoints list. Due to that, we
 1842 		 * shouldn't do these two operations otherwise we would end up
 1843 		 * with all sorts of bugs when removing dwc3.ko.
 1844 		 */
 1845 		if (epnum != 0 && epnum != 1) {
 1846 			dwc3_free_trb_pool(dep);
 1847 			list_del(&dep->endpoint.ep_list);
 1848 		}
 1849 
 1850 		kfree(dep);
 1851 	}
 1852 }
 1853 
 1854 /* -------------------------------------------------------------------------- */
 1855 
/*
 * __dwc3_cleanup_done_trbs - reclaim one completed TRB belonging to @req
 *
 * Accumulates the bytes transferred for this TRB into
 * req->request.actual and inspects TRB and event status bits.
 *
 * Returns 1 when processing of this request's TRB chain should stop
 * (short packet on an OUT endpoint, Last-TRB, or IOC completion),
 * 0 to continue with the next TRB.
 */
static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
		struct dwc3_request *req, struct dwc3_trb *trb,
		const struct dwc3_event_depevt *event, int status)
{
	unsigned int		count;
	unsigned int		s_pkt = 0;
	unsigned int		trb_status;

	if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
		/*
		 * We continue despite the error. There is not much we
		 * can do. If we don't clean it up we loop forever. If
		 * we skip the TRB then it gets overwritten after a
		 * while since we use them in a ring buffer. A BUG()
		 * would help. Lets hope that if this occurs, someone
		 * fixes the root cause instead of looking away :)
		 */
		dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
				dep->name, trb);
	/* residual byte count left in the TRB by the controller */
	count = trb->size & DWC3_TRB_SIZE_MASK;

	if (dep->direction) {
		/* IN endpoint: a residual means the transfer was cut short */
		if (count) {
			trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
			if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
				dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
						dep->name);
				/*
				 * If missed isoc occurred and there is
				 * no request queued then issue END
				 * TRANSFER, so that core generates
				 * next xfernotready and we will issue
				 * a fresh START TRANSFER.
				 * If there are still queued request
				 * then wait, do not issue either END
				 * or UPDATE TRANSFER, just attach next
				 * request in request_list during
				 * giveback.If any future queued request
				 * is successfully transferred then we
				 * will issue UPDATE TRANSFER for all
				 * request in the request_list.
				 */
				dep->flags |= DWC3_EP_MISSED_ISOC;
			} else {
				dev_err(dwc->dev, "incomplete IN transfer %s\n",
						dep->name);
				/*
				 * NOTE(review): this only updates the local
				 * copy of @status; nothing reads it after
				 * this point in this function.
				 */
				status = -ECONNRESET;
			}
		} else {
			dep->flags &= ~DWC3_EP_MISSED_ISOC;
		}
	} else {
		/* OUT endpoint: residual plus SHORT status = short packet */
		if (count && (event->status & DEPEVT_STATUS_SHORT))
			s_pkt = 1;
	}

	/*
	 * We assume here we will always receive the entire data block
	 * which we should receive. Meaning, if we program RX to
	 * receive 4K but we receive only 2K, we assume that's all we
	 * should receive and we simply bounce the request back to the
	 * gadget driver for further processing.
	 */
	req->request.actual += req->request.length - count;
	if (s_pkt)
		return 1;
	if ((event->status & DEPEVT_STATUS_LST) &&
			(trb->ctrl & (DWC3_TRB_CTRL_LST |
				DWC3_TRB_CTRL_HWO)))
		return 1;
	if ((event->status & DEPEVT_STATUS_IOC) &&
			(trb->ctrl & DWC3_TRB_CTRL_IOC))
		return 1;
	return 0;
}
 1931 
/*
 * dwc3_cleanup_done_reqs - give back completed requests on @dep
 *
 * Walks dep->req_queued, reclaiming each request's TRBs (one iteration
 * per mapped scatter-gather entry; the inner do/while still runs once
 * when num_mapped_sgs is 0, which presumably covers the non-sg case --
 * confirm) and handing each request back to the gadget driver. For
 * isochronous endpoints that drained completely, either flags a
 * pending request or ends the active transfer.
 *
 * Both exit paths return 1 ("endpoint no longer busy").
 */
static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event, int status)
{
	struct dwc3_request	*req;
	struct dwc3_trb		*trb;
	unsigned int		slot;
	unsigned int		i;
	int			ret;

	do {
		req = next_request(&dep->req_queued);
		if (!req) {
			/* event arrived with nothing queued: unexpected */
			WARN_ON_ONCE(1);
			return 1;
		}
		i = 0;
		do {
			slot = req->start_slot + i;
			/* skip the link TRB reserved at the end of the ring */
			if ((slot == DWC3_TRB_NUM - 1) &&
				usb_endpoint_xfer_isoc(dep->endpoint.desc))
				slot++;
			slot %= DWC3_TRB_NUM;
			trb = &dep->trb_pool[slot];

			ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
					event, status);
			if (ret)
				break;
		}while (++i < req->request.num_mapped_sgs);

		dwc3_gadget_giveback(dep, req, status);

		if (ret)
			break;
	} while (1);

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
			list_empty(&dep->req_queued)) {
		if (list_empty(&dep->request_list)) {
			/*
			 * If there is no entry in request list then do
			 * not issue END TRANSFER now. Just set PENDING
			 * flag, so that END TRANSFER is issued when an
			 * entry is added into request list.
			 */
			dep->flags = DWC3_EP_PENDING_REQUEST;
		} else {
			dwc3_stop_active_transfer(dwc, dep->number, true);
			dep->flags = DWC3_EP_ENABLED;
		}
		return 1;
	}

	return 1;
}
 1987 
/*
 * dwc3_endpoint_transfer_complete - common XferComplete/XferInProgress path
 * @start_new: call-site distinction (1 = XferComplete); not otherwise
 *             read in this function
 *
 * Maps a bus-error event status onto -ECONNRESET, reclaims completed
 * requests, and clears DWC3_EP_BUSY once cleanup reports the endpoint
 * drained. Also hosts the second half of the U1/U2 -> U0 throughput
 * workaround for revisions < 1.83a: when no enabled endpoint has
 * queued work left, the DCTL U1/U2 enable bits saved by
 * dwc3_gadget_linksts_change_interrupt() are restored.
 */
static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
		int start_new)
{
	unsigned		status = 0;
	int			clean_busy;

	if (event->status & DEPEVT_STATUS_BUSERR)
		status = -ECONNRESET;

	clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
	if (clean_busy)
		dep->flags &= ~DWC3_EP_BUSY;

	/*
	 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
	 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		u32		reg;
		int		i;

		/* note: @dep is reused as the loop cursor below */
		for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
			dep = dwc->eps[i];

			if (!(dep->flags & DWC3_EP_ENABLED))
				continue;

			/* something still queued: keep U1/U2 disabled */
			if (!list_empty(&dep->req_queued))
				return;
		}

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg |= dwc->u1u2;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);

		dwc->u1u2 = 0;
	}
}
 2027 
/*
 * dwc3_endpoint_interrupt - dispatch one endpoint (depevt) event
 *
 * Events for disabled endpoints are dropped. Physical endpoints 0 and 1
 * are routed to the dedicated ep0 handler. For the rest:
 * XferComplete and XferInProgress funnel into
 * dwc3_endpoint_transfer_complete() (the former rejects isochronous
 * endpoints, the latter requires them); XferNotReady either starts an
 * isoc transfer or kicks a pending one; stream, FIFO and
 * command-complete events are only logged.
 */
static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep		*dep;
	u8			epnum = event->endpoint_number;

	dep = dwc->eps[epnum];

	if (!(dep->flags & DWC3_EP_ENABLED))
		return;

	dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
			dwc3_ep_event_string(event->endpoint_event));

	if (epnum == 0 || epnum == 1) {
		dwc3_ep0_interrupt(dwc, event);
		return;
	}

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERCOMPLETE:
		/* transfer finished: no resource remains allocated */
		dep->resource_index = 0;

		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
		break;
	case DWC3_DEPEVT_XFERINPROGRESS:
		if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
		break;
	case DWC3_DEPEVT_XFERNOTREADY:
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dwc3_gadget_start_isoc(dwc, dep, event);
		} else {
			int ret;

			dev_vdbg(dwc->dev, "%s: reason %s\n",
					dep->name, event->status &
					DEPEVT_STATUS_TRANSFER_ACTIVE
					? "Transfer Active"
					: "Transfer Not Active");

			ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
			/* -EBUSY means a transfer is already in flight */
			if (!ret || ret == -EBUSY)
				return;

			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		}

		break;
	case DWC3_DEPEVT_STREAMEVT:
		/* streams only exist on bulk endpoints */
		if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
			dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
					dep->name);
			return;
		}

		switch (event->status) {
		case DEPEVT_STREAMEVT_FOUND:
			dev_vdbg(dwc->dev, "Stream %d found and started\n",
					event->parameters);

			break;
		case DEPEVT_STREAMEVT_NOTFOUND:
			/* FALLTHROUGH */
		default:
			dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
		}
		break;
	case DWC3_DEPEVT_RXTXFIFOEVT:
		dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
		break;
	case DWC3_DEPEVT_EPCMDCMPLT:
		dev_vdbg(dwc->dev, "Endpoint Command Complete\n");
		break;
	}
}
 2116 
 2117 static void dwc3_disconnect_gadget(struct dwc3 *dwc)
 2118 {
 2119 	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
 2120 		spin_unlock(&dwc->lock);
 2121 		dwc->gadget_driver->disconnect(&dwc->gadget);
 2122 		spin_lock(&dwc->lock);
 2123 	}
 2124 }
 2125 
 2126 static void dwc3_suspend_gadget(struct dwc3 *dwc)
 2127 {
 2128 	if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
 2129 		spin_unlock(&dwc->lock);
 2130 		dwc->gadget_driver->suspend(&dwc->gadget);
 2131 		spin_lock(&dwc->lock);
 2132 	}
 2133 }
 2134 
 2135 static void dwc3_resume_gadget(struct dwc3 *dwc)
 2136 {
 2137 	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
 2138 		spin_unlock(&dwc->lock);
 2139 		dwc->gadget_driver->resume(&dwc->gadget);
 2140 		spin_lock(&dwc->lock);
 2141 	}
 2142 }
 2143 
 2144 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
 2145 {
 2146 	struct dwc3_ep *dep;
 2147 	struct dwc3_gadget_ep_cmd_params params;
 2148 	u32 cmd;
 2149 	int ret;
 2150 
 2151 	dep = dwc->eps[epnum];
 2152 
 2153 	if (!dep->resource_index)
 2154 		return;
 2155 
 2156 	/*
 2157 	 * NOTICE: We are violating what the Databook says about the
 2158 	 * EndTransfer command. Ideally we would _always_ wait for the
 2159 	 * EndTransfer Command Completion IRQ, but that's causing too
 2160 	 * much trouble synchronizing between us and gadget driver.
 2161 	 *
 2162 	 * We have discussed this with the IP Provider and it was
 2163 	 * suggested to giveback all requests here, but give HW some
 2164 	 * extra time to synchronize with the interconnect. We're using
 2165 	 * an arbitraty 100us delay for that.
 2166 	 *
 2167 	 * Note also that a similar handling was tested by Synopsys
 2168 	 * (thanks a lot Paul) and nothing bad has come out of it.
 2169 	 * In short, what we're doing is:
 2170 	 *
 2171 	 * - Issue EndTransfer WITH CMDIOC bit set
 2172 	 * - Wait 100us
 2173 	 */
 2174 
 2175 	cmd = DWC3_DEPCMD_ENDTRANSFER;
 2176 	cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
 2177 	cmd |= DWC3_DEPCMD_CMDIOC;
 2178 	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
 2179 	memset(¶ms, 0, sizeof(params));
 2180 	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms);
 2181 	WARN_ON_ONCE(ret);
 2182 	dep->resource_index = 0;
 2183 	dep->flags &= ~DWC3_EP_BUSY;
 2184 	udelay(100);
 2185 }
 2186 
 2187 static void dwc3_stop_active_transfers(struct dwc3 *dwc)
 2188 {
 2189 	u32 epnum;
 2190 
 2191 	for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
 2192 		struct dwc3_ep *dep;
 2193 
 2194 		dep = dwc->eps[epnum];
 2195 		if (!dep)
 2196 			continue;
 2197 
 2198 		if (!(dep->flags & DWC3_EP_ENABLED))
 2199 			continue;
 2200 
 2201 		dwc3_remove_requests(dwc, dep);
 2202 	}
 2203 }
 2204 
 2205 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
 2206 {
 2207 	u32 epnum;
 2208 
 2209 	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
 2210 		struct dwc3_ep *dep;
 2211 		struct dwc3_gadget_ep_cmd_params params;
 2212 		int ret;
 2213 
 2214 		dep = dwc->eps[epnum];
 2215 		if (!dep)
 2216 			continue;
 2217 
 2218 		if (!(dep->flags & DWC3_EP_STALL))
 2219 			continue;
 2220 
 2221 		dep->flags &= ~DWC3_EP_STALL;
 2222 
 2223 		memset(¶ms, 0, sizeof(params));
 2224 		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
 2225 				DWC3_DEPCMD_CLEARSTALL, ¶ms);
 2226 		WARN_ON_ONCE(ret);
 2227 	}
 2228 }
 2229 
 2230 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
 2231 {
 2232 	int			reg;
 2233 
 2234 	dev_vdbg(dwc->dev, "%s\n", __func__);
 2235 
 2236 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
 2237 	reg &= ~DWC3_DCTL_INITU1ENA;
 2238 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
 2239 
 2240 	reg &= ~DWC3_DCTL_INITU2ENA;
 2241 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
 2242 
 2243 	dwc3_disconnect_gadget(dwc);
 2244 	dwc->start_config_issued = false;
 2245 
 2246 	dwc->gadget.speed = USB_SPEED_UNKNOWN;
 2247 	dwc->setup_packet_pending = false;
 2248 }
 2249 
/*
 * dwc3_gadget_reset_interrupt - handle the USB Reset device event
 *
 * Emulates a possibly-missing Disconnect event on revisions < 1.88a
 * (see the STAR note below), moves the gadget to Default state,
 * clears test mode, stops all active transfers, clears endpoint
 * stalls, and resets the device address to zero.
 */
static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32			reg;

	dev_vdbg(dwc->dev, "%s\n", __func__);

	/*
	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
	 * would cause a missing Disconnect Event if there's a
	 * pending Setup Packet in the FIFO.
	 *
	 * There's no suggested workaround on the official Bug
	 * report, which states that "unless the driver/application
	 * is doing any special handling of a disconnect event,
	 * there is no functional issue".
	 *
	 * Unfortunately, it turns out that we _do_ some special
	 * handling of a disconnect event, namely complete all
	 * pending transfers, notify gadget driver of the
	 * disconnection, and so on.
	 *
	 * Our suggested workaround is to follow the Disconnect
	 * Event steps here, instead, based on a setup_packet_pending
	 * flag. Such flag gets set whenever we have a XferNotReady
	 * event on EP0 and gets cleared on XferComplete for the
	 * same endpoint.
	 *
	 * Refers to:
	 *
	 * STAR#9000466709: RTL: Device : Disconnect event not
	 * generated if setup packet pending in FIFO
	 */
	if (dwc->revision < DWC3_REVISION_188A) {
		if (dwc->setup_packet_pending)
			dwc3_gadget_disconnect_interrupt(dwc);
	}

	/* after reset -> Default State */
	usb_gadget_set_state(&dwc->gadget, USB_STATE_DEFAULT);

	/* a reset while connected implies a disconnect first */
	if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
		dwc3_disconnect_gadget(dwc);

	/* leave any USB test mode selected before the reset */
	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	dwc->test_mode = false;

	dwc3_stop_active_transfers(dwc);
	dwc3_clear_stall_all_ep(dwc);
	dwc->start_config_issued = false;

	/* Reset device address to zero */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}
 2307 
/*
 * dwc3_update_ram_clk_sel - reprogram GCTL.RAMClkSel after Connect Done
 * @speed: raw DSTS connect-speed field
 *
 * Only acts for a SuperSpeed connection. RAMClkSel resets to 0 on each
 * USB reset, so it must be rewritten on every Connect Done. Note that
 * usb30_clock is a compile-time constant (DWC3_GCTL_CLK_BUS), so the
 * !usb30_clock early-out is effectively dead unless that macro is 0 --
 * it reads like a hook for future configurability.
 */
static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
{
	u32 reg;
	u32 usb30_clock = DWC3_GCTL_CLK_BUS;

	/*
	 * We change the clock only at SS but I dunno why I would want to do
	 * this. Maybe it becomes part of the power saving plan.
	 */

	if (speed != DWC3_DSTS_SUPERSPEED)
		return;

	/*
	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
	 * each time on Connect Done.
	 */
	if (!usb30_clock)
		return;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}
 2332 
/*
 * dwc3_gadget_conndone_interrupt - handle the Connect Done device event
 *
 * Latches the negotiated speed from DSTS (note: dwc->speed stores the
 * raw CONNECTSPD field, not a USB_SPEED_* value), sizes ep0's
 * wMaxPacketSize accordingly, optionally enables USB2 LPM (revisions
 * > 1.94a connecting at non-SuperSpeed), and re-enables both halves of
 * physical ep0.
 *
 * NOTE(review): the switch compares the DSTS speed field against
 * DWC3_DCFG_* macros -- presumably the two encodings are identical;
 * confirm against the register header. There is no default case, so an
 * unrecognized speed value leaves ep0 and gadget.speed untouched.
 */
static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
	struct dwc3_ep		*dep;
	int			ret;
	u32			reg;
	u8			speed;

	dev_vdbg(dwc->dev, "%s\n", __func__);

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	speed = reg & DWC3_DSTS_CONNECTSPD;
	dwc->speed = speed;

	dwc3_update_ram_clk_sel(dwc, speed);

	switch (speed) {
	case DWC3_DCFG_SUPERSPEED:
		/*
		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
		 * would cause a missing USB3 Reset event.
		 *
		 * In such situations, we should force a USB3 Reset
		 * event by calling our dwc3_gadget_reset_interrupt()
		 * routine.
		 *
		 * Refers to:
		 *
		 * STAR#9000483510: RTL: SS : USB3 reset event may
		 * not be generated always when the link enters poll
		 */
		if (dwc->revision < DWC3_REVISION_190A)
			dwc3_gadget_reset_interrupt(dwc);

		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget.ep0->maxpacket = 512;
		dwc->gadget.speed = USB_SPEED_SUPER;
		break;
	case DWC3_DCFG_HIGHSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_HIGH;
		break;
	case DWC3_DCFG_FULLSPEED2:
	case DWC3_DCFG_FULLSPEED1:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_FULL;
		break;
	case DWC3_DCFG_LOWSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
		dwc->gadget.ep0->maxpacket = 8;
		dwc->gadget.speed = USB_SPEED_LOW;
		break;
	}

	/* Enable USB2 LPM Capability */

	if ((dwc->revision > DWC3_REVISION_194A)
			&& (speed != DWC3_DCFG_SUPERSPEED)) {
		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
		reg |= DWC3_DCFG_LPM_CAP;
		dwc3_writel(dwc->regs, DWC3_DCFG, reg);

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);

		/*
		 * TODO: This should be configurable. For now using
		 * maximum allowed HIRD threshold value of 0b1100
		 */
		reg |= DWC3_DCTL_HIRD_THRES(12);

		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	} else {
		/* older core or SuperSpeed: make sure HIRD is cleared */
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
			false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
			false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	/*
	 * Configure PHY via GUSB3PIPECTLn if required.
	 *
	 * Update GTXFIFOSIZn
	 *
	 * In both cases reset values should be sufficient.
	 */
}
 2436 
 2437 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
 2438 {
 2439 	dev_vdbg(dwc->dev, "%s\n", __func__);
 2440 
 2441 	/*
 2442 	 * TODO take core out of low power mode when that's
 2443 	 * implemented.
 2444 	 */
 2445 
 2446 	dwc->gadget_driver->resume(&dwc->gadget);
 2447 }
 2448 
/*
 * dwc3_gadget_linksts_change_interrupt - handle a Link State Change event
 * @evtinfo: raw event info; the low bits carry the new link state
 *
 * Filters the spurious U3 -> RESUME event generated by cores < 2.50a
 * built without hibernation (first STAR note), applies the first half
 * of the U1/U2 -> U0 throughput workaround for revisions < 1.83a
 * (saves and clears the DCTL U1/U2 enable bits; they are restored by
 * dwc3_endpoint_transfer_complete()), forwards suspend/resume
 * notifications to the gadget driver, and records the new link state.
 */
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state	next = evtinfo & DWC3_LINK_STATE_MASK;
	unsigned int		pwropt;

	/*
	 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
	 * Hibernation mode enabled which would show up when device detects
	 * host-initiated U3 exit.
	 *
	 * In that case, device will generate a Link State Change Interrupt
	 * from U3 to RESUME which is only necessary if Hibernation is
	 * configured in.
	 *
	 * There are no functional changes due to such spurious event and we
	 * just need to ignore it.
	 *
	 * Refers to:
	 *
	 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
	 * operational mode
	 */
	pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
	if ((dwc->revision < DWC3_REVISION_250A) &&
			(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
		if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
				(next == DWC3_LINK_STATE_RESUME)) {
			dev_vdbg(dwc->dev, "ignoring transition U3 -> Resume\n");
			return;
		}
	}

	/*
	 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending
	 * on the link partner, the USB session might do multiple entry/exit
	 * of low power states before a transfer takes place.
	 *
	 * Due to this problem, we might experience lower throughput. The
	 * suggested workaround is to disable DCTL[12:9] bits if we're
	 * transitioning from U1/U2 to U0 and enable those bits again
	 * after a transfer completes and there are no pending transfers
	 * on any of the enabled endpoints.
	 *
	 * This is the first half of that workaround.
	 *
	 * Refers to:
	 *
	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
	 * core send LGO_Ux entering U0
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		if (next == DWC3_LINK_STATE_U0) {
			u32	u1u2;
			u32	reg;

			switch (dwc->link_state) {
			case DWC3_LINK_STATE_U1:
			case DWC3_LINK_STATE_U2:
				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
				u1u2 = reg & (DWC3_DCTL_INITU2ENA
						| DWC3_DCTL_ACCEPTU2ENA
						| DWC3_DCTL_INITU1ENA
						| DWC3_DCTL_ACCEPTU1ENA);

				/* only latch once until restored */
				if (!dwc->u1u2)
					dwc->u1u2 = reg & u1u2;

				reg &= ~u1u2;

				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	switch (next) {
	case DWC3_LINK_STATE_U1:
		/* U1 only means "suspended" at SuperSpeed */
		if (dwc->speed == USB_SPEED_SUPER)
			dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_U2:
	case DWC3_LINK_STATE_U3:
		dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_RESUME:
		dwc3_resume_gadget(dwc);
		break;
	default:
		/* do nothing */
		break;
	}

	dev_vdbg(dwc->dev, "link change: %s [%d] -> %s [%d]\n",
			dwc3_gadget_link_string(dwc->link_state),
			dwc->link_state, dwc3_gadget_link_string(next), next);

	dwc->link_state = next;
}
 2551 
/*
 * dwc3_gadget_hibernation_interrupt - handle a Hibernation Request event
 * @evtinfo: raw event info; BIT(4) presumably flags a SuperSpeed link
 *
 * NOTE(review): is_ss is either 0 or BIT(4) (i.e. 16), while the
 * speed comparison yields 0/1, so the xor below is non-zero whenever
 * the link reports SS -- verify this really implements the intended
 * "ignore bogus events" filter. Also dwc->speed is assigned the raw
 * DSTS CONNECTSPD field in dwc3_gadget_conndone_interrupt(), so
 * comparing it with USB_SPEED_SUPER mixes two encodings -- confirm.
 *
 * Actual hibernation entry is not implemented yet (see trailing
 * placeholder comment).
 */
static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	unsigned int is_ss = evtinfo & BIT(4);

	/**
	 * WORKAROUND: DWC3 revision 2.20a with hibernation support
	 * have a known issue which can cause USB CV TD.9.23 to fail
	 * randomly.
	 *
	 * Because of this issue, core could generate bogus hibernation
	 * events which SW needs to ignore.
	 *
	 * Refers to:
	 *
	 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
	 * Device Fallback from SuperSpeed
	 */
	if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
		return;

	/* enter hibernation here */
}
 2575 
/*
 * dwc3_gadget_interrupt - dispatch one device-wide (devt) event
 *
 * Routes disconnect, reset, connect-done, wakeup, hibernation-request
 * and link-state-change events to their dedicated handlers; the
 * remaining event types (EOPF, SOF, erratic error, command complete,
 * overflow) are only logged. A hibernation request on a core built
 * without hibernation support triggers a one-shot WARN and is dropped.
 */
static void dwc3_gadget_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_devt *event)
{
	switch (event->type) {
	case DWC3_DEVICE_EVENT_DISCONNECT:
		dwc3_gadget_disconnect_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_RESET:
		dwc3_gadget_reset_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_CONNECT_DONE:
		dwc3_gadget_conndone_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_WAKEUP:
		dwc3_gadget_wakeup_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_HIBER_REQ:
		if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
					"unexpected hibernation event\n"))
			break;

		dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_EOPF:
		dev_vdbg(dwc->dev, "End of Periodic Frame\n");
		break;
	case DWC3_DEVICE_EVENT_SOF:
		dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
		break;
	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
		dev_vdbg(dwc->dev, "Erratic Error\n");
		break;
	case DWC3_DEVICE_EVENT_CMD_CMPL:
		dev_vdbg(dwc->dev, "Command Complete\n");
		break;
	case DWC3_DEVICE_EVENT_OVERFLOW:
		dev_vdbg(dwc->dev, "Overflow\n");
		break;
	default:
		dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
	}
}
 2621 
 2622 static void dwc3_process_event_entry(struct dwc3 *dwc,
 2623 		const union dwc3_event *event)
 2624 {
 2625 	/* Endpoint IRQ, handle it and return early */
 2626 	if (event->type.is_devspec == 0) {
 2627 		/* depevt */
 2628 		return dwc3_endpoint_interrupt(dwc, &event->depevt);
 2629 	}
 2630 
 2631 	switch (event->type.type) {
 2632 	case DWC3_EVENT_TYPE_DEV:
 2633 		dwc3_gadget_interrupt(dwc, &event->devt);
 2634 		break;
 2635 	/* REVISIT what to do with Carkit and I2C events ? */
 2636 	default:
 2637 		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
 2638 	}
 2639 }
 2640 
 2641 static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
 2642 {
 2643 	struct dwc3_event_buffer *evt;
 2644 	irqreturn_t ret = IRQ_NONE;
 2645 	int left;
 2646 	u32 reg;
 2647 
 2648 	evt = dwc->ev_buffs[buf];
 2649 	left = evt->count;
 2650 
 2651 	if (!(evt->flags & DWC3_EVENT_PENDING))
 2652 		return IRQ_NONE;
 2653 
 2654 	while (left > 0) {
 2655 		union dwc3_event event;
 2656 
 2657 		event.raw = *(u32 *) (evt->buf + evt->lpos);
 2658 
 2659 		dwc3_process_event_entry(dwc, &event);
 2660 
 2661 		/*
 2662 		 * FIXME we wrap around correctly to the next entry as
 2663 		 * almost all entries are 4 bytes in size. There is one
 2664 		 * entry which has 12 bytes which is a regular entry
 2665 		 * followed by 8 bytes data. ATM I don't know how
 2666 		 * things are organized if we get next to the a
 2667 		 * boundary so I worry about that once we try to handle
 2668 		 * that.
 2669 		 */
 2670 		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
 2671 		left -= 4;
 2672 
 2673 		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
 2674 	}
 2675 
 2676 	evt->count = 0;
 2677 	evt->flags &= ~DWC3_EVENT_PENDING;
 2678 	ret = IRQ_HANDLED;
 2679 
 2680 	/* Unmask interrupt */
 2681 	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
 2682 	reg &= ~DWC3_GEVNTSIZ_INTMASK;
 2683 	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
 2684 
 2685 	return ret;
 2686 }
 2687 
 2688 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
 2689 {
 2690 	struct dwc3 *dwc = _dwc;
 2691 	unsigned long flags;
 2692 	irqreturn_t ret = IRQ_NONE;
 2693 	int i;
 2694 
 2695 	spin_lock_irqsave(&dwc->lock, flags);
 2696 
 2697 	for (i = 0; i < dwc->num_event_buffers; i++)
 2698 		ret |= dwc3_process_event_buf(dwc, i);
 2699 
 2700 	spin_unlock_irqrestore(&dwc->lock, flags);
 2701 
 2702 	return ret;
 2703 }
 2704 
 2705 static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
 2706 {
 2707 	struct dwc3_event_buffer *evt;
 2708 	u32 count;
 2709 	u32 reg;
 2710 
 2711 	evt = dwc->ev_buffs[buf];
 2712 
 2713 	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
 2714 	count &= DWC3_GEVNTCOUNT_MASK;
 2715 	if (!count)
 2716 		return IRQ_NONE;
 2717 
 2718 	evt->count = count;
 2719 	evt->flags |= DWC3_EVENT_PENDING;
 2720 
 2721 	/* Mask interrupt */
 2722 	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
 2723 	reg |= DWC3_GEVNTSIZ_INTMASK;
 2724 	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
 2725 
 2726 	return IRQ_WAKE_THREAD;
 2727 }
 2728 
 2729 static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
 2730 {
 2731 	struct dwc3			*dwc = _dwc;
 2732 	int				i;
 2733 	irqreturn_t			ret = IRQ_NONE;
 2734 
 2735 	spin_lock(&dwc->lock);
 2736 
 2737 	for (i = 0; i < dwc->num_event_buffers; i++) {
 2738 		irqreturn_t status;
 2739 
 2740 		status = dwc3_check_event_buf(dwc, i);
 2741 		if (status == IRQ_WAKE_THREAD)
 2742 			ret = status;
 2743 	}
 2744 
 2745 	spin_unlock(&dwc->lock);
 2746 
 2747 	return ret;
 2748 }
 2749 
 2750 /**
 2751  * dwc3_gadget_init - Initializes gadget related registers
 2752  * @dwc: pointer to our controller context structure
 2753  *
 2754  * Returns 0 on success otherwise negative errno.
 2755  */
 2756 int dwc3_gadget_init(struct dwc3 *dwc)
 2757 {
 2758 	int					ret;
 2759 
 2760 	dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
 2761 			&dwc->ctrl_req_addr, GFP_KERNEL);
 2762 	if (!dwc->ctrl_req) {
 2763 		dev_err(dwc->dev, "failed to allocate ctrl request\n");
 2764 		ret = -ENOMEM;
 2765 		goto err0;
 2766 	}
 2767 
 2768 	dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
 2769 			&dwc->ep0_trb_addr, GFP_KERNEL);
 2770 	if (!dwc->ep0_trb) {
 2771 		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
 2772 		ret = -ENOMEM;
 2773 		goto err1;
 2774 	}
 2775 
 2776 	dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
 2777 	if (!dwc->setup_buf) {
 2778 		dev_err(dwc->dev, "failed to allocate setup buffer\n");
 2779 		ret = -ENOMEM;
 2780 		goto err2;
 2781 	}
 2782 
 2783 	dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
 2784 			DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
 2785 			GFP_KERNEL);
 2786 	if (!dwc->ep0_bounce) {
 2787 		dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
 2788 		ret = -ENOMEM;
 2789 		goto err3;
 2790 	}
 2791 
 2792 	dwc->gadget.ops			= &dwc3_gadget_ops;
 2793 	dwc->gadget.max_speed		= USB_SPEED_SUPER;
 2794 	dwc->gadget.speed		= USB_SPEED_UNKNOWN;
 2795 	dwc->gadget.sg_supported	= true;
 2796 	dwc->gadget.name		= "dwc3-gadget";
 2797 
 2798 	/*
 2799 	 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
 2800 	 * on ep out.
 2801 	 */
 2802 	dwc->gadget.quirk_ep_out_aligned_size = true;
 2803 
 2804 	/*
 2805 	 * REVISIT: Here we should clear all pending IRQs to be
 2806 	 * sure we're starting from a well known location.
 2807 	 */
 2808 
 2809 	ret = dwc3_gadget_init_endpoints(dwc);
 2810 	if (ret)
 2811 		goto err4;
 2812 
 2813 	ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
 2814 	if (ret) {
 2815 		dev_err(dwc->dev, "failed to register udc\n");
 2816 		goto err4;
 2817 	}
 2818 
 2819 	return 0;
 2820 
 2821 err4:
 2822 	dwc3_gadget_free_endpoints(dwc);
 2823 	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
 2824 			dwc->ep0_bounce, dwc->ep0_bounce_addr);
 2825 
 2826 err3:
 2827 	kfree(dwc->setup_buf);
 2828 
 2829 err2:
 2830 	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
 2831 			dwc->ep0_trb, dwc->ep0_trb_addr);
 2832 
 2833 err1:
 2834 	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
 2835 			dwc->ctrl_req, dwc->ctrl_req_addr);
 2836 
 2837 err0:
 2838 	return ret;
 2839 }
 2840 
 2841 /* -------------------------------------------------------------------------- */
 2842 
/**
 * dwc3_gadget_exit - undo dwc3_gadget_init()
 * @dwc: pointer to our controller context structure
 *
 * Unregisters the UDC, frees the endpoint list and releases every
 * buffer allocated by dwc3_gadget_init(), in reverse allocation order.
 */
void dwc3_gadget_exit(struct dwc3 *dwc)
{
	usb_del_gadget_udc(&dwc->gadget);

	dwc3_gadget_free_endpoints(dwc);

	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
			dwc->ep0_bounce, dwc->ep0_bounce_addr);

	kfree(dwc->setup_buf);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
			dwc->ep0_trb, dwc->ep0_trb_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			dwc->ctrl_req, dwc->ctrl_req_addr);
}
 2860 
 2861 int dwc3_gadget_prepare(struct dwc3 *dwc)
 2862 {
 2863 	if (dwc->pullups_connected) {
 2864 		dwc3_gadget_disable_irq(dwc);
 2865 		dwc3_gadget_run_stop(dwc, true, true);
 2866 	}
 2867 
 2868 	return 0;
 2869 }
 2870 
 2871 void dwc3_gadget_complete(struct dwc3 *dwc)
 2872 {
 2873 	if (dwc->pullups_connected) {
 2874 		dwc3_gadget_enable_irq(dwc);
 2875 		dwc3_gadget_run_stop(dwc, true, false);
 2876 	}
 2877 }
 2878 
/*
 * dwc3_gadget_suspend - quiesce ep0 and save state for system suspend
 * @dwc: pointer to our controller context structure
 *
 * Disables both directions of the default control endpoint and saves
 * DCFG so dwc3_gadget_resume() can restore it.  Always returns 0.
 */
int dwc3_gadget_suspend(struct dwc3 *dwc)
{
	/* eps[0]/eps[1] are the OUT/IN halves of the control endpoint */
	__dwc3_gadget_ep_disable(dwc->eps[0]);
	__dwc3_gadget_ep_disable(dwc->eps[1]);

	/* snapshot the device configuration register across suspend */
	dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);

	return 0;
}
 2888 
/*
 * dwc3_gadget_resume - restore gadget state after system resume
 * @dwc: pointer to our controller context structure
 *
 * Re-enables both directions of ep0, restarts SETUP packet reception
 * and writes back the DCFG value saved by dwc3_gadget_suspend().
 *
 * Returns 0 on success otherwise negative errno.
 */
int dwc3_gadget_resume(struct dwc3 *dwc)
{
	struct dwc3_ep		*dep;
	int			ret;

	/* Start with SuperSpeed Default */
	/* 512 is the SuperSpeed ep0 max packet size */
	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
			false);
	if (ret)
		goto err0;

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
			false);
	if (ret)
		goto err1;

	/* begin to receive SETUP packets */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);

	/* restore the device configuration saved at suspend time */
	dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);

	return 0;

err1:
	/* undo the successful eps[0] enable before reporting failure */
	__dwc3_gadget_ep_disable(dwc->eps[0]);

err0:
	return ret;
}
 2923 
 2924 
 2925 
 2926 
 2927 
 2928 /* LDV_COMMENT_BEGIN_MAIN */
 2929 #ifdef LDV_MAIN2_sequence_infinite_withcheck_stateful
 2930 
 2931 /*###########################################################################*/
 2932 
 2933 /*############## Driver Environment Generator 0.2 output ####################*/
 2934 
 2935 /*###########################################################################*/
 2936 
 2937 
 2938 
 2939 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
 2940 void ldv_check_final_state(void);
 2941 
 2942 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
 2943 void ldv_check_return_value(int res);
 2944 
 2945 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
 2946 void ldv_check_return_value_probe(int res);
 2947 
 2948 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
 2949 void ldv_initialize(void);
 2950 
 2951 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
 2952 void ldv_handler_precall(void);
 2953 
 2954 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */
 2955 int nondet_int(void);
 2956 
 2957 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
 2958 int LDV_IN_INTERRUPT;
 2959 
/* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
/*
 * NOTE(review): this function is auto-generated by the LDV Driver
 * Environment Generator.  It models the kernel environment around the
 * dwc3 gadget driver: in a nondeterministic infinite loop it invokes the
 * driver's usb_ep_ops / usb_gadget_ops callbacks and the (threaded) IRQ
 * handlers with arbitrary arguments, so the verifier can explore every
 * interleaving.  The LDV_COMMENT_* markers are parsed by the LDV
 * toolchain — do not edit or remove them by hand.
 */
void ldv_main2_sequence_infinite_withcheck_stateful(void) {



	/* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
	/*============================= VARIABLE DECLARATION PART   =============================*/
	/** STRUCT: struct type: usb_ep_ops, struct name: dwc3_gadget_ep0_ops **/
	/* content: static int dwc3_gadget_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc)*/
	/* LDV_COMMENT_END_PREP */
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dwc3_gadget_ep0_enable" */
	struct usb_ep * var_group1;
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dwc3_gadget_ep0_enable" */
	const struct usb_endpoint_descriptor * var_dwc3_gadget_ep0_enable_19_p1;
	/* content: static int dwc3_gadget_ep0_disable(struct usb_ep *ep)*/
	/* LDV_COMMENT_END_PREP */
	/* content: static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)*/
	/* LDV_COMMENT_END_PREP */
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dwc3_gadget_ep_alloc_request" */
	gfp_t  var_dwc3_gadget_ep_alloc_request_23_p1;
	/* content: static void dwc3_gadget_ep_free_request(struct usb_ep *ep, struct usb_request *request)*/
	/* LDV_COMMENT_END_PREP */
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dwc3_gadget_ep_free_request" */
	struct usb_request * var_group2;
	/* content: static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, struct usb_request *request)*/
	/* LDV_COMMENT_END_PREP */
	/* content: static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)*/
	/* LDV_COMMENT_END_PREP */

	/** STRUCT: struct type: usb_ep_ops, struct name: dwc3_gadget_ep_ops **/
	/* content: static int dwc3_gadget_ep_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc)*/
	/* LDV_COMMENT_END_PREP */
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dwc3_gadget_ep_enable" */
	const struct usb_endpoint_descriptor * var_dwc3_gadget_ep_enable_21_p1;
	/* content: static int dwc3_gadget_ep_disable(struct usb_ep *ep)*/
	/* LDV_COMMENT_END_PREP */
	/* content: static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)*/
	/* LDV_COMMENT_END_PREP */
	/* content: static void dwc3_gadget_ep_free_request(struct usb_ep *ep, struct usb_request *request)*/
	/* LDV_COMMENT_END_PREP */
	/* content: static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, gfp_t gfp_flags)*/
	/* LDV_COMMENT_END_PREP */
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dwc3_gadget_ep_queue" */
	gfp_t  var_dwc3_gadget_ep_queue_31_p2;
	/* content: static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, struct usb_request *request)*/
	/* LDV_COMMENT_END_PREP */
	/* content: static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)*/
	/* LDV_COMMENT_END_PREP */
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dwc3_gadget_ep_set_halt" */
	int  var_dwc3_gadget_ep_set_halt_34_p1;
	/* content: static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)*/
	/* LDV_COMMENT_END_PREP */

	/** STRUCT: struct type: usb_gadget_ops, struct name: dwc3_gadget_ops **/
	/* content: static int dwc3_gadget_get_frame(struct usb_gadget *g)*/
	/* LDV_COMMENT_END_PREP */
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dwc3_gadget_get_frame" */
	struct usb_gadget * var_group3;
	/* content: static int dwc3_gadget_wakeup(struct usb_gadget *g)*/
	/* LDV_COMMENT_END_PREP */
	/* content: static int dwc3_gadget_set_selfpowered(struct usb_gadget *g, int is_selfpowered)*/
	/* LDV_COMMENT_END_PREP */
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dwc3_gadget_set_selfpowered" */
	int  var_dwc3_gadget_set_selfpowered_38_p1;
	/* content: static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)*/
	/* LDV_COMMENT_END_PREP */
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dwc3_gadget_pullup" */
	int  var_dwc3_gadget_pullup_40_p1;
	/* content: static int dwc3_gadget_start(struct usb_gadget *g, struct usb_gadget_driver *driver)*/
	/* LDV_COMMENT_END_PREP */
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dwc3_gadget_start" */
	struct usb_gadget_driver * var_group4;
	/* content: static int dwc3_gadget_stop(struct usb_gadget *g, struct usb_gadget_driver *driver)*/
	/* LDV_COMMENT_END_PREP */

	/** CALLBACK SECTION request_irq **/
	/* content: static irqreturn_t dwc3_interrupt(int irq, void *_dwc)*/
	/* LDV_COMMENT_END_PREP */
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dwc3_interrupt" */
	int  var_dwc3_interrupt_70_p0;
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dwc3_interrupt" */
	void * var_dwc3_interrupt_70_p1;
	/* content: static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)*/
	/* LDV_COMMENT_END_PREP */
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dwc3_thread_interrupt" */
	int  var_dwc3_thread_interrupt_68_p0;
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dwc3_thread_interrupt" */
	void * var_dwc3_thread_interrupt_68_p1;




	/* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
	/* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
	/*============================= VARIABLE INITIALIZING PART  =============================*/
	/* 1 == process context; IRQ handlers below temporarily set it to 2. */
	LDV_IN_INTERRUPT=1;




	/* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
	/* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
	/*============================= FUNCTION CALL SECTION       =============================*/
	/* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
	ldv_initialize();


	/*
	 * Nondeterministic driver lifetime: each iteration picks one driver
	 * entry point at random, modelling arbitrary callback ordering.
	 */
	while(  nondet_int()
	) {

		switch(nondet_int()) {

			case 0: {

				/** STRUCT: struct type: usb_ep_ops, struct name: dwc3_gadget_ep0_ops **/


				/* content: static int dwc3_gadget_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc)*/
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "enable" from driver structure with callbacks "dwc3_gadget_ep0_ops" */
				ldv_handler_precall();
				dwc3_gadget_ep0_enable( var_group1, var_dwc3_gadget_ep0_enable_19_p1);




			}

			break;
			case 1: {

				/** STRUCT: struct type: usb_ep_ops, struct name: dwc3_gadget_ep0_ops **/


				/* content: static int dwc3_gadget_ep0_disable(struct usb_ep *ep)*/
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "disable" from driver structure with callbacks "dwc3_gadget_ep0_ops" */
				ldv_handler_precall();
				dwc3_gadget_ep0_disable( var_group1);




			}

			break;
			case 2: {

				/** STRUCT: struct type: usb_ep_ops, struct name: dwc3_gadget_ep0_ops **/


				/* content: static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)*/
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "alloc_request" from driver structure with callbacks "dwc3_gadget_ep0_ops" */
				ldv_handler_precall();
				dwc3_gadget_ep_alloc_request( var_group1, var_dwc3_gadget_ep_alloc_request_23_p1);




			}

			break;
			case 3: {

				/** STRUCT: struct type: usb_ep_ops, struct name: dwc3_gadget_ep0_ops **/


				/* content: static void dwc3_gadget_ep_free_request(struct usb_ep *ep, struct usb_request *request)*/
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "free_request" from driver structure with callbacks "dwc3_gadget_ep0_ops" */
				ldv_handler_precall();
				dwc3_gadget_ep_free_request( var_group1, var_group2);




			}

			break;
			case 4: {

				/** STRUCT: struct type: usb_ep_ops, struct name: dwc3_gadget_ep0_ops **/


				/* content: static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, struct usb_request *request)*/
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "dequeue" from driver structure with callbacks "dwc3_gadget_ep0_ops" */
				ldv_handler_precall();
				dwc3_gadget_ep_dequeue( var_group1, var_group2);




			}

			break;
			case 5: {

				/** STRUCT: struct type: usb_ep_ops, struct name: dwc3_gadget_ep0_ops **/


				/* content: static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)*/
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "set_wedge" from driver structure with callbacks "dwc3_gadget_ep0_ops" */
				ldv_handler_precall();
				dwc3_gadget_ep_set_wedge( var_group1);




			}

			break;
			case 6: {

				/** STRUCT: struct type: usb_ep_ops, struct name: dwc3_gadget_ep_ops **/


				/* content: static int dwc3_gadget_ep_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc)*/
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "enable" from driver structure with callbacks "dwc3_gadget_ep_ops" */
				ldv_handler_precall();
				dwc3_gadget_ep_enable( var_group1, var_dwc3_gadget_ep_enable_21_p1);




			}

			break;
			case 7: {

				/** STRUCT: struct type: usb_ep_ops, struct name: dwc3_gadget_ep_ops **/


				/* content: static int dwc3_gadget_ep_disable(struct usb_ep *ep)*/
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "disable" from driver structure with callbacks "dwc3_gadget_ep_ops" */
				ldv_handler_precall();
				dwc3_gadget_ep_disable( var_group1);




			}

			break;
			case 8: {

				/** STRUCT: struct type: usb_ep_ops, struct name: dwc3_gadget_ep_ops **/


				/* content: static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)*/
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "alloc_request" from driver structure with callbacks "dwc3_gadget_ep_ops" */
				ldv_handler_precall();
				dwc3_gadget_ep_alloc_request( var_group1, var_dwc3_gadget_ep_alloc_request_23_p1);




			}

			break;
			case 9: {

				/** STRUCT: struct type: usb_ep_ops, struct name: dwc3_gadget_ep_ops **/


				/* content: static void dwc3_gadget_ep_free_request(struct usb_ep *ep, struct usb_request *request)*/
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "free_request" from driver structure with callbacks "dwc3_gadget_ep_ops" */
				ldv_handler_precall();
				dwc3_gadget_ep_free_request( var_group1, var_group2);




			}

			break;
			case 10: {

				/** STRUCT: struct type: usb_ep_ops, struct name: dwc3_gadget_ep_ops **/


				/* content: static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, gfp_t gfp_flags)*/
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "queue" from driver structure with callbacks "dwc3_gadget_ep_ops" */
				ldv_handler_precall();
				dwc3_gadget_ep_queue( var_group1, var_group2, var_dwc3_gadget_ep_queue_31_p2);




			}

			break;
			case 11: {

				/** STRUCT: struct type: usb_ep_ops, struct name: dwc3_gadget_ep_ops **/


				/* content: static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, struct usb_request *request)*/
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "dequeue" from driver structure with callbacks "dwc3_gadget_ep_ops" */
				ldv_handler_precall();
				dwc3_gadget_ep_dequeue( var_group1, var_group2);




			}

			break;
			case 12: {

				/** STRUCT: struct type: usb_ep_ops, struct name: dwc3_gadget_ep_ops **/


				/* content: static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)*/
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "set_halt" from driver structure with callbacks "dwc3_gadget_ep_ops" */
				ldv_handler_precall();
				dwc3_gadget_ep_set_halt( var_group1, var_dwc3_gadget_ep_set_halt_34_p1);




			}

			break;
			case 13: {

				/** STRUCT: struct type: usb_ep_ops, struct name: dwc3_gadget_ep_ops **/


				/* content: static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)*/
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "set_wedge" from driver structure with callbacks "dwc3_gadget_ep_ops" */
				ldv_handler_precall();
				dwc3_gadget_ep_set_wedge( var_group1);




			}

			break;
			case 14: {

				/** STRUCT: struct type: usb_gadget_ops, struct name: dwc3_gadget_ops **/


				/* content: static int dwc3_gadget_get_frame(struct usb_gadget *g)*/
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "get_frame" from driver structure with callbacks "dwc3_gadget_ops" */
				ldv_handler_precall();
				dwc3_gadget_get_frame( var_group3);




			}

			break;
			case 15: {

				/** STRUCT: struct type: usb_gadget_ops, struct name: dwc3_gadget_ops **/


				/* content: static int dwc3_gadget_wakeup(struct usb_gadget *g)*/
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "wakeup" from driver structure with callbacks "dwc3_gadget_ops" */
				ldv_handler_precall();
				dwc3_gadget_wakeup( var_group3);




			}

			break;
			case 16: {

				/** STRUCT: struct type: usb_gadget_ops, struct name: dwc3_gadget_ops **/


				/* content: static int dwc3_gadget_set_selfpowered(struct usb_gadget *g, int is_selfpowered)*/
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "set_selfpowered" from driver structure with callbacks "dwc3_gadget_ops" */
				ldv_handler_precall();
				dwc3_gadget_set_selfpowered( var_group3, var_dwc3_gadget_set_selfpowered_38_p1);




			}

			break;
			case 17: {

				/** STRUCT: struct type: usb_gadget_ops, struct name: dwc3_gadget_ops **/


				/* content: static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)*/
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "pullup" from driver structure with callbacks "dwc3_gadget_ops" */
				ldv_handler_precall();
				dwc3_gadget_pullup( var_group3, var_dwc3_gadget_pullup_40_p1);




			}

			break;
			case 18: {

				/** STRUCT: struct type: usb_gadget_ops, struct name: dwc3_gadget_ops **/


				/* content: static int dwc3_gadget_start(struct usb_gadget *g, struct usb_gadget_driver *driver)*/
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "udc_start" from driver structure with callbacks "dwc3_gadget_ops" */
				ldv_handler_precall();
				dwc3_gadget_start( var_group3, var_group4);




			}

			break;
			case 19: {

				/** STRUCT: struct type: usb_gadget_ops, struct name: dwc3_gadget_ops **/


				/* content: static int dwc3_gadget_stop(struct usb_gadget *g, struct usb_gadget_driver *driver)*/
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "udc_stop" from driver structure with callbacks "dwc3_gadget_ops" */
				ldv_handler_precall();
				dwc3_gadget_stop( var_group3, var_group4);




			}

			break;
			case 20: {

				/** CALLBACK SECTION request_irq **/
				/* Model interrupt context around the hard IRQ handler. */
				LDV_IN_INTERRUPT=2;

				/* content: static irqreturn_t dwc3_interrupt(int irq, void *_dwc)*/
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL */
				ldv_handler_precall();
				dwc3_interrupt( var_dwc3_interrupt_70_p0, var_dwc3_interrupt_70_p1);
				LDV_IN_INTERRUPT=1;



			}

			break;
			case 21: {

				/** CALLBACK SECTION request_irq **/
				/* Model interrupt context around the threaded IRQ handler. */
				LDV_IN_INTERRUPT=2;

				/* content: static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)*/
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL */
				ldv_handler_precall();
				dwc3_thread_interrupt( var_dwc3_thread_interrupt_68_p0, var_dwc3_thread_interrupt_68_p1);
				LDV_IN_INTERRUPT=1;



			}

			break;
			default: break;

		}

	}

	/* Loop exit point used by the generated environment model. */
	ldv_module_exit: 

	/* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
	ldv_final: ldv_check_final_state();

	/* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
	return;

}
 3468 #endif
 3469 
 3470 /* LDV_COMMENT_END_MAIN */                 1 
    2 
    3 /* Here is the definition of CHECK_WAIT_FLAGS(flags) macro. */
    4 #include "include/gfp.h"
    5 
    6 #include <verifier/rcv.h>
    7 
    8 
#define LDV_ZERO_STATE 0


/* There are 2 possible states of spin lock. */
enum {
  LDV_SPIN_UNLOCKED = LDV_ZERO_STATE, /* Spin isn't locked. */
  LDV_SPIN_LOCKED /* Spin is locked. */
};


/* Spin isn't locked at the beginning. */
/* Global model state tracked by the ldv_spin_* model functions below. */
int ldv_spin = LDV_SPIN_UNLOCKED;
   21 
   22 
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_alloc_flags') Check that a memory allocating function was called with a correct value of flags in spin locking. */
/* CHECK_WAIT_FLAGS() comes from include/gfp.h (see top of this model file). */
void ldv_check_alloc_flags(gfp_t flags)
{
  /* LDV_COMMENT_ASSERT If spin is locked (ldv_spin != LDV_SPIN_UNLOCKED) then a memory allocating function should be called with __GFP_WAIT flag unset (GFP_ATOMIC or GFP_NOWAIT). */
  ldv_assert(ldv_spin == LDV_SPIN_UNLOCKED || CHECK_WAIT_FLAGS(flags));
}
   29 
   30 extern struct page *ldv_some_page(void);
   31 
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_alloc_flags_and_return_some_page') Check that a memory allocating function was called with a correct value of flags in spin locking. */
/* Same check as ldv_check_alloc_flags(), for page-returning allocators. */
struct page *ldv_check_alloc_flags_and_return_some_page(gfp_t flags)
{
  /* LDV_COMMENT_ASSERT If spin is locked (ldv_spin != LDV_SPIN_UNLOCKED) then a memory allocating function should be called with __GFP_WAIT flag unset (GFP_ATOMIC or GFP_NOWAIT). */
  ldv_assert(ldv_spin == LDV_SPIN_UNLOCKED || CHECK_WAIT_FLAGS(flags));
  /* LDV_COMMENT_RETURN Return a page pointer (maybe NULL). */
  return ldv_some_page();
}
   40 
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_alloc_nonatomic') Check that a memory allocating function was not called in spin locking. */
void ldv_check_alloc_nonatomic(void)
{
  /* LDV_COMMENT_ASSERT If spin is locked (ldv_spin != LDV_SPIN_UNLOCKED) then the memory allocating function should not be called, because it implicitly uses GFP_KERNEL flag. */
  ldv_assert(ldv_spin == LDV_SPIN_UNLOCKED);
}
   47 
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock') Lock spin. */
/* Model of spin_lock(): only records the state transition. */
void ldv_spin_lock(void)
{
  /* LDV_COMMENT_CHANGE_STATE Lock spin. */
  ldv_spin = LDV_SPIN_LOCKED;
}
   54 
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock') Unlock spin. */
/* Model of spin_unlock(): only records the state transition. */
void ldv_spin_unlock(void)
{
  /* LDV_COMMENT_CHANGE_STATE Unlock spin. */
  ldv_spin = LDV_SPIN_UNLOCKED;
}
   61 
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock') Try to lock spin. It should return 0 if spin wasn't locked. */
/*
 * Model of spin_trylock(): the outcome is nondeterministic so the
 * verifier explores both the success and the failure path.
 */
int ldv_spin_trylock(void)
{
  int is_lock;

  /* LDV_COMMENT_OTHER Do this to make nondetermined choice. */
  is_lock = ldv_undef_int();

  if (is_lock)
  {
    /* LDV_COMMENT_RETURN Don't lock spin and return 0. */
    return 0;
  }
  else
  {
    /* LDV_COMMENT_CHANGE_STATE Lock spin. */
    ldv_spin = LDV_SPIN_LOCKED;
    /* LDV_COMMENT_RETURN Return 1 since spin was locked. */
    return 1;
  }
}
   82 }                 1 #ifndef _LDV_RCV_H_
    2 #define _LDV_RCV_H_
    3 
/* If expr evaluates to zero, ldv_assert() causes a program to reach the error
   label like the standard assert(). */
#define ldv_assert(expr) ((expr) ? 0 : ldv_error())

/* The error label wrapper. It is used because of some static verifiers (like
   BLAST) don't accept multiple error labels through a program. */
/* The self-targeting goto is intentional: reaching LDV_ERROR marks a property
   violation for the verifier; this code is never meant to run natively. */
static inline void ldv_error(void)
{
  LDV_ERROR: goto LDV_ERROR;
}

/* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
   avoided by verifiers. */
#define ldv_assume(expr) ((expr) ? 0 : ldv_stop())

/* Infinite loop, that causes verifiers to skip such paths. */
/* Paths reaching LDV_STOP never terminate, so the verifier prunes them. */
static inline void ldv_stop(void) {
  LDV_STOP: goto LDV_STOP;
}
   23 
   24 /* Special nondeterministic functions. */
   25 int ldv_undef_int(void);
   26 void *ldv_undef_ptr(void);
   27 unsigned long ldv_undef_ulong(void);
   28 /* Return nondeterministic negative integer number. */
   29 static inline int ldv_undef_int_negative(void)
   30 {
   31   int ret = ldv_undef_int();
   32 
   33   ldv_assume(ret < 0);
   34 
   35   return ret;
   36 }
   37 /* Return nondeterministic nonpositive integer number. */
   38 static inline int ldv_undef_int_nonpositive(void)
   39 {
   40   int ret = ldv_undef_int();
   41 
   42   ldv_assume(ret <= 0);
   43 
   44   return ret;
   45 }
   46 
/* Add explicit model for __builtin_expect GCC function. Without the model a
   return value will be treated as nondetermined by verifiers. */
/* @c (the "expected" value) is a pure optimization hint and is ignored,
   matching GCC semantics: the builtin always evaluates to @exp. */
long __builtin_expect(long exp, long c)
{
  return exp;
}
   53 
/* This function causes the program to exit abnormally. GCC implements this
function by using a target-dependent mechanism (such as intentionally executing
an illegal instruction) or by calling abort. The mechanism used may vary from
release to release so you should not rely on any particular implementation.
http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
/* Modelled as an always-failing assertion so the verifier reports any path
   that would trap at runtime. */
void __builtin_trap(void)
{
  ldv_assert(0);
}
   63 
   64 /* The constant is for simulating an error of ldv_undef_ptr() function. */
   65 #define LDV_PTR_MAX 2012
   66 
   67 #endif /* _LDV_RCV_H_ */                 1 #ifndef _LINUX_LIST_H
    2 #define _LINUX_LIST_H
    3 
    4 #include <linux/types.h>
    5 #include <linux/stddef.h>
    6 #include <linux/poison.h>
    7 #include <linux/const.h>
    8 
    9 /*
   10  * Simple doubly linked list implementation.
   11  *
   12  * Some of the internal functions ("__xxx") are useful when
   13  * manipulating whole lists rather than single entries, as
   14  * sometimes we already know the next/prev entries and we can
   15  * generate better code by using them directly rather than
   16  * using the generic single-entry routines.
   17  */
   18 
   19 #define LIST_HEAD_INIT(name) { &(name), &(name) }
   20 
   21 #define LIST_HEAD(name) \
   22 	struct list_head name = LIST_HEAD_INIT(name)
   23 
   24 static inline void INIT_LIST_HEAD(struct list_head *list)
   25 {
   26 	list->next = list;
   27 	list->prev = list;
   28 }
   29 
   30 /*
   31  * Insert a new entry between two known consecutive entries.
   32  *
   33  * This is only for internal list manipulation where we know
   34  * the prev/next entries already!
   35  */
#ifndef CONFIG_DEBUG_LIST
/* Splice @new between the known-adjacent nodes @prev and @next.
 * Internal helper: callers guarantee prev/next really are neighbours. */
static inline void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}
#else
/* Debug builds use an out-of-line version with sanity checks (defined elsewhere). */
extern void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next);
#endif
   51 
   52 /**
   53  * list_add - add a new entry
   54  * @new: new entry to be added
   55  * @head: list head to add it after
   56  *
   57  * Insert a new entry after the specified head.
   58  * This is good for implementing stacks.
   59  */
   60 static inline void list_add(struct list_head *new, struct list_head *head)
   61 {
   62 	__list_add(new, head, head->next);
   63 }
   64 
   65 
   66 /**
   67  * list_add_tail - add a new entry
   68  * @new: new entry to be added
   69  * @head: list head to add it before
   70  *
   71  * Insert a new entry before the specified head.
   72  * This is useful for implementing queues.
   73  */
   74 static inline void list_add_tail(struct list_head *new, struct list_head *head)
   75 {
   76 	__list_add(new, head->prev, head);
   77 }
   78 
   79 /*
   80  * Delete a list entry by making the prev/next entries
   81  * point to each other.
   82  *
   83  * This is only for internal list manipulation where we know
   84  * the prev/next entries already!
   85  */
   86 static inline void __list_del(struct list_head * prev, struct list_head * next)
   87 {
   88 	next->prev = prev;
   89 	prev->next = next;
   90 }
   91 
/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty() on entry does not return true after this, the entry is
 * in an undefined state.
 */
#ifndef CONFIG_DEBUG_LIST
/* Unlink without poisoning; used when the entry is immediately reused. */
static inline void __list_del_entry(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
}

static inline void list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	/* Poison the stale links so a use-after-delete faults loudly
	 * instead of silently corrupting a live list. */
	entry->next = LIST_POISON1;
	entry->prev = LIST_POISON2;
}
#else
extern void __list_del_entry(struct list_head *entry);
extern void list_del(struct list_head *entry);
#endif
  114 
/**
 * list_replace - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * If @old was empty, it will be overwritten.
 */
static inline void list_replace(struct list_head *old,
				struct list_head *new)
{
	/* NOTE: the statement order is load-bearing.  When @old is empty
	 * (old->next == old), the second line rewrites old->prev, and the
	 * last two lines then read that updated value, so @new correctly
	 * ends up empty as well.  Do not cache old->prev up front. */
	new->next = old->next;
	new->next->prev = new;
	new->prev = old->prev;
	new->prev->next = new;
}
  130 
static inline void list_replace_init(struct list_head *old,
					struct list_head *new)
{
	/* Take @old's place in its list, then reset @old to a valid empty list. */
	list_replace(old, new);
	INIT_LIST_HEAD(old);
}

/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 *
 * Unlike list_del(), the entry is left in a valid (empty) state, so
 * list_empty(entry) is true afterwards and the entry may be re-added.
 */
static inline void list_del_init(struct list_head *entry)
{
	__list_del_entry(entry);
	INIT_LIST_HEAD(entry);
}
  147 
/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
	/* Unlink first, then re-insert right after @head. */
	__list_del_entry(list);
	list_add(list, head);
}

/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
				  struct list_head *head)
{
	/* Unlink first, then re-insert right before @head. */
	__list_del_entry(list);
	list_add_tail(list, head);
}
  170 
  171 /**
  172  * list_is_last - tests whether @list is the last entry in list @head
  173  * @list: the entry to test
  174  * @head: the head of the list
  175  */
  176 static inline int list_is_last(const struct list_head *list,
  177 				const struct list_head *head)
  178 {
  179 	return list->next == head;
  180 }
  181 
  182 /**
  183  * list_empty - tests whether a list is empty
  184  * @head: the list to test.
  185  */
  186 static inline int list_empty(const struct list_head *head)
  187 {
  188 	return head->next == head;
  189 }
  190 
/**
 * list_empty_careful - tests whether a list is empty and not being modified
 * @head: the list to test
 *
 * Description:
 * tests whether a list is empty _and_ checks that no other CPU might be
 * in the process of modifying either member (next or prev)
 *
 * NOTE: using list_empty_careful() without synchronization
 * can only be safe if the only activity that can happen
 * to the list entry is list_del_init(). Eg. it cannot be used
 * if another CPU could re-list_add() it.
 */
static inline int list_empty_careful(const struct list_head *head)
{
	/* Snapshot ->next once, then require ->prev to agree with it. */
	struct list_head *next = head->next;
	return (next == head) && (next == head->prev);
}
  209 
  210 /**
  211  * list_rotate_left - rotate the list to the left
  212  * @head: the head of the list
  213  */
  214 static inline void list_rotate_left(struct list_head *head)
  215 {
  216 	struct list_head *first;
  217 
  218 	if (!list_empty(head)) {
  219 		first = head->next;
  220 		list_move_tail(first, head);
  221 	}
  222 }
  223 
/**
 * list_is_singular - tests whether a list has just one entry.
 * @head: the list to test.
 */
static inline int list_is_singular(const struct list_head *head)
{
	/* Non-empty, and the first entry is also the last entry. */
	return !list_empty(head) && (head->next == head->prev);
}
  232 
/* Detach the run [head->next .. entry] from @head and make it the whole
 * contents of @list.  Internal helper: callers guarantee @entry is a member
 * of @head and that the run is non-empty.  Order of updates matters. */
static inline void __list_cut_position(struct list_head *list,
		struct list_head *head, struct list_head *entry)
{
	/* First node that stays behind in @head after the cut. */
	struct list_head *new_first = entry->next;
	list->next = head->next;
	list->next->prev = list;
	list->prev = entry;
	entry->next = list;
	head->next = new_first;
	new_first->prev = head;
}
  244 
/**
 * list_cut_position - cut a list into two
 * @list: a new list to add all removed entries
 * @head: a list with entries
 * @entry: an entry within head, could be the head itself
 *	and if so we won't cut the list
 *
 * This helper moves the initial part of @head, up to and
 * including @entry, from @head to @list. You should
 * pass on @entry an element you know is on @head. @list
 * should be an empty list or a list you do not care about
 * losing its data.
 *
 */
static inline void list_cut_position(struct list_head *list,
		struct list_head *head, struct list_head *entry)
{
	/* Nothing to cut from an empty list. */
	if (list_empty(head))
		return;
	/* Singular list: @entry must be either the lone node or @head itself. */
	if (list_is_singular(head) &&
		(head->next != entry && head != entry))
		return;
	if (entry == head)
		/* Cutting "up to the head" moves nothing; just empty @list. */
		INIT_LIST_HEAD(list);
	else
		__list_cut_position(list, head, entry);
}
  272 
  273 static inline void __list_splice(const struct list_head *list,
  274 				 struct list_head *prev,
  275 				 struct list_head *next)
  276 {
  277 	struct list_head *first = list->next;
  278 	struct list_head *last = list->prev;
  279 
  280 	first->prev = prev;
  281 	prev->next = first;
  282 
  283 	last->next = next;
  284 	next->prev = last;
  285 }
  286 
  287 /**
  288  * list_splice - join two lists, this is designed for stacks
  289  * @list: the new list to add.
  290  * @head: the place to add it in the first list.
  291  */
  292 static inline void list_splice(const struct list_head *list,
  293 				struct list_head *head)
  294 {
  295 	if (!list_empty(list))
  296 		__list_splice(list, head, head->next);
  297 }
  298 
  299 /**
  300  * list_splice_tail - join two lists, each list being a queue
  301  * @list: the new list to add.
  302  * @head: the place to add it in the first list.
  303  */
  304 static inline void list_splice_tail(struct list_head *list,
  305 				struct list_head *head)
  306 {
  307 	if (!list_empty(list))
  308 		__list_splice(list, head->prev, head);
  309 }
  310 
  311 /**
  312  * list_splice_init - join two lists and reinitialise the emptied list.
  313  * @list: the new list to add.
  314  * @head: the place to add it in the first list.
  315  *
  316  * The list at @list is reinitialised
  317  */
  318 static inline void list_splice_init(struct list_head *list,
  319 				    struct list_head *head)
  320 {
  321 	if (!list_empty(list)) {
  322 		__list_splice(list, head, head->next);
  323 		INIT_LIST_HEAD(list);
  324 	}
  325 }
  326 
  327 /**
  328  * list_splice_tail_init - join two lists and reinitialise the emptied list
  329  * @list: the new list to add.
  330  * @head: the place to add it in the first list.
  331  *
  332  * Each of the lists is a queue.
  333  * The list at @list is reinitialised
  334  */
  335 static inline void list_splice_tail_init(struct list_head *list,
  336 					 struct list_head *head)
  337 {
  338 	if (!list_empty(list)) {
  339 		__list_splice(list, head->prev, head);
  340 		INIT_LIST_HEAD(list);
  341 	}
  342 }
  343 
  344 /**
  345  * list_entry - get the struct for this entry
  346  * @ptr:	the &struct list_head pointer.
  347  * @type:	the type of the struct this is embedded in.
  348  * @member:	the name of the list_struct within the struct.
  349  */
  350 #define list_entry(ptr, type, member) \
  351 	container_of(ptr, type, member)
  352 
  353 /**
  354  * list_first_entry - get the first element from a list
  355  * @ptr:	the list head to take the element from.
  356  * @type:	the type of the struct this is embedded in.
  357  * @member:	the name of the list_struct within the struct.
  358  *
  359  * Note, that list is expected to be not empty.
  360  */
  361 #define list_first_entry(ptr, type, member) \
  362 	list_entry((ptr)->next, type, member)
  363 
  364 /**
  365  * list_last_entry - get the last element from a list
  366  * @ptr:	the list head to take the element from.
  367  * @type:	the type of the struct this is embedded in.
  368  * @member:	the name of the list_struct within the struct.
  369  *
  370  * Note, that list is expected to be not empty.
  371  */
  372 #define list_last_entry(ptr, type, member) \
  373 	list_entry((ptr)->prev, type, member)
  374 
  375 /**
  376  * list_first_entry_or_null - get the first element from a list
  377  * @ptr:	the list head to take the element from.
  378  * @type:	the type of the struct this is embedded in.
  379  * @member:	the name of the list_struct within the struct.
  380  *
  381  * Note that if the list is empty, it returns NULL.
  382  */
  383 #define list_first_entry_or_null(ptr, type, member) \
  384 	(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
  385 
  386 /**
  387  * list_next_entry - get the next element in list
  388  * @pos:	the type * to cursor
  389  * @member:	the name of the list_struct within the struct.
  390  */
  391 #define list_next_entry(pos, member) \
  392 	list_entry((pos)->member.next, typeof(*(pos)), member)
  393 
  394 /**
  395  * list_prev_entry - get the prev element in list
  396  * @pos:	the type * to cursor
  397  * @member:	the name of the list_struct within the struct.
  398  */
  399 #define list_prev_entry(pos, member) \
  400 	list_entry((pos)->member.prev, typeof(*(pos)), member)
  401 
  402 /**
  403  * list_for_each	-	iterate over a list
  404  * @pos:	the &struct list_head to use as a loop cursor.
  405  * @head:	the head for your list.
  406  */
  407 #define list_for_each(pos, head) \
  408 	for (pos = (head)->next; pos != (head); pos = pos->next)
  409 
  410 /**
  411  * list_for_each_prev	-	iterate over a list backwards
  412  * @pos:	the &struct list_head to use as a loop cursor.
  413  * @head:	the head for your list.
  414  */
  415 #define list_for_each_prev(pos, head) \
  416 	for (pos = (head)->prev; pos != (head); pos = pos->prev)
  417 
  418 /**
  419  * list_for_each_safe - iterate over a list safe against removal of list entry
  420  * @pos:	the &struct list_head to use as a loop cursor.
  421  * @n:		another &struct list_head to use as temporary storage
  422  * @head:	the head for your list.
  423  */
  424 #define list_for_each_safe(pos, n, head) \
  425 	for (pos = (head)->next, n = pos->next; pos != (head); \
  426 		pos = n, n = pos->next)
  427 
  428 /**
  429  * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
  430  * @pos:	the &struct list_head to use as a loop cursor.
  431  * @n:		another &struct list_head to use as temporary storage
  432  * @head:	the head for your list.
  433  */
  434 #define list_for_each_prev_safe(pos, n, head) \
  435 	for (pos = (head)->prev, n = pos->prev; \
  436 	     pos != (head); \
  437 	     pos = n, n = pos->prev)
  438 
  439 /**
  440  * list_for_each_entry	-	iterate over list of given type
  441  * @pos:	the type * to use as a loop cursor.
  442  * @head:	the head for your list.
  443  * @member:	the name of the list_struct within the struct.
  444  */
  445 #define list_for_each_entry(pos, head, member)				\
  446 	for (pos = list_first_entry(head, typeof(*pos), member);	\
  447 	     &pos->member != (head);					\
  448 	     pos = list_next_entry(pos, member))
  449 
  450 /**
  451  * list_for_each_entry_reverse - iterate backwards over list of given type.
  452  * @pos:	the type * to use as a loop cursor.
  453  * @head:	the head for your list.
  454  * @member:	the name of the list_struct within the struct.
  455  */
  456 #define list_for_each_entry_reverse(pos, head, member)			\
  457 	for (pos = list_last_entry(head, typeof(*pos), member);		\
  458 	     &pos->member != (head); 					\
  459 	     pos = list_prev_entry(pos, member))
  460 
  461 /**
  462  * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
  463  * @pos:	the type * to use as a start point
  464  * @head:	the head of the list
  465  * @member:	the name of the list_struct within the struct.
  466  *
  467  * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
  468  */
  469 #define list_prepare_entry(pos, head, member) \
  470 	((pos) ? : list_entry(head, typeof(*pos), member))
  471 
  472 /**
  473  * list_for_each_entry_continue - continue iteration over list of given type
  474  * @pos:	the type * to use as a loop cursor.
  475  * @head:	the head for your list.
  476  * @member:	the name of the list_struct within the struct.
  477  *
  478  * Continue to iterate over list of given type, continuing after
  479  * the current position.
  480  */
  481 #define list_for_each_entry_continue(pos, head, member) 		\
  482 	for (pos = list_next_entry(pos, member);			\
  483 	     &pos->member != (head);					\
  484 	     pos = list_next_entry(pos, member))
  485 
  486 /**
  487  * list_for_each_entry_continue_reverse - iterate backwards from the given point
  488  * @pos:	the type * to use as a loop cursor.
  489  * @head:	the head for your list.
  490  * @member:	the name of the list_struct within the struct.
  491  *
  492  * Start to iterate over list of given type backwards, continuing after
  493  * the current position.
  494  */
  495 #define list_for_each_entry_continue_reverse(pos, head, member)		\
  496 	for (pos = list_prev_entry(pos, member);			\
  497 	     &pos->member != (head);					\
  498 	     pos = list_prev_entry(pos, member))
  499 
  500 /**
  501  * list_for_each_entry_from - iterate over list of given type from the current point
  502  * @pos:	the type * to use as a loop cursor.
  503  * @head:	the head for your list.
  504  * @member:	the name of the list_struct within the struct.
  505  *
  506  * Iterate over list of given type, continuing from current position.
  507  */
  508 #define list_for_each_entry_from(pos, head, member) 			\
  509 	for (; &pos->member != (head);					\
  510 	     pos = list_next_entry(pos, member))
  511 
  512 /**
  513  * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
  514  * @pos:	the type * to use as a loop cursor.
  515  * @n:		another type * to use as temporary storage
  516  * @head:	the head for your list.
  517  * @member:	the name of the list_struct within the struct.
  518  */
  519 #define list_for_each_entry_safe(pos, n, head, member)			\
  520 	for (pos = list_first_entry(head, typeof(*pos), member),	\
  521 		n = list_next_entry(pos, member);			\
  522 	     &pos->member != (head); 					\
  523 	     pos = n, n = list_next_entry(n, member))
  524 
  525 /**
  526  * list_for_each_entry_safe_continue - continue list iteration safe against removal
  527  * @pos:	the type * to use as a loop cursor.
  528  * @n:		another type * to use as temporary storage
  529  * @head:	the head for your list.
  530  * @member:	the name of the list_struct within the struct.
  531  *
  532  * Iterate over list of given type, continuing after current point,
  533  * safe against removal of list entry.
  534  */
  535 #define list_for_each_entry_safe_continue(pos, n, head, member) 		\
  536 	for (pos = list_next_entry(pos, member), 				\
  537 		n = list_next_entry(pos, member);				\
  538 	     &pos->member != (head);						\
  539 	     pos = n, n = list_next_entry(n, member))
  540 
  541 /**
  542  * list_for_each_entry_safe_from - iterate over list from current point safe against removal
  543  * @pos:	the type * to use as a loop cursor.
  544  * @n:		another type * to use as temporary storage
  545  * @head:	the head for your list.
  546  * @member:	the name of the list_struct within the struct.
  547  *
  548  * Iterate over list of given type from current point, safe against
  549  * removal of list entry.
  550  */
  551 #define list_for_each_entry_safe_from(pos, n, head, member) 			\
  552 	for (n = list_next_entry(pos, member);					\
  553 	     &pos->member != (head);						\
  554 	     pos = n, n = list_next_entry(n, member))
  555 
  556 /**
  557  * list_for_each_entry_safe_reverse - iterate backwards over list safe against removal
  558  * @pos:	the type * to use as a loop cursor.
  559  * @n:		another type * to use as temporary storage
  560  * @head:	the head for your list.
  561  * @member:	the name of the list_struct within the struct.
  562  *
  563  * Iterate backwards over list of given type, safe against removal
  564  * of list entry.
  565  */
  566 #define list_for_each_entry_safe_reverse(pos, n, head, member)		\
  567 	for (pos = list_last_entry(head, typeof(*pos), member),		\
  568 		n = list_prev_entry(pos, member);			\
  569 	     &pos->member != (head); 					\
  570 	     pos = n, n = list_prev_entry(n, member))
  571 
  572 /**
  573  * list_safe_reset_next - reset a stale list_for_each_entry_safe loop
  574  * @pos:	the loop cursor used in the list_for_each_entry_safe loop
  575  * @n:		temporary storage used in list_for_each_entry_safe
  576  * @member:	the name of the list_struct within the struct.
  577  *
  578  * list_safe_reset_next is not safe to use in general if the list may be
  579  * modified concurrently (eg. the lock is dropped in the loop body). An
  580  * exception to this is if the cursor element (pos) is pinned in the list,
  581  * and list_safe_reset_next is called after re-taking the lock and before
  582  * completing the current iteration of the loop body.
  583  */
  584 #define list_safe_reset_next(pos, n, member)				\
  585 	n = list_next_entry(pos, member)
  586 
  587 /*
  588  * Double linked lists with a single pointer list head.
  589  * Mostly useful for hash tables where the two pointer list head is
  590  * too wasteful.
  591  * You lose the ability to access the tail in O(1).
  592  */
  593 
  594 #define HLIST_HEAD_INIT { .first = NULL }
  595 #define HLIST_HEAD(name) struct hlist_head name = {  .first = NULL }
  596 #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
  597 static inline void INIT_HLIST_NODE(struct hlist_node *h)
  598 {
  599 	h->next = NULL;
  600 	h->pprev = NULL;
  601 }
  602 
  603 static inline int hlist_unhashed(const struct hlist_node *h)
  604 {
  605 	return !h->pprev;
  606 }
  607 
  608 static inline int hlist_empty(const struct hlist_head *h)
  609 {
  610 	return !h->first;
  611 }
  612 
/* Unlink @n from its hash list.  Works for any position because pprev
 * points at the previous node's ->next slot (or the head's ->first). */
static inline void __hlist_del(struct hlist_node *n)
{
	struct hlist_node *next = n->next;
	struct hlist_node **pprev = n->pprev;
	*pprev = next;
	/* The last node has no successor to fix up. */
	if (next)
		next->pprev = pprev;
}

static inline void hlist_del(struct hlist_node *n)
{
	__hlist_del(n);
	/* Poison stale links so a use-after-delete faults loudly. */
	n->next = LIST_POISON1;
	n->pprev = LIST_POISON2;
}
  628 
/* Unlink @n (if it is hashed) and leave it in a valid unhashed state,
 * so it can be re-added later.  Safe to call on an already-unhashed node. */
static inline void hlist_del_init(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		INIT_HLIST_NODE(n);
	}
}
  636 
/* Insert @n at the front of bucket @h. */
static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	struct hlist_node *first = h->first;
	n->next = first;
	/* Empty bucket has no old first node to fix up. */
	if (first)
		first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}
  646 
/* next must be != NULL */
/* Insert @n immediately before @next, which must already be on a list. */
static inline void hlist_add_before(struct hlist_node *n,
					struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	next->pprev = &n->next;
	/* Redirect whatever pointed at @next (head or predecessor) to @n. */
	*(n->pprev) = n;
}
  656 
/* Insert @next immediately after @n.  NOTE(review): in this older API the
 * parameter names are counter-intuitive — @next is the NEW node being
 * inserted after the already-listed @n; verify against callers. */
static inline void hlist_add_after(struct hlist_node *n,
					struct hlist_node *next)
{
	next->next = n->next;
	n->next = next;
	next->pprev = &n->next;

	/* Fix up the successor's back-pointer unless @n was the tail. */
	if(next->next)
		next->next->pprev  = &next->next;
}
  667 
/* after that we'll appear to be on some hlist and hlist_del will work */
/* Self-referencing pprev makes hlist_unhashed() false and lets a later
 * __hlist_del() write harmlessly into the node's own ->next slot. */
static inline void hlist_add_fake(struct hlist_node *n)
{
	n->pprev = &n->next;
}
  673 
/*
 * Move a list from one list head to another. Fixup the pprev
 * reference of the first entry if it exists.
 */
static inline void hlist_move_list(struct hlist_head *old,
				   struct hlist_head *new)
{
	new->first = old->first;
	/* An empty source list has no first entry to re-point. */
	if (new->first)
		new->first->pprev = &new->first;
	old->first = NULL;
}
  686 
  687 #define hlist_entry(ptr, type, member) container_of(ptr,type,member)
  688 
  689 #define hlist_for_each(pos, head) \
  690 	for (pos = (head)->first; pos ; pos = pos->next)
  691 
  692 #define hlist_for_each_safe(pos, n, head) \
  693 	for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
  694 	     pos = n)
  695 
  696 #define hlist_entry_safe(ptr, type, member) \
  697 	({ typeof(ptr) ____ptr = (ptr); \
  698 	   ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
  699 	})
  700 
  701 /**
  702  * hlist_for_each_entry	- iterate over list of given type
  703  * @pos:	the type * to use as a loop cursor.
  704  * @head:	the head for your list.
  705  * @member:	the name of the hlist_node within the struct.
  706  */
  707 #define hlist_for_each_entry(pos, head, member)				\
  708 	for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
  709 	     pos;							\
  710 	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
  711 
  712 /**
  713  * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
  714  * @pos:	the type * to use as a loop cursor.
  715  * @member:	the name of the hlist_node within the struct.
  716  */
  717 #define hlist_for_each_entry_continue(pos, member)			\
  718 	for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
  719 	     pos;							\
  720 	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
  721 
  722 /**
  723  * hlist_for_each_entry_from - iterate over a hlist continuing from current point
  724  * @pos:	the type * to use as a loop cursor.
  725  * @member:	the name of the hlist_node within the struct.
  726  */
  727 #define hlist_for_each_entry_from(pos, member)				\
  728 	for (; pos;							\
  729 	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
  730 
  731 /**
  732  * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
  733  * @pos:	the type * to use as a loop cursor.
  734  * @n:		another &struct hlist_node to use as temporary storage
  735  * @head:	the head for your list.
  736  * @member:	the name of the hlist_node within the struct.
  737  */
  738 #define hlist_for_each_entry_safe(pos, n, head, member) 		\
  739 	for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
  740 	     pos && ({ n = pos->member.next; 1; });			\
  741 	     pos = hlist_entry_safe(n, typeof(*pos), member))
  742 
  743 #endif                 1 #ifndef __LINUX_SPINLOCK_H
    2 #define __LINUX_SPINLOCK_H
    3 
    4 /*
    5  * include/linux/spinlock.h - generic spinlock/rwlock declarations
    6  *
    7  * here's the role of the various spinlock/rwlock related include files:
    8  *
    9  * on SMP builds:
   10  *
   11  *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
   12  *                        initializers
   13  *
   14  *  linux/spinlock_types.h:
   15  *                        defines the generic type and initializers
   16  *
   17  *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
   18  *                        implementations, mostly inline assembly code
   19  *
   20  *   (also included on UP-debug builds:)
   21  *
   22  *  linux/spinlock_api_smp.h:
   23  *                        contains the prototypes for the _spin_*() APIs.
   24  *
   25  *  linux/spinlock.h:     builds the final spin_*() APIs.
   26  *
   27  * on UP builds:
   28  *
   29  *  linux/spinlock_type_up.h:
   30  *                        contains the generic, simplified UP spinlock type.
   31  *                        (which is an empty structure on non-debug builds)
   32  *
   33  *  linux/spinlock_types.h:
   34  *                        defines the generic type and initializers
   35  *
   36  *  linux/spinlock_up.h:
   37  *                        contains the arch_spin_*()/etc. version of UP
   38  *                        builds. (which are NOPs on non-debug, non-preempt
   39  *                        builds)
   40  *
   41  *   (included on UP-non-debug builds:)
   42  *
   43  *  linux/spinlock_api_up.h:
   44  *                        builds the _spin_*() APIs.
   45  *
   46  *  linux/spinlock.h:     builds the final spin_*() APIs.
   47  */
   48 
   49 #include <linux/typecheck.h>
   50 #include <linux/preempt.h>
   51 #include <linux/linkage.h>
   52 #include <linux/compiler.h>
   53 #include <linux/irqflags.h>
   54 #include <linux/thread_info.h>
   55 #include <linux/kernel.h>
   56 #include <linux/stringify.h>
   57 #include <linux/bottom_half.h>
   58 #include <asm/barrier.h>
   59 
   60 
   61 /*
   62  * Must define these before including other files, inline functions need them
   63  */
   64 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
   65 
   66 #define LOCK_SECTION_START(extra)               \
   67         ".subsection 1\n\t"                     \
   68         extra                                   \
   69         ".ifndef " LOCK_SECTION_NAME "\n\t"     \
   70         LOCK_SECTION_NAME ":\n\t"               \
   71         ".endif\n"
   72 
   73 #define LOCK_SECTION_END                        \
   74         ".previous\n\t"
   75 
   76 #define __lockfunc __attribute__((section(".spinlock.text")))
   77 
   78 /*
   79  * Pull the arch_spinlock_t and arch_rwlock_t definitions:
   80  */
   81 #include <linux/spinlock_types.h>
   82 
   83 /*
   84  * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
   85  */
   86 #ifdef CONFIG_SMP
   87 # include <asm/spinlock.h>
   88 #else
   89 # include <linux/spinlock_up.h>
   90 #endif
   91 
   92 #ifdef CONFIG_DEBUG_SPINLOCK
   93   extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
   94 				   struct lock_class_key *key);
/*
 * Debug build: initialize via __raw_spin_lock_init(), passing the lock's
 * textual name (#lock) and a static key unique to each call site.
 */
   95 # define raw_spin_lock_init(lock)				\
   96 do {								\
   97 	static struct lock_class_key __key;			\
   98 								\
   99 	__raw_spin_lock_init((lock), #lock, &__key);		\
  100 } while (0)
  101 
  102 #else
/* Non-debug build: plain structure assignment, no extra bookkeeping. */
  103 # define raw_spin_lock_init(lock)				\
  104 	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
  105 #endif
  106 
/* Nonzero iff the raw spinlock is currently held. */
  107 #define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)
  108 
  109 #ifdef CONFIG_GENERIC_LOCKBREAK
  110 #define raw_spin_is_contended(lock) ((lock)->break_lock)
  111 #else
  112 
  113 #ifdef arch_spin_is_contended
  114 #define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
  115 #else
/* No contention information available: evaluate (lock) and yield 0. */
  116 #define raw_spin_is_contended(lock)	(((void)(lock), 0))
  117 #endif /*arch_spin_is_contended*/
  118 #endif
  119 
  120 /*
  121  * Despite its name it doesn't necessarily has to be a full barrier.
  122  * It should only guarantee that a STORE before the critical section
  123  * can not be reordered with a LOAD inside this section.
  124  * spin_lock() is the one-way barrier, this LOAD can not escape out
  125  * of the region. So the default implementation simply ensures that
  126  * a STORE can not move into the critical section, smp_wmb() should
  127  * serialize it with another STORE done by spin_lock().
  128  */
  129 #ifndef smp_mb__before_spinlock
  130 #define smp_mb__before_spinlock()	smp_wmb()
  131 #endif
  132 
  133 /*
  134  * Place this after a lock-acquisition primitive to guarantee that
  135  * an UNLOCK+LOCK pair act as a full barrier.  This guarantee applies
  136  * if the UNLOCK and LOCK are executed by the same CPU or if the
  137  * UNLOCK and LOCK operate on the same lock variable.
  138  */
  139 #ifndef smp_mb__after_unlock_lock
  140 #define smp_mb__after_unlock_lock()	do { } while (0)
  141 #endif
  142 
  143 /**
  144  * raw_spin_unlock_wait - wait until the spinlock gets unlocked
  145  * @lock: the spinlock in question.
  146  */
  147 #define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
  148 
  149 #ifdef CONFIG_DEBUG_SPINLOCK
/* Debug build: out-of-line implementations (defined elsewhere). */
  150  extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
  151 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
  152  extern int do_raw_spin_trylock(raw_spinlock_t *lock);
  153  extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
  154 #else
/*
 * Non-debug build: thin inline wrappers around the arch_spin_*() primitives.
 * __acquire()/__release() are sparse-only annotations with no runtime effect;
 * their placement relative to the arch calls mirrors the locking order.
 */
  155 static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
  156 {
  157 	__acquire(lock);
  158 	arch_spin_lock(&lock->raw_lock);
  159 }
  160 
/* Acquire the lock, forwarding the saved IRQ flags to the arch primitive. */
  161 static inline void
  162 do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
  163 {
  164 	__acquire(lock);
  165 	arch_spin_lock_flags(&lock->raw_lock, *flags);
  166 }
  167 
/* Returns the arch trylock result: nonzero on success, 0 if already held. */
  168 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
  169 {
  170 	return arch_spin_trylock(&(lock)->raw_lock);
  171 }
  172 
  173 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
  174 {
  175 	arch_spin_unlock(&lock->raw_lock);
  176 	__release(lock);
  177 }
  178 #endif
  179 
  180 /*
  181  * Define the various spin_lock methods.  Note we define these
  182  * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
  183  * various methods are defined as nops in the case they are not
  184  * required.
  185  */
  186 #define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))
  187 
  188 #define raw_spin_lock(lock)	_raw_spin_lock(lock)
  189 
  190 #ifdef CONFIG_DEBUG_LOCK_ALLOC
  191 # define raw_spin_lock_nested(lock, subclass) \
  192 	_raw_spin_lock_nested(lock, subclass)
  193 
  194 # define raw_spin_lock_nest_lock(lock, nest_lock)			\
  195 	 do {								\
  196 		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
  197 		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
  198 	 } while (0)
  199 #else
  200 # define raw_spin_lock_nested(lock, subclass)		_raw_spin_lock(lock)
  201 # define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
  202 #endif
  203 
  204 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  205 
/*
 * SMP/debug builds: _raw_spin_lock_irqsave() returns the saved IRQ state;
 * typecheck() catches callers passing anything but an unsigned long lvalue.
 */
  206 #define raw_spin_lock_irqsave(lock, flags)			\
  207 	do {						\
  208 		typecheck(unsigned long, flags);	\
  209 		flags = _raw_spin_lock_irqsave(lock);	\
  210 	} while (0)
  211 
  212 #ifdef CONFIG_DEBUG_LOCK_ALLOC
  213 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
  214 	do {								\
  215 		typecheck(unsigned long, flags);			\
  216 		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
  217 	} while (0)
  218 #else
/* Without lock debugging, the subclass argument is simply dropped. */
  219 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
  220 	do {								\
  221 		typecheck(unsigned long, flags);			\
  222 		flags = _raw_spin_lock_irqsave(lock);			\
  223 	} while (0)
  224 #endif
  225 
  226 #else
  227 
/* UP non-debug builds: the UP API writes flags through its macro argument. */
  228 #define raw_spin_lock_irqsave(lock, flags)		\
  229 	do {						\
  230 		typecheck(unsigned long, flags);	\
  231 		_raw_spin_lock_irqsave(lock, flags);	\
  232 	} while (0)
  233 
  234 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
  235 	raw_spin_lock_irqsave(lock, flags)
  236 
  237 #endif
  238 
/* Remaining raw_spin_*() entry points map 1:1 onto the _raw_spin_*() APIs. */
  239 #define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
  240 #define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
  241 #define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
  242 #define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)
  243 
  244 #define raw_spin_unlock_irqrestore(lock, flags)		\
  245 	do {							\
  246 		typecheck(unsigned long, flags);		\
  247 		_raw_spin_unlock_irqrestore(lock, flags);	\
  248 	} while (0)
  249 #define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)
  250 
  251 #define raw_spin_trylock_bh(lock) \
  252 	__cond_lock(lock, _raw_spin_trylock_bh(lock))
  253 
/* Disable local IRQs, then try the lock; re-enable IRQs on failure. */
  254 #define raw_spin_trylock_irq(lock) \
  255 ({ \
  256 	local_irq_disable(); \
  257 	raw_spin_trylock(lock) ? \
  258 	1 : ({ local_irq_enable(); 0;  }); \
  259 })
  260 
/* Save+disable local IRQ state, try the lock; restore the state on failure. */
  261 #define raw_spin_trylock_irqsave(lock, flags) \
  262 ({ \
  263 	local_irq_save(flags); \
  264 	raw_spin_trylock(lock) ? \
  265 	1 : ({ local_irq_restore(flags); 0; }); \
  266 })
  267 
  268 /**
  269  * raw_spin_can_lock - would raw_spin_trylock() succeed?
  270  * @lock: the spinlock in question.
  271  */
  272 #define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))
  273 
  274 /* Include rwlock functions */
  275 #include <linux/rwlock.h>
  276 
  277 /*
  278  * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
  279  */
  280 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  281 # include <linux/spinlock_api_smp.h>
  282 #else
  283 # include <linux/spinlock_api_up.h>
  284 #endif
  285 
  286 /*
  287  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
  288  */
  289 
  290 static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
  291 {
  292 	return &lock->rlock;
  293 }
  294 
  295 #define spin_lock_init(_lock)				\
  296 do {							\
  297 	spinlock_check(_lock);				\
  298 	raw_spin_lock_init(&(_lock)->rlock);		\
  299 } while (0)
  300 
/*
 * spin_*() lock-acquisition wrappers: each forwards to the corresponding
 * raw_spin_*() variant on the lock's embedded raw lock (->rlock).
 */
  301 static inline void spin_lock(spinlock_t *lock)
  302 {
  303 	raw_spin_lock(&lock->rlock);
  304 }
  305 
/* Bottom-half (bh) variant; forwards to raw_spin_lock_bh(). */
  306 static inline void spin_lock_bh(spinlock_t *lock)
  307 {
  308 	raw_spin_lock_bh(&lock->rlock);
  309 }
  310 
/* Returns the raw trylock result: nonzero on success, 0 if already held. */
  311 static inline int spin_trylock(spinlock_t *lock)
  312 {
  313 	return raw_spin_trylock(&lock->rlock);
  314 }
  315 
/* Nesting variants: spinlock_check() type-checks the spinlock_t argument. */
  316 #define spin_lock_nested(lock, subclass)			\
  317 do {								\
  318 	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
  319 } while (0)
  320 
  321 #define spin_lock_nest_lock(lock, nest_lock)				\
  322 do {									\
  323 	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
  324 } while (0)
  325 
  326 static inline void spin_lock_irq(spinlock_t *lock)
  327 {
  328 	raw_spin_lock_irq(&lock->rlock);
  329 }
  330 
/* flags receives the saved IRQ state for spin_unlock_irqrestore(). */
  331 #define spin_lock_irqsave(lock, flags)				\
  332 do {								\
  333 	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
  334 } while (0)
  335 
  336 #define spin_lock_irqsave_nested(lock, flags, subclass)			\
  337 do {									\
  338 	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
  339 } while (0)
  340 
/*
 * spin_*() unlock/query wrappers: each forwards to the corresponding
 * raw_spin_*() variant on the lock's embedded raw lock (->rlock).
 */
  341 static inline void spin_unlock(spinlock_t *lock)
  342 {
  343 	raw_spin_unlock(&lock->rlock);
  344 }
  345 
  346 static inline void spin_unlock_bh(spinlock_t *lock)
  347 {
  348 	raw_spin_unlock_bh(&lock->rlock);
  349 }
  350 
  351 static inline void spin_unlock_irq(spinlock_t *lock)
  352 {
  353 	raw_spin_unlock_irq(&lock->rlock);
  354 }
  355 
/* @flags must be the value saved by the matching spin_lock_irqsave(). */
  356 static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
  357 {
  358 	raw_spin_unlock_irqrestore(&lock->rlock, flags);
  359 }
  360 
  361 static inline int spin_trylock_bh(spinlock_t *lock)
  362 {
  363 	return raw_spin_trylock_bh(&lock->rlock);
  364 }
  365 
  366 static inline int spin_trylock_irq(spinlock_t *lock)
  367 {
  368 	return raw_spin_trylock_irq(&lock->rlock);
  369 }
  370 
  371 #define spin_trylock_irqsave(lock, flags)			\
  372 ({								\
  373 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
  374 })
  375 
  376 static inline void spin_unlock_wait(spinlock_t *lock)
  377 {
  378 	raw_spin_unlock_wait(&lock->rlock);
  379 }
  380 
  381 static inline int spin_is_locked(spinlock_t *lock)
  382 {
  383 	return raw_spin_is_locked(&lock->rlock);
  384 }
  385 
  386 static inline int spin_is_contended(spinlock_t *lock)
  387 {
  388 	return raw_spin_is_contended(&lock->rlock);
  389 }
  390 
  391 static inline int spin_can_lock(spinlock_t *lock)
  392 {
  393 	return raw_spin_can_lock(&lock->rlock);
  394 }
  395 
  396 #define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
  397 
  398 /*
  399  * Pull the atomic_t declaration:
  400  * (asm-mips/atomic.h needs above definitions)
  401  */
  402 #include <linux/atomic.h>
  403 /**
  404  * atomic_dec_and_lock - lock on reaching reference count zero
  405  * @atomic: the atomic counter
  406  * @lock: the spinlock in question
  407  *
  408  * Decrements @atomic by 1.  If the result is 0, returns true and locks
  409  * @lock.  Returns false for all other cases.
  410  */
  411 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
  412 #define atomic_dec_and_lock(atomic, lock) \
  413 		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
  414 
  415 #endif /* __LINUX_SPINLOCK_H */            | 
Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to determine whether there is an error in your driver.
The Error trace column shows the path along which the given rule is violated. You can expand/collapse classes of entities by clicking the corresponding checkboxes in the main menu or in the advanced Others menu, and you can expand/collapse each particular entity by clicking +/-. Hovering over some entities shows tips. The error trace is also linked to the related source code: line numbers may be shown as links on the left, and clicking one opens the corresponding line in the source code.
The Source code column shows the content of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers, and the rule model. Tabs show the currently opened file and the other available files; hovering over a tab shows the full file name, and clicking it displays that file's content.
| Kernel | Module | Rule | Verifier | Verdict | Status | Timestamp | 
| linux-3.16-rc1.tar.xz | drivers/usb/dwc3/dwc3.ko | 43_1a | CPAchecker | Bug | Unreported | 2014-12-12 13:15:10 | 
Comment
dwc3_gadget_giveback should be called under spin_lock
[Home]