Error Trace
[Home]
Bug # 44
Show/hide error trace Error trace
{ 95 struct kernel_symbol { unsigned long value; const char *name; } ; 33 struct module ; 19 typedef signed char __s8; 20 typedef unsigned char __u8; 22 typedef short __s16; 23 typedef unsigned short __u16; 25 typedef int __s32; 26 typedef unsigned int __u32; 29 typedef long long __s64; 30 typedef unsigned long long __u64; 16 typedef unsigned char u8; 18 typedef short s16; 19 typedef unsigned short u16; 21 typedef int s32; 22 typedef unsigned int u32; 24 typedef long long s64; 25 typedef unsigned long long u64; 14 typedef long __kernel_long_t; 15 typedef unsigned long __kernel_ulong_t; 27 typedef int __kernel_pid_t; 40 typedef __kernel_long_t __kernel_suseconds_t; 48 typedef unsigned int __kernel_uid32_t; 49 typedef unsigned int __kernel_gid32_t; 71 typedef __kernel_ulong_t __kernel_size_t; 72 typedef __kernel_long_t __kernel_ssize_t; 87 typedef long long __kernel_loff_t; 88 typedef __kernel_long_t __kernel_time_t; 89 typedef __kernel_long_t __kernel_clock_t; 90 typedef int __kernel_timer_t; 91 typedef int __kernel_clockid_t; 32 typedef __u16 __le16; 34 typedef __u32 __le32; 12 typedef __u32 __kernel_dev_t; 15 typedef __kernel_dev_t dev_t; 18 typedef unsigned short umode_t; 21 typedef __kernel_pid_t pid_t; 26 typedef __kernel_clockid_t clockid_t; 29 typedef _Bool bool; 31 typedef __kernel_uid32_t uid_t; 32 typedef __kernel_gid32_t gid_t; 45 typedef __kernel_loff_t loff_t; 54 typedef __kernel_size_t size_t; 59 typedef __kernel_ssize_t ssize_t; 69 typedef __kernel_time_t time_t; 102 typedef __s32 int32_t; 108 typedef __u32 uint32_t; 133 typedef unsigned long sector_t; 134 typedef unsigned long blkcnt_t; 147 typedef u64 dma_addr_t; 158 typedef unsigned int gfp_t; 159 typedef unsigned int fmode_t; 160 typedef unsigned int oom_flags_t; 178 struct __anonstruct_atomic_t_6 { int counter; } ; 178 typedef struct __anonstruct_atomic_t_6 atomic_t; 183 struct __anonstruct_atomic64_t_7 { long counter; } ; 183 typedef struct __anonstruct_atomic64_t_7 atomic64_t; 184 struct 
list_head { struct list_head *next; struct list_head *prev; } ; 189 struct hlist_node ; 189 struct hlist_head { struct hlist_node *first; } ; 193 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ; 204 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ; 65 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ; 59 struct __anonstruct_ldv_1016_9 { unsigned int a; unsigned int b; } ; 59 struct __anonstruct_ldv_1031_10 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ; 59 union __anonunion_ldv_1032_8 { struct __anonstruct_ldv_1016_9 ldv_1016; struct __anonstruct_ldv_1031_10 ldv_1031; } ; 59 struct desc_struct { union __anonunion_ldv_1032_8 ldv_1032; } ; 12 typedef unsigned long pteval_t; 15 typedef unsigned long pgdval_t; 16 typedef unsigned long pgprotval_t; 18 struct __anonstruct_pte_t_11 { pteval_t pte; } ; 18 typedef struct __anonstruct_pte_t_11 pte_t; 20 struct pgprot { pgprotval_t pgprot; } ; 242 typedef struct pgprot pgprot_t; 244 struct __anonstruct_pgd_t_12 { pgdval_t pgd; } ; 244 typedef struct __anonstruct_pgd_t_12 pgd_t; 332 struct page ; 332 typedef struct page *pgtable_t; 340 struct file ; 353 struct seq_file ; 390 struct thread_struct ; 392 struct mm_struct ; 393 struct task_struct ; 394 struct cpumask ; 327 struct arch_spinlock ; 18 typedef u16 __ticket_t; 19 typedef u32 __ticketpair_t; 20 struct __raw_tickets { __ticket_t head; __ticket_t tail; } ; 32 union 
__anonunion_ldv_1452_15 { __ticketpair_t head_tail; struct __raw_tickets tickets; } ; 32 struct arch_spinlock { union __anonunion_ldv_1452_15 ldv_1452; } ; 33 typedef struct arch_spinlock arch_spinlock_t; 34 struct qrwlock { atomic_t cnts; arch_spinlock_t lock; } ; 14 typedef struct qrwlock arch_rwlock_t; 142 typedef void (*ctor_fn_t)(); 48 struct device ; 400 struct file_operations ; 412 struct completion ; 416 struct pid ; 527 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ; 102 struct timespec ; 127 struct kernel_vm86_regs { struct pt_regs pt; unsigned short es; unsigned short __esh; unsigned short ds; unsigned short __dsh; unsigned short fs; unsigned short __fsh; unsigned short gs; unsigned short __gsh; } ; 79 union __anonunion_ldv_3000_20 { struct pt_regs *regs; struct kernel_vm86_regs *vm86; } ; 79 struct math_emu_info { long ___orig_eip; union __anonunion_ldv_3000_20 ldv_3000; } ; 306 struct cpumask { unsigned long bits[128U]; } ; 14 typedef struct cpumask cpumask_t; 671 typedef struct cpumask *cpumask_var_t; 162 struct seq_operations ; 294 struct i387_fsave_struct { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ; 312 struct __anonstruct_ldv_5307_25 { u64 rip; u64 rdp; } ; 312 struct __anonstruct_ldv_5313_26 { u32 fip; u32 fcs; u32 foo; u32 fos; } ; 312 union __anonunion_ldv_5314_24 { struct __anonstruct_ldv_5307_25 ldv_5307; struct __anonstruct_ldv_5313_26 ldv_5313; } ; 312 union __anonunion_ldv_5323_27 { u32 padding1[12U]; u32 sw_reserved[12U]; } ; 312 struct i387_fxsave_struct { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion_ldv_5314_24 ldv_5314; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion_ldv_5323_27 ldv_5323; } ; 346 struct i387_soft_struct { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; 
struct math_emu_info *info; u32 entry_eip; } ; 367 struct ymmh_struct { u32 ymmh_space[64U]; } ; 372 struct lwp_struct { u8 reserved[128U]; } ; 377 struct bndregs_struct { u64 bndregs[8U]; } ; 381 struct bndcsr_struct { u64 cfg_reg_u; u64 status_reg; } ; 386 struct xsave_hdr_struct { u64 xstate_bv; u64 reserved1[2U]; u64 reserved2[5U]; } ; 392 struct xsave_struct { struct i387_fxsave_struct i387; struct xsave_hdr_struct xsave_hdr; struct ymmh_struct ymmh; struct lwp_struct lwp; struct bndregs_struct bndregs; struct bndcsr_struct bndcsr; } ; 401 union thread_xstate { struct i387_fsave_struct fsave; struct i387_fxsave_struct fxsave; struct i387_soft_struct soft; struct xsave_struct xsave; } ; 409 struct fpu { unsigned int last_cpu; unsigned int has_fpu; union thread_xstate *state; } ; 465 struct kmem_cache ; 466 struct perf_event ; 467 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned long usersp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; unsigned long fs; unsigned long gs; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; struct fpu fpu; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; unsigned char fpu_counter; } ; 23 typedef atomic64_t atomic_long_t; 35 struct lockdep_map ; 55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ; 26 struct lockdep_subclass_key { char __one_byte; } ; 53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ; 59 struct lock_class { struct list_head hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const 
char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ; 144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ; 205 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; } ; 530 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 32 typedef struct raw_spinlock raw_spinlock_t; 33 struct __anonstruct_ldv_6364_31 { u8 __padding[24U]; struct lockdep_map dep_map; } ; 33 union __anonunion_ldv_6365_30 { struct raw_spinlock rlock; struct __anonstruct_ldv_6364_31 ldv_6364; } ; 33 struct spinlock { union __anonunion_ldv_6365_30 ldv_6365; } ; 76 typedef struct spinlock spinlock_t; 23 struct __anonstruct_rwlock_t_32 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 23 typedef struct __anonstruct_rwlock_t_32 rwlock_t; 412 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ; 51 typedef struct seqcount seqcount_t; 433 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ; 13 struct timeval { __kernel_time_t tv_sec; __kernel_suseconds_t tv_usec; } ; 83 struct user_namespace ; 22 struct __anonstruct_kuid_t_34 { uid_t val; } ; 22 typedef struct __anonstruct_kuid_t_34 kuid_t; 27 struct __anonstruct_kgid_t_35 { gid_t val; } ; 27 typedef struct __anonstruct_kgid_t_35 kgid_t; 127 struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long 
blocks; } ; 34 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ; 39 typedef struct __wait_queue_head wait_queue_head_t; 98 struct __anonstruct_nodemask_t_36 { unsigned long bits[16U]; } ; 98 typedef struct __anonstruct_nodemask_t_36 nodemask_t; 814 struct optimistic_spin_queue ; 815 struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; const char *name; void *magic; struct lockdep_map dep_map; } ; 68 struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; } ; 178 struct rw_semaphore ; 179 struct rw_semaphore { long count; raw_spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; struct optimistic_spin_queue *osq; struct lockdep_map dep_map; } ; 174 struct completion { unsigned int done; wait_queue_head_t wait; } ; 105 struct llist_node ; 64 struct llist_node { struct llist_node *next; } ; 323 union ktime { s64 tv64; } ; 59 typedef union ktime ktime_t; 412 struct tvec_base ; 413 struct timer_list { struct list_head entry; unsigned long expires; struct tvec_base *base; void (*function)(unsigned long); unsigned long data; int slack; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ; 254 struct hrtimer ; 255 enum hrtimer_restart ; 266 struct workqueue_struct ; 267 struct work_struct ; 53 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ; 106 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ; 58 struct pm_message { int event; } ; 64 typedef struct pm_message pm_message_t; 65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int 
(*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ; 320 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ; 327 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ; 335 struct wakeup_source ; 546 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; } ; 553 struct dev_pm_qos ; 553 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool ignore_children; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long 
suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ; 614 struct dev_pm_domain { struct dev_pm_ops ops; } ; 22 struct __anonstruct_mm_context_t_101 { void *ldt; int size; unsigned short ia32_compat; struct mutex lock; void *vdso; } ; 22 typedef struct __anonstruct_mm_context_t_101 mm_context_t; 18 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ; 40 struct rb_root { struct rb_node *rb_node; } ; 87 struct vm_area_struct ; 835 struct nsproxy ; 37 struct cred ; 24 struct inode ; 58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ; 66 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ; 73 struct __anonstruct_ldv_13972_136 { struct arch_uprobe_task autask; unsigned long vaddr; } ; 73 struct __anonstruct_ldv_13976_137 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ; 73 union __anonunion_ldv_13977_135 { struct __anonstruct_ldv_13972_136 ldv_13972; struct __anonstruct_ldv_13976_137 ldv_13976; } ; 73 struct uprobe ; 73 struct return_instance ; 73 struct uprobe_task { enum uprobe_task_state state; union __anonunion_ldv_13977_135 ldv_13977; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ; 94 struct xol_area ; 95 struct uprobes_state { struct xol_area *xol_area; } ; 133 struct address_space ; 134 union __anonunion_ldv_14086_138 { struct address_space *mapping; void *s_mem; } ; 134 union __anonunion_ldv_14092_140 { unsigned long index; void *freelist; bool pfmemalloc; } ; 134 struct __anonstruct_ldv_14102_144 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ; 134 union __anonunion_ldv_14104_143 { atomic_t _mapcount; struct __anonstruct_ldv_14102_144 ldv_14102; int 
units; } ; 134 struct __anonstruct_ldv_14106_142 { union __anonunion_ldv_14104_143 ldv_14104; atomic_t _count; } ; 134 union __anonunion_ldv_14108_141 { unsigned long counters; struct __anonstruct_ldv_14106_142 ldv_14106; unsigned int active; } ; 134 struct __anonstruct_ldv_14109_139 { union __anonunion_ldv_14092_140 ldv_14092; union __anonunion_ldv_14108_141 ldv_14108; } ; 134 struct __anonstruct_ldv_14116_146 { struct page *next; int pages; int pobjects; } ; 134 struct slab ; 134 union __anonunion_ldv_14121_145 { struct list_head lru; struct __anonstruct_ldv_14116_146 ldv_14116; struct slab *slab_page; struct callback_head callback_head; pgtable_t pmd_huge_pte; } ; 134 union __anonunion_ldv_14127_147 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; struct page *first_page; } ; 134 struct page { unsigned long flags; union __anonunion_ldv_14086_138 ldv_14086; struct __anonstruct_ldv_14109_139 ldv_14109; union __anonunion_ldv_14121_145 ldv_14121; union __anonunion_ldv_14127_147 ldv_14127; unsigned long debug_flags; } ; 187 struct page_frag { struct page *page; __u32 offset; __u32 size; } ; 239 struct __anonstruct_linear_149 { struct rb_node rb; unsigned long rb_subtree_last; } ; 239 union __anonunion_shared_148 { struct __anonstruct_linear_149 linear; struct list_head nonlinear; } ; 239 struct anon_vma ; 239 struct vm_operations_struct ; 239 struct mempolicy ; 239 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; union __anonunion_shared_148 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; } ; 311 struct core_thread { struct task_struct *task; struct core_thread *next; } ; 317 
struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ; 330 struct task_rss_stat { int events; int count[3U]; } ; 338 struct mm_rss_stat { atomic_long_t count[3U]; } ; 343 struct kioctx_table ; 344 struct linux_binfmt ; 344 struct mmu_notifier_mm ; 344 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long shared_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; } ; 15 typedef __u64 Elf64_Addr; 16 typedef __u16 Elf64_Half; 20 typedef __u32 Elf64_Word; 21 typedef __u64 Elf64_Xword; 190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char 
st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ; 198 typedef struct elf64_sym Elf64_Sym; 48 union __anonunion_ldv_14490_153 { unsigned long bitmap[4U]; struct callback_head callback_head; } ; 48 struct idr_layer { int prefix; int layer; struct idr_layer *ary[256U]; int count; union __anonunion_ldv_14490_153 ldv_14490; } ; 41 struct idr { struct idr_layer *hint; struct idr_layer *top; int layers; int cur; spinlock_t lock; int id_free_cnt; struct idr_layer *id_free; } ; 124 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ; 153 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ; 185 struct dentry ; 186 struct iattr ; 187 struct super_block ; 188 struct file_system_type ; 189 struct kernfs_open_node ; 190 struct kernfs_iattrs ; 213 struct kernfs_root ; 213 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ; 85 struct kernfs_node ; 85 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ; 89 struct kernfs_ops ; 89 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; } ; 95 union __anonunion_ldv_14634_154 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ; 95 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion_ldv_14634_154 ldv_14634; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ; 137 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); } ; 154 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; 
struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ; 170 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; void *priv; struct mutex mutex; int event; struct list_head list; size_t atomic_write_len; bool mmapped; const struct vm_operations_struct *vm_ops; } ; 186 struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ; 462 struct sock ; 463 struct kobject ; 464 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ; 470 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ; 59 struct bin_attribute ; 60 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ; 37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ; 67 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ; 131 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t 
(*store)(struct kobject *, struct attribute *, const char *, size_t ); } ; 470 struct kref { atomic_t refcount; } ; 52 struct kset ; 52 struct kobj_type ; 52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ; 114 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ; 122 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ; 130 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ; 147 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ; 222 struct kernel_param ; 227 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ; 58 struct kparam_string ; 58 struct kparam_array ; 58 union __anonunion_ldv_15312_155 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ; 58 struct kernel_param { const char *name; const struct kernel_param_ops *ops; u16 perm; s16 level; union __anonunion_ldv_15312_155 ldv_15312; } ; 70 struct kparam_string { unsigned int maxlen; char *string; } ; 76 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ; 461 struct 
mod_arch_specific { } ; 36 struct module_param_attrs ; 36 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ; 46 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ; 72 struct exception_table_entry ; 205 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ; 212 struct module_ref { unsigned long incs; unsigned long decs; } ; 226 struct module_sect_attrs ; 226 struct module_notes_attrs ; 226 struct tracepoint ; 226 struct ftrace_event_call ; 226 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); void *module_init; void *module_core; unsigned int init_size; unsigned int core_size; unsigned int init_text_size; unsigned int core_text_size; unsigned int init_ro_size; unsigned int core_ro_size; struct 
mod_arch_specific arch; unsigned int taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; Elf64_Sym *symtab; Elf64_Sym *core_symtab; unsigned int num_symtab; unsigned int core_num_syms; char *strtab; char *core_strtab; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct ftrace_event_call **trace_events; unsigned int num_trace_events; struct list_head source_list; struct list_head target_list; void (*exit)(); struct module_ref *refptr; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ; 13 typedef unsigned long kernel_ulong_t; 39 struct usb_device_id { __u16 match_flags; __u16 idVendor; __u16 idProduct; __u16 bcdDevice_lo; __u16 bcdDevice_hi; __u8 bDeviceClass; __u8 bDeviceSubClass; __u8 bDeviceProtocol; __u8 bInterfaceClass; __u8 bInterfaceSubClass; __u8 bInterfaceProtocol; __u8 bInterfaceNumber; kernel_ulong_t driver_info; } ; 186 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; } ; 219 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ; 253 struct usb_device_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 bcdUSB; __u8 bDeviceClass; __u8 bDeviceSubClass; __u8 bDeviceProtocol; __u8 bMaxPacketSize0; __le16 idVendor; __le16 idProduct; __le16 bcdDevice; __u8 iManufacturer; __u8 iProduct; __u8 iSerialNumber; __u8 bNumConfigurations; } ; 275 struct usb_config_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 wTotalLength; __u8 bNumInterfaces; __u8 bConfigurationValue; __u8 iConfiguration; __u8 bmAttributes; __u8 bMaxPower; } ; 343 struct usb_interface_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bInterfaceNumber; __u8 bAlternateSetting; __u8 bNumEndpoints; __u8 bInterfaceClass; __u8 bInterfaceSubClass; __u8 
bInterfaceProtocol; __u8 iInterface; } ; 363 struct usb_endpoint_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bEndpointAddress; __u8 bmAttributes; __le16 wMaxPacketSize; __u8 bInterval; __u8 bRefresh; __u8 bSynchAddress; } ; 613 struct usb_ss_ep_comp_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bMaxBurst; __u8 bmAttributes; __le16 wBytesPerInterval; } ; 692 struct usb_interface_assoc_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bFirstInterface; __u8 bInterfaceCount; __u8 bFunctionClass; __u8 bFunctionSubClass; __u8 bFunctionProtocol; __u8 iFunction; } ; 751 struct usb_bos_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 wTotalLength; __u8 bNumDeviceCaps; } ; 801 struct usb_ext_cap_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __le32 bmAttributes; } ; 811 struct usb_ss_cap_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __u8 bmAttributes; __le16 wSpeedSupported; __u8 bFunctionalitySupport; __u8 bU1devExitLat; __le16 bU2DevExitLat; } ; 840 struct usb_ss_container_id_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __u8 bReserved; __u8 ContainerID[16U]; } ; 905 enum usb_device_speed { USB_SPEED_UNKNOWN = 0, USB_SPEED_LOW = 1, USB_SPEED_FULL = 2, USB_SPEED_HIGH = 3, USB_SPEED_WIRELESS = 4, USB_SPEED_SUPER = 5 } ; 914 enum usb_device_state { USB_STATE_NOTATTACHED = 0, USB_STATE_ATTACHED = 1, USB_STATE_POWERED = 2, USB_STATE_RECONNECTING = 3, USB_STATE_UNAUTHENTICATED = 4, USB_STATE_DEFAULT = 5, USB_STATE_ADDRESS = 6, USB_STATE_CONFIGURED = 7, USB_STATE_SUSPENDED = 8 } ; 62 struct exception_table_entry { int insn; int fixup; } ; 61 struct timerqueue_node { struct rb_node node; ktime_t expires; } ; 12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ; 50 struct hrtimer_clock_base ; 51 struct hrtimer_cpu_base ; 60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; 65 struct hrtimer { struct timerqueue_node node; 
ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; unsigned long state; int start_pid; void *start_site; char start_comm[16U]; } ; 132 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t resolution; ktime_t (*get_time)(); ktime_t softirq_time; ktime_t offset; } ; 163 struct hrtimer_cpu_base { raw_spinlock_t lock; unsigned int active_bases; unsigned int clock_was_set; ktime_t expires_next; int hres_active; int hang_detected; unsigned long nr_events; unsigned long nr_retries; unsigned long nr_hangs; ktime_t max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ; 663 struct klist_node ; 37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ; 67 struct path ; 68 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; struct user_namespace *user_ns; void *private; } ; 35 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ; 196 struct pinctrl ; 197 struct pinctrl_state ; 194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ; 42 struct dma_map_ops ; 42 struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; } ; 14 struct device_private ; 15 struct device_driver ; 16 struct driver_private ; 17 struct class ; 18 struct subsys_private ; 19 struct bus_type ; 20 struct device_node ; 21 struct iommu_ops ; 22 struct iommu_group ; 60 struct device_attribute ; 60 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group 
**bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ; 138 struct device_type ; 195 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ; 321 struct class_attribute ; 321 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ; 414 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ; 482 struct device_type { const char *name; const struct attribute_group 
**groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ; 510 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ; 640 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ; 649 struct acpi_device ; 650 struct acpi_dev_node { struct acpi_device *companion; } ; 656 struct dma_coherent_mem ; 656 struct cma ; 656 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct dev_pin_info *pins; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct acpi_dev_node acpi_node; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; bool offline_disabled; bool offline; } ; 803 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } 
; 93 struct hlist_bl_node ; 93 struct hlist_bl_head { struct hlist_bl_node *first; } ; 36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ; 114 struct __anonstruct_ldv_19539_162 { spinlock_t lock; unsigned int count; } ; 114 union __anonunion_ldv_19540_161 { struct __anonstruct_ldv_19539_162 ldv_19539; } ; 114 struct lockref { union __anonunion_ldv_19540_161 ldv_19540; } ; 49 struct nameidata ; 50 struct vfsmount ; 51 struct __anonstruct_ldv_19563_164 { u32 hash; u32 len; } ; 51 union __anonunion_ldv_19565_163 { struct __anonstruct_ldv_19563_164 ldv_19563; u64 hash_len; } ; 51 struct qstr { union __anonunion_ldv_19565_163 ldv_19565; const unsigned char *name; } ; 90 struct dentry_operations ; 90 union __anonunion_d_u_165 { struct list_head d_child; struct callback_head d_rcu; } ; 90 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; struct list_head d_lru; union __anonunion_d_u_165 d_u; struct list_head d_subdirs; struct hlist_node d_alias; } ; 142 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(struct dentry *, bool ); } ; 477 struct path { struct vfsmount *mnt; struct dentry *dentry; } ; 27 struct list_lru_node { spinlock_t lock; struct list_head list; long 
nr_items; } ; 30 struct list_lru { struct list_lru_node *node; nodemask_t active_nodes; } ; 58 struct __anonstruct_ldv_19926_167 { struct radix_tree_node *parent; void *private_data; } ; 58 union __anonunion_ldv_19928_166 { struct __anonstruct_ldv_19926_167 ldv_19926; struct callback_head callback_head; } ; 58 struct radix_tree_node { unsigned int path; unsigned int count; union __anonunion_ldv_19928_166 ldv_19928; struct list_head private_list; void *slots[64U]; unsigned long tags[3U][1U]; } ; 105 struct radix_tree_root { unsigned int height; gfp_t gfp_mask; struct radix_tree_node *rnode; } ; 428 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ; 435 struct pid_namespace ; 435 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ; 56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ; 68 struct pid_link { struct hlist_node node; struct pid *pid; } ; 22 struct kernel_cap_struct { __u32 cap[2U]; } ; 25 typedef struct kernel_cap_struct kernel_cap_t; 45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ; 38 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; nodemask_t nodes_to_scan; int nid; } ; 26 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ; 70 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ; 30 struct block_device ; 31 struct io_context ; 59 struct export_operations ; 61 struct iovec ; 62 struct kiocb ; 63 struct pipe_inode_info ; 64 struct poll_table_struct ; 65 struct kstatfs ; 66 struct swap_info_struct ; 67 struct iov_iter ; 69 struct iattr { unsigned 
int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ; 253 struct percpu_counter { raw_spinlock_t lock; s64 count; struct list_head list; s32 *counters; } ; 176 struct fs_disk_quota { __s8 d_version; __s8 d_flags; __u16 d_fieldmask; __u32 d_id; __u64 d_blk_hardlimit; __u64 d_blk_softlimit; __u64 d_ino_hardlimit; __u64 d_ino_softlimit; __u64 d_bcount; __u64 d_icount; __s32 d_itimer; __s32 d_btimer; __u16 d_iwarns; __u16 d_bwarns; __s32 d_padding2; __u64 d_rtb_hardlimit; __u64 d_rtb_softlimit; __u64 d_rtbcount; __s32 d_rtbtimer; __u16 d_rtbwarns; __s16 d_padding3; char d_padding4[8U]; } ; 76 struct fs_qfilestat { __u64 qfs_ino; __u64 qfs_nblks; __u32 qfs_nextents; } ; 151 typedef struct fs_qfilestat fs_qfilestat_t; 152 struct fs_quota_stat { __s8 qs_version; __u16 qs_flags; __s8 qs_pad; fs_qfilestat_t qs_uquota; fs_qfilestat_t qs_gquota; __u32 qs_incoredqs; __s32 qs_btimelimit; __s32 qs_itimelimit; __s32 qs_rtbtimelimit; __u16 qs_bwarnlimit; __u16 qs_iwarnlimit; } ; 166 struct fs_qfilestatv { __u64 qfs_ino; __u64 qfs_nblks; __u32 qfs_nextents; __u32 qfs_pad; } ; 196 struct fs_quota_statv { __s8 qs_version; __u8 qs_pad1; __u16 qs_flags; __u32 qs_incoredqs; struct fs_qfilestatv qs_uquota; struct fs_qfilestatv qs_gquota; struct fs_qfilestatv qs_pquota; __s32 qs_btimelimit; __s32 qs_itimelimit; __s32 qs_rtbtimelimit; __u16 qs_bwarnlimit; __u16 qs_iwarnlimit; __u64 qs_pad2[8U]; } ; 212 struct dquot ; 19 typedef __kernel_uid32_t projid_t; 23 struct __anonstruct_kprojid_t_169 { projid_t val; } ; 23 typedef struct __anonstruct_kprojid_t_169 kprojid_t; 119 struct if_dqinfo { __u64 dqi_bgrace; __u64 dqi_igrace; __u32 dqi_flags; __u32 dqi_valid; } ; 152 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ; 60 typedef long long qsize_t; 61 union __anonunion_ldv_20748_170 { kuid_t uid; kgid_t gid; kprojid_t projid; } ; 61 struct kqid { union 
__anonunion_ldv_20748_170 ldv_20748; enum quota_type type; } ; 178 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time_t dqb_btime; time_t dqb_itime; } ; 200 struct quota_format_type ; 201 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_maxblimit; qsize_t dqi_maxilimit; void *dqi_priv; } ; 264 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ; 291 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); } ; 302 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); } ; 316 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, struct path *); int (*quota_on_meta)(struct super_block *, int, int); int (*quota_off)(struct super_block *, int); int (*quota_sync)(struct super_block *, int); int (*get_info)(struct super_block *, int, struct if_dqinfo *); int (*set_info)(struct super_block *, int, struct if_dqinfo *); int (*get_dqblk)(struct 
super_block *, struct kqid , struct fs_disk_quota *); int (*set_dqblk)(struct super_block *, struct kqid , struct fs_disk_quota *); int (*get_xstate)(struct super_block *, struct fs_quota_stat *); int (*set_xstate)(struct super_block *, unsigned int, int); int (*get_xstatev)(struct super_block *, struct fs_quota_statv *); int (*rm_xquota)(struct super_block *, unsigned int); } ; 334 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ; 380 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct mutex dqonoff_mutex; struct rw_semaphore dqptr_sem; struct inode *files[2U]; struct mem_dqinfo info[2U]; const struct quota_format_ops *ops[2U]; } ; 411 struct writeback_control ; 323 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *, loff_t ); int (*get_xip_mem)(struct address_space *, unsigned long, int, void **, unsigned long *); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int 
(*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ; 382 struct backing_dev_info ; 383 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; unsigned int i_mmap_writable; struct rb_root i_mmap; struct list_head i_mmap_nonlinear; struct mutex i_mmap_mutex; unsigned long nrpages; unsigned long nrshadows; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; struct backing_dev_info *backing_dev_info; spinlock_t private_lock; struct list_head private_list; void *private_data; } ; 405 struct request_queue ; 406 struct hd_struct ; 406 struct gendisk ; 406 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; struct list_head bd_inodes; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct list_head bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; } ; 478 struct posix_acl ; 479 struct inode_operations ; 479 union __anonunion_ldv_21164_173 { const unsigned int i_nlink; unsigned int __i_nlink; } ; 479 union __anonunion_ldv_21184_174 { struct hlist_head i_dentry; struct callback_head i_rcu; } ; 479 struct file_lock ; 479 struct cdev ; 479 union __anonunion_ldv_21201_175 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; } ; 479 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct 
address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion_ldv_21164_173 ldv_21164; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct mutex i_mutex; unsigned long dirtied_when; struct hlist_node i_hash; struct list_head i_wb_list; struct list_head i_lru; struct list_head i_sb_list; union __anonunion_ldv_21184_174 ldv_21184; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; const struct file_operations *i_fop; struct file_lock *i_flock; struct address_space i_data; struct dquot *i_dquot[2U]; struct list_head i_devices; union __anonunion_ldv_21201_175 ldv_21201; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; void *i_private; } ; 715 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ; 723 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ; 746 union __anonunion_f_u_176 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ; 746 struct file { union __anonunion_f_u_176 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; } ; 836 struct files_struct ; 836 typedef struct files_struct *fl_owner_t; 837 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ; 842 struct 
lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, struct file_lock *, int); void (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock **, int); } ; 860 struct nlm_lockowner ; 861 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ; 14 struct nfs4_lock_state ; 15 struct nfs4_lock_info { struct nfs4_lock_state *owner; } ; 19 struct fasync_struct ; 19 struct __anonstruct_afs_178 { struct list_head link; int state; } ; 19 union __anonunion_fl_u_177 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_178 afs; } ; 19 struct file_lock { struct file_lock *fl_next; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_177 fl_u; } ; 963 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ; 1157 struct sb_writers { struct percpu_counter counter[3U]; wait_queue_head_t wait; int frozen; wait_queue_head_t wait_unfrozen; struct lockdep_map lock_map[3U]; } ; 1173 struct super_operations ; 1173 struct xattr_handler ; 1173 struct mtd_info ; 1173 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const 
struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; struct list_head s_inodes; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; } ; 1403 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ; 1441 struct dir_context { int (*actor)(void *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ; 1446 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*aio_read)(struct kiocb *, const struct iovec *, unsigned long, loff_t ); ssize_t (*aio_write)(struct kiocb *, const struct iovec *, unsigned long, loff_t ); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iterate)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, 
struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*aio_fsync)(struct kiocb *, int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); int (*show_fdinfo)(struct seq_file *, struct file *); } ; 1488 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); void * (*follow_link)(struct dentry *, struct nameidata *); int (*permission)(struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); void (*put_link)(struct dentry *, struct nameidata *, void *); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *); int (*rename2)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct dentry *, struct iattr *); int 
(*getattr)(struct vfsmount *, struct dentry *, struct kstat *); int (*setxattr)(struct dentry *, const char *, const void *, size_t , int); ssize_t (*getxattr)(struct dentry *, const char *, void *, size_t ); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*removexattr)(struct dentry *, const char *); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ; 1535 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_fs)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, int); long int (*free_cached_objects)(struct super_block *, long, int); } ; 1749 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char 
*, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ; 84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ; 4 typedef unsigned long cputime_t; 25 struct sem_undo_list ; 25 struct sysv_sem { struct sem_undo_list *undo_list; } ; 24 struct __anonstruct_sigset_t_179 { unsigned long sig[1U]; } ; 24 typedef struct __anonstruct_sigset_t_179 sigset_t; 25 struct siginfo ; 17 typedef void __signalfn_t(int); 18 typedef __signalfn_t *__sighandler_t; 20 typedef void __restorefn_t(); 21 typedef __restorefn_t *__sigrestore_t; 34 union sigval { int sival_int; void *sival_ptr; } ; 10 typedef union sigval sigval_t; 11 struct __anonstruct__kill_181 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ; 11 struct __anonstruct__timer_182 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ; 11 struct __anonstruct__rt_183 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ; 11 struct __anonstruct__sigchld_184 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ; 11 struct __anonstruct__sigfault_185 { void *_addr; short _addr_lsb; } ; 11 struct __anonstruct__sigpoll_186 { long _band; int _fd; } ; 11 struct __anonstruct__sigsys_187 { void *_call_addr; int _syscall; unsigned int _arch; } ; 11 union __anonunion__sifields_180 { int _pad[28U]; struct __anonstruct__kill_181 _kill; struct __anonstruct__timer_182 _timer; struct __anonstruct__rt_183 _rt; struct __anonstruct__sigchld_184 _sigchld; struct __anonstruct__sigfault_185 _sigfault; struct __anonstruct__sigpoll_186 _sigpoll; struct __anonstruct__sigsys_187 
_sigsys; } ; 11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_180 _sifields; } ; 109 typedef struct siginfo siginfo_t; 11 struct user_struct ; 21 struct sigpending { struct list_head list; sigset_t signal; } ; 246 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; } ; 260 struct k_sigaction { struct sigaction sa; } ; 46 struct seccomp_filter ; 47 struct seccomp { int mode; struct seccomp_filter *filter; } ; 40 struct rt_mutex_waiter ; 41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ; 11 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ; 45 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ; 39 struct assoc_array_ptr ; 39 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ; 31 typedef int32_t key_serial_t; 34 typedef uint32_t key_perm_t; 35 struct key ; 36 struct signal_struct ; 37 struct key_type ; 41 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ; 123 union __anonunion_ldv_24022_190 { struct list_head graveyard_link; struct rb_node serial_node; } ; 123 struct key_user ; 123 union __anonunion_ldv_24030_191 { time_t expiry; time_t revoked_at; } ; 123 struct __anonstruct_ldv_24043_193 { struct key_type *type; char *description; } ; 123 union __anonunion_ldv_24044_192 { struct keyring_index_key index_key; struct __anonstruct_ldv_24043_193 ldv_24043; } ; 123 union __anonunion_type_data_194 { struct list_head link; unsigned long x[2U]; void *p[2U]; int reject_error; } ; 123 union __anonunion_payload_196 { unsigned long value; void *rcudata; void *data; void *data2[2U]; } ; 123 union __anonunion_ldv_24059_195 { union __anonunion_payload_196 payload; struct assoc_array keys; } ; 123 struct key { atomic_t usage; 
key_serial_t serial; union __anonunion_ldv_24022_190 ldv_24022; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion_ldv_24030_191 ldv_24030; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion_ldv_24044_192 ldv_24044; union __anonunion_type_data_194 type_data; union __anonunion_ldv_24059_195 ldv_24059; } ; 356 struct audit_context ; 27 struct group_info { atomic_t usage; int ngroups; int nblocks; kgid_t small_block[32U]; kgid_t *blocks[0U]; } ; 78 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ; 125 struct futex_pi_state ; 126 struct robust_list_head ; 127 struct bio_list ; 128 struct fs_struct ; 129 struct perf_event_context ; 130 struct blk_plug ; 180 struct cfs_rq ; 181 struct task_group ; 426 struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ; 465 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ; 473 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ; 480 struct cputime { cputime_t utime; cputime_t stime; } ; 492 struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; } ; 512 struct thread_group_cputimer { struct task_cputime 
cputime; int running; raw_spinlock_t lock; } ; 554 struct autogroup ; 555 struct tty_struct ; 555 struct taskstats ; 555 struct tty_audit_buf ; 555 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; unsigned int audit_tty_log_passwd; struct tty_audit_buf *tty_audit_buf; struct rw_semaphore group_rwsem; oom_flags_t oom_flags; short oom_score_adj; short oom_score_adj_min; struct mutex cred_guard_mutex; } ; 735 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; struct key *uid_keyring; struct key 
*session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ; 778 struct reclaim_state ; 779 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ; 794 struct task_delay_info { spinlock_t lock; unsigned int flags; struct timespec blkio_start; struct timespec blkio_end; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; struct timespec freepages_start; struct timespec freepages_end; u64 freepages_delay; u32 freepages_count; } ; 1061 struct load_weight { unsigned long weight; u32 inv_weight; } ; 1069 struct sched_avg { u32 runnable_avg_sum; u32 runnable_avg_period; u64 last_runnable_update; s64 decay_count; unsigned long load_avg_contrib; } ; 1081 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ; 1116 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ; 1148 struct rt_rq ; 1148 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct 
rt_rq *my_q; } ; 1164 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_new; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ; 1222 struct mem_cgroup ; 1222 struct memcg_batch_info { int do_batch; struct mem_cgroup *memcg; unsigned long nr_pages; unsigned long memsw_nr_pages; } ; 1643 struct memcg_oom_info { struct mem_cgroup *memcg; gfp_t gfp_mask; int order; unsigned char may_oom; } ; 1650 struct sched_class ; 1650 struct css_set ; 1650 struct compat_robust_list_head ; 1650 struct numa_group ; 1650 struct task_struct { volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; struct task_struct *last_wakee; unsigned long wakee_flips; unsigned long wakee_flip_decay_ts; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; unsigned char brk_randomized; u32 vmacache_seqnum; struct vm_area_struct *vmacache[4U]; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned int jobctl; unsigned int personality; unsigned char in_execve; unsigned char in_iowait; unsigned char no_new_privs; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head 
ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t utimescaled; cputime_t stimescaled; cputime_t gtime; struct cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; struct timespec start_time; struct timespec real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *real_cred; const struct cred *cred; char comm[16U]; int link_count; int total_link_count; struct sysv_sem sysvsem; unsigned long last_switch_count; struct thread_struct thread; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; int (*notifier)(void *); void *notifier_data; sigset_t *notifier_mask; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct task_struct *pi_top_task; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; 
void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t acct_timexpd; nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults_memory; unsigned long total_numa_faults; unsigned long *numa_faults_buffer_memory; unsigned long *numa_faults_cpu; unsigned long *numa_faults_buffer_cpu; unsigned long numa_faults_locality[2U]; unsigned long numa_pages_migrated; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; unsigned long timer_slack_ns; unsigned long default_timer_slack_ns; unsigned long trace; unsigned long trace_recursion; struct memcg_batch_info memcg_batch; unsigned int memcg_kmem_skip_account; struct memcg_oom_info memcg_oom; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; } ; 
272 struct usb_device ; 274 struct wusb_dev ; 275 struct ep_device ; 276 struct usb_host_endpoint { struct usb_endpoint_descriptor desc; struct usb_ss_ep_comp_descriptor ss_ep_comp; struct list_head urb_list; void *hcpriv; struct ep_device *ep_dev; unsigned char *extra; int extralen; int enabled; int streams; } ; 77 struct usb_host_interface { struct usb_interface_descriptor desc; int extralen; unsigned char *extra; struct usb_host_endpoint *endpoint; char *string; } ; 92 enum usb_interface_condition { USB_INTERFACE_UNBOUND = 0, USB_INTERFACE_BINDING = 1, USB_INTERFACE_BOUND = 2, USB_INTERFACE_UNBINDING = 3 } ; 99 struct usb_interface { struct usb_host_interface *altsetting; struct usb_host_interface *cur_altsetting; unsigned int num_altsetting; struct usb_interface_assoc_descriptor *intf_assoc; int minor; enum usb_interface_condition condition; unsigned char sysfs_files_created; unsigned char ep_devs_created; unsigned char unregistering; unsigned char needs_remote_wakeup; unsigned char needs_altsetting0; unsigned char needs_binding; unsigned char reset_running; unsigned char resetting_device; struct device dev; struct device *usb_dev; atomic_t pm_usage_cnt; struct work_struct reset_ws; } ; 206 struct usb_interface_cache { unsigned int num_altsetting; struct kref ref; struct usb_host_interface altsetting[0U]; } ; 235 struct usb_host_config { struct usb_config_descriptor desc; char *string; struct usb_interface_assoc_descriptor *intf_assoc[16U]; struct usb_interface *interface[32U]; struct usb_interface_cache *intf_cache[32U]; unsigned char *extra; int extralen; } ; 299 struct usb_host_bos { struct usb_bos_descriptor *desc; struct usb_ext_cap_descriptor *ext_cap; struct usb_ss_cap_descriptor *ss_cap; struct usb_ss_container_id_descriptor *ss_id; } ; 311 struct usb_devmap { unsigned long devicemap[2U]; } ; 323 struct mon_bus ; 323 struct usb_bus { struct device *controller; int busnum; const char *bus_name; u8 uses_dma; u8 uses_pio_for_control; u8 otg_port; unsigned 
char is_b_host; unsigned char b_hnp_enable; unsigned char no_stop_on_short; unsigned char no_sg_constraint; unsigned int sg_tablesize; int devnum_next; struct usb_devmap devmap; struct usb_device *root_hub; struct usb_bus *hs_companion; struct list_head bus_list; struct mutex usb_address0_mutex; int bandwidth_allocated; int bandwidth_int_reqs; int bandwidth_isoc_reqs; unsigned int resuming_ports; struct mon_bus *mon_bus; int monitored; } ; 374 struct usb_tt ; 375 enum usb_device_removable { USB_DEVICE_REMOVABLE_UNKNOWN = 0, USB_DEVICE_REMOVABLE = 1, USB_DEVICE_FIXED = 2 } ; 388 struct usb2_lpm_parameters { unsigned int besl; int timeout; } ; 409 struct usb3_lpm_parameters { unsigned int mel; unsigned int pel; unsigned int sel; int timeout; } ; 448 struct usb_device { int devnum; char devpath[16U]; u32 route; enum usb_device_state state; enum usb_device_speed speed; struct usb_tt *tt; int ttport; unsigned int toggle[2U]; struct usb_device *parent; struct usb_bus *bus; struct usb_host_endpoint ep0; struct device dev; struct usb_device_descriptor descriptor; struct usb_host_bos *bos; struct usb_host_config *config; struct usb_host_config *actconfig; struct usb_host_endpoint *ep_in[16U]; struct usb_host_endpoint *ep_out[16U]; char **rawdescriptors; unsigned short bus_mA; u8 portnum; u8 level; unsigned char can_submit; unsigned char persist_enabled; unsigned char have_langid; unsigned char authorized; unsigned char authenticated; unsigned char wusb; unsigned char lpm_capable; unsigned char usb2_hw_lpm_capable; unsigned char usb2_hw_lpm_besl_capable; unsigned char usb2_hw_lpm_enabled; unsigned char usb2_hw_lpm_allowed; unsigned char usb3_lpm_enabled; int string_langid; char *product; char *manufacturer; char *serial; struct list_head filelist; int maxchild; u32 quirks; atomic_t urbnum; unsigned long active_duration; unsigned long connect_time; unsigned char do_remote_wakeup; unsigned char reset_resume; unsigned char port_is_suspended; struct wusb_dev *wusb_dev; int 
slot_id; enum usb_device_removable removable; struct usb2_lpm_parameters l1_params; struct usb3_lpm_parameters u1_params; struct usb3_lpm_parameters u2_params; unsigned int lpm_disable_count; } ; 1179 struct usb_iso_packet_descriptor { unsigned int offset; unsigned int length; unsigned int actual_length; int status; } ; 1221 struct urb ; 1222 struct usb_anchor { struct list_head urb_list; wait_queue_head_t wait; spinlock_t lock; atomic_t suspend_wakeups; unsigned char poisoned; } ; 1241 struct scatterlist ; 1241 struct urb { struct kref kref; void *hcpriv; atomic_t use_count; atomic_t reject; int unlinked; struct list_head urb_list; struct list_head anchor_list; struct usb_anchor *anchor; struct usb_device *dev; struct usb_host_endpoint *ep; unsigned int pipe; unsigned int stream_id; int status; unsigned int transfer_flags; void *transfer_buffer; dma_addr_t transfer_dma; struct scatterlist *sg; int num_mapped_sgs; int num_sgs; u32 transfer_buffer_length; u32 actual_length; unsigned char *setup_packet; dma_addr_t setup_dma; int start_frame; int number_of_packets; int interval; int error_count; void *context; void (*complete)(struct urb *); struct usb_iso_packet_descriptor iso_frame_desc[0U]; } ; 368 struct kmem_cache_cpu { void **freelist; unsigned long tid; struct page *page; struct page *partial; unsigned int stat[26U]; } ; 48 struct kmem_cache_order_objects { unsigned long x; } ; 58 struct memcg_cache_params ; 58 struct kmem_cache_node ; 58 struct kmem_cache { struct kmem_cache_cpu *cpu_slab; unsigned long flags; unsigned long min_partial; int size; int object_size; int offset; int cpu_partial; struct kmem_cache_order_objects oo; struct kmem_cache_order_objects max; struct kmem_cache_order_objects min; gfp_t allocflags; int refcount; void (*ctor)(void *); int inuse; int align; int reserved; const char *name; struct list_head list; struct kobject kobj; struct memcg_cache_params *memcg_params; int max_attr_size; struct kset *memcg_kset; int 
remote_node_defrag_ratio; struct kmem_cache_node *node[1024U]; } ; 501 struct __anonstruct_ldv_26726_199 { struct callback_head callback_head; struct kmem_cache *memcg_caches[0U]; } ; 501 struct __anonstruct_ldv_26732_200 { struct mem_cgroup *memcg; struct list_head list; struct kmem_cache *root_cache; atomic_t nr_pages; } ; 501 union __anonunion_ldv_26733_198 { struct __anonstruct_ldv_26726_199 ldv_26726; struct __anonstruct_ldv_26732_200 ldv_26732; } ; 501 struct memcg_cache_params { bool is_root_cache; union __anonunion_ldv_26733_198 ldv_26733; } ; 32 struct poll_table_struct { void (*_qproc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); unsigned long _key; } ; 163 struct cdev { struct kobject kobj; struct module *owner; const struct file_operations *ops; struct list_head list; dev_t dev; unsigned int count; } ; 34 struct media_file_operations { struct module *owner; ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*open)(struct file *); int (*release)(struct file *); } ; 53 struct media_devnode { const struct media_file_operations *fops; struct device dev; struct cdev cdev; struct device *parent; int minor; unsigned long flags; void (*release)(struct media_devnode *); } ; 129 struct media_pipeline { } ; 132 struct media_pad ; 132 struct media_link { struct media_pad *source; struct media_pad *sink; struct media_link *reverse; unsigned long flags; } ; 40 struct media_entity ; 40 struct media_pad { struct media_entity *entity; u16 index; unsigned long flags; } ; 46 struct media_entity_operations { int (*link_setup)(struct media_entity *, const struct media_pad *, const struct media_pad *, u32 ); int (*link_validate)(struct media_link *); } ; 53 struct media_device ; 53 
struct __anonstruct_v4l_207 { u32 major; u32 minor; } ; 53 struct __anonstruct_fb_208 { u32 major; u32 minor; } ; 53 struct __anonstruct_alsa_209 { u32 card; u32 device; u32 subdevice; } ; 53 union __anonunion_info_206 { struct __anonstruct_v4l_207 v4l; struct __anonstruct_fb_208 fb; struct __anonstruct_alsa_209 alsa; int dvb; } ; 53 struct media_entity { struct list_head list; struct media_device *parent; u32 id; const char *name; u32 type; u32 revision; unsigned long flags; u32 group_id; u16 num_pads; u16 num_links; u16 num_backlinks; u16 max_links; struct media_pad *pads; struct media_link *links; const struct media_entity_operations *ops; int stream_count; int use_count; struct media_pipeline *pipe; union __anonunion_info_206 info; } ; 155 struct media_device { struct device *dev; struct media_devnode devnode; char model[32U]; char serial[40U]; char bus_info[32U]; u32 hw_revision; u32 driver_version; u32 entity_id; struct list_head entities; spinlock_t lock; struct mutex graph_mutex; int (*link_notify)(struct media_link *, u32 , unsigned int); } ; 98 struct v4l2_edid { __u32 pad; __u32 start_block; __u32 blocks; __u32 reserved[5U]; __u8 *edid; } ; 562 enum v4l2_buf_type { V4L2_BUF_TYPE_VIDEO_CAPTURE = 1, V4L2_BUF_TYPE_VIDEO_OUTPUT = 2, V4L2_BUF_TYPE_VIDEO_OVERLAY = 3, V4L2_BUF_TYPE_VBI_CAPTURE = 4, V4L2_BUF_TYPE_VBI_OUTPUT = 5, V4L2_BUF_TYPE_SLICED_VBI_CAPTURE = 6, V4L2_BUF_TYPE_SLICED_VBI_OUTPUT = 7, V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY = 8, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE = 9, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE = 10, V4L2_BUF_TYPE_SDR_CAPTURE = 11, V4L2_BUF_TYPE_PRIVATE = 128 } ; 585 enum v4l2_memory { V4L2_MEMORY_MMAP = 1, V4L2_MEMORY_USERPTR = 2, V4L2_MEMORY_OVERLAY = 3, V4L2_MEMORY_DMABUF = 4 } ; 603 enum v4l2_priority { V4L2_PRIORITY_UNSET = 0, V4L2_PRIORITY_BACKGROUND = 1, V4L2_PRIORITY_INTERACTIVE = 2, V4L2_PRIORITY_RECORD = 3, V4L2_PRIORITY_DEFAULT = 2 } ; 611 struct v4l2_rect { __s32 left; __s32 top; __u32 width; __u32 height; } ; 216 struct 
v4l2_fract { __u32 numerator; __u32 denominator; } ; 242 struct v4l2_pix_format { __u32 width; __u32 height; __u32 pixelformat; __u32 field; __u32 bytesperline; __u32 sizeimage; __u32 colorspace; __u32 priv; } ; 460 struct v4l2_frmsize_discrete { __u32 width; __u32 height; } ; 473 struct v4l2_frmsize_stepwise { __u32 min_width; __u32 max_width; __u32 step_width; __u32 min_height; __u32 max_height; __u32 step_height; } ; 482 union __anonunion_ldv_27597_211 { struct v4l2_frmsize_discrete discrete; struct v4l2_frmsize_stepwise stepwise; } ; 482 struct v4l2_frmsizeenum { __u32 index; __u32 pixel_format; __u32 type; union __anonunion_ldv_27597_211 ldv_27597; __u32 reserved[2U]; } ; 501 struct v4l2_frmival_stepwise { struct v4l2_fract min; struct v4l2_fract max; struct v4l2_fract step; } ; 510 union __anonunion_ldv_27616_212 { struct v4l2_fract discrete; struct v4l2_frmival_stepwise stepwise; } ; 510 struct v4l2_frmivalenum { __u32 index; __u32 pixel_format; __u32 width; __u32 height; __u32 type; union __anonunion_ldv_27616_212 ldv_27616; __u32 reserved[2U]; } ; 525 struct v4l2_timecode { __u32 type; __u32 flags; __u8 frames; __u8 seconds; __u8 minutes; __u8 hours; __u8 userbits[4U]; } ; 593 union __anonunion_m_213 { __u32 mem_offset; unsigned long userptr; __s32 fd; } ; 593 struct v4l2_plane { __u32 bytesused; __u32 length; union __anonunion_m_213 m; __u32 data_offset; __u32 reserved[11U]; } ; 625 union __anonunion_m_214 { __u32 offset; unsigned long userptr; struct v4l2_plane *planes; __s32 fd; } ; 625 struct v4l2_buffer { __u32 index; __u32 type; __u32 bytesused; __u32 flags; __u32 field; struct timeval timestamp; struct v4l2_timecode timecode; __u32 sequence; __u32 memory; union __anonunion_m_214 m; __u32 length; __u32 reserved2; __u32 reserved; } ; 749 struct v4l2_clip { struct v4l2_rect c; struct v4l2_clip *next; } ; 771 struct v4l2_window { struct v4l2_rect w; __u32 field; __u32 chromakey; struct v4l2_clip *clips; __u32 clipcount; void *bitmap; __u8 global_alpha; 
} ; 781 struct v4l2_captureparm { __u32 capability; __u32 capturemode; struct v4l2_fract timeperframe; __u32 extendedmode; __u32 readbuffers; __u32 reserved[4U]; } ; 793 struct v4l2_outputparm { __u32 capability; __u32 outputmode; struct v4l2_fract timeperframe; __u32 extendedmode; __u32 writebuffers; __u32 reserved[4U]; } ; 806 struct v4l2_cropcap { __u32 type; struct v4l2_rect bounds; struct v4l2_rect defrect; struct v4l2_fract pixelaspect; } ; 816 struct v4l2_crop { __u32 type; struct v4l2_rect c; } ; 848 typedef __u64 v4l2_std_id; 984 struct v4l2_bt_timings { __u32 width; __u32 height; __u32 interlaced; __u32 polarities; __u64 pixelclock; __u32 hfrontporch; __u32 hsync; __u32 hbackporch; __u32 vfrontporch; __u32 vsync; __u32 vbackporch; __u32 il_vfrontporch; __u32 il_vsync; __u32 il_vbackporch; __u32 standards; __u32 flags; __u32 reserved[14U]; } ; 1040 union __anonunion_ldv_27750_215 { struct v4l2_bt_timings bt; __u32 reserved[32U]; } ; 1040 struct v4l2_dv_timings { __u32 type; union __anonunion_ldv_27750_215 ldv_27750; } ; 1102 struct v4l2_enum_dv_timings { __u32 index; __u32 pad; __u32 reserved[2U]; struct v4l2_dv_timings timings; } ; 1120 struct v4l2_bt_timings_cap { __u32 min_width; __u32 max_width; __u32 min_height; __u32 max_height; __u64 min_pixelclock; __u64 max_pixelclock; __u32 standards; __u32 capabilities; __u32 reserved[16U]; } ; 1143 union __anonunion_ldv_27773_216 { struct v4l2_bt_timings_cap bt; __u32 raw_data[32U]; } ; 1143 struct v4l2_dv_timings_cap { __u32 type; __u32 pad; __u32 reserved[2U]; union __anonunion_ldv_27773_216 ldv_27773; } ; 1231 struct v4l2_control { __u32 id; __s32 value; } ; 1248 union __anonunion_ldv_27804_217 { __s32 value; __s64 value64; char *string; } ; 1248 struct v4l2_ext_control { __u32 id; __u32 size; __u32 reserved2[1U]; union __anonunion_ldv_27804_217 ldv_27804; } ; 1259 struct v4l2_ext_controls { __u32 ctrl_class; __u32 count; __u32 error_idx; __u32 reserved[2U]; struct v4l2_ext_control *controls; } ; 1279 struct 
v4l2_queryctrl { __u32 id; __u32 type; __u8 name[32U]; __s32 minimum; __s32 maximum; __s32 step; __s32 default_value; __u32 flags; __u32 reserved[2U]; } ; 1296 union __anonunion_ldv_27837_218 { __u8 name[32U]; __s64 value; } ; 1296 struct v4l2_querymenu { __u32 id; __u32 index; union __anonunion_ldv_27837_218 ldv_27837; __u32 reserved; } ; 1307 struct v4l2_tuner { __u32 index; __u8 name[32U]; __u32 type; __u32 capability; __u32 rangelow; __u32 rangehigh; __u32 rxsubchans; __u32 audmode; __s32 signal; __s32 afc; __u32 reserved[4U]; } ; 1343 struct v4l2_modulator { __u32 index; __u8 name[32U]; __u32 capability; __u32 rangelow; __u32 rangehigh; __u32 txsubchans; __u32 reserved[4U]; } ; 1353 struct v4l2_frequency { __u32 tuner; __u32 type; __u32 frequency; __u32 reserved[8U]; } ; 1392 struct v4l2_frequency_band { __u32 tuner; __u32 type; __u32 index; __u32 capability; __u32 rangelow; __u32 rangehigh; __u32 modulation; __u32 reserved[9U]; } ; 1560 struct v4l2_vbi_format { __u32 sampling_rate; __u32 offset; __u32 samples_per_line; __u32 sample_format; __s32 start[2U]; __u32 count[2U]; __u32 flags; __u32 reserved[2U]; } ; 1580 struct v4l2_sliced_vbi_format { __u16 service_set; __u16 service_lines[2U][24U]; __u32 io_size; __u32 reserved[2U]; } ; 1602 struct v4l2_sliced_vbi_cap { __u16 service_set; __u16 service_lines[2U][24U]; __u32 type; __u32 reserved[3U]; } ; 1626 struct v4l2_sliced_vbi_data { __u32 id; __u32 field; __u32 line; __u32 reserved; __u8 data[48U]; } ; 1681 struct v4l2_plane_pix_format { __u32 sizeimage; __u16 bytesperline; __u16 reserved[7U]; } ; 1698 struct v4l2_pix_format_mplane { __u32 width; __u32 height; __u32 pixelformat; __u32 field; __u32 colorspace; struct v4l2_plane_pix_format plane_fmt[8U]; __u8 num_planes; __u8 reserved[11U]; } ; 1720 struct v4l2_sdr_format { __u32 pixelformat; __u8 reserved[28U]; } ; 1729 union __anonunion_fmt_226 { struct v4l2_pix_format pix; struct v4l2_pix_format_mplane pix_mp; struct v4l2_window win; struct v4l2_vbi_format 
vbi; struct v4l2_sliced_vbi_format sliced; struct v4l2_sdr_format sdr; __u8 raw_data[200U]; } ; 1729 struct v4l2_format { __u32 type; union __anonunion_fmt_226 fmt; } ; 1752 union __anonunion_parm_227 { struct v4l2_captureparm capture; struct v4l2_outputparm output; __u8 raw_data[200U]; } ; 1752 struct v4l2_streamparm { __u32 type; union __anonunion_parm_227 parm; } ; 1826 struct v4l2_event_subscription { __u32 type; __u32 id; __u32 flags; __u32 reserved[5U]; } ; 1836 union __anonunion_ldv_28047_230 { __u32 addr; char name[32U]; } ; 1836 struct v4l2_dbg_match { __u32 type; union __anonunion_ldv_28047_230 ldv_28047; } ; 1862 struct v4l2_dbg_register { struct v4l2_dbg_match match; __u32 size; __u64 reg; __u64 val; } ; 1897 enum v4l2_mbus_pixelcode { V4L2_MBUS_FMT_FIXED = 1, V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE = 4097, V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE = 4098, V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE = 4099, V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE = 4100, V4L2_MBUS_FMT_BGR565_2X8_BE = 4101, V4L2_MBUS_FMT_BGR565_2X8_LE = 4102, V4L2_MBUS_FMT_RGB565_2X8_BE = 4103, V4L2_MBUS_FMT_RGB565_2X8_LE = 4104, V4L2_MBUS_FMT_RGB666_1X18 = 4105, V4L2_MBUS_FMT_RGB888_1X24 = 4106, V4L2_MBUS_FMT_RGB888_2X12_BE = 4107, V4L2_MBUS_FMT_RGB888_2X12_LE = 4108, V4L2_MBUS_FMT_ARGB8888_1X32 = 4109, V4L2_MBUS_FMT_Y8_1X8 = 8193, V4L2_MBUS_FMT_UV8_1X8 = 8213, V4L2_MBUS_FMT_UYVY8_1_5X8 = 8194, V4L2_MBUS_FMT_VYUY8_1_5X8 = 8195, V4L2_MBUS_FMT_YUYV8_1_5X8 = 8196, V4L2_MBUS_FMT_YVYU8_1_5X8 = 8197, V4L2_MBUS_FMT_UYVY8_2X8 = 8198, V4L2_MBUS_FMT_VYUY8_2X8 = 8199, V4L2_MBUS_FMT_YUYV8_2X8 = 8200, V4L2_MBUS_FMT_YVYU8_2X8 = 8201, V4L2_MBUS_FMT_Y10_1X10 = 8202, V4L2_MBUS_FMT_UYVY10_2X10 = 8216, V4L2_MBUS_FMT_VYUY10_2X10 = 8217, V4L2_MBUS_FMT_YUYV10_2X10 = 8203, V4L2_MBUS_FMT_YVYU10_2X10 = 8204, V4L2_MBUS_FMT_Y12_1X12 = 8211, V4L2_MBUS_FMT_UYVY8_1X16 = 8207, V4L2_MBUS_FMT_VYUY8_1X16 = 8208, V4L2_MBUS_FMT_YUYV8_1X16 = 8209, V4L2_MBUS_FMT_YVYU8_1X16 = 8210, V4L2_MBUS_FMT_YDYUYDYV8_1X16 = 8212, V4L2_MBUS_FMT_UYVY10_1X20 = 8218, 
V4L2_MBUS_FMT_VYUY10_1X20 = 8219, V4L2_MBUS_FMT_YUYV10_1X20 = 8205, V4L2_MBUS_FMT_YVYU10_1X20 = 8206, V4L2_MBUS_FMT_YUV10_1X30 = 8214, V4L2_MBUS_FMT_AYUV8_1X32 = 8215, V4L2_MBUS_FMT_UYVY12_2X12 = 8220, V4L2_MBUS_FMT_VYUY12_2X12 = 8221, V4L2_MBUS_FMT_YUYV12_2X12 = 8222, V4L2_MBUS_FMT_YVYU12_2X12 = 8223, V4L2_MBUS_FMT_UYVY12_1X24 = 8224, V4L2_MBUS_FMT_VYUY12_1X24 = 8225, V4L2_MBUS_FMT_YUYV12_1X24 = 8226, V4L2_MBUS_FMT_YVYU12_1X24 = 8227, V4L2_MBUS_FMT_SBGGR8_1X8 = 12289, V4L2_MBUS_FMT_SGBRG8_1X8 = 12307, V4L2_MBUS_FMT_SGRBG8_1X8 = 12290, V4L2_MBUS_FMT_SRGGB8_1X8 = 12308, V4L2_MBUS_FMT_SBGGR10_ALAW8_1X8 = 12309, V4L2_MBUS_FMT_SGBRG10_ALAW8_1X8 = 12310, V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8 = 12311, V4L2_MBUS_FMT_SRGGB10_ALAW8_1X8 = 12312, V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8 = 12299, V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8 = 12300, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8 = 12297, V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8 = 12301, V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE = 12291, V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE = 12292, V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE = 12293, V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE = 12294, V4L2_MBUS_FMT_SBGGR10_1X10 = 12295, V4L2_MBUS_FMT_SGBRG10_1X10 = 12302, V4L2_MBUS_FMT_SGRBG10_1X10 = 12298, V4L2_MBUS_FMT_SRGGB10_1X10 = 12303, V4L2_MBUS_FMT_SBGGR12_1X12 = 12296, V4L2_MBUS_FMT_SGBRG12_1X12 = 12304, V4L2_MBUS_FMT_SGRBG12_1X12 = 12305, V4L2_MBUS_FMT_SRGGB12_1X12 = 12306, V4L2_MBUS_FMT_JPEG_1X8 = 16385, V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8 = 20481, V4L2_MBUS_FMT_AHSV8888_1X32 = 24577 } ; 1976 struct v4l2_mbus_framefmt { __u32 width; __u32 height; __u32 code; __u32 field; __u32 colorspace; __u32 reserved[7U]; } ; 151 struct v4l2_subdev_format { __u32 which; __u32 pad; struct v4l2_mbus_framefmt format; __u32 reserved[8U]; } ; 53 struct v4l2_subdev_crop { __u32 which; __u32 pad; struct v4l2_rect rect; __u32 reserved[8U]; } ; 66 struct v4l2_subdev_mbus_code_enum { __u32 pad; __u32 index; __u32 code; __u32 reserved[9U]; } ; 79 struct v4l2_subdev_frame_size_enum { __u32 index; __u32 pad; __u32 code; 
__u32 min_width; __u32 max_width; __u32 min_height; __u32 max_height; __u32 reserved[9U]; } ; 96 struct v4l2_subdev_frame_interval { __u32 pad; struct v4l2_fract interval; __u32 reserved[9U]; } ; 107 struct v4l2_subdev_frame_interval_enum { __u32 index; __u32 pad; __u32 code; __u32 width; __u32 height; struct v4l2_fract interval; __u32 reserved[9U]; } ; 126 struct v4l2_subdev_selection { __u32 which; __u32 pad; __u32 target; __u32 flags; struct v4l2_rect r; __u32 reserved[8U]; } ; 150 struct v4l2_device ; 151 struct v4l2_subdev ; 152 struct v4l2_async_notifier ; 153 enum v4l2_async_match_type { V4L2_ASYNC_MATCH_CUSTOM = 0, V4L2_ASYNC_MATCH_DEVNAME = 1, V4L2_ASYNC_MATCH_I2C = 2, V4L2_ASYNC_MATCH_OF = 3 } ; 160 struct __anonstruct_of_232 { const struct device_node *node; } ; 160 struct __anonstruct_device_name_233 { const char *name; } ; 160 struct __anonstruct_i2c_234 { int adapter_id; unsigned short address; } ; 160 struct __anonstruct_custom_235 { bool (*match)(struct device *, struct v4l2_async_subdev *); void *priv; } ; 160 union __anonunion_match_231 { struct __anonstruct_of_232 of; struct __anonstruct_device_name_233 device_name; struct __anonstruct_i2c_234 i2c; struct __anonstruct_custom_235 custom; } ; 160 struct v4l2_async_subdev { enum v4l2_async_match_type match_type; union __anonunion_match_231 match; struct list_head list; } ; 63 struct v4l2_async_notifier { unsigned int num_subdevs; struct v4l2_async_subdev **subdevs; struct v4l2_device *v4l2_dev; struct list_head waiting; struct list_head done; struct list_head list; int (*bound)(struct v4l2_async_notifier *, struct v4l2_subdev *, struct v4l2_async_subdev *); int (*complete)(struct v4l2_async_notifier *); void (*unbind)(struct v4l2_async_notifier *, struct v4l2_subdev *, struct v4l2_async_subdev *); } ; 98 struct video_device ; 99 struct v4l2_ctrl_handler ; 100 struct v4l2_prio_state { atomic_t prios[4U]; } ; 63 struct v4l2_file_operations { struct module *owner; ssize_t (*read)(struct file *, char *, 
size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*ioctl)(struct file *, unsigned int, unsigned long); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl32)(struct file *, unsigned int, unsigned long); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct file *); int (*release)(struct file *); } ; 81 struct vb2_queue ; 81 struct v4l2_ioctl_ops ; 81 struct video_device { struct media_entity entity; const struct v4l2_file_operations *fops; struct device dev; struct cdev *cdev; struct v4l2_device *v4l2_dev; struct device *dev_parent; struct v4l2_ctrl_handler *ctrl_handler; struct vb2_queue *queue; struct v4l2_prio_state *prio; char name[32U]; int vfl_type; int vfl_dir; int minor; u16 num; unsigned long flags; int index; spinlock_t fh_lock; struct list_head fh_list; int debug; v4l2_std_id tvnorms; void (*release)(struct video_device *); const struct v4l2_ioctl_ops *ioctl_ops; unsigned long valid_ioctls[3U]; unsigned long disable_locking[3U]; struct mutex *lock; } ; 104 struct v4l2_subdev_ops ; 188 struct vm_fault { unsigned int flags; unsigned long pgoff; void *virtual_address; struct page *page; unsigned long max_pgoff; pte_t *pte; } ; 221 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*fault)(struct vm_area_struct *, struct vm_fault *); void (*map_pages)(struct vm_area_struct *, struct vm_fault *); int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); 
int (*migrate)(struct vm_area_struct *, const nodemask_t *, const nodemask_t *, unsigned long); int (*remap_pages)(struct vm_area_struct *, unsigned long, unsigned long, unsigned long); } ; 2112 struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; } ; 17 struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; } ; 162 struct v4l2_priv_tun_config { int tuner; void *priv; } ; 206 struct v4l2_m2m_ctx ; 206 struct v4l2_fh { struct list_head list; struct video_device *vdev; struct v4l2_ctrl_handler *ctrl_handler; enum v4l2_priority prio; wait_queue_head_t wait; struct list_head subscribed; struct list_head available; unsigned int navailable; u32 sequence; struct v4l2_m2m_ctx *m2m_ctx; } ; 106 enum v4l2_mbus_type { V4L2_MBUS_PARALLEL = 0, V4L2_MBUS_BT656 = 1, V4L2_MBUS_CSI2 = 2 } ; 112 struct v4l2_mbus_config { enum v4l2_mbus_type type; unsigned int flags; } ; 109 struct v4l2_subdev_fh ; 110 struct tuner_setup ; 111 struct v4l2_mbus_frame_desc ; 112 struct v4l2_decode_vbi_line { u32 is_second_field; u8 *p; u32 line; u32 type; } ; 61 struct v4l2_subdev_io_pin_config { u32 flags; u8 pin; u8 function; u8 value; u8 strength; } ; 117 struct v4l2_subdev_core_ops { int (*log_status)(struct v4l2_subdev *); int (*s_io_pin_config)(struct v4l2_subdev *, size_t , struct v4l2_subdev_io_pin_config *); int (*init)(struct v4l2_subdev *, u32 ); int (*load_fw)(struct v4l2_subdev *); int (*reset)(struct v4l2_subdev *, u32 ); int (*s_gpio)(struct v4l2_subdev *, u32 ); int (*queryctrl)(struct v4l2_subdev *, struct v4l2_queryctrl *); int (*g_ctrl)(struct v4l2_subdev *, struct v4l2_control *); int (*s_ctrl)(struct v4l2_subdev *, struct v4l2_control *); int (*g_ext_ctrls)(struct v4l2_subdev *, struct v4l2_ext_controls *); int (*s_ext_ctrls)(struct v4l2_subdev *, struct v4l2_ext_controls *); int (*try_ext_ctrls)(struct v4l2_subdev *, struct v4l2_ext_controls 
*); int (*querymenu)(struct v4l2_subdev *, struct v4l2_querymenu *); long int (*ioctl)(struct v4l2_subdev *, unsigned int, void *); long int (*compat_ioctl32)(struct v4l2_subdev *, unsigned int, unsigned long); int (*g_register)(struct v4l2_subdev *, struct v4l2_dbg_register *); int (*s_register)(struct v4l2_subdev *, const struct v4l2_dbg_register *); int (*s_power)(struct v4l2_subdev *, int); int (*interrupt_service_routine)(struct v4l2_subdev *, u32 , bool *); int (*subscribe_event)(struct v4l2_subdev *, struct v4l2_fh *, struct v4l2_event_subscription *); int (*unsubscribe_event)(struct v4l2_subdev *, struct v4l2_fh *, struct v4l2_event_subscription *); } ; 178 struct v4l2_subdev_tuner_ops { int (*s_radio)(struct v4l2_subdev *); int (*s_frequency)(struct v4l2_subdev *, const struct v4l2_frequency *); int (*g_frequency)(struct v4l2_subdev *, struct v4l2_frequency *); int (*enum_freq_bands)(struct v4l2_subdev *, struct v4l2_frequency_band *); int (*g_tuner)(struct v4l2_subdev *, struct v4l2_tuner *); int (*s_tuner)(struct v4l2_subdev *, const struct v4l2_tuner *); int (*g_modulator)(struct v4l2_subdev *, struct v4l2_modulator *); int (*s_modulator)(struct v4l2_subdev *, const struct v4l2_modulator *); int (*s_type_addr)(struct v4l2_subdev *, struct tuner_setup *); int (*s_config)(struct v4l2_subdev *, const struct v4l2_priv_tun_config *); } ; 205 struct v4l2_subdev_audio_ops { int (*s_clock_freq)(struct v4l2_subdev *, u32 ); int (*s_i2s_clock_freq)(struct v4l2_subdev *, u32 ); int (*s_routing)(struct v4l2_subdev *, u32 , u32 , u32 ); int (*s_stream)(struct v4l2_subdev *, int); } ; 232 struct v4l2_mbus_frame_desc_entry { u16 flags; u32 pixelcode; u32 length; } ; 253 struct v4l2_mbus_frame_desc { struct v4l2_mbus_frame_desc_entry entry[4U]; unsigned short num_entries; } ; 265 struct v4l2_subdev_video_ops { int (*s_routing)(struct v4l2_subdev *, u32 , u32 , u32 ); int (*s_crystal_freq)(struct v4l2_subdev *, u32 , u32 ); int (*g_std)(struct v4l2_subdev *, v4l2_std_id 
*); int (*s_std)(struct v4l2_subdev *, v4l2_std_id ); int (*s_std_output)(struct v4l2_subdev *, v4l2_std_id ); int (*g_std_output)(struct v4l2_subdev *, v4l2_std_id *); int (*querystd)(struct v4l2_subdev *, v4l2_std_id *); int (*g_tvnorms)(struct v4l2_subdev *, v4l2_std_id *); int (*g_tvnorms_output)(struct v4l2_subdev *, v4l2_std_id *); int (*g_input_status)(struct v4l2_subdev *, u32 *); int (*s_stream)(struct v4l2_subdev *, int); int (*cropcap)(struct v4l2_subdev *, struct v4l2_cropcap *); int (*g_crop)(struct v4l2_subdev *, struct v4l2_crop *); int (*s_crop)(struct v4l2_subdev *, const struct v4l2_crop *); int (*g_parm)(struct v4l2_subdev *, struct v4l2_streamparm *); int (*s_parm)(struct v4l2_subdev *, struct v4l2_streamparm *); int (*g_frame_interval)(struct v4l2_subdev *, struct v4l2_subdev_frame_interval *); int (*s_frame_interval)(struct v4l2_subdev *, struct v4l2_subdev_frame_interval *); int (*enum_framesizes)(struct v4l2_subdev *, struct v4l2_frmsizeenum *); int (*enum_frameintervals)(struct v4l2_subdev *, struct v4l2_frmivalenum *); int (*s_dv_timings)(struct v4l2_subdev *, struct v4l2_dv_timings *); int (*g_dv_timings)(struct v4l2_subdev *, struct v4l2_dv_timings *); int (*query_dv_timings)(struct v4l2_subdev *, struct v4l2_dv_timings *); int (*enum_mbus_fmt)(struct v4l2_subdev *, unsigned int, enum v4l2_mbus_pixelcode *); int (*enum_mbus_fsizes)(struct v4l2_subdev *, struct v4l2_frmsizeenum *); int (*g_mbus_fmt)(struct v4l2_subdev *, struct v4l2_mbus_framefmt *); int (*try_mbus_fmt)(struct v4l2_subdev *, struct v4l2_mbus_framefmt *); int (*s_mbus_fmt)(struct v4l2_subdev *, struct v4l2_mbus_framefmt *); int (*g_mbus_config)(struct v4l2_subdev *, struct v4l2_mbus_config *); int (*s_mbus_config)(struct v4l2_subdev *, const struct v4l2_mbus_config *); int (*s_rx_buffer)(struct v4l2_subdev *, void *, unsigned int *); } ; 359 struct v4l2_subdev_vbi_ops { int (*decode_vbi_line)(struct v4l2_subdev *, struct v4l2_decode_vbi_line *); int (*s_vbi_data)(struct 
v4l2_subdev *, const struct v4l2_sliced_vbi_data *); int (*g_vbi_data)(struct v4l2_subdev *, struct v4l2_sliced_vbi_data *); int (*g_sliced_vbi_cap)(struct v4l2_subdev *, struct v4l2_sliced_vbi_cap *); int (*s_raw_fmt)(struct v4l2_subdev *, struct v4l2_vbi_format *); int (*g_sliced_fmt)(struct v4l2_subdev *, struct v4l2_sliced_vbi_format *); int (*s_sliced_fmt)(struct v4l2_subdev *, struct v4l2_sliced_vbi_format *); } ; 399 struct v4l2_subdev_sensor_ops { int (*g_skip_top_lines)(struct v4l2_subdev *, u32 *); int (*g_skip_frames)(struct v4l2_subdev *, u32 *); } ; 414 enum v4l2_subdev_ir_mode { V4L2_SUBDEV_IR_MODE_PULSE_WIDTH = 0 } ; 418 struct v4l2_subdev_ir_parameters { unsigned int bytes_per_data_element; enum v4l2_subdev_ir_mode mode; bool enable; bool interrupt_enable; bool shutdown; bool modulation; u32 max_pulse_width; unsigned int carrier_freq; unsigned int duty_cycle; bool invert_level; bool invert_carrier_sense; u32 noise_filter_min_width; unsigned int carrier_range_lower; unsigned int carrier_range_upper; u32 resolution; } ; 466 struct v4l2_subdev_ir_ops { int (*rx_read)(struct v4l2_subdev *, u8 *, size_t , ssize_t *); int (*rx_g_parameters)(struct v4l2_subdev *, struct v4l2_subdev_ir_parameters *); int (*rx_s_parameters)(struct v4l2_subdev *, struct v4l2_subdev_ir_parameters *); int (*tx_write)(struct v4l2_subdev *, u8 *, size_t , ssize_t *); int (*tx_g_parameters)(struct v4l2_subdev *, struct v4l2_subdev_ir_parameters *); int (*tx_s_parameters)(struct v4l2_subdev *, struct v4l2_subdev_ir_parameters *); } ; 485 struct v4l2_subdev_pad_ops { int (*enum_mbus_code)(struct v4l2_subdev *, struct v4l2_subdev_fh *, struct v4l2_subdev_mbus_code_enum *); int (*enum_frame_size)(struct v4l2_subdev *, struct v4l2_subdev_fh *, struct v4l2_subdev_frame_size_enum *); int (*enum_frame_interval)(struct v4l2_subdev *, struct v4l2_subdev_fh *, struct v4l2_subdev_frame_interval_enum *); int (*get_fmt)(struct v4l2_subdev *, struct v4l2_subdev_fh *, struct v4l2_subdev_format 
*); int (*set_fmt)(struct v4l2_subdev *, struct v4l2_subdev_fh *, struct v4l2_subdev_format *); int (*set_crop)(struct v4l2_subdev *, struct v4l2_subdev_fh *, struct v4l2_subdev_crop *); int (*get_crop)(struct v4l2_subdev *, struct v4l2_subdev_fh *, struct v4l2_subdev_crop *); int (*get_selection)(struct v4l2_subdev *, struct v4l2_subdev_fh *, struct v4l2_subdev_selection *); int (*set_selection)(struct v4l2_subdev *, struct v4l2_subdev_fh *, struct v4l2_subdev_selection *); int (*get_edid)(struct v4l2_subdev *, struct v4l2_edid *); int (*set_edid)(struct v4l2_subdev *, struct v4l2_edid *); int (*dv_timings_cap)(struct v4l2_subdev *, struct v4l2_dv_timings_cap *); int (*enum_dv_timings)(struct v4l2_subdev *, struct v4l2_enum_dv_timings *); int (*link_validate)(struct v4l2_subdev *, struct media_link *, struct v4l2_subdev_format *, struct v4l2_subdev_format *); int (*get_frame_desc)(struct v4l2_subdev *, unsigned int, struct v4l2_mbus_frame_desc *); int (*set_frame_desc)(struct v4l2_subdev *, unsigned int, struct v4l2_mbus_frame_desc *); } ; 529 struct v4l2_subdev_ops { const struct v4l2_subdev_core_ops *core; const struct v4l2_subdev_tuner_ops *tuner; const struct v4l2_subdev_audio_ops *audio; const struct v4l2_subdev_video_ops *video; const struct v4l2_subdev_vbi_ops *vbi; const struct v4l2_subdev_ir_ops *ir; const struct v4l2_subdev_sensor_ops *sensor; const struct v4l2_subdev_pad_ops *pad; } ; 541 struct v4l2_subdev_internal_ops { int (*registered)(struct v4l2_subdev *); void (*unregistered)(struct v4l2_subdev *); int (*open)(struct v4l2_subdev *, struct v4l2_subdev_fh *); int (*close)(struct v4l2_subdev *, struct v4l2_subdev_fh *); } ; 562 struct regulator_bulk_data ; 563 struct v4l2_subdev_platform_data { struct regulator_bulk_data *regulators; int num_regulators; void *host_priv; } ; 584 struct v4l2_subdev { struct media_entity entity; struct list_head list; struct module *owner; bool owner_v4l2_dev; u32 flags; struct v4l2_device *v4l2_dev; const struct 
v4l2_subdev_ops *ops; const struct v4l2_subdev_internal_ops *internal_ops; struct v4l2_ctrl_handler *ctrl_handler; char name[32U]; u32 grp_id; void *dev_priv; void *host_priv; struct video_device *devnode; struct device *dev; struct list_head async_list; struct v4l2_async_subdev *asd; struct v4l2_async_notifier *notifier; struct v4l2_subdev_platform_data *pdata; } ; 622 struct __anonstruct_pad_236 { struct v4l2_mbus_framefmt try_fmt; struct v4l2_rect try_crop; struct v4l2_rect try_compose; } ; 622 struct v4l2_subdev_fh { struct v4l2_fh vfh; struct __anonstruct_pad_236 *pad; } ; 691 struct v4l2_device { struct device *dev; struct media_device *mdev; struct list_head subdevs; spinlock_t lock; char name[36U]; void (*notify)(struct v4l2_subdev *, unsigned int, void *); struct v4l2_ctrl_handler *ctrl_handler; struct v4l2_prio_state prio; struct mutex ioctl_lock; struct kref ref; void (*release)(struct v4l2_device *); } ; 87 struct dma_attrs { unsigned long flags[1U]; } ; 70 enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ; 77 struct dma_map_ops { void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , struct dma_attrs *); void (*free)(struct device *, size_t , void *, dma_addr_t , struct dma_attrs *); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , struct dma_attrs *); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , struct dma_attrs *); dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , struct dma_attrs *); void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs *); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *); void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t 
, enum dma_data_direction ); void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction ); int (*mapping_error)(struct device *, dma_addr_t ); int (*dma_supported)(struct device *, u64 ); int (*set_dma_mask)(struct device *, u64 ); int is_phys; } ; 249 struct dma_buf ; 250 struct dma_buf_attachment ; 251 struct dma_buf_ops { int (*attach)(struct dma_buf *, struct device *, struct dma_buf_attachment *); void (*detach)(struct dma_buf *, struct dma_buf_attachment *); struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *, enum dma_data_direction ); void (*unmap_dma_buf)(struct dma_buf_attachment *, struct sg_table *, enum dma_data_direction ); void (*release)(struct dma_buf *); int (*begin_cpu_access)(struct dma_buf *, size_t , size_t , enum dma_data_direction ); void (*end_cpu_access)(struct dma_buf *, size_t , size_t , enum dma_data_direction ); void * (*kmap_atomic)(struct dma_buf *, unsigned long); void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *); void * (*kmap)(struct dma_buf *, unsigned long); void (*kunmap)(struct dma_buf *, unsigned long, void *); int (*mmap)(struct dma_buf *, struct vm_area_struct *); void * (*vmap)(struct dma_buf *); void (*vunmap)(struct dma_buf *, void *); } ; 108 struct dma_buf { size_t size; struct file *file; struct list_head attachments; const struct dma_buf_ops *ops; struct mutex lock; unsigned int vmapping_counter; void *vmap_ptr; const char *exp_name; struct list_head list_node; void *priv; } ; 132 struct dma_buf_attachment { struct dma_buf *dmabuf; struct device *dev; struct list_head node; void *priv; } ; 199 struct vb2_fileio_data ; 200 struct vb2_threadio_data ; 201 struct vb2_mem_ops { void * (*alloc)(void *, unsigned long, gfp_t ); void (*put)(void *); struct dma_buf * 
(*get_dmabuf)(void *, unsigned long); void * (*get_userptr)(void *, unsigned long, unsigned long, int); void (*put_userptr)(void *); void (*prepare)(void *); void (*finish)(void *); void * (*attach_dmabuf)(void *, struct dma_buf *, unsigned long, int); void (*detach_dmabuf)(void *); int (*map_dmabuf)(void *); void (*unmap_dmabuf)(void *); void * (*vaddr)(void *); void * (*cookie)(void *); unsigned int (*num_users)(void *); int (*mmap)(void *, struct vm_area_struct *); } ; 109 struct vb2_plane { void *mem_priv; struct dma_buf *dbuf; unsigned int dbuf_mapped; } ; 128 enum vb2_buffer_state { VB2_BUF_STATE_DEQUEUED = 0, VB2_BUF_STATE_PREPARING = 1, VB2_BUF_STATE_PREPARED = 2, VB2_BUF_STATE_QUEUED = 3, VB2_BUF_STATE_ACTIVE = 4, VB2_BUF_STATE_DONE = 5, VB2_BUF_STATE_ERROR = 6 } ; 138 struct vb2_buffer { struct v4l2_buffer v4l2_buf; struct v4l2_plane v4l2_planes[8U]; struct vb2_queue *vb2_queue; unsigned int num_planes; enum vb2_buffer_state state; struct list_head queued_entry; struct list_head done_entry; struct vb2_plane planes[8U]; u32 cnt_mem_alloc; u32 cnt_mem_put; u32 cnt_mem_get_dmabuf; u32 cnt_mem_get_userptr; u32 cnt_mem_put_userptr; u32 cnt_mem_prepare; u32 cnt_mem_finish; u32 cnt_mem_attach_dmabuf; u32 cnt_mem_detach_dmabuf; u32 cnt_mem_map_dmabuf; u32 cnt_mem_unmap_dmabuf; u32 cnt_mem_vaddr; u32 cnt_mem_cookie; u32 cnt_mem_num_users; u32 cnt_mem_mmap; u32 cnt_buf_init; u32 cnt_buf_prepare; u32 cnt_buf_finish; u32 cnt_buf_cleanup; u32 cnt_buf_queue; u32 cnt_buf_done; } ; 238 struct vb2_ops { int (*queue_setup)(struct vb2_queue *, const struct v4l2_format *, unsigned int *, unsigned int *, unsigned int *, void **); void (*wait_prepare)(struct vb2_queue *); void (*wait_finish)(struct vb2_queue *); int (*buf_init)(struct vb2_buffer *); int (*buf_prepare)(struct vb2_buffer *); void (*buf_finish)(struct vb2_buffer *); void (*buf_cleanup)(struct vb2_buffer *); int (*start_streaming)(struct vb2_queue *, unsigned int); void (*stop_streaming)(struct vb2_queue *); void 
(*buf_queue)(struct vb2_buffer *); } ; 331 struct vb2_queue { enum v4l2_buf_type type; unsigned int io_modes; unsigned int io_flags; struct mutex *lock; struct v4l2_fh *owner; const struct vb2_ops *ops; const struct vb2_mem_ops *mem_ops; void *drv_priv; unsigned int buf_struct_size; u32 timestamp_flags; gfp_t gfp_flags; u32 min_buffers_needed; enum v4l2_memory memory; struct vb2_buffer *bufs[32U]; unsigned int num_buffers; struct list_head queued_list; unsigned int queued_count; atomic_t owned_by_drv_count; struct list_head done_list; spinlock_t done_lock; wait_queue_head_t done_wq; void *alloc_ctx[8U]; unsigned int plane_sizes[8U]; unsigned char streaming; unsigned char start_streaming_called; struct vb2_fileio_data *fileio; struct vb2_threadio_data *threadio; u32 cnt_queue_setup; u32 cnt_wait_prepare; u32 cnt_wait_finish; u32 cnt_start_streaming; u32 cnt_stop_streaming; } ; 62 enum ldv_25189 { USBTV_COMPOSITE_INPUT = 0, USBTV_SVIDEO_INPUT = 1 } ; 67 struct usbtv { struct device *dev; struct usb_device *udev; struct v4l2_device v4l2_dev; struct video_device vdev; struct vb2_queue vb2q; struct mutex v4l2_lock; struct mutex vb2q_lock; spinlock_t buflock; struct list_head bufs; u32 frame_id; int chunks_done; enum ldv_25189 input; v4l2_std_id norm; int width; int height; int n_chunks; int iso_size; unsigned int sequence; struct urb *isoc_urbs[16U]; } ; 312 typedef struct usb_device *ldv_func_ret_type; 323 typedef struct usb_device *ldv_func_ret_type___0; 40 typedef struct poll_table_struct poll_table; 221 struct v4l2_capability { __u8 driver[16U]; __u8 card[32U]; __u8 bus_info[32U]; __u32 version; __u32 capabilities; __u32 device_caps; __u32 reserved[3U]; } ; 291 struct v4l2_fmtdesc { __u32 index; __u32 type; __u32 flags; __u8 description[32U]; __u32 pixelformat; __u32 reserved[4U]; } ; 539 struct v4l2_jpegcompression { int quality; int APPn; int APP_len; char APP_data[60U]; int COM_len; char COM_data[60U]; __u32 jpeg_markers; } ; 568 struct v4l2_requestbuffers { 
__u32 count; __u32 type; __u32 memory; __u32 reserved[2U]; } ; 678 struct v4l2_exportbuffer { __u32 type; __u32 index; __u32 plane; __u32 flags; __s32 fd; __u32 reserved[11U]; } ; 737 struct v4l2_framebuffer { __u32 capability; __u32 flags; void *base; struct v4l2_pix_format fmt; } ; 821 struct v4l2_selection { __u32 type; __u32 target; __u32 flags; struct v4l2_rect r; __u32 reserved[9U]; } ; 1168 struct v4l2_input { __u32 index; __u8 name[32U]; __u32 type; __u32 audioset; __u32 tuner; v4l2_std_id std; __u32 status; __u32 capabilities; __u32 reserved[3U]; } ; 1184 struct v4l2_output { __u32 index; __u8 name[32U]; __u32 type; __u32 audioset; __u32 modulator; v4l2_std_id std; __u32 capabilities; __u32 reserved[3U]; } ; 1407 struct v4l2_hw_freq_seek { __u32 tuner; __u32 type; __u32 seek_upward; __u32 wrap_around; __u32 spacing; __u32 rangelow; __u32 rangehigh; __u32 reserved[5U]; } ; 1428 struct v4l2_audio { __u32 index; __u8 name[32U]; __u32 capability; __u32 mode; __u32 reserved[2U]; } ; 1450 struct v4l2_audioout { __u32 index; __u8 name[32U]; __u32 capability; __u32 mode; __u32 reserved[2U]; } ; 1465 struct v4l2_enc_idx_entry { __u64 offset; __u64 pts; __u32 length; __u32 flags; __u32 reserved[2U]; } ; 1484 struct v4l2_enc_idx { __u32 entries; __u32 entries_cap; __u32 reserved[4U]; struct v4l2_enc_idx_entry entry[64U]; } ; 1492 struct __anonstruct_raw_208 { __u32 data[8U]; } ; 1492 union __anonunion_ldv_27397_207 { struct __anonstruct_raw_208 raw; } ; 1492 struct v4l2_encoder_cmd { __u32 cmd; __u32 flags; union __anonunion_ldv_27397_207 ldv_27397; } ; 1511 struct __anonstruct_stop_210 { __u64 pts; } ; 1511 struct __anonstruct_start_211 { __s32 speed; __u32 format; } ; 1511 struct __anonstruct_raw_212 { __u32 data[16U]; } ; 1511 union __anonunion_ldv_27412_209 { struct __anonstruct_stop_210 stop; struct __anonstruct_start_211 start; struct __anonstruct_raw_212 raw; } ; 1511 struct v4l2_decoder_cmd { __u32 cmd; __u32 flags; union __anonunion_ldv_27412_209 ldv_27412; 
} ; 1869 struct v4l2_dbg_chip_info { struct v4l2_dbg_match match; char name[32U]; __u32 flags; __u32 reserved[32U]; } ; 1880 struct v4l2_create_buffers { __u32 index; __u32 count; __u32 memory; struct v4l2_format format; __u32 reserved[8U]; } ; 1898 struct v4l2_ioctl_ops { int (*vidioc_querycap)(struct file *, void *, struct v4l2_capability *); int (*vidioc_g_priority)(struct file *, void *, enum v4l2_priority *); int (*vidioc_s_priority)(struct file *, void *, enum v4l2_priority ); int (*vidioc_enum_fmt_vid_cap)(struct file *, void *, struct v4l2_fmtdesc *); int (*vidioc_enum_fmt_vid_overlay)(struct file *, void *, struct v4l2_fmtdesc *); int (*vidioc_enum_fmt_vid_out)(struct file *, void *, struct v4l2_fmtdesc *); int (*vidioc_enum_fmt_vid_cap_mplane)(struct file *, void *, struct v4l2_fmtdesc *); int (*vidioc_enum_fmt_vid_out_mplane)(struct file *, void *, struct v4l2_fmtdesc *); int (*vidioc_enum_fmt_sdr_cap)(struct file *, void *, struct v4l2_fmtdesc *); int (*vidioc_g_fmt_vid_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_vid_overlay)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_vid_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_vid_out_overlay)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_vbi_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_vbi_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_sliced_vbi_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_sliced_vbi_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_vid_cap_mplane)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_vid_out_mplane)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_sdr_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_vid_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_vid_overlay)(struct file *, void *, struct v4l2_format *); int 
(*vidioc_s_fmt_vid_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_vid_out_overlay)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_vbi_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_vbi_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_sliced_vbi_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_sliced_vbi_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_vid_cap_mplane)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_vid_out_mplane)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_sdr_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_vid_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_vid_overlay)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_vid_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_vid_out_overlay)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_vbi_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_vbi_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_sliced_vbi_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_sliced_vbi_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_vid_cap_mplane)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_vid_out_mplane)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_sdr_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_reqbufs)(struct file *, void *, struct v4l2_requestbuffers *); int (*vidioc_querybuf)(struct file *, void *, struct v4l2_buffer *); int (*vidioc_qbuf)(struct file *, void *, struct v4l2_buffer *); int (*vidioc_expbuf)(struct file *, void *, struct v4l2_exportbuffer *); int (*vidioc_dqbuf)(struct file *, void *, struct v4l2_buffer *); int (*vidioc_create_bufs)(struct file *, void *, struct v4l2_create_buffers 
*); int (*vidioc_prepare_buf)(struct file *, void *, struct v4l2_buffer *); int (*vidioc_overlay)(struct file *, void *, unsigned int); int (*vidioc_g_fbuf)(struct file *, void *, struct v4l2_framebuffer *); int (*vidioc_s_fbuf)(struct file *, void *, const struct v4l2_framebuffer *); int (*vidioc_streamon)(struct file *, void *, enum v4l2_buf_type ); int (*vidioc_streamoff)(struct file *, void *, enum v4l2_buf_type ); int (*vidioc_g_std)(struct file *, void *, v4l2_std_id *); int (*vidioc_s_std)(struct file *, void *, v4l2_std_id ); int (*vidioc_querystd)(struct file *, void *, v4l2_std_id *); int (*vidioc_enum_input)(struct file *, void *, struct v4l2_input *); int (*vidioc_g_input)(struct file *, void *, unsigned int *); int (*vidioc_s_input)(struct file *, void *, unsigned int); int (*vidioc_enum_output)(struct file *, void *, struct v4l2_output *); int (*vidioc_g_output)(struct file *, void *, unsigned int *); int (*vidioc_s_output)(struct file *, void *, unsigned int); int (*vidioc_queryctrl)(struct file *, void *, struct v4l2_queryctrl *); int (*vidioc_g_ctrl)(struct file *, void *, struct v4l2_control *); int (*vidioc_s_ctrl)(struct file *, void *, struct v4l2_control *); int (*vidioc_g_ext_ctrls)(struct file *, void *, struct v4l2_ext_controls *); int (*vidioc_s_ext_ctrls)(struct file *, void *, struct v4l2_ext_controls *); int (*vidioc_try_ext_ctrls)(struct file *, void *, struct v4l2_ext_controls *); int (*vidioc_querymenu)(struct file *, void *, struct v4l2_querymenu *); int (*vidioc_enumaudio)(struct file *, void *, struct v4l2_audio *); int (*vidioc_g_audio)(struct file *, void *, struct v4l2_audio *); int (*vidioc_s_audio)(struct file *, void *, const struct v4l2_audio *); int (*vidioc_enumaudout)(struct file *, void *, struct v4l2_audioout *); int (*vidioc_g_audout)(struct file *, void *, struct v4l2_audioout *); int (*vidioc_s_audout)(struct file *, void *, const struct v4l2_audioout *); int (*vidioc_g_modulator)(struct file *, void *, struct 
v4l2_modulator *); int (*vidioc_s_modulator)(struct file *, void *, const struct v4l2_modulator *); int (*vidioc_cropcap)(struct file *, void *, struct v4l2_cropcap *); int (*vidioc_g_crop)(struct file *, void *, struct v4l2_crop *); int (*vidioc_s_crop)(struct file *, void *, const struct v4l2_crop *); int (*vidioc_g_selection)(struct file *, void *, struct v4l2_selection *); int (*vidioc_s_selection)(struct file *, void *, struct v4l2_selection *); int (*vidioc_g_jpegcomp)(struct file *, void *, struct v4l2_jpegcompression *); int (*vidioc_s_jpegcomp)(struct file *, void *, const struct v4l2_jpegcompression *); int (*vidioc_g_enc_index)(struct file *, void *, struct v4l2_enc_idx *); int (*vidioc_encoder_cmd)(struct file *, void *, struct v4l2_encoder_cmd *); int (*vidioc_try_encoder_cmd)(struct file *, void *, struct v4l2_encoder_cmd *); int (*vidioc_decoder_cmd)(struct file *, void *, struct v4l2_decoder_cmd *); int (*vidioc_try_decoder_cmd)(struct file *, void *, struct v4l2_decoder_cmd *); int (*vidioc_g_parm)(struct file *, void *, struct v4l2_streamparm *); int (*vidioc_s_parm)(struct file *, void *, struct v4l2_streamparm *); int (*vidioc_g_tuner)(struct file *, void *, struct v4l2_tuner *); int (*vidioc_s_tuner)(struct file *, void *, const struct v4l2_tuner *); int (*vidioc_g_frequency)(struct file *, void *, struct v4l2_frequency *); int (*vidioc_s_frequency)(struct file *, void *, const struct v4l2_frequency *); int (*vidioc_enum_freq_bands)(struct file *, void *, struct v4l2_frequency_band *); int (*vidioc_g_sliced_vbi_cap)(struct file *, void *, struct v4l2_sliced_vbi_cap *); int (*vidioc_log_status)(struct file *, void *); int (*vidioc_s_hw_freq_seek)(struct file *, void *, const struct v4l2_hw_freq_seek *); int (*vidioc_g_register)(struct file *, void *, struct v4l2_dbg_register *); int (*vidioc_s_register)(struct file *, void *, const struct v4l2_dbg_register *); int (*vidioc_g_chip_info)(struct file *, void *, struct v4l2_dbg_chip_info *); int 
(*vidioc_enum_framesizes)(struct file *, void *, struct v4l2_frmsizeenum *); int (*vidioc_enum_frameintervals)(struct file *, void *, struct v4l2_frmivalenum *); int (*vidioc_s_dv_timings)(struct file *, void *, struct v4l2_dv_timings *); int (*vidioc_g_dv_timings)(struct file *, void *, struct v4l2_dv_timings *); int (*vidioc_query_dv_timings)(struct file *, void *, struct v4l2_dv_timings *); int (*vidioc_enum_dv_timings)(struct file *, void *, struct v4l2_enum_dv_timings *); int (*vidioc_dv_timings_cap)(struct file *, void *, struct v4l2_dv_timings_cap *); int (*vidioc_g_edid)(struct file *, void *, struct v4l2_edid *); int (*vidioc_s_edid)(struct file *, void *, struct v4l2_edid *); int (*vidioc_subscribe_event)(struct v4l2_fh *, const struct v4l2_event_subscription *); int (*vidioc_unsubscribe_event)(struct v4l2_fh *, const struct v4l2_event_subscription *); long int (*vidioc_default)(struct file *, void *, bool , unsigned int, void *); } ; 19 struct usbtv_norm_params { v4l2_std_id norm; int cap_width; int cap_height; } ; 56 struct usbtv_buf { struct vb2_buffer vb; struct list_head list; } ; 1 long int __builtin_expect(long exp, long c); 71 void warn_slowpath_null(const char *, const int); 15 void __xadd_wrong_size(); 155 int atomic_add_return(int i, atomic_t *v); 41 void kref_get(struct kref *kref); 603 int usb_endpoint_maxp(const struct usb_endpoint_descriptor *epd); 837 void * dev_get_drvdata(const struct device *dev); 842 void dev_set_drvdata(struct device *dev, void *data); 1048 int _dev_info(const struct device *, const char *, ...); 194 void * usb_get_intfdata(struct usb_interface *intf); 199 void usb_set_intfdata(struct usb_interface *intf, void *data); 595 struct usb_device * ldv_interface_to_usbdev_5(struct usb_interface *intf); 595 struct usb_device * interface_to_usbdev(struct usb_interface *intf); 605 struct usb_device * usb_get_dev(struct usb_device *); 607 struct usb_device * ldv_usb_get_dev_6(struct usb_device *ldv_func_arg1); 609 void 
usb_put_dev(struct usb_device *); 612 void ldv_usb_put_dev_7(struct usb_device *ldv_func_arg1); 1656 int usb_control_msg(struct usb_device *, unsigned int, __u8 , __u8 , __u16 , __u16 , void *, __u16 , int); 1784 unsigned int __create_pipe(struct usb_device *dev, unsigned int endpoint); 14 void ldv_interface_to_usbdev(); 15 void ldv_usb_get_dev(); 16 void ldv_usb_put_dev(); 144 void kfree(const void *); 315 void * __kmalloc(size_t , gfp_t ); 445 void * kmalloc(size_t size, gfp_t flags); 637 void * kzalloc(size_t size, gfp_t flags); 69 void v4l2_device_get(struct v4l2_device *v4l2_dev); 74 int v4l2_device_put(struct v4l2_device *); 96 int usbtv_set_regs(struct usbtv *usbtv, const u16 (*regs)[2], int size); 98 int usbtv_video_init(struct usbtv *usbtv); 99 void usbtv_video_free(struct usbtv *usbtv); 70 int usbtv_probe(struct usb_interface *intf, const struct usb_device_id *id); 116 void usbtv_disconnect(struct usb_interface *intf); 138 const struct usb_device_id __mod_usb__usbtv_id_table_device_table = { }; 169 void ldv_check_final_state(); 172 void ldv_check_return_value(int); 175 void ldv_check_return_value_probe(int retval); 178 void ldv_initialize(); 181 void ldv_handler_precall(); 184 int nondet_int(); 187 int LDV_IN_INTERRUPT = 0; 190 void ldv_main0_sequence_infinite_withcheck_stateful(); 1 void * __builtin_memcpy(void *, const void *, unsigned long); 33 extern struct module __this_module; 72 void set_bit(long nr, volatile unsigned long *addr); 7 __u32 __arch_swab32(__u32 val); 57 __u32 __fswab32(__u32 val); 391 int snprintf(char *, size_t , const char *, ...); 24 void INIT_LIST_HEAD(struct list_head *list); 47 void __list_add(struct list_head *, struct list_head *, struct list_head *); 74 void list_add_tail(struct list_head *new, struct list_head *head); 112 void list_del(struct list_head *); 186 int list_empty(const struct list_head *head); 34 void * __memcpy(void *, const void *, size_t ); 26 size_t strlcpy(char *, const char *, size_t ); 93 void 
__raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *); 32 unsigned long int _raw_spin_lock_irqsave(raw_spinlock_t *); 43 void _raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long); 290 raw_spinlock_t * spinlock_check(spinlock_t *lock); 356 void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags); 120 void __mutex_init(struct mutex *, const char *, struct lock_class_key *); 139 void mutex_lock_nested(struct mutex *, unsigned int); 175 void mutex_unlock(struct mutex *); 1044 int dev_warn(const struct device *, const char *, ...); 791 int usb_make_path(struct usb_device *dev, char *buf, size_t size); 1581 struct urb * usb_alloc_urb(int, gfp_t ); 1582 void usb_free_urb(struct urb *); 1585 int usb_submit_urb(struct urb *, gfp_t ); 1587 void usb_kill_urb(struct urb *); 1676 int usb_set_interface(struct usb_device *, int, int); 328 long int video_ioctl2(struct file *, unsigned int, unsigned long); 431 void * vb2_plane_vaddr(struct vb2_buffer *, unsigned int); 434 void vb2_buffer_done(struct vb2_buffer *, enum vb2_buffer_state ); 444 int vb2_queue_init(struct vb2_queue *); 446 void vb2_queue_release(struct vb2_queue *); 539 void * vb2_get_drv_priv(struct vb2_queue *q); 550 void vb2_set_plane_payload(struct vb2_buffer *vb, unsigned int plane_no, unsigned long size); 577 unsigned long int vb2_plane_size(struct vb2_buffer *vb, unsigned int plane_no); 594 int vb2_ioctl_reqbufs(struct file *, void *, struct v4l2_requestbuffers *); 596 int vb2_ioctl_create_bufs(struct file *, void *, struct v4l2_create_buffers *); 598 int vb2_ioctl_prepare_buf(struct file *, void *, struct v4l2_buffer *); 600 int vb2_ioctl_querybuf(struct file *, void *, struct v4l2_buffer *); 601 int vb2_ioctl_qbuf(struct file *, void *, struct v4l2_buffer *); 602 int vb2_ioctl_dqbuf(struct file *, void *, struct v4l2_buffer *); 603 int vb2_ioctl_streamon(struct file *, void *, enum v4l2_buf_type ); 604 int vb2_ioctl_streamoff(struct file *, void *, enum v4l2_buf_type ); 
610 int vb2_fop_mmap(struct file *, struct vm_area_struct *); 611 int vb2_fop_release(struct file *); 615 ssize_t vb2_fop_read(struct file *, char *, size_t , loff_t *); 617 unsigned int vb2_fop_poll(struct file *, poll_table *); 151 int __video_register_device(struct video_device *, int, int, int, struct module *); 158 int video_register_device(struct video_device *vdev, int type, int nr); 174 void video_unregister_device(struct video_device *); 186 void video_device_release_empty(struct video_device *); 209 void * video_get_drvdata(struct video_device *vdev); 214 void video_set_drvdata(struct video_device *vdev, void *data); 219 struct video_device * video_devdata(struct file *); 223 void * video_drvdata(struct file *file); 205 void v4l2_get_timestamp(struct timeval *); 71 int v4l2_fh_open(struct file *); 79 int v4l2_device_register(struct device *, struct v4l2_device *); 103 void v4l2_device_disconnect(struct v4l2_device *); 106 void v4l2_device_unregister(struct v4l2_device *); 18 extern const struct vb2_mem_ops vb2_vmalloc_memops; 53 struct usbtv_norm_params norm_params[2U] = { { 63744ULL, 720, 480 }, { 255ULL, 720, 576 } }; 66 int usbtv_configure_for_norm(struct usbtv *usbtv, v4l2_std_id norm); 90 int usbtv_select_input(struct usbtv *usbtv, int input); 129 int usbtv_select_norm(struct usbtv *usbtv, v4l2_std_id norm); 180 int usbtv_setup_capture(struct usbtv *usbtv); 276 void usbtv_chunk_to_vbuf(u32 *frame, u32 *src, int chunk_no, int odd); 294 void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk); 355 void usbtv_iso_cb(struct urb *ip); 394 struct urb * usbtv_setup_iso_transfer(struct usbtv *usbtv); 422 void usbtv_stop(struct usbtv *usbtv); 449 int usbtv_start(struct usbtv *usbtv); 488 int usbtv_querycap(struct file *file, void *priv, struct v4l2_capability *cap); 502 int usbtv_enum_input(struct file *file, void *priv, struct v4l2_input *i); 523 int usbtv_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f); 535 int 
usbtv_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f); 551 int usbtv_g_std(struct file *file, void *priv, v4l2_std_id *norm); 558 int usbtv_s_std(struct file *file, void *priv, v4l2_std_id norm); 569 int usbtv_g_input(struct file *file, void *priv, unsigned int *i); 576 int usbtv_s_input(struct file *file, void *priv, unsigned int i); 582 struct v4l2_ioctl_ops usbtv_ioctl_ops = { &usbtv_querycap, 0, 0, &usbtv_enum_fmt_vid_cap, 0, 0, 0, 0, 0, &usbtv_fmt_vid_cap, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, &usbtv_fmt_vid_cap, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, &usbtv_fmt_vid_cap, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, &vb2_ioctl_reqbufs, &vb2_ioctl_querybuf, &vb2_ioctl_qbuf, 0, &vb2_ioctl_dqbuf, &vb2_ioctl_create_bufs, &vb2_ioctl_prepare_buf, 0, 0, 0, &vb2_ioctl_streamon, &vb2_ioctl_streamoff, &usbtv_g_std, &usbtv_s_std, 0, &usbtv_enum_input, &usbtv_g_input, &usbtv_s_input, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; 604 struct v4l2_file_operations usbtv_fops = { &__this_module, &vb2_fop_read, 0, &vb2_fop_poll, 0, &video_ioctl2, 0, 0, &vb2_fop_mmap, &v4l2_fh_open, &vb2_fop_release }; 614 int usbtv_queue_setup(struct vb2_queue *vq, const struct v4l2_format *v4l_fmt, unsigned int *nbuffers, unsigned int *nplanes, unsigned int *sizes, void **alloc_ctxs); 628 void usbtv_buf_queue(struct vb2_buffer *vb); 644 int usbtv_start_streaming(struct vb2_queue *vq, unsigned int count); 654 void usbtv_stop_streaming(struct vb2_queue *vq); 662 struct vb2_ops usbtv_vb2_ops = { &usbtv_queue_setup, 0, 0, 0, 0, 0, 0, &usbtv_start_streaming, &usbtv_stop_streaming, &usbtv_buf_queue }; 669 void usbtv_release(struct v4l2_device *v4l2_dev); 791 void ldv_main1_sequence_infinite_withcheck_stateful(); 10 void ldv_error(); 59 void __builtin_trap(); 7 bool ldv_is_err(const void *ptr); 14 void * ldv_err_ptr(long error); 21 long int ldv_ptr_err(const void *ptr); 28 bool 
ldv_is_err_or_null(const void *ptr); 19 int ldv_usb_dev_state = 0; return ; } { 192 struct usb_interface *var_group1; 193 const struct usb_device_id *var_usbtv_probe_0_p1; 194 int res_usbtv_probe_0; 195 int ldv_s_usbtv_usb_driver_usb_driver; 196 int tmp; 197 int tmp___0; 224 ldv_s_usbtv_usb_driver_usb_driver = 0; 214 LDV_IN_INTERRUPT = 1; 223 ldv_initialize() { /* Function call is skipped due to function is undefined */} 227 goto ldv_33715; 227 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */} 230 goto ldv_33714; 228 ldv_33714:; 231 tmp = nondet_int() { /* Function call is skipped due to function is undefined */} 231 switch (tmp) { 72 int ret; 73 int size; 74 struct device *dev; 75 struct usbtv *usbtv; 76 void *tmp; 77 struct usb_device *tmp___0; 75 dev = &(intf->dev); 86 -usb_endpoint_maxp((const struct usb_endpoint_descriptor *)(&(((intf->altsetting) + 1UL)->endpoint->desc))) 87 size = (size & 2047) * (((size & 6144) >> 11) + 1); { 639 void *tmp; { } 447 void *tmp___2; 462 tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */} } 90 usbtv = (struct usbtv *)tmp; 93 usbtv->dev = dev; { 312 ldv_func_ret_type ldv_func_res; 313 struct usb_device *tmp; 314 ldv_func_res = tmp; } { 323 ldv_func_ret_type___0 ldv_func_res; 324 struct usb_device *tmp; 325 tmp = usb_get_dev(ldv_func_arg1) { /* Function call is skipped due to function is undefined */} 325 ldv_func_res = tmp; } 96 usbtv->iso_size = size; { } { 680 int ret; 681 struct lock_class_key __key; 682 struct lock_class_key __key___0; 683 struct lock_class_key __key___1; { 68 int i; 69 int ret; 70 struct usbtv_norm_params *params; 68 ret = 0; 69 params = (struct usbtv_norm_params *)0; 71 i = 0; 71 goto ldv_34138; 73 goto ldv_34137; 72 ldv_34137:; 71 i = i + 1; 72 ldv_34138:; 73 goto ldv_34137; 72 ldv_34137:; 71 i = i + 1; 72 ldv_34138:; 77 ldv_34136:; 85 ret = -22; } 684 __raw_spin_lock_init(&(usbtv->buflock.ldv_6365.rlock), 
"&(&usbtv->buflock)->rlock", &__key) { /* Function call is skipped due to function is undefined */} 685 __mutex_init(&(usbtv->v4l2_lock), "&usbtv->v4l2_lock", &__key___0) { /* Function call is skipped due to function is undefined */} 686 __mutex_init(&(usbtv->vb2q_lock), "&usbtv->vb2q_lock", &__key___1) { /* Function call is skipped due to function is undefined */} 690 usbtv->vb2q.type = 1; 691 usbtv->vb2q.io_modes = 7U; 692 usbtv->vb2q.drv_priv = (void *)usbtv; 693 usbtv->vb2q.buf_struct_size = 944U; 694 usbtv->vb2q.ops = (const struct vb2_ops *)(&usbtv_vb2_ops); 695 usbtv->vb2q.mem_ops = &vb2_vmalloc_memops; 696 usbtv->vb2q.timestamp_flags = 8192U; 697 usbtv->vb2q.lock = &(usbtv->vb2q_lock); 698 ret = vb2_queue_init(&(usbtv->vb2q)) { /* Function call is skipped due to function is undefined */} 705 usbtv->v4l2_dev.release = &usbtv_release; 706 ret = v4l2_device_register(usbtv->dev, &(usbtv->v4l2_dev)) { /* Function call is skipped due to function is undefined */} 713 strlcpy((char *)(&(usbtv->vdev.name)), "usbtv", 32UL) { /* Function call is skipped due to function is undefined */} 714 usbtv->vdev.v4l2_dev = &(usbtv->v4l2_dev); 715 usbtv->vdev.release = &video_device_release_empty; 716 usbtv->vdev.fops = (const struct v4l2_file_operations *)(&usbtv_fops); 717 usbtv->vdev.ioctl_ops = (const struct v4l2_ioctl_ops *)(&usbtv_ioctl_ops); 718 usbtv->vdev.tvnorms = 63999ULL; 719 usbtv->vdev.queue = &(usbtv->vb2q); 720 usbtv->vdev.lock = &(usbtv->v4l2_lock); { 80 Ignored inline assembler code 82 return ;; } { } { 160 int tmp; 161 tmp = __video_register_device(vdev, type, nr, 1, vdev->fops->owner) { /* Function call is skipped due to function is undefined */} } 725 const struct device *__CPAchecker_TMP_2 = (const struct device *)(usbtv->dev); 725 dev_warn(__CPAchecker_TMP_2, "Could not register video device\n") { /* Function call is skipped due to function is undefined */} 726 goto vdev_fail; 730 vdev_fail:; 732 v4l2_device_unregister(&(usbtv->v4l2_dev)) { /* Function call 
is skipped due to function is undefined */} 733 v4l2_fail:; 734 vb2_queue_release(&(usbtv->vb2q)) { /* Function call is skipped due to function is undefined */} } 102 goto usbtv_video_fail; 109 usbtv_video_fail:; 111 kfree((const void *)usbtv) { /* Function call is skipped due to function is undefined */} } 242 ldv_check_return_value(res_usbtv_probe_0) { /* Function call is skipped due to function is undefined */} { }} | Source code
1 #ifndef _ASM_X86_BITOPS_H
2 #define _ASM_X86_BITOPS_H
3
4 /*
5 * Copyright 1992, Linus Torvalds.
6 *
7 * Note: inlines with more than a single statement should be marked
8 * __always_inline to avoid problems with older gcc's inlining heuristics.
9 */
10
11 #ifndef _LINUX_BITOPS_H
12 #error only <linux/bitops.h> can be included directly
13 #endif
14
15 #include <linux/compiler.h>
16 #include <asm/alternative.h>
17 #include <asm/rmwcc.h>
18 #include <asm/barrier.h>
19
/* log2(BITS_PER_LONG): shift used to find the word that contains bit @nr. */
#if BITS_PER_LONG == 32
# define _BITOPS_LONG_SHIFT 5
#elif BITS_PER_LONG == 64
# define _BITOPS_LONG_SHIFT 6
#else
# error "Unexpected BITS_PER_LONG"
#endif

#define BIT_64(n)			(U64_C(1) << (n))

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
#else
/* "+m": the asm both reads and writes the addressed memory. */
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
#endif

#define ADDR				BITOP_ADDR(addr)

/*
 * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */
#define IS_IMMEDIATE(nr)		(__builtin_constant_p(nr))
#define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr)			(1 << ((nr) & 7))
55
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void
set_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		/* Compile-time-constant bit: a locked byte-wide OR with an
		 * immediate mask is a shorter encoding than BTS. */
		asm volatile(LOCK_PREFIX "orb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr))
			: "memory");
	} else {
		/* Variable bit number: locked BTS handles any word offset. */
		asm volatile(LOCK_PREFIX "bts %1,%0"
			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
	}
}
84
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(long nr, volatile unsigned long *addr)
{
	/* Plain BTS, no LOCK prefix: the caller must provide exclusion. */
	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}
98
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static __always_inline void
clear_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		/* Constant bit: locked byte AND with the inverted mask. */
		asm volatile(LOCK_PREFIX "andb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)~CONST_MASK(nr)));
	} else {
		/* Variable bit: locked BTR (bit-test-and-reset). */
		asm volatile(LOCK_PREFIX "btr %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}
122
/**
 * clear_bit_unlock - Clears a bit in memory with release semantics
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	/* Compiler barrier keeps critical-section accesses from being
	 * reordered past the releasing clear; x86 stores are not
	 * reordered with earlier loads, so no hardware fence is needed. */
	barrier();
	clear_bit(nr, addr);
}
136
/* Non-atomic variant of clear_bit(): plain BTR without the LOCK prefix.
 * The caller must provide mutual exclusion. */
static inline void __clear_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}
141
/**
 * __clear_bit_unlock - Clears a bit in memory with release semantics
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation. It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	/* Compiler-only barrier: prevents the compiler from sinking
	 * critical-section code below the clear. */
	barrier();
	__clear_bit(nr, addr);
}
159
/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(long nr, volatile unsigned long *addr)
{
	/* Plain BTC (bit-test-and-complement), no LOCK prefix. */
	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}
173
/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		/* Constant bit: locked byte XOR with an immediate mask. */
		asm volatile(LOCK_PREFIX "xorb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr)));
	} else {
		/* Variable bit: locked BTC. */
		asm volatile(LOCK_PREFIX "btc %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}
195
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
{
	/* GEN_BINARY_RMWcc emits a locked BTS plus a setcc/branch on the
	 * carry flag ("c"), which BTS loads with the bit's old value.
	 * The macro expansion supplies the function's return. */
	GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
}
208
/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86: the locked instruction
 * already provides the acquire ordering a lock needs.
 */
static __always_inline int
test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}
221
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
{
	int oldbit;

	/* BTS puts the old bit in CF; SBB reg,reg materializes CF as
	 * 0 or -1, both of which are truthy-correct for the caller. */
	asm("bts %2,%1\n\t"
	    "sbb %0,%0"
	    : "=r" (oldbit), ADDR
	    : "Ir" (nr));
	return oldbit;
}
241
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	/* Locked BTR; the carry flag carries the bit's previous value. */
	GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
}
254
/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 *
 * Note: the operation is performed atomically with respect to
 * the local CPU, but not other CPUs. Portable code should not
 * rely on this behaviour.
 * KVM relies on this behaviour on x86 for modifying memory that is also
 * accessed from a hypervisor on the same CPU if running in a VM: don't change
 * this without also updating arch/x86/kernel/kvm.c
 */
static inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	int oldbit;

	/* Unlocked BTR; SBB turns the carry flag into 0 / -1. */
	asm volatile("btr %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr));
	return oldbit;
}
281
/* WARNING: non atomic and it can be reordered!
 * Toggle bit @nr at @addr and return its previous value (0 / non-zero). */
static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
{
	int oldbit;

	/* Unlocked BTC; SBB materializes the carry flag as 0 / -1. */
	asm volatile("btc %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr) : "memory");

	return oldbit;
}
294
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
{
	/* Locked BTC; the carry flag carries the bit's previous value. */
	GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
}
307
308 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
309 {
310 return ((1UL << (nr & (BITS_PER_LONG-1))) &
311 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
312 }
313
/* test_bit() implementation for a runtime-variable bit number: BT loads
 * the addressed bit into the carry flag, SBB spreads CF into 0 / -1. */
static inline int variable_test_bit(long nr, volatile const unsigned long *addr)
{
	int oldbit;

	asm volatile("bt %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr));

	return oldbit;
}
325
#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile unsigned long *addr);
#endif

/* Dispatch on whether @nr is a compile-time constant so that constant
 * bit tests compile down to a simple mask-and-compare. */
#define test_bit(nr, addr)			\
	(__builtin_constant_p((nr))		\
	 ? constant_test_bit((nr), (addr))	\
	 : variable_test_bit((nr), (addr)))
339
/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	/* BSF returns the index of the lowest set bit.  The REP prefix
	 * encodes TZCNT on CPUs that support it and is ignored elsewhere. */
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "rm" (word));
	return word;
}
353
/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	/* First zero of @word == first set bit of ~@word. */
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "r" (~word));
	return word;
}
367
/**
 * __fls - find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static inline unsigned long __fls(unsigned long word)
{
	/* BSR returns the index of the highest set bit. */
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}
381
382 #undef ADDR
383
384 #ifdef __KERNEL__
/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
static inline int ffs(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	/* Pre-load r with -1 ("0" constraint) so x==0 yields r+1 == 0. */
	asm("bsfl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	/* CMOVZ substitutes -1 when BSFL set ZF (i.e. x was 0). */
	asm("bsfl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "r" (-1));
#else
	/* Fallback: branch around an explicit -1 store when x == 0. */
	asm("bsfl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
425
/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static inline int fls(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	/* Pre-load r with -1 ("0" constraint) so x==0 yields r+1 == 0. */
	asm("bsrl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	/* CMOVZ substitutes -1 when BSRL set ZF (i.e. x was 0). */
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "rm" (-1));
#else
	/* Fallback: branch around an explicit -1 store when x == 0. */
	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
466
/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#ifdef CONFIG_X86_64
static __always_inline int fls64(__u64 x)
{
	/* Start at -1 so x==0 returns 0 after the +1 below. */
	int bitpos = -1;
	/*
	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before.
	 */
	asm("bsrq %1,%q0"
	    : "+r" (bitpos)
	    : "rm" (x));
	return bitpos + 1;
}
492 #else
493 #include <asm-generic/bitops/fls64.h>
494 #endif
495
496 #include <asm-generic/bitops/find.h>
497
498 #include <asm-generic/bitops/sched.h>
499
500 #define ARCH_HAS_FAST_MULTIPLIER 1
501
502 #include <asm/arch_hweight.h>
503
504 #include <asm-generic/bitops/const_hweight.h>
505
506 #include <asm-generic/bitops/le.h>
507
508 #include <asm-generic/bitops/ext2-atomic-setbit.h>
509
510 #endif /* __KERNEL__ */
511 #endif /* _ASM_X86_BITOPS_H */ 1
2 #include <linux/kernel.h>
3 bool ldv_is_err(const void *ptr);
4 bool ldv_is_err_or_null(const void *ptr);
5 void* ldv_err_ptr(long error);
6 long ldv_ptr_err(const void *ptr);
7
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10
11 #include <linux/usb.h>
12
13 // Provide model function prototypes before their usage.
14 void ldv_interface_to_usbdev(void);
15 void ldv_usb_get_dev(void);
16 void ldv_usb_put_dev(void);
17
18 /*
19 * Fushicai USBTV007 Video Grabber Driver
20 *
21 * Product web site:
22 * http://www.fushicai.com/products_detail/&productId=d05449ee-b690-42f9-a661-aa7353894bed.html
23 *
24 * Following LWN articles were very useful in construction of this driver:
25 * Video4Linux2 API series: http://lwn.net/Articles/203924/
26 * videobuf2 API explanation: http://lwn.net/Articles/447435/
27 * Thanks go to Jonathan Corbet for providing this quality documentation.
28 * He is awesome.
29 *
30 * Copyright (c) 2013 Lubomir Rintel
31 * All rights reserved.
32 * No physical hardware was harmed running Windows during the
33 * reverse-engineering activity
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions, and the following disclaimer,
40 * without modification.
41 * 2. The name of the author may not be used to endorse or promote products
42 * derived from this software without specific prior written permission.
43 *
44 * Alternatively, this software may be distributed under the terms of the
45 * GNU General Public License ("GPL").
46 */
47
48 #include "usbtv.h"
49
50 int usbtv_set_regs(struct usbtv *usbtv, const u16 regs[][2], int size)
51 {
52 int ret;
53 int pipe = usb_rcvctrlpipe(usbtv->udev, 0);
54 int i;
55
56 for (i = 0; i < size; i++) {
57 u16 index = regs[i][0];
58 u16 value = regs[i][1];
59
60 ret = usb_control_msg(usbtv->udev, pipe, USBTV_REQUEST_REG,
61 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
62 value, index, NULL, 0, 0);
63 if (ret < 0)
64 return ret;
65 }
66
67 return 0;
68 }
69
70 static int usbtv_probe(struct usb_interface *intf,
71 const struct usb_device_id *id)
72 {
73 int ret;
74 int size;
75 struct device *dev = &intf->dev;
76 struct usbtv *usbtv;
77
78 /* Checks that the device is what we think it is. */
79 if (intf->num_altsetting != 2)
80 return -ENODEV;
81 if (intf->altsetting[1].desc.bNumEndpoints != 4)
82 return -ENODEV;
83
84 /* Packet size is split into 11 bits of base size and count of
85 * extra multiplies of it.*/
86 size = usb_endpoint_maxp(&intf->altsetting[1].endpoint[0].desc);
87 size = (size & 0x07ff) * (((size & 0x1800) >> 11) + 1);
88
89 /* Device structure */
90 usbtv = kzalloc(sizeof(struct usbtv), GFP_KERNEL);
91 if (usbtv == NULL)
92 return -ENOMEM;
93 usbtv->dev = dev;
94 usbtv->udev = usb_get_dev(interface_to_usbdev(intf));
95
96 usbtv->iso_size = size;
97
98 usb_set_intfdata(intf, usbtv);
99
100 ret = usbtv_video_init(usbtv);
101 if (ret < 0)
102 goto usbtv_video_fail;
103
104 /* for simplicity we exploit the v4l2_device reference counting */
105 v4l2_device_get(&usbtv->v4l2_dev);
106
107 dev_info(dev, "Fushicai USBTV007 Video Grabber\n");
108 return 0;
109
110 usbtv_video_fail:
111 kfree(usbtv);
112
113 return ret;
114 }
115
116 static void usbtv_disconnect(struct usb_interface *intf)
117 {
118 struct usbtv *usbtv = usb_get_intfdata(intf);
119 usb_set_intfdata(intf, NULL);
120
121 if (!usbtv)
122 return;
123
124 usbtv_video_free(usbtv);
125
126 usb_put_dev(usbtv->udev);
127 usbtv->udev = NULL;
128
129 /* the usbtv structure will be deallocated when v4l2 will be
130 done using it */
131 v4l2_device_put(&usbtv->v4l2_dev);
132 }
133
/* Devices handled by this driver: Fushicai USBTV007 (VID 0x1b71, PID 0x3002). */
static struct usb_device_id usbtv_id_table[] = {
	{ USB_DEVICE(0x1b71, 0x3002) },
	{}
};
MODULE_DEVICE_TABLE(usb, usbtv_id_table);
139
140 MODULE_AUTHOR("Lubomir Rintel");
141 MODULE_DESCRIPTION("Fushicai USBTV007 Video Grabber Driver");
142 MODULE_LICENSE("Dual BSD/GPL");
143
/* USB driver glue: binds the id table to the probe/disconnect callbacks. */
static struct usb_driver usbtv_usb_driver = {
	.name = "usbtv",
	.id_table = usbtv_id_table,
	.probe = usbtv_probe,
	.disconnect = usbtv_disconnect,
};
150
151 module_usb_driver(usbtv_usb_driver);
152
153
154
155
156
157 /* LDV_COMMENT_BEGIN_MAIN */
158 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
159
160 /*###########################################################################*/
161
162 /*############## Driver Environment Generator 0.2 output ####################*/
163
164 /*###########################################################################*/
165
166
167
168 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
169 void ldv_check_final_state(void);
170
171 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
172 void ldv_check_return_value(int res);
173
174 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
175 void ldv_check_return_value_probe(int res);
176
177 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
178 void ldv_initialize(void);
179
180 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
181 void ldv_handler_precall(void);
182
183 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */
184 int nondet_int(void);
185
186 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
187 int LDV_IN_INTERRUPT;
188
/* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
/* NOTE(review): auto-generated LDV environment model that drives probe()
 * and disconnect() in arbitrary interleavings; the LDV_COMMENT markers are
 * consumed by the verification toolchain, so the code is left untouched. */
void ldv_main0_sequence_infinite_withcheck_stateful(void) {



	/* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
	/*============================= VARIABLE DECLARATION PART =============================*/
	/** STRUCT: struct type: usb_driver, struct name: usbtv_usb_driver **/
	/* content: static int usbtv_probe(struct usb_interface *intf, const struct usb_device_id *id)*/
	/* LDV_COMMENT_END_PREP */
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "usbtv_probe" */
	struct usb_interface * var_group1;
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "usbtv_probe" */
	const struct usb_device_id * var_usbtv_probe_0_p1;
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "usbtv_probe" */
	static int res_usbtv_probe_0;
	/* content: static void usbtv_disconnect(struct usb_interface *intf)*/
	/* LDV_COMMENT_END_PREP */




	/* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
	/* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
	/*============================= VARIABLE INITIALIZING PART =============================*/
	LDV_IN_INTERRUPT=1;




	/* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
	/* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
	/*============================= FUNCTION CALL SECTION =============================*/
	/* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
	ldv_initialize();
	int ldv_s_usbtv_usb_driver_usb_driver = 0;


	while( nondet_int()
		|| !(ldv_s_usbtv_usb_driver_usb_driver == 0)
	) {

		switch(nondet_int()) {

		case 0: {

			/** STRUCT: struct type: usb_driver, struct name: usbtv_usb_driver **/
			if(ldv_s_usbtv_usb_driver_usb_driver==0) {

				/* content: static int usbtv_probe(struct usb_interface *intf, const struct usb_device_id *id)*/
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "usbtv_usb_driver". Standart function test for correct return result. */
				res_usbtv_probe_0 = usbtv_probe( var_group1, var_usbtv_probe_0_p1);
				ldv_check_return_value(res_usbtv_probe_0);
				ldv_check_return_value_probe(res_usbtv_probe_0);
				if(res_usbtv_probe_0)
					goto ldv_module_exit;
				ldv_s_usbtv_usb_driver_usb_driver++;

			}

		}

		break;
		case 1: {

			/** STRUCT: struct type: usb_driver, struct name: usbtv_usb_driver **/
			if(ldv_s_usbtv_usb_driver_usb_driver==1) {

				/* content: static void usbtv_disconnect(struct usb_interface *intf)*/
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "disconnect" from driver structure with callbacks "usbtv_usb_driver" */
				ldv_handler_precall();
				usbtv_disconnect( var_group1);
				ldv_s_usbtv_usb_driver_usb_driver=0;

			}

		}

		break;
		default: break;

		}

	}

	ldv_module_exit:

	/* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
	ldv_final: ldv_check_final_state();

	/* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
	return;

}
285 #endif
286
287 /* LDV_COMMENT_END_MAIN */ 1
2 #include <linux/kernel.h>
3 bool ldv_is_err(const void *ptr);
4 bool ldv_is_err_or_null(const void *ptr);
5 void* ldv_err_ptr(long error);
6 long ldv_ptr_err(const void *ptr);
7
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10
11 #include <linux/usb.h>
12
13 // Provide model function prototypes before their usage.
14 void ldv_interface_to_usbdev(void);
15 void ldv_usb_get_dev(void);
16 void ldv_usb_put_dev(void);
17
18 /*
19 * Fushicai USBTV007 Video Grabber Driver
20 *
21 * Product web site:
22 * http://www.fushicai.com/products_detail/&productId=d05449ee-b690-42f9-a661-aa7353894bed.html
23 *
24 * Following LWN articles were very useful in construction of this driver:
25 * Video4Linux2 API series: http://lwn.net/Articles/203924/
26 * videobuf2 API explanation: http://lwn.net/Articles/447435/
27 * Thanks go to Jonathan Corbet for providing this quality documentation.
28 * He is awesome.
29 *
30 * Copyright (c) 2013 Lubomir Rintel
31 * All rights reserved.
32 * No physical hardware was harmed running Windows during the
33 * reverse-engineering activity
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions, and the following disclaimer,
40 * without modification.
41 * 2. The name of the author may not be used to endorse or promote products
42 * derived from this software without specific prior written permission.
43 *
44 * Alternatively, this software may be distributed under the terms of the
45 * GNU General Public License ("GPL").
46 */
47
48 #include <media/v4l2-ioctl.h>
49 #include <media/videobuf2-core.h>
50
51 #include "usbtv.h"
52
/* Supported TV norms and the capture geometry each one implies. */
static struct usbtv_norm_params norm_params[] = {
	{
		/* 60 Hz / 525-line standards (NTSC-like): 720x480 */
		.norm = V4L2_STD_525_60,
		.cap_width = 720,
		.cap_height = 480,
	},
	{
		/* PAL: 720x576 */
		.norm = V4L2_STD_PAL,
		.cap_width = 720,
		.cap_height = 576,
	}
};
65
66 static int usbtv_configure_for_norm(struct usbtv *usbtv, v4l2_std_id norm)
67 {
68 int i, ret = 0;
69 struct usbtv_norm_params *params = NULL;
70
71 for (i = 0; i < ARRAY_SIZE(norm_params); i++) {
72 if (norm_params[i].norm & norm) {
73 params = &norm_params[i];
74 break;
75 }
76 }
77
78 if (params) {
79 usbtv->width = params->cap_width;
80 usbtv->height = params->cap_height;
81 usbtv->n_chunks = usbtv->width * usbtv->height
82 / 4 / USBTV_CHUNK;
83 usbtv->norm = params->norm;
84 } else
85 ret = -EINVAL;
86
87 return ret;
88 }
89
90 static int usbtv_select_input(struct usbtv *usbtv, int input)
91 {
92 int ret;
93
94 static const u16 composite[][2] = {
95 { USBTV_BASE + 0x0105, 0x0060 },
96 { USBTV_BASE + 0x011f, 0x00f2 },
97 { USBTV_BASE + 0x0127, 0x0060 },
98 { USBTV_BASE + 0x00ae, 0x0010 },
99 { USBTV_BASE + 0x0284, 0x00aa },
100 { USBTV_BASE + 0x0239, 0x0060 },
101 };
102
103 static const u16 svideo[][2] = {
104 { USBTV_BASE + 0x0105, 0x0010 },
105 { USBTV_BASE + 0x011f, 0x00ff },
106 { USBTV_BASE + 0x0127, 0x0060 },
107 { USBTV_BASE + 0x00ae, 0x0030 },
108 { USBTV_BASE + 0x0284, 0x0088 },
109 { USBTV_BASE + 0x0239, 0x0060 },
110 };
111
112 switch (input) {
113 case USBTV_COMPOSITE_INPUT:
114 ret = usbtv_set_regs(usbtv, composite, ARRAY_SIZE(composite));
115 break;
116 case USBTV_SVIDEO_INPUT:
117 ret = usbtv_set_regs(usbtv, svideo, ARRAY_SIZE(svideo));
118 break;
119 default:
120 ret = -EINVAL;
121 }
122
123 if (!ret)
124 usbtv->input = input;
125
126 return ret;
127 }
128
/* Configure the decoder for the requested TV norm: update the cached
 * geometry, then push the norm-specific register table.  Returns 0 on
 * success or a negative errno (-EINVAL for an unsupported norm). */
static int usbtv_select_norm(struct usbtv *usbtv, v4l2_std_id norm)
{
	int ret;
	/* PAL decoding registers (reverse-engineered values). */
	static const u16 pal[][2] = {
		{ USBTV_BASE + 0x001a, 0x0068 },
		{ USBTV_BASE + 0x010e, 0x0072 },
		{ USBTV_BASE + 0x010f, 0x00a2 },
		{ USBTV_BASE + 0x0112, 0x00b0 },
		{ USBTV_BASE + 0x0117, 0x0001 },
		{ USBTV_BASE + 0x0118, 0x002c },
		{ USBTV_BASE + 0x012d, 0x0010 },
		{ USBTV_BASE + 0x012f, 0x0020 },
		{ USBTV_BASE + 0x024f, 0x0002 },
		{ USBTV_BASE + 0x0254, 0x0059 },
		{ USBTV_BASE + 0x025a, 0x0016 },
		{ USBTV_BASE + 0x025b, 0x0035 },
		{ USBTV_BASE + 0x0263, 0x0017 },
		{ USBTV_BASE + 0x0266, 0x0016 },
		{ USBTV_BASE + 0x0267, 0x0036 }
	};

	/* NTSC decoding registers (reverse-engineered values). */
	static const u16 ntsc[][2] = {
		{ USBTV_BASE + 0x001a, 0x0079 },
		{ USBTV_BASE + 0x010e, 0x0068 },
		{ USBTV_BASE + 0x010f, 0x009c },
		{ USBTV_BASE + 0x0112, 0x00f0 },
		{ USBTV_BASE + 0x0117, 0x0000 },
		{ USBTV_BASE + 0x0118, 0x00fc },
		{ USBTV_BASE + 0x012d, 0x0004 },
		{ USBTV_BASE + 0x012f, 0x0008 },
		{ USBTV_BASE + 0x024f, 0x0001 },
		{ USBTV_BASE + 0x0254, 0x005f },
		{ USBTV_BASE + 0x025a, 0x0012 },
		{ USBTV_BASE + 0x025b, 0x0001 },
		{ USBTV_BASE + 0x0263, 0x001c },
		{ USBTV_BASE + 0x0266, 0x0011 },
		{ USBTV_BASE + 0x0267, 0x0005 }
	};

	ret = usbtv_configure_for_norm(usbtv, norm);

	if (!ret) {
		/* Geometry accepted; now program the matching table. */
		if (norm & V4L2_STD_525_60)
			ret = usbtv_set_regs(usbtv, ntsc, ARRAY_SIZE(ntsc));
		else if (norm & V4L2_STD_PAL)
			ret = usbtv_set_regs(usbtv, pal, ARRAY_SIZE(pal));
	}

	return ret;
}
179
/* Push the one-time capture setup register sequence to the device, then
 * re-apply the currently selected norm and input.  Returns 0 or the
 * negative errno of the first failing step. */
static int usbtv_setup_capture(struct usbtv *usbtv)
{
	int ret;
	/* Reverse-engineered init sequence; most register meanings are
	 * unknown beyond the grouping comments below. */
	static const u16 setup[][2] = {
		/* These seem to enable the device. */
		{ USBTV_BASE + 0x0008, 0x0001 },
		{ USBTV_BASE + 0x01d0, 0x00ff },
		{ USBTV_BASE + 0x01d9, 0x0002 },

		/* These seem to influence color parameters, such as
		 * brightness, etc. */
		{ USBTV_BASE + 0x0239, 0x0040 },
		{ USBTV_BASE + 0x0240, 0x0000 },
		{ USBTV_BASE + 0x0241, 0x0000 },
		{ USBTV_BASE + 0x0242, 0x0002 },
		{ USBTV_BASE + 0x0243, 0x0080 },
		{ USBTV_BASE + 0x0244, 0x0012 },
		{ USBTV_BASE + 0x0245, 0x0090 },
		{ USBTV_BASE + 0x0246, 0x0000 },

		{ USBTV_BASE + 0x0278, 0x002d },
		{ USBTV_BASE + 0x0279, 0x000a },
		{ USBTV_BASE + 0x027a, 0x0032 },
		{ 0xf890, 0x000c },
		{ 0xf894, 0x0086 },

		{ USBTV_BASE + 0x00ac, 0x00c0 },
		{ USBTV_BASE + 0x00ad, 0x0000 },
		{ USBTV_BASE + 0x00a2, 0x0012 },
		{ USBTV_BASE + 0x00a3, 0x00e0 },
		{ USBTV_BASE + 0x00a4, 0x0028 },
		{ USBTV_BASE + 0x00a5, 0x0082 },
		{ USBTV_BASE + 0x00a7, 0x0080 },
		{ USBTV_BASE + 0x0000, 0x0014 },
		{ USBTV_BASE + 0x0006, 0x0003 },
		{ USBTV_BASE + 0x0090, 0x0099 },
		{ USBTV_BASE + 0x0091, 0x0090 },
		{ USBTV_BASE + 0x0094, 0x0068 },
		{ USBTV_BASE + 0x0095, 0x0070 },
		{ USBTV_BASE + 0x009c, 0x0030 },
		{ USBTV_BASE + 0x009d, 0x00c0 },
		{ USBTV_BASE + 0x009e, 0x00e0 },
		{ USBTV_BASE + 0x0019, 0x0006 },
		{ USBTV_BASE + 0x008c, 0x00ba },
		{ USBTV_BASE + 0x0101, 0x00ff },
		{ USBTV_BASE + 0x010c, 0x00b3 },
		{ USBTV_BASE + 0x01b2, 0x0080 },
		{ USBTV_BASE + 0x01b4, 0x00a0 },
		{ USBTV_BASE + 0x014c, 0x00ff },
		{ USBTV_BASE + 0x014d, 0x00ca },
		{ USBTV_BASE + 0x0113, 0x0053 },
		{ USBTV_BASE + 0x0119, 0x008a },
		{ USBTV_BASE + 0x013c, 0x0003 },
		{ USBTV_BASE + 0x0150, 0x009c },
		{ USBTV_BASE + 0x0151, 0x0071 },
		{ USBTV_BASE + 0x0152, 0x00c6 },
		{ USBTV_BASE + 0x0153, 0x0084 },
		{ USBTV_BASE + 0x0154, 0x00bc },
		{ USBTV_BASE + 0x0155, 0x00a0 },
		{ USBTV_BASE + 0x0156, 0x00a0 },
		{ USBTV_BASE + 0x0157, 0x009c },
		{ USBTV_BASE + 0x0158, 0x001f },
		{ USBTV_BASE + 0x0159, 0x0006 },
		{ USBTV_BASE + 0x015d, 0x0000 },

		{ USBTV_BASE + 0x0284, 0x0088 },
		{ USBTV_BASE + 0x0003, 0x0004 },
		{ USBTV_BASE + 0x0100, 0x00d3 },
		{ USBTV_BASE + 0x0115, 0x0015 },
		{ USBTV_BASE + 0x0220, 0x002e },
		{ USBTV_BASE + 0x0225, 0x0008 },
		/* NOTE(review): 0x024e is written twice with the same value;
		 * presumably copied from a captured trace — confirm. */
		{ USBTV_BASE + 0x024e, 0x0002 },
		{ USBTV_BASE + 0x024e, 0x0002 },
		{ USBTV_BASE + 0x024f, 0x0002 },
	};

	ret = usbtv_set_regs(usbtv, setup, ARRAY_SIZE(setup));
	if (ret)
		return ret;

	/* Re-program norm- and input-dependent registers on top. */
	ret = usbtv_select_norm(usbtv, usbtv->norm);
	if (ret)
		return ret;

	ret = usbtv_select_input(usbtv, usbtv->input);
	if (ret)
		return ret;

	return 0;
}
270
271 /* Copy data from chunk into a frame buffer, deinterlacing the data
272 * into every second line. Unfortunately, they don't align nicely into
273 * 720 pixel lines, as the chunk is 240 words long, which is 480 pixels.
274 * Therefore, we break down the chunk into two halves before copyting,
275 * so that we can interleave a line if needed. */
276 static void usbtv_chunk_to_vbuf(u32 *frame, u32 *src, int chunk_no, int odd)
277 {
278 int half;
279
280 for (half = 0; half < 2; half++) {
281 int part_no = chunk_no * 2 + half;
282 int line = part_no / 3;
283 int part_index = (line * 2 + !odd) * 3 + (part_no % 3);
284
285 u32 *dst = &frame[part_index * USBTV_CHUNK/2];
286 memcpy(dst, src, USBTV_CHUNK/2 * sizeof(*src));
287 src += USBTV_CHUNK/2;
288 }
289 }
290
/* Called for each 256-byte image chunk.
 * First word identifies the chunk, followed by 240 words of image
 * data and padding.  Runs in URB-completion context, hence the
 * irqsave spinlock around the buffer list. */
static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk)
{
	int frame_id, odd, chunk_no;
	u32 *frame;
	struct usbtv_buf *buf;
	unsigned long flags;

	/* Ignore corrupted lines. */
	if (!USBTV_MAGIC_OK(chunk))
		return;
	frame_id = USBTV_FRAME_ID(chunk);
	odd = USBTV_ODD(chunk);
	chunk_no = USBTV_CHUNK_NO(chunk);
	/* Drop chunks whose index exceeds the current frame geometry. */
	if (chunk_no >= usbtv->n_chunks)
		return;

	/* Beginning of a frame. */
	if (chunk_no == 0) {
		usbtv->frame_id = frame_id;
		usbtv->chunks_done = 0;
	}

	/* Chunk belongs to a frame other than the one being assembled. */
	if (usbtv->frame_id != frame_id)
		return;

	spin_lock_irqsave(&usbtv->buflock, flags);
	if (list_empty(&usbtv->bufs)) {
		/* No free buffers. Userspace likely too slow. */
		spin_unlock_irqrestore(&usbtv->buflock, flags);
		return;
	}

	/* First available buffer. */
	buf = list_first_entry(&usbtv->bufs, struct usbtv_buf, list);
	frame = vb2_plane_vaddr(&buf->vb, 0);

	/* Copy the chunk data. */
	usbtv_chunk_to_vbuf(frame, &chunk[1], chunk_no, odd);
	usbtv->chunks_done++;

	/* Last chunk in a frame, signalling an end */
	if (odd && chunk_no == usbtv->n_chunks-1) {
		int size = vb2_plane_size(&buf->vb, 0);
		/* Complete the buffer; mark it DONE only if every chunk
		 * of the frame actually arrived, else ERROR. */
		enum vb2_buffer_state state = usbtv->chunks_done ==
						usbtv->n_chunks ?
						VB2_BUF_STATE_DONE :
						VB2_BUF_STATE_ERROR;

		buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
		buf->vb.v4l2_buf.sequence = usbtv->sequence++;
		v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
		vb2_set_plane_payload(&buf->vb, 0, size);
		vb2_buffer_done(&buf->vb, state);
		list_del(&buf->list);
	}

	spin_unlock_irqrestore(&usbtv->buflock, flags);
}
352
353 /* Got image data. Each packet contains a number of 256-word chunks we
354 * compose the image from. */
355 static void usbtv_iso_cb(struct urb *ip)
356 {
357 int ret;
358 int i;
359 struct usbtv *usbtv = (struct usbtv *)ip->context;
360
361 switch (ip->status) {
362 /* All fine. */
363 case 0:
364 break;
365 /* Device disconnected or capture stopped? */
366 case -ENODEV:
367 case -ENOENT:
368 case -ECONNRESET:
369 case -ESHUTDOWN:
370 return;
371 /* Unknown error. Retry. */
372 default:
373 dev_warn(usbtv->dev, "Bad response for ISO request.\n");
374 goto resubmit;
375 }
376
377 for (i = 0; i < ip->number_of_packets; i++) {
378 int size = ip->iso_frame_desc[i].actual_length;
379 unsigned char *data = ip->transfer_buffer +
380 ip->iso_frame_desc[i].offset;
381 int offset;
382
383 for (offset = 0; USBTV_CHUNK_SIZE * offset < size; offset++)
384 usbtv_image_chunk(usbtv,
385 (u32 *)&data[USBTV_CHUNK_SIZE * offset]);
386 }
387
388 resubmit:
389 ret = usb_submit_urb(ip, GFP_ATOMIC);
390 if (ret < 0)
391 dev_warn(usbtv->dev, "Could not resubmit ISO URB\n");
392 }
393
394 static struct urb *usbtv_setup_iso_transfer(struct usbtv *usbtv)
395 {
396 struct urb *ip;
397 int size = usbtv->iso_size;
398 int i;
399
400 ip = usb_alloc_urb(USBTV_ISOC_PACKETS, GFP_KERNEL);
401 if (ip == NULL)
402 return NULL;
403
404 ip->dev = usbtv->udev;
405 ip->context = usbtv;
406 ip->pipe = usb_rcvisocpipe(usbtv->udev, USBTV_VIDEO_ENDP);
407 ip->interval = 1;
408 ip->transfer_flags = URB_ISO_ASAP;
409 ip->transfer_buffer = kzalloc(size * USBTV_ISOC_PACKETS,
410 GFP_KERNEL);
411 ip->complete = usbtv_iso_cb;
412 ip->number_of_packets = USBTV_ISOC_PACKETS;
413 ip->transfer_buffer_length = size * USBTV_ISOC_PACKETS;
414 for (i = 0; i < USBTV_ISOC_PACKETS; i++) {
415 ip->iso_frame_desc[i].offset = size * i;
416 ip->iso_frame_desc[i].length = size;
417 }
418
419 return ip;
420 }
421
422 static void usbtv_stop(struct usbtv *usbtv)
423 {
424 int i;
425 unsigned long flags;
426
427 /* Cancel running transfers. */
428 for (i = 0; i < USBTV_ISOC_TRANSFERS; i++) {
429 struct urb *ip = usbtv->isoc_urbs[i];
430 if (ip == NULL)
431 continue;
432 usb_kill_urb(ip);
433 kfree(ip->transfer_buffer);
434 usb_free_urb(ip);
435 usbtv->isoc_urbs[i] = NULL;
436 }
437
438 /* Return buffers to userspace. */
439 spin_lock_irqsave(&usbtv->buflock, flags);
440 while (!list_empty(&usbtv->bufs)) {
441 struct usbtv_buf *buf = list_first_entry(&usbtv->bufs,
442 struct usbtv_buf, list);
443 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
444 list_del(&buf->list);
445 }
446 spin_unlock_irqrestore(&usbtv->buflock, flags);
447 }
448
449 static int usbtv_start(struct usbtv *usbtv)
450 {
451 int i;
452 int ret;
453
454 ret = usb_set_interface(usbtv->udev, 0, 0);
455 if (ret < 0)
456 return ret;
457
458 ret = usbtv_setup_capture(usbtv);
459 if (ret < 0)
460 return ret;
461
462 ret = usb_set_interface(usbtv->udev, 0, 1);
463 if (ret < 0)
464 return ret;
465
466 for (i = 0; i < USBTV_ISOC_TRANSFERS; i++) {
467 struct urb *ip;
468
469 ip = usbtv_setup_iso_transfer(usbtv);
470 if (ip == NULL) {
471 ret = -ENOMEM;
472 goto start_fail;
473 }
474 usbtv->isoc_urbs[i] = ip;
475
476 ret = usb_submit_urb(ip, GFP_KERNEL);
477 if (ret < 0)
478 goto start_fail;
479 }
480
481 return 0;
482
483 start_fail:
484 usbtv_stop(usbtv);
485 return ret;
486 }
487
488 static int usbtv_querycap(struct file *file, void *priv,
489 struct v4l2_capability *cap)
490 {
491 struct usbtv *dev = video_drvdata(file);
492
493 strlcpy(cap->driver, "usbtv", sizeof(cap->driver));
494 strlcpy(cap->card, "usbtv", sizeof(cap->card));
495 usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
496 cap->device_caps = V4L2_CAP_VIDEO_CAPTURE;
497 cap->device_caps |= V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
498 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
499 return 0;
500 }
501
502 static int usbtv_enum_input(struct file *file, void *priv,
503 struct v4l2_input *i)
504 {
505 struct usbtv *dev = video_drvdata(file);
506
507 switch (i->index) {
508 case USBTV_COMPOSITE_INPUT:
509 strlcpy(i->name, "Composite", sizeof(i->name));
510 break;
511 case USBTV_SVIDEO_INPUT:
512 strlcpy(i->name, "S-Video", sizeof(i->name));
513 break;
514 default:
515 return -EINVAL;
516 }
517
518 i->type = V4L2_INPUT_TYPE_CAMERA;
519 i->std = dev->vdev.tvnorms;
520 return 0;
521 }
522
523 static int usbtv_enum_fmt_vid_cap(struct file *file, void *priv,
524 struct v4l2_fmtdesc *f)
525 {
526 if (f->index > 0)
527 return -EINVAL;
528
529 strlcpy(f->description, "16 bpp YUY2, 4:2:2, packed",
530 sizeof(f->description));
531 f->pixelformat = V4L2_PIX_FMT_YUYV;
532 return 0;
533 }
534
535 static int usbtv_fmt_vid_cap(struct file *file, void *priv,
536 struct v4l2_format *f)
537 {
538 struct usbtv *usbtv = video_drvdata(file);
539
540 f->fmt.pix.width = usbtv->width;
541 f->fmt.pix.height = usbtv->height;
542 f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
543 f->fmt.pix.field = V4L2_FIELD_INTERLACED;
544 f->fmt.pix.bytesperline = usbtv->width * 2;
545 f->fmt.pix.sizeimage = (f->fmt.pix.bytesperline * f->fmt.pix.height);
546 f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
547
548 return 0;
549 }
550
551 static int usbtv_g_std(struct file *file, void *priv, v4l2_std_id *norm)
552 {
553 struct usbtv *usbtv = video_drvdata(file);
554 *norm = usbtv->norm;
555 return 0;
556 }
557
558 static int usbtv_s_std(struct file *file, void *priv, v4l2_std_id norm)
559 {
560 int ret = -EINVAL;
561 struct usbtv *usbtv = video_drvdata(file);
562
563 if ((norm & V4L2_STD_525_60) || (norm & V4L2_STD_PAL))
564 ret = usbtv_select_norm(usbtv, norm);
565
566 return ret;
567 }
568
569 static int usbtv_g_input(struct file *file, void *priv, unsigned int *i)
570 {
571 struct usbtv *usbtv = video_drvdata(file);
572 *i = usbtv->input;
573 return 0;
574 }
575
/* VIDIOC_S_INPUT: route the hardware to the requested input. */
static int usbtv_s_input(struct file *file, void *priv, unsigned int i)
{
	struct usbtv *usbtv = video_drvdata(file);

	return usbtv_select_input(usbtv, i);
}
581
/* V4L2 ioctl dispatch table.  Device-specific handlers come first;
 * all buffer-queue ioctls are delegated to the videobuf2 helpers. */
static struct v4l2_ioctl_ops usbtv_ioctl_ops = {
	.vidioc_querycap = usbtv_querycap,
	.vidioc_enum_input = usbtv_enum_input,
	.vidioc_enum_fmt_vid_cap = usbtv_enum_fmt_vid_cap,
	/* Format is fixed by the norm, so g/try/s all share one handler. */
	.vidioc_g_fmt_vid_cap = usbtv_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap = usbtv_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap = usbtv_fmt_vid_cap,
	.vidioc_g_std = usbtv_g_std,
	.vidioc_s_std = usbtv_s_std,
	.vidioc_g_input = usbtv_g_input,
	.vidioc_s_input = usbtv_s_input,

	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
};
603
/* File operations for the video node; all paths use the generic
 * v4l2/videobuf2 file helpers. */
static struct v4l2_file_operations usbtv_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.mmap = vb2_fop_mmap,
	.open = v4l2_fh_open,
	.release = vb2_fop_release,
	.read = vb2_fop_read,
	.poll = vb2_fop_poll,
};
613
614 static int usbtv_queue_setup(struct vb2_queue *vq,
615 const struct v4l2_format *v4l_fmt, unsigned int *nbuffers,
616 unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[])
617 {
618 struct usbtv *usbtv = vb2_get_drv_priv(vq);
619
620 if (*nbuffers < 2)
621 *nbuffers = 2;
622 *nplanes = 1;
623 sizes[0] = USBTV_CHUNK * usbtv->n_chunks * 2 * sizeof(u32);
624
625 return 0;
626 }
627
628 static void usbtv_buf_queue(struct vb2_buffer *vb)
629 {
630 struct usbtv *usbtv = vb2_get_drv_priv(vb->vb2_queue);
631 struct usbtv_buf *buf = container_of(vb, struct usbtv_buf, vb);
632 unsigned long flags;
633
634 if (usbtv->udev == NULL) {
635 vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
636 return;
637 }
638
639 spin_lock_irqsave(&usbtv->buflock, flags);
640 list_add_tail(&buf->list, &usbtv->bufs);
641 spin_unlock_irqrestore(&usbtv->buflock, flags);
642 }
643
644 static int usbtv_start_streaming(struct vb2_queue *vq, unsigned int count)
645 {
646 struct usbtv *usbtv = vb2_get_drv_priv(vq);
647
648 if (usbtv->udev == NULL)
649 return -ENODEV;
650
651 return usbtv_start(usbtv);
652 }
653
654 static void usbtv_stop_streaming(struct vb2_queue *vq)
655 {
656 struct usbtv *usbtv = vb2_get_drv_priv(vq);
657
658 if (usbtv->udev)
659 usbtv_stop(usbtv);
660 }
661
/* videobuf2 queue callbacks for the capture queue. */
static struct vb2_ops usbtv_vb2_ops = {
	.queue_setup = usbtv_queue_setup,
	.buf_queue = usbtv_buf_queue,
	.start_streaming = usbtv_start_streaming,
	.stop_streaming = usbtv_stop_streaming,
};
668
669 static void usbtv_release(struct v4l2_device *v4l2_dev)
670 {
671 struct usbtv *usbtv = container_of(v4l2_dev, struct usbtv, v4l2_dev);
672
673 v4l2_device_unregister(&usbtv->v4l2_dev);
674 vb2_queue_release(&usbtv->vb2q);
675 kfree(usbtv);
676 }
677
/* Register the video capture side of the device: vb2 queue, v4l2 device
 * and the video device node.  Returns 0 on success; on failure every
 * step taken so far is unwound in reverse order. */
int usbtv_video_init(struct usbtv *usbtv)
{
	int ret;

	/* Default to NTSC; cannot fail since the norm is in norm_params. */
	(void)usbtv_configure_for_norm(usbtv, V4L2_STD_525_60);

	spin_lock_init(&usbtv->buflock);
	mutex_init(&usbtv->v4l2_lock);
	mutex_init(&usbtv->vb2q_lock);
	INIT_LIST_HEAD(&usbtv->bufs);

	/* videobuf2 structure */
	usbtv->vb2q.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	usbtv->vb2q.io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
	usbtv->vb2q.drv_priv = usbtv;
	usbtv->vb2q.buf_struct_size = sizeof(struct usbtv_buf);
	usbtv->vb2q.ops = &usbtv_vb2_ops;
	usbtv->vb2q.mem_ops = &vb2_vmalloc_memops;
	usbtv->vb2q.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	usbtv->vb2q.lock = &usbtv->vb2q_lock;
	ret = vb2_queue_init(&usbtv->vb2q);
	if (ret < 0) {
		dev_warn(usbtv->dev, "Could not initialize videobuf2 queue\n");
		return ret;
	}

	/* v4l2 structure */
	usbtv->v4l2_dev.release = usbtv_release;
	ret = v4l2_device_register(usbtv->dev, &usbtv->v4l2_dev);
	if (ret < 0) {
		dev_warn(usbtv->dev, "Could not register v4l2 device\n");
		goto v4l2_fail;
	}

	/* Video structure */
	strlcpy(usbtv->vdev.name, "usbtv", sizeof(usbtv->vdev.name));
	usbtv->vdev.v4l2_dev = &usbtv->v4l2_dev;
	/* Actual freeing is handled by usbtv_release() via the v4l2_dev. */
	usbtv->vdev.release = video_device_release_empty;
	usbtv->vdev.fops = &usbtv_fops;
	usbtv->vdev.ioctl_ops = &usbtv_ioctl_ops;
	usbtv->vdev.tvnorms = USBTV_TV_STD;
	usbtv->vdev.queue = &usbtv->vb2q;
	usbtv->vdev.lock = &usbtv->v4l2_lock;
	set_bit(V4L2_FL_USE_FH_PRIO, &usbtv->vdev.flags);
	video_set_drvdata(&usbtv->vdev, usbtv);
	ret = video_register_device(&usbtv->vdev, VFL_TYPE_GRABBER, -1);
	if (ret < 0) {
		dev_warn(usbtv->dev, "Could not register video device\n");
		goto vdev_fail;
	}

	return 0;

vdev_fail:
	v4l2_device_unregister(&usbtv->v4l2_dev);
v4l2_fail:
	vb2_queue_release(&usbtv->vb2q);

	return ret;
}
738
/* Disconnect-time teardown of the video side.  Both locks are held so
 * no ioctl or queue operation races the unregistering; the usbtv struct
 * itself is freed later by usbtv_release() when v4l2_device_put() drops
 * the last reference. */
void usbtv_video_free(struct usbtv *usbtv)
{
	/* Lock order: vb2q_lock outer, v4l2_lock inner. */
	mutex_lock(&usbtv->vb2q_lock);
	mutex_lock(&usbtv->v4l2_lock);

	usbtv_stop(usbtv);
	video_unregister_device(&usbtv->vdev);
	v4l2_device_disconnect(&usbtv->v4l2_dev);

	mutex_unlock(&usbtv->v4l2_lock);
	mutex_unlock(&usbtv->vb2q_lock);

	/* Drop our reference; may invoke usbtv_release(). */
	v4l2_device_put(&usbtv->v4l2_dev);
}
753
754
755
756
757
758 /* LDV_COMMENT_BEGIN_MAIN */
759 #ifdef LDV_MAIN1_sequence_infinite_withcheck_stateful
760
761 /*###########################################################################*/
762
763 /*############## Driver Environment Generator 0.2 output ####################*/
764
765 /*###########################################################################*/
766
767
768
769 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
770 void ldv_check_final_state(void);
771
772 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
773 void ldv_check_return_value(int res);
774
775 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
776 void ldv_check_return_value_probe(int res);
777
778 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
779 void ldv_initialize(void);
780
781 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
782 void ldv_handler_precall(void);
783
784 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */
785 int nondet_int(void);
786
787 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
788 int LDV_IN_INTERRUPT;
789
790 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
791 void ldv_main1_sequence_infinite_withcheck_stateful(void) {
792
793
794
795 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
796 /*============================= VARIABLE DECLARATION PART =============================*/
797 /** STRUCT: struct type: v4l2_ioctl_ops, struct name: usbtv_ioctl_ops **/
798 /* content: static int usbtv_querycap(struct file *file, void *priv, struct v4l2_capability *cap)*/
799 /* LDV_COMMENT_END_PREP */
800 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "usbtv_querycap" */
801 struct file * var_group1;
802 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "usbtv_querycap" */
803 void * var_usbtv_querycap_10_p1;
804 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "usbtv_querycap" */
805 struct v4l2_capability * var_usbtv_querycap_10_p2;
806 /* content: static int usbtv_enum_input(struct file *file, void *priv, struct v4l2_input *i)*/
807 /* LDV_COMMENT_END_PREP */
808 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "usbtv_enum_input" */
809 void * var_usbtv_enum_input_11_p1;
810 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "usbtv_enum_input" */
811 struct v4l2_input * var_usbtv_enum_input_11_p2;
812 /* content: static int usbtv_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f)*/
813 /* LDV_COMMENT_END_PREP */
814 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "usbtv_enum_fmt_vid_cap" */
815 void * var_usbtv_enum_fmt_vid_cap_12_p1;
816 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "usbtv_enum_fmt_vid_cap" */
817 struct v4l2_fmtdesc * var_usbtv_enum_fmt_vid_cap_12_p2;
818 /* content: static int usbtv_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f)*/
819 /* LDV_COMMENT_END_PREP */
820 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "usbtv_fmt_vid_cap" */
821 void * var_usbtv_fmt_vid_cap_13_p1;
822 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "usbtv_fmt_vid_cap" */
823 struct v4l2_format * var_usbtv_fmt_vid_cap_13_p2;
824 /* content: static int usbtv_g_std(struct file *file, void *priv, v4l2_std_id *norm)*/
825 /* LDV_COMMENT_END_PREP */
826 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "usbtv_g_std" */
827 void * var_usbtv_g_std_14_p1;
828 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "usbtv_g_std" */
829 v4l2_std_id * var_usbtv_g_std_14_p2;
830 /* content: static int usbtv_s_std(struct file *file, void *priv, v4l2_std_id norm)*/
831 /* LDV_COMMENT_END_PREP */
832 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "usbtv_s_std" */
833 void * var_usbtv_s_std_15_p1;
834 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "usbtv_s_std" */
835 v4l2_std_id var_usbtv_s_std_15_p2;
836 /* content: static int usbtv_g_input(struct file *file, void *priv, unsigned int *i)*/
837 /* LDV_COMMENT_END_PREP */
838 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "usbtv_g_input" */
839 void * var_usbtv_g_input_16_p1;
840 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "usbtv_g_input" */
841 unsigned int * var_usbtv_g_input_16_p2;
842 /* content: static int usbtv_s_input(struct file *file, void *priv, unsigned int i)*/
843 /* LDV_COMMENT_END_PREP */
844 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "usbtv_s_input" */
845 void * var_usbtv_s_input_17_p1;
846 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "usbtv_s_input" */
847 unsigned int var_usbtv_s_input_17_p2;
848
849 /** STRUCT: struct type: vb2_ops, struct name: usbtv_vb2_ops **/
850 /* content: static void usbtv_buf_queue(struct vb2_buffer *vb)*/
851 /* LDV_COMMENT_END_PREP */
852 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "usbtv_buf_queue" */
853 struct vb2_buffer * var_group2;
854 /* content: static int usbtv_start_streaming(struct vb2_queue *vq, unsigned int count)*/
855 /* LDV_COMMENT_END_PREP */
856 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "usbtv_start_streaming" */
857 struct vb2_queue * var_group3;
858 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "usbtv_start_streaming" */
859 unsigned int var_usbtv_start_streaming_19_p1;
860 /* content: static void usbtv_stop_streaming(struct vb2_queue *vq)*/
861 /* LDV_COMMENT_END_PREP */
862
863
864
865
866 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
867 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
868 /*============================= VARIABLE INITIALIZING PART =============================*/
869 LDV_IN_INTERRUPT=1;
870
871
872
873
874 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
875 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
876 /*============================= FUNCTION CALL SECTION =============================*/
877 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
878 ldv_initialize();
879
880
881
882
883
884 while( nondet_int()
885 ) {
886
887 switch(nondet_int()) {
888
889 case 0: {
890
891 /** STRUCT: struct type: v4l2_ioctl_ops, struct name: usbtv_ioctl_ops **/
892
893
894 /* content: static int usbtv_querycap(struct file *file, void *priv, struct v4l2_capability *cap)*/
895 /* LDV_COMMENT_END_PREP */
896 /* LDV_COMMENT_FUNCTION_CALL Function from field "vidioc_querycap" from driver structure with callbacks "usbtv_ioctl_ops" */
897 ldv_handler_precall();
898 usbtv_querycap( var_group1, var_usbtv_querycap_10_p1, var_usbtv_querycap_10_p2);
899
900
901
902
903 }
904
905 break;
906 case 1: {
907
908 /** STRUCT: struct type: v4l2_ioctl_ops, struct name: usbtv_ioctl_ops **/
909
910
911 /* content: static int usbtv_enum_input(struct file *file, void *priv, struct v4l2_input *i)*/
912 /* LDV_COMMENT_END_PREP */
913 /* LDV_COMMENT_FUNCTION_CALL Function from field "vidioc_enum_input" from driver structure with callbacks "usbtv_ioctl_ops" */
914 ldv_handler_precall();
915 usbtv_enum_input( var_group1, var_usbtv_enum_input_11_p1, var_usbtv_enum_input_11_p2);
916
917
918
919
920 }
921
922 break;
923 case 2: {
924
925 /** STRUCT: struct type: v4l2_ioctl_ops, struct name: usbtv_ioctl_ops **/
926
927
928 /* content: static int usbtv_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f)*/
929 /* LDV_COMMENT_END_PREP */
930 /* LDV_COMMENT_FUNCTION_CALL Function from field "vidioc_enum_fmt_vid_cap" from driver structure with callbacks "usbtv_ioctl_ops" */
931 ldv_handler_precall();
932 usbtv_enum_fmt_vid_cap( var_group1, var_usbtv_enum_fmt_vid_cap_12_p1, var_usbtv_enum_fmt_vid_cap_12_p2);
933
934
935
936
937 }
938
939 break;
940 case 3: {
941
942 /** STRUCT: struct type: v4l2_ioctl_ops, struct name: usbtv_ioctl_ops **/
943
944
945 /* content: static int usbtv_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f)*/
946 /* LDV_COMMENT_END_PREP */
947 /* LDV_COMMENT_FUNCTION_CALL Function from field "vidioc_g_fmt_vid_cap" from driver structure with callbacks "usbtv_ioctl_ops" */
948 ldv_handler_precall();
949 usbtv_fmt_vid_cap( var_group1, var_usbtv_fmt_vid_cap_13_p1, var_usbtv_fmt_vid_cap_13_p2);
950
951
952
953
954 }
955
956 break;
957 case 4: {
958
959 /** STRUCT: struct type: v4l2_ioctl_ops, struct name: usbtv_ioctl_ops **/
960
961
962 /* content: static int usbtv_g_std(struct file *file, void *priv, v4l2_std_id *norm)*/
963 /* LDV_COMMENT_END_PREP */
964 /* LDV_COMMENT_FUNCTION_CALL Function from field "vidioc_g_std" from driver structure with callbacks "usbtv_ioctl_ops" */
965 ldv_handler_precall();
966 usbtv_g_std( var_group1, var_usbtv_g_std_14_p1, var_usbtv_g_std_14_p2);
967
968
969
970
971 }
972
973 break;
974 case 5: {
975
976 /** STRUCT: struct type: v4l2_ioctl_ops, struct name: usbtv_ioctl_ops **/
977
978
979 /* content: static int usbtv_s_std(struct file *file, void *priv, v4l2_std_id norm)*/
980 /* LDV_COMMENT_END_PREP */
981 /* LDV_COMMENT_FUNCTION_CALL Function from field "vidioc_s_std" from driver structure with callbacks "usbtv_ioctl_ops" */
982 ldv_handler_precall();
983 usbtv_s_std( var_group1, var_usbtv_s_std_15_p1, var_usbtv_s_std_15_p2);
984
985
986
987
988 }
989
990 break;
991 case 6: {
992
993 /** STRUCT: struct type: v4l2_ioctl_ops, struct name: usbtv_ioctl_ops **/
994
995
996 /* content: static int usbtv_g_input(struct file *file, void *priv, unsigned int *i)*/
997 /* LDV_COMMENT_END_PREP */
998 /* LDV_COMMENT_FUNCTION_CALL Function from field "vidioc_g_input" from driver structure with callbacks "usbtv_ioctl_ops" */
999 ldv_handler_precall();
1000 usbtv_g_input( var_group1, var_usbtv_g_input_16_p1, var_usbtv_g_input_16_p2);
1001
1002
1003
1004
1005 }
1006
1007 break;
1008 case 7: {
1009
1010 /** STRUCT: struct type: v4l2_ioctl_ops, struct name: usbtv_ioctl_ops **/
1011
1012
1013 /* content: static int usbtv_s_input(struct file *file, void *priv, unsigned int i)*/
1014 /* LDV_COMMENT_END_PREP */
1015 /* LDV_COMMENT_FUNCTION_CALL Function from field "vidioc_s_input" from driver structure with callbacks "usbtv_ioctl_ops" */
1016 ldv_handler_precall();
1017 usbtv_s_input( var_group1, var_usbtv_s_input_17_p1, var_usbtv_s_input_17_p2);
1018
1019
1020
1021
1022 }
1023
1024 break;
1025 case 8: {
1026
1027 /** STRUCT: struct type: vb2_ops, struct name: usbtv_vb2_ops **/
1028
1029
1030 /* content: static void usbtv_buf_queue(struct vb2_buffer *vb)*/
1031 /* LDV_COMMENT_END_PREP */
1032 /* LDV_COMMENT_FUNCTION_CALL Function from field "buf_queue" from driver structure with callbacks "usbtv_vb2_ops" */
1033 ldv_handler_precall();
1034 usbtv_buf_queue( var_group2);
1035
1036
1037
1038
1039 }
1040
1041 break;
1042 case 9: {
1043
1044 /** STRUCT: struct type: vb2_ops, struct name: usbtv_vb2_ops **/
1045
1046
1047 /* content: static int usbtv_start_streaming(struct vb2_queue *vq, unsigned int count)*/
1048 /* LDV_COMMENT_END_PREP */
1049 /* LDV_COMMENT_FUNCTION_CALL Function from field "start_streaming" from driver structure with callbacks "usbtv_vb2_ops" */
1050 ldv_handler_precall();
1051 usbtv_start_streaming( var_group3, var_usbtv_start_streaming_19_p1);
1052
1053
1054
1055
1056 }
1057
1058 break;
1059 case 10: {
1060
1061 /** STRUCT: struct type: vb2_ops, struct name: usbtv_vb2_ops **/
1062
1063
1064 /* content: static void usbtv_stop_streaming(struct vb2_queue *vq)*/
1065 /* LDV_COMMENT_END_PREP */
1066 /* LDV_COMMENT_FUNCTION_CALL Function from field "stop_streaming" from driver structure with callbacks "usbtv_vb2_ops" */
1067 ldv_handler_precall();
1068 usbtv_stop_streaming( var_group3);
1069
1070
1071
1072
1073 }
1074
1075 break;
1076 default: break;
1077
1078 }
1079
1080 }
1081
1082 ldv_module_exit:
1083
1084 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
1085 ldv_final: ldv_check_final_state();
1086
1087 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
1088 return;
1089
1090 }
1091 #endif
1092
1093 /* LDV_COMMENT_END_MAIN */ 1
2 #include <linux/kernel.h>
3 #include <linux/module.h>
4
5 #include <linux/usb.h>
6
7 #include <verifier/rcv.h> // For LDV auxiliary routines.
8 #include <kernel-model/ERR.inc>
9
10 // There are 3 possible states of usb device reference counter
11 enum
12 {
13 LDV_USB_DEV_ZERO_STATE = 0, // Usb device reference hasn't been acquired
14 LDV_USB_DEV_ACQUIRED = 1, // Usb device reference acquired
15 LDV_USB_DEV_INCREASED = 2 // Usb device reference counter increased
16 };
17
18 /* LDV_COMMENT_OTHER The model automaton state (one of three possible ones). */
19 int ldv_usb_dev_state = LDV_USB_DEV_ZERO_STATE;
20
21 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_interface_to_usbdev') Change state state after acquiring a reference to usb_device. */
22 void ldv_interface_to_usbdev(void)
23 {
24 /* LDV_COMMENT_OTHER Initially we suppose this function is used to acquire a reference to usb_device. */
25 if (ldv_usb_dev_state == LDV_USB_DEV_ZERO_STATE)
26 /* LDV_COMMENT_CHANGE_STATE Usb device reference acquired. */
27 ldv_usb_dev_state = LDV_USB_DEV_ACQUIRED;
28 }
29
30 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_get_dev') Change state after increasing the reference counter with usb_get_dev. */
31 void ldv_usb_get_dev(void)
32 {
33 /* LDV_COMMENT_OTHER Here the reference has surely been acquired somewhere. */
34 if (ldv_usb_dev_state < LDV_USB_DEV_ACQUIRED) {
35 /* LDV_COMMENT_CHANGE_STATE The reference has already been acquired. */
36 ldv_usb_dev_state = LDV_USB_DEV_ACQUIRED;
37 }
38 /* LDV_COMMENT_CHANGE_STATE Increase reference counter. */
39 ldv_usb_dev_state++;
40 }
41
42 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_put_dev') Change state after decreasing the reference counter with usb_put_dev. */
43 void ldv_usb_put_dev(void)
44 {
45 /* LDV_COMMENT_ASSERT Check usb device reference counter has been increased. */
46 ldv_assert(ldv_usb_dev_state >= LDV_USB_DEV_INCREASED);
47 /* LDV_COMMENT_CHANGE_STATE Decrease reference counter. */
48 ldv_usb_dev_state--;
49 /* LDV_COMMENT_OTHER LDV_USB_DEV_ACQUIRED is special (for when the one has forgotten to increase the counter). Not intednded to be used here. */
50 if (ldv_usb_dev_state == LDV_USB_DEV_ACQUIRED) {
51 /* LDV_COMMENT_CHANGE_STATE Re-zero the model variable. */
52 ldv_usb_dev_state = LDV_USB_DEV_ZERO_STATE;
53 }
54 }
55
56 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_return_value_probe') Check the probe function left the model in the proper state. */
57 void ldv_check_return_value_probe(int retval)
58 {
59 /* LDV_COMMENT_OTHER Probe finished unsuccessfully and returned an error. */
60 if (retval) {
61 /* LDV_COMMENT_ASSERT Check usb device reference counter is not increased. */
62 ldv_assert(ldv_usb_dev_state < LDV_USB_DEV_INCREASED);
63 /* LDV_COMMENT_OTHER LDV_USB_DEV_ACQUIRED is special (for when the one has forgotten to increase the counter). Not this case. */
64 if (ldv_usb_dev_state == LDV_USB_DEV_ACQUIRED)
65 /* LDV_COMMENT_CHANGE_STATE Re-zero the model variable. */
66 ldv_usb_dev_state = LDV_USB_DEV_ZERO_STATE;
67 } // else /* LDV_COMMENT_OTHER Probe finished successfully and returned 0. */
68 // /* LDV_COMMENT_ASSERT Check usb device reference counter is not acquired or has been increased. */
69 // ldv_assert(ldv_usb_dev_state != LDV_USB_DEV_ACQUIRED);
70 }
71
72 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that usb device reference hasn't been acquired or the counter has been decreased. */
73 void ldv_check_final_state(void)
74 {
/* Called on module unload: a counter still at or above LDV_USB_DEV_INCREASED
   here means a usb_device reference taken earlier was never released. */
75 /* LDV_COMMENT_ASSERT Check that usb device reference hasn't been acquired or the counter has been decreased. */
76 ldv_assert(ldv_usb_dev_state < LDV_USB_DEV_INCREASED);
77 } 1 #ifndef _LDV_RCV_H_
2 #define _LDV_RCV_H_
3
4 /* If expr evaluates to zero, ldv_assert() causes a program to reach the error
5 label like the standard assert(). */
6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
7
8 /* The error label wrapper. It is used because of some static verifiers (like
9 BLAST) don't accept multiple error labels through a program. */
10 static inline void ldv_error(void)
11 {
/* Deliberate self-loop on a named label: per the header comment above, some
   static verifiers accept only one error label per program, so reaching
   LDV_ERROR is the single modeled "assertion failed" location. The label
   name is significant to the tooling — do not rename it. */
12 LDV_ERROR: goto LDV_ERROR;
13 }
14
15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
16 avoided by verifiers. */
17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
18
19 /* Infinite loop, that causes verifiers to skip such paths. */
20 static inline void ldv_stop(void) {
/* Deliberate infinite loop: a path that reaches LDV_STOP never terminates,
   so verifiers prune it — this is how ldv_assume() discards infeasible paths. */
21 LDV_STOP: goto LDV_STOP;
22 }
23
24 /* Special nondeterministic functions. */
25 int ldv_undef_int(void);
26 void *ldv_undef_ptr(void);
27 unsigned long ldv_undef_ulong(void);
28 /* Return nondeterministic negative integer number. */
/* Return a nondeterministic, strictly negative integer: draw an arbitrary
 * value and constrain the verifier to the negative outcomes only. */
static inline int ldv_undef_int_negative(void)
{
	int value = ldv_undef_int();

	ldv_assume(value < 0);
	return value;
}
37 /* Return nondeterministic nonpositive integer number. */
/* Return a nondeterministic, nonpositive (<= 0) integer: draw an arbitrary
 * value and constrain the verifier to the nonpositive outcomes only. */
static inline int ldv_undef_int_nonpositive(void)
{
	int value = ldv_undef_int();

	ldv_assume(value <= 0);
	return value;
}
46
47 /* Add explicit model for __builin_expect GCC function. Without the model a
48 return value will be treated as nondetermined by verifiers. */
49 long __builtin_expect(long exp, long c)
50 {
/* The branch-prediction hint (c) is irrelevant to verification; returning
   exp unchanged keeps the value deterministic instead of nondetermined. */
51 return exp;
52 }
53
54 /* This function causes the program to exit abnormally. GCC implements this
55 function by using a target-dependent mechanism (such as intentionally executing
56 an illegal instruction) or by calling abort. The mechanism used may vary from
57 release to release so you should not rely on any particular implementation.
58 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
59 void __builtin_trap(void)
60 {
/* Model abnormal termination as reaching the single LDV error location. */
61 ldv_assert(0);
62 }
63
64 /* The constant is for simulating an error of ldv_undef_ptr() function. */
65 #define LDV_PTR_MAX 2012
66
67 #endif /* _LDV_RCV_H_ */ 1 /*
2 * device.h - generic, centralized driver model
3 *
4 * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
5 * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
6 * Copyright (c) 2008-2009 Novell Inc.
7 *
8 * This file is released under the GPLv2
9 *
10 * See Documentation/driver-model/ for more information.
11 */
12
13 #ifndef _DEVICE_H_
14 #define _DEVICE_H_
15
16 #include <linux/ioport.h>
17 #include <linux/kobject.h>
18 #include <linux/klist.h>
19 #include <linux/list.h>
20 #include <linux/lockdep.h>
21 #include <linux/compiler.h>
22 #include <linux/types.h>
23 #include <linux/mutex.h>
24 #include <linux/pinctrl/devinfo.h>
25 #include <linux/pm.h>
26 #include <linux/atomic.h>
27 #include <linux/ratelimit.h>
28 #include <linux/uidgid.h>
29 #include <linux/gfp.h>
30 #include <asm/device.h>
31
32 struct device;
33 struct device_private;
34 struct device_driver;
35 struct driver_private;
36 struct module;
37 struct class;
38 struct subsys_private;
39 struct bus_type;
40 struct device_node;
41 struct iommu_ops;
42 struct iommu_group;
43
44 struct bus_attribute {
45 struct attribute attr;
46 ssize_t (*show)(struct bus_type *bus, char *buf);
47 ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
48 };
49
50 #define BUS_ATTR(_name, _mode, _show, _store) \
51 struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store)
52 #define BUS_ATTR_RW(_name) \
53 struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
54 #define BUS_ATTR_RO(_name) \
55 struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
56
57 extern int __must_check bus_create_file(struct bus_type *,
58 struct bus_attribute *);
59 extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
60
61 /**
62 * struct bus_type - The bus type of the device
63 *
64 * @name: The name of the bus.
65 * @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id).
66 * @dev_root: Default device to use as the parent.
67 * @dev_attrs: Default attributes of the devices on the bus.
68 * @bus_groups: Default attributes of the bus.
69 * @dev_groups: Default attributes of the devices on the bus.
70 * @drv_groups: Default attributes of the device drivers on the bus.
71 * @match: Called, perhaps multiple times, whenever a new device or driver
72 * is added for this bus. It should return a nonzero value if the
73 * given device can be handled by the given driver.
74 * @uevent: Called when a device is added, removed, or a few other things
75 * that generate uevents to add the environment variables.
76 * @probe: Called when a new device or driver add to this bus, and callback
77 * the specific driver's probe to initial the matched device.
78 * @remove: Called when a device removed from this bus.
79 * @shutdown: Called at shut-down time to quiesce the device.
80 *
81 * @online: Called to put the device back online (after offlining it).
82 * @offline: Called to put the device offline for hot-removal. May fail.
83 *
84 * @suspend: Called when a device on this bus wants to go to sleep mode.
85 * @resume: Called to bring a device on this bus out of sleep mode.
86 * @pm: Power management operations of this bus, callback the specific
87 * device driver's pm-ops.
88 * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
89 * driver implementations to a bus and allow the driver to do
90 * bus-specific setup
91 * @p: The private data of the driver core, only the driver core can
92 * touch this.
93 * @lock_key: Lock class key for use by the lock validator
94 *
95 * A bus is a channel between the processor and one or more devices. For the
96 * purposes of the device model, all devices are connected via a bus, even if
97 * it is an internal, virtual, "platform" bus. Buses can plug into each other.
98 * A USB controller is usually a PCI device, for example. The device model
99 * represents the actual connections between buses and the devices they control.
100 * A bus is represented by the bus_type structure. It contains the name, the
101 * default attributes, the bus' methods, PM operations, and the driver core's
102 * private data.
103 */
104 struct bus_type {
105 const char *name;
106 const char *dev_name;
107 struct device *dev_root;
108 struct device_attribute *dev_attrs; /* use dev_groups instead */
109 const struct attribute_group **bus_groups;
110 const struct attribute_group **dev_groups;
111 const struct attribute_group **drv_groups;
112
113 int (*match)(struct device *dev, struct device_driver *drv);
114 int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
115 int (*probe)(struct device *dev);
116 int (*remove)(struct device *dev);
117 void (*shutdown)(struct device *dev);
118
119 int (*online)(struct device *dev);
120 int (*offline)(struct device *dev);
121
122 int (*suspend)(struct device *dev, pm_message_t state);
123 int (*resume)(struct device *dev);
124
125 const struct dev_pm_ops *pm;
126
127 struct iommu_ops *iommu_ops;
128
129 struct subsys_private *p;
130 struct lock_class_key lock_key;
131 };
132
133 extern int __must_check bus_register(struct bus_type *bus);
134
135 extern void bus_unregister(struct bus_type *bus);
136
137 extern int __must_check bus_rescan_devices(struct bus_type *bus);
138
139 /* iterator helpers for buses */
140 struct subsys_dev_iter {
141 struct klist_iter ki;
142 const struct device_type *type;
143 };
144 void subsys_dev_iter_init(struct subsys_dev_iter *iter,
145 struct bus_type *subsys,
146 struct device *start,
147 const struct device_type *type);
148 struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
149 void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
150
151 int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
152 int (*fn)(struct device *dev, void *data));
153 struct device *bus_find_device(struct bus_type *bus, struct device *start,
154 void *data,
155 int (*match)(struct device *dev, void *data));
156 struct device *bus_find_device_by_name(struct bus_type *bus,
157 struct device *start,
158 const char *name);
159 struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id,
160 struct device *hint);
161 int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
162 void *data, int (*fn)(struct device_driver *, void *));
163 void bus_sort_breadthfirst(struct bus_type *bus,
164 int (*compare)(const struct device *a,
165 const struct device *b));
166 /*
167 * Bus notifiers: Get notified of addition/removal of devices
168 * and binding/unbinding of drivers to devices.
169 * In the long run, it should be a replacement for the platform
170 * notify hooks.
171 */
172 struct notifier_block;
173
174 extern int bus_register_notifier(struct bus_type *bus,
175 struct notifier_block *nb);
176 extern int bus_unregister_notifier(struct bus_type *bus,
177 struct notifier_block *nb);
178
179 /* All 4 notifiers below get called with the target struct device *
180 * as an argument. Note that those functions are likely to be called
181 * with the device lock held in the core, so be careful.
182 */
183 #define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */
184 #define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device removed */
185 #define BUS_NOTIFY_BIND_DRIVER 0x00000003 /* driver about to be
186 bound */
187 #define BUS_NOTIFY_BOUND_DRIVER 0x00000004 /* driver bound to device */
188 #define BUS_NOTIFY_UNBIND_DRIVER 0x00000005 /* driver about to be
189 unbound */
190 #define BUS_NOTIFY_UNBOUND_DRIVER 0x00000006 /* driver is unbound
191 from the device */
192
193 extern struct kset *bus_get_kset(struct bus_type *bus);
194 extern struct klist *bus_get_device_klist(struct bus_type *bus);
195
196 /**
197 * struct device_driver - The basic device driver structure
198 * @name: Name of the device driver.
199 * @bus: The bus which the device of this driver belongs to.
200 * @owner: The module owner.
201 * @mod_name: Used for built-in modules.
202 * @suppress_bind_attrs: Disables bind/unbind via sysfs.
203 * @of_match_table: The open firmware table.
204 * @acpi_match_table: The ACPI match table.
205 * @probe: Called to query the existence of a specific device,
206 * whether this driver can work with it, and bind the driver
207 * to a specific device.
208 * @remove: Called when the device is removed from the system to
209 * unbind a device from this driver.
210 * @shutdown: Called at shut-down time to quiesce the device.
211 * @suspend: Called to put the device to sleep mode. Usually to a
212 * low power state.
213 * @resume: Called to bring a device from sleep mode.
214 * @groups: Default attributes that get created by the driver core
215 * automatically.
216 * @pm: Power management operations of the device which matched
217 * this driver.
218 * @p: Driver core's private data, no one other than the driver
219 * core can touch this.
220 *
221 * The device driver-model tracks all of the drivers known to the system.
222 * The main reason for this tracking is to enable the driver core to match
223 * up drivers with new devices. Once drivers are known objects within the
224 * system, however, a number of other things become possible. Device drivers
225 * can export information and configuration variables that are independent
226 * of any specific device.
227 */
228 struct device_driver {
229 const char *name;
230 struct bus_type *bus;
231
232 struct module *owner;
233 const char *mod_name; /* used for built-in modules */
234
235 bool suppress_bind_attrs; /* disables bind/unbind via sysfs */
236
237 const struct of_device_id *of_match_table;
238 const struct acpi_device_id *acpi_match_table;
239
240 int (*probe) (struct device *dev);
241 int (*remove) (struct device *dev);
242 void (*shutdown) (struct device *dev);
243 int (*suspend) (struct device *dev, pm_message_t state);
244 int (*resume) (struct device *dev);
245 const struct attribute_group **groups;
246
247 const struct dev_pm_ops *pm;
248
249 struct driver_private *p;
250 };
251
252
253 extern int __must_check driver_register(struct device_driver *drv);
254 extern void driver_unregister(struct device_driver *drv);
255
256 extern struct device_driver *driver_find(const char *name,
257 struct bus_type *bus);
258 extern int driver_probe_done(void);
259 extern void wait_for_device_probe(void);
260
261
262 /* sysfs interface for exporting driver attributes */
263
264 struct driver_attribute {
265 struct attribute attr;
266 ssize_t (*show)(struct device_driver *driver, char *buf);
267 ssize_t (*store)(struct device_driver *driver, const char *buf,
268 size_t count);
269 };
270
271 #define DRIVER_ATTR(_name, _mode, _show, _store) \
272 struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store)
273 #define DRIVER_ATTR_RW(_name) \
274 struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
275 #define DRIVER_ATTR_RO(_name) \
276 struct driver_attribute driver_attr_##_name = __ATTR_RO(_name)
277 #define DRIVER_ATTR_WO(_name) \
278 struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)
279
280 extern int __must_check driver_create_file(struct device_driver *driver,
281 const struct driver_attribute *attr);
282 extern void driver_remove_file(struct device_driver *driver,
283 const struct driver_attribute *attr);
284
285 extern int __must_check driver_for_each_device(struct device_driver *drv,
286 struct device *start,
287 void *data,
288 int (*fn)(struct device *dev,
289 void *));
290 struct device *driver_find_device(struct device_driver *drv,
291 struct device *start, void *data,
292 int (*match)(struct device *dev, void *data));
293
294 /**
295 * struct subsys_interface - interfaces to device functions
296 * @name: name of the device function
297 * @subsys: subsystem of the devices to attach to
298 * @node: the list of functions registered at the subsystem
299 * @add_dev: device hookup to device function handler
300 * @remove_dev: device hookup to device function handler
301 *
302 * Simple interfaces attached to a subsystem. Multiple interfaces can
303 * attach to a subsystem and its devices. Unlike drivers, they do not
304 * exclusively claim or control devices. Interfaces usually represent
305 * a specific functionality of a subsystem/class of devices.
306 */
307 struct subsys_interface {
308 const char *name;
309 struct bus_type *subsys;
310 struct list_head node;
311 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
312 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
313 };
314
315 int subsys_interface_register(struct subsys_interface *sif);
316 void subsys_interface_unregister(struct subsys_interface *sif);
317
318 int subsys_system_register(struct bus_type *subsys,
319 const struct attribute_group **groups);
320 int subsys_virtual_register(struct bus_type *subsys,
321 const struct attribute_group **groups);
322
323 /**
324 * struct class - device classes
325 * @name: Name of the class.
326 * @owner: The module owner.
327 * @class_attrs: Default attributes of this class.
328 * @dev_groups: Default attributes of the devices that belong to the class.
329 * @dev_kobj: The kobject that represents this class and links it into the hierarchy.
330 * @dev_uevent: Called when a device is added, removed from this class, or a
331 * few other things that generate uevents to add the environment
332 * variables.
333 * @devnode: Callback to provide the devtmpfs.
334 * @class_release: Called to release this class.
335 * @dev_release: Called to release the device.
336 * @suspend: Used to put the device to sleep mode, usually to a low power
337 * state.
338 * @resume: Used to bring the device from the sleep mode.
339 * @ns_type: Callbacks so sysfs can determine namespaces.
340 * @namespace: Namespace of the device belongs to this class.
341 * @pm: The default device power management operations of this class.
342 * @p: The private data of the driver core, no one other than the
343 * driver core can touch this.
344 *
345 * A class is a higher-level view of a device that abstracts out low-level
346 * implementation details. Drivers may see a SCSI disk or an ATA disk, but,
347 * at the class level, they are all simply disks. Classes allow user space
348 * to work with devices based on what they do, rather than how they are
349 * connected or how they work.
350 */
351 struct class {
352 const char *name;
353 struct module *owner;
354
355 struct class_attribute *class_attrs;
356 const struct attribute_group **dev_groups;
357 struct kobject *dev_kobj;
358
359 int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
360 char *(*devnode)(struct device *dev, umode_t *mode);
361
362 void (*class_release)(struct class *class);
363 void (*dev_release)(struct device *dev);
364
365 int (*suspend)(struct device *dev, pm_message_t state);
366 int (*resume)(struct device *dev);
367
368 const struct kobj_ns_type_operations *ns_type;
369 const void *(*namespace)(struct device *dev);
370
371 const struct dev_pm_ops *pm;
372
373 struct subsys_private *p;
374 };
375
376 struct class_dev_iter {
377 struct klist_iter ki;
378 const struct device_type *type;
379 };
380
381 extern struct kobject *sysfs_dev_block_kobj;
382 extern struct kobject *sysfs_dev_char_kobj;
383 extern int __must_check __class_register(struct class *class,
384 struct lock_class_key *key);
385 extern void class_unregister(struct class *class);
386
387 /* This is a #define to keep the compiler from merging different
388 * instances of the __key variable */
389 #define class_register(class) \
390 ({ \
391 static struct lock_class_key __key; \
392 __class_register(class, &__key); \
393 })
394
395 struct class_compat;
396 struct class_compat *class_compat_register(const char *name);
397 void class_compat_unregister(struct class_compat *cls);
398 int class_compat_create_link(struct class_compat *cls, struct device *dev,
399 struct device *device_link);
400 void class_compat_remove_link(struct class_compat *cls, struct device *dev,
401 struct device *device_link);
402
403 extern void class_dev_iter_init(struct class_dev_iter *iter,
404 struct class *class,
405 struct device *start,
406 const struct device_type *type);
407 extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
408 extern void class_dev_iter_exit(struct class_dev_iter *iter);
409
410 extern int class_for_each_device(struct class *class, struct device *start,
411 void *data,
412 int (*fn)(struct device *dev, void *data));
413 extern struct device *class_find_device(struct class *class,
414 struct device *start, const void *data,
415 int (*match)(struct device *, const void *));
416
417 struct class_attribute {
418 struct attribute attr;
419 ssize_t (*show)(struct class *class, struct class_attribute *attr,
420 char *buf);
421 ssize_t (*store)(struct class *class, struct class_attribute *attr,
422 const char *buf, size_t count);
423 };
424
425 #define CLASS_ATTR(_name, _mode, _show, _store) \
426 struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store)
427 #define CLASS_ATTR_RW(_name) \
428 struct class_attribute class_attr_##_name = __ATTR_RW(_name)
429 #define CLASS_ATTR_RO(_name) \
430 struct class_attribute class_attr_##_name = __ATTR_RO(_name)
431
432 extern int __must_check class_create_file_ns(struct class *class,
433 const struct class_attribute *attr,
434 const void *ns);
435 extern void class_remove_file_ns(struct class *class,
436 const struct class_attribute *attr,
437 const void *ns);
438
439 static inline int __must_check class_create_file(struct class *class,
440 const struct class_attribute *attr)
441 {
/* Convenience wrapper: create the class attribute in the default (NULL)
   sysfs namespace. */
442 return class_create_file_ns(class, attr, NULL);
443 }
444
445 static inline void class_remove_file(struct class *class,
446 const struct class_attribute *attr)
447 {
/* Convenience wrapper: remove the class attribute from the default (NULL)
   sysfs namespace. */
448 return class_remove_file_ns(class, attr, NULL);
449 }
450
451 /* Simple class attribute that is just a static string */
452 struct class_attribute_string {
453 struct class_attribute attr;
454 char *str;
455 };
456
457 /* Currently read-only only */
458 #define _CLASS_ATTR_STRING(_name, _mode, _str) \
459 { __ATTR(_name, _mode, show_class_attr_string, NULL), _str }
460 #define CLASS_ATTR_STRING(_name, _mode, _str) \
461 struct class_attribute_string class_attr_##_name = \
462 _CLASS_ATTR_STRING(_name, _mode, _str)
463
464 extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr,
465 char *buf);
466
467 struct class_interface {
468 struct list_head node;
469 struct class *class;
470
471 int (*add_dev) (struct device *, struct class_interface *);
472 void (*remove_dev) (struct device *, struct class_interface *);
473 };
474
475 extern int __must_check class_interface_register(struct class_interface *);
476 extern void class_interface_unregister(struct class_interface *);
477
478 extern struct class * __must_check __class_create(struct module *owner,
479 const char *name,
480 struct lock_class_key *key);
481 extern void class_destroy(struct class *cls);
482
483 /* This is a #define to keep the compiler from merging different
484 * instances of the __key variable */
485 #define class_create(owner, name) \
486 ({ \
487 static struct lock_class_key __key; \
488 __class_create(owner, name, &__key); \
489 })
490
491 /*
492 * The type of device, "struct device" is embedded in. A class
493 * or bus can contain devices of different types
494 * like "partitions" and "disks", "mouse" and "event".
495 * This identifies the device type and carries type-specific
496 * information, equivalent to the kobj_type of a kobject.
497 * If "name" is specified, the uevent will contain it in
498 * the DEVTYPE variable.
499 */
500 struct device_type {
501 const char *name;
502 const struct attribute_group **groups;
503 int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
504 char *(*devnode)(struct device *dev, umode_t *mode,
505 kuid_t *uid, kgid_t *gid);
506 void (*release)(struct device *dev);
507
508 const struct dev_pm_ops *pm;
509 };
510
511 /* interface for exporting device attributes */
512 struct device_attribute {
513 struct attribute attr;
514 ssize_t (*show)(struct device *dev, struct device_attribute *attr,
515 char *buf);
516 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
517 const char *buf, size_t count);
518 };
519
520 struct dev_ext_attribute {
521 struct device_attribute attr;
522 void *var;
523 };
524
525 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
526 char *buf);
527 ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr,
528 const char *buf, size_t count);
529 ssize_t device_show_int(struct device *dev, struct device_attribute *attr,
530 char *buf);
531 ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
532 const char *buf, size_t count);
533 ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
534 char *buf);
535 ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
536 const char *buf, size_t count);
537
538 #define DEVICE_ATTR(_name, _mode, _show, _store) \
539 struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
540 #define DEVICE_ATTR_RW(_name) \
541 struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
542 #define DEVICE_ATTR_RO(_name) \
543 struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
544 #define DEVICE_ATTR_WO(_name) \
545 struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
546 #define DEVICE_ULONG_ATTR(_name, _mode, _var) \
547 struct dev_ext_attribute dev_attr_##_name = \
548 { __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
549 #define DEVICE_INT_ATTR(_name, _mode, _var) \
550 struct dev_ext_attribute dev_attr_##_name = \
551 { __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }
552 #define DEVICE_BOOL_ATTR(_name, _mode, _var) \
553 struct dev_ext_attribute dev_attr_##_name = \
554 { __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }
555 #define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
556 struct device_attribute dev_attr_##_name = \
557 __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
558
559 extern int device_create_file(struct device *device,
560 const struct device_attribute *entry);
561 extern void device_remove_file(struct device *dev,
562 const struct device_attribute *attr);
563 extern bool device_remove_file_self(struct device *dev,
564 const struct device_attribute *attr);
565 extern int __must_check device_create_bin_file(struct device *dev,
566 const struct bin_attribute *attr);
567 extern void device_remove_bin_file(struct device *dev,
568 const struct bin_attribute *attr);
569
570 /* device resource management */
571 typedef void (*dr_release_t)(struct device *dev, void *res);
572 typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
573
574 #ifdef CONFIG_DEBUG_DEVRES
575 extern void *__devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
576 const char *name);
577 #define devres_alloc(release, size, gfp) \
578 __devres_alloc(release, size, gfp, #release)
579 #else
580 extern void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp);
581 #endif
582 extern void devres_for_each_res(struct device *dev, dr_release_t release,
583 dr_match_t match, void *match_data,
584 void (*fn)(struct device *, void *, void *),
585 void *data);
586 extern void devres_free(void *res);
587 extern void devres_add(struct device *dev, void *res);
588 extern void *devres_find(struct device *dev, dr_release_t release,
589 dr_match_t match, void *match_data);
590 extern void *devres_get(struct device *dev, void *new_res,
591 dr_match_t match, void *match_data);
592 extern void *devres_remove(struct device *dev, dr_release_t release,
593 dr_match_t match, void *match_data);
594 extern int devres_destroy(struct device *dev, dr_release_t release,
595 dr_match_t match, void *match_data);
596 extern int devres_release(struct device *dev, dr_release_t release,
597 dr_match_t match, void *match_data);
598
599 /* devres group */
600 extern void * __must_check devres_open_group(struct device *dev, void *id,
601 gfp_t gfp);
602 extern void devres_close_group(struct device *dev, void *id);
603 extern void devres_remove_group(struct device *dev, void *id);
604 extern int devres_release_group(struct device *dev, void *id);
605
606 /* managed devm_k.alloc/kfree for device drivers */
607 extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
608 static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
609 {
610 return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
611 }
612 static inline void *devm_kmalloc_array(struct device *dev,
613 size_t n, size_t size, gfp_t flags)
614 {
615 if (size != 0 && n > SIZE_MAX / size)
616 return NULL;
617 return devm_kmalloc(dev, n * size, flags);
618 }
619 static inline void *devm_kcalloc(struct device *dev,
620 size_t n, size_t size, gfp_t flags)
621 {
622 return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
623 }
624 extern void devm_kfree(struct device *dev, void *p);
625 extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp);
626 extern void *devm_kmemdup(struct device *dev, const void *src, size_t len,
627 gfp_t gfp);
628
629 extern unsigned long devm_get_free_pages(struct device *dev,
630 gfp_t gfp_mask, unsigned int order);
631 extern void devm_free_pages(struct device *dev, unsigned long addr);
632
633 void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
634 void __iomem *devm_request_and_ioremap(struct device *dev,
635 struct resource *res);
636
637 /* allows to add/remove a custom action to devres stack */
638 int devm_add_action(struct device *dev, void (*action)(void *), void *data);
639 void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
640
/* Per-device DMA constraints consumed by the IOMMU / scatter-gather code. */
struct device_dma_parameters {
	/*
	 * a low level driver may set these to teach IOMMU code about
	 * sg limitations.
	 */
	unsigned int max_segment_size;     /* largest allowed sg segment, bytes */
	unsigned long segment_boundary_mask; /* segments must not cross this boundary */
};
649
650 struct acpi_device;
651
/* Link from a struct device to its ACPI companion (empty when !CONFIG_ACPI). */
struct acpi_dev_node {
#ifdef CONFIG_ACPI
	struct acpi_device *companion; /* the ACPI device this device shadows */
#endif
};
657
658 /**
659 * struct device - The basic device structure
660 * @parent: The device's "parent" device, the device to which it is attached.
661 * In most cases, a parent device is some sort of bus or host
662 * controller. If parent is NULL, the device, is a top-level device,
663 * which is not usually what you want.
664 * @p: Holds the private data of the driver core portions of the device.
665 * See the comment of the struct device_private for detail.
666 * @kobj: A top-level, abstract class from which other classes are derived.
667 * @init_name: Initial name of the device.
668 * @type: The type of device.
669 * This identifies the device type and carries type-specific
670 * information.
671 * @mutex: Mutex to synchronize calls to its driver.
672 * @bus: Type of bus device is on.
673 * @driver: Which driver has allocated this
674 * @platform_data: Platform data specific to the device.
675 * Example: For devices on custom boards, as typical of embedded
676 * and SOC based hardware, Linux often uses platform_data to point
677 * to board-specific structures describing devices and how they
678 * are wired. That can include what ports are available, chip
679 * variants, which GPIO pins act in what additional roles, and so
680 * on. This shrinks the "Board Support Packages" (BSPs) and
681 * minimizes board-specific #ifdefs in drivers.
682 * @driver_data: Private pointer for driver specific info.
683 * @power: For device power management.
684 * See Documentation/power/devices.txt for details.
685 * @pm_domain: Provide callbacks that are executed during system suspend,
686 * hibernation, system resume and during runtime PM transitions
687 * along with subsystem-level and driver-level callbacks.
688 * @pins: For device pin management.
689 * See Documentation/pinctrl.txt for details.
690 * @numa_node: NUMA node this device is close to.
691 * @dma_mask: Dma mask (if dma'ble device).
692 * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
693 * hardware supports 64-bit addresses for consistent allocations
694 * such descriptors.
695 * @dma_pfn_offset: offset of DMA memory range relatively of RAM
696 * @dma_parms: A low level driver may set these to teach IOMMU code about
697 * segment limitations.
698 * @dma_pools: Dma pools (if dma'ble device).
699 * @dma_mem: Internal for coherent mem override.
700 * @cma_area: Contiguous memory area for dma allocations
701 * @archdata: For arch-specific additions.
702 * @of_node: Associated device tree node.
703 * @acpi_node: Associated ACPI device node.
704 * @devt: For creating the sysfs "dev".
705 * @id: device instance
706 * @devres_lock: Spinlock to protect the resource of the device.
707 * @devres_head: The resources list of the device.
708 * @knode_class: The node used to add the device to the class list.
709 * @class: The class of the device.
710 * @groups: Optional attribute groups.
711 * @release: Callback to free the device after all references have
712 * gone away. This should be set by the allocator of the
713 * device (i.e. the bus driver that discovered the device).
714 * @iommu_group: IOMMU group the device belongs to.
715 *
716 * @offline_disabled: If set, the device is permanently online.
717 * @offline: Set after successful invocation of bus type's .offline().
718 *
719 * At the lowest level, every device in a Linux system is represented by an
720 * instance of struct device. The device structure contains the information
721 * that the device model core needs to model the system. Most subsystems,
722 * however, track additional information about the devices they host. As a
723 * result, it is rare for devices to be represented by bare device structures;
724 * instead, that structure, like kobject structures, is usually embedded within
725 * a higher-level representation of the device.
726 */
struct device {
	struct device *parent;

	struct device_private *p;

	struct kobject kobj;
	const char *init_name; /* initial name of the device */
	const struct device_type *type;

	struct mutex mutex; /* mutex to synchronize calls to
			     * its driver.
			     */

	struct bus_type *bus; /* type of bus device is on */
	struct device_driver *driver; /* which driver has allocated this
					 device */
	void *platform_data; /* Platform specific data, device
				core doesn't touch it */
	void *driver_data; /* Driver data, set and get with
			      dev_set/get_drvdata */
	struct dev_pm_info power;
	struct dev_pm_domain *pm_domain;

#ifdef CONFIG_PINCTRL
	struct dev_pin_info *pins;
#endif

#ifdef CONFIG_NUMA
	int numa_node; /* NUMA node this device is close to */
#endif
	u64 *dma_mask; /* dma mask (if dma'able device) */
	u64 coherent_dma_mask; /* Like dma_mask, but for
				  alloc_coherent mappings as
				  not all hardware supports
				  64 bit addresses for consistent
				  allocations such descriptors. */
	unsigned long dma_pfn_offset;

	struct device_dma_parameters *dma_parms;

	struct list_head dma_pools; /* dma pools (if dma'ble) */

	struct dma_coherent_mem *dma_mem; /* internal for coherent mem
					     override */
#ifdef CONFIG_DMA_CMA
	struct cma *cma_area; /* contiguous memory area for dma
				 allocations */
#endif
	/* arch specific additions */
	struct dev_archdata archdata;

	struct device_node *of_node; /* associated device tree node */
	struct acpi_dev_node acpi_node; /* associated ACPI device node */

	dev_t devt; /* dev_t, creates the sysfs "dev" */
	u32 id; /* device instance */

	spinlock_t devres_lock; /* protects devres_head */
	struct list_head devres_head; /* managed (devres) resources */

	struct klist_node knode_class; /* node on the class's device list */
	struct class *class;
	const struct attribute_group **groups; /* optional groups */

	void (*release)(struct device *dev); /* frees the device after the
						last reference is gone */
	struct iommu_group *iommu_group;

	bool offline_disabled:1; /* if set, device is permanently online */
	bool offline:1; /* set after a successful bus .offline() */
};
797
/* kobj_to_dev - map a kobject embedded in a struct device back to the device */
static inline struct device *kobj_to_dev(struct kobject *kobj)
{
	return container_of(kobj, struct device, kobj);
}
802
803 /* Get the wakeup routines, which depend on struct device */
804 #include <linux/pm_wakeup.h>
805
806 static inline const char *dev_name(const struct device *dev)
807 {
808 /* Use the init name until the kobject becomes available */
809 if (dev->init_name)
810 return dev->init_name;
811
812 return kobject_name(&dev->kobj);
813 }
814
815 extern __printf(2, 3)
816 int dev_set_name(struct device *dev, const char *name, ...);
817
#ifdef CONFIG_NUMA
/* dev_to_node - NUMA node this device is close to */
static inline int dev_to_node(struct device *dev)
{
	return dev->numa_node;
}
/* set_dev_node - record the NUMA node for @dev */
static inline void set_dev_node(struct device *dev, int node)
{
	dev->numa_node = node;
}
#else
/* !CONFIG_NUMA: no node field exists; report NUMA_NO_NODE (-1), ignore sets. */
static inline int dev_to_node(struct device *dev)
{
	return -1;
}
static inline void set_dev_node(struct device *dev, int node)
{
}
#endif
836
837 static inline void *dev_get_drvdata(const struct device *dev)
838 {
839 return dev->driver_data;
840 }
841
842 static inline void dev_set_drvdata(struct device *dev, void *data)
843 {
844 dev->driver_data = data;
845 }
846
847 static inline struct pm_subsys_data *dev_to_psd(struct device *dev)
848 {
849 return dev ? dev->power.subsys_data : NULL;
850 }
851
852 static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
853 {
854 return dev->kobj.uevent_suppress;
855 }
856
857 static inline void dev_set_uevent_suppress(struct device *dev, int val)
858 {
859 dev->kobj.uevent_suppress = val;
860 }
861
862 static inline int device_is_registered(struct device *dev)
863 {
864 return dev->kobj.state_in_sysfs;
865 }
866
867 static inline void device_enable_async_suspend(struct device *dev)
868 {
869 if (!dev->power.is_prepared)
870 dev->power.async_suspend = true;
871 }
872
873 static inline void device_disable_async_suspend(struct device *dev)
874 {
875 if (!dev->power.is_prepared)
876 dev->power.async_suspend = false;
877 }
878
879 static inline bool device_async_suspend_enabled(struct device *dev)
880 {
881 return !!dev->power.async_suspend;
882 }
883
884 static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
885 {
886 dev->power.ignore_children = enable;
887 }
888
/* dev_pm_syscore_device - mark @dev as a syscore device (CONFIG_PM_SLEEP only;
 * a no-op otherwise since power.syscore does not exist then). */
static inline void dev_pm_syscore_device(struct device *dev, bool val)
{
#ifdef CONFIG_PM_SLEEP
	dev->power.syscore = val;
#endif
}
895
896 static inline void device_lock(struct device *dev)
897 {
898 mutex_lock(&dev->mutex);
899 }
900
901 static inline int device_trylock(struct device *dev)
902 {
903 return mutex_trylock(&dev->mutex);
904 }
905
906 static inline void device_unlock(struct device *dev)
907 {
908 mutex_unlock(&dev->mutex);
909 }
910
911 void driver_init(void);
912
913 /*
914 * High level routines for use by the bus drivers
915 */
916 extern int __must_check device_register(struct device *dev);
917 extern void device_unregister(struct device *dev);
918 extern void device_initialize(struct device *dev);
919 extern int __must_check device_add(struct device *dev);
920 extern void device_del(struct device *dev);
921 extern int device_for_each_child(struct device *dev, void *data,
922 int (*fn)(struct device *dev, void *data));
923 extern struct device *device_find_child(struct device *dev, void *data,
924 int (*match)(struct device *dev, void *data));
925 extern int device_rename(struct device *dev, const char *new_name);
926 extern int device_move(struct device *dev, struct device *new_parent,
927 enum dpm_order dpm_order);
928 extern const char *device_get_devnode(struct device *dev,
929 umode_t *mode, kuid_t *uid, kgid_t *gid,
930 const char **tmp);
931
932 static inline bool device_supports_offline(struct device *dev)
933 {
934 return dev->bus && dev->bus->offline && dev->bus->online;
935 }
936
937 extern void lock_device_hotplug(void);
938 extern void unlock_device_hotplug(void);
939 extern int lock_device_hotplug_sysfs(void);
940 extern int device_offline(struct device *dev);
941 extern int device_online(struct device *dev);
942 /*
943 * Root device objects for grouping under /sys/devices
944 */
945 extern struct device *__root_device_register(const char *name,
946 struct module *owner);
947
948 /* This is a macro to avoid include problems with THIS_MODULE */
949 #define root_device_register(name) \
950 __root_device_register(name, THIS_MODULE)
951
952 extern void root_device_unregister(struct device *root);
953
954 static inline void *dev_get_platdata(const struct device *dev)
955 {
956 return dev->platform_data;
957 }
958
959 /*
960 * Manual binding of a device to driver. See drivers/base/bus.c
961 * for information on use.
962 */
963 extern int __must_check device_bind_driver(struct device *dev);
964 extern void device_release_driver(struct device *dev);
965 extern int __must_check device_attach(struct device *dev);
966 extern int __must_check driver_attach(struct device_driver *drv);
967 extern int __must_check device_reprobe(struct device *dev);
968
969 /*
970 * Easy functions for dynamically creating devices on the fly
971 */
972 extern struct device *device_create_vargs(struct class *cls,
973 struct device *parent,
974 dev_t devt,
975 void *drvdata,
976 const char *fmt,
977 va_list vargs);
978 extern __printf(5, 6)
979 struct device *device_create(struct class *cls, struct device *parent,
980 dev_t devt, void *drvdata,
981 const char *fmt, ...);
982 extern __printf(6, 7)
983 struct device *device_create_with_groups(struct class *cls,
984 struct device *parent, dev_t devt, void *drvdata,
985 const struct attribute_group **groups,
986 const char *fmt, ...);
987 extern void device_destroy(struct class *cls, dev_t devt);
988
989 /*
990 * Platform "fixup" functions - allow the platform to have their say
991 * about devices and actions that the general device layer doesn't
992 * know about.
993 */
994 /* Notify platform of device discovery */
995 extern int (*platform_notify)(struct device *dev);
996
997 extern int (*platform_notify_remove)(struct device *dev);
998
999
1000 /*
1001 * get_device - atomically increment the reference count for the device.
1002 *
1003 */
1004 extern struct device *get_device(struct device *dev);
1005 extern void put_device(struct device *dev);
1006
1007 #ifdef CONFIG_DEVTMPFS
1008 extern int devtmpfs_create_node(struct device *dev);
1009 extern int devtmpfs_delete_node(struct device *dev);
1010 extern int devtmpfs_mount(const char *mntdir);
1011 #else
/* !CONFIG_DEVTMPFS: devtmpfs helpers become successful no-ops. */
static inline int devtmpfs_create_node(struct device *dev) { return 0; }
static inline int devtmpfs_delete_node(struct device *dev) { return 0; }
static inline int devtmpfs_mount(const char *mountpoint) { return 0; }
1015 #endif
1016
1017 /* drivers/base/power/shutdown.c */
1018 extern void device_shutdown(void);
1019
1020 /* debugging and troubleshooting/diagnostic helpers. */
1021 extern const char *dev_driver_string(const struct device *dev);
1022
1023
1024 #ifdef CONFIG_PRINTK
1025
1026 extern __printf(3, 0)
1027 int dev_vprintk_emit(int level, const struct device *dev,
1028 const char *fmt, va_list args);
1029 extern __printf(3, 4)
1030 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
1031
1032 extern __printf(3, 4)
1033 int dev_printk(const char *level, const struct device *dev,
1034 const char *fmt, ...);
1035 extern __printf(2, 3)
1036 int dev_emerg(const struct device *dev, const char *fmt, ...);
1037 extern __printf(2, 3)
1038 int dev_alert(const struct device *dev, const char *fmt, ...);
1039 extern __printf(2, 3)
1040 int dev_crit(const struct device *dev, const char *fmt, ...);
1041 extern __printf(2, 3)
1042 int dev_err(const struct device *dev, const char *fmt, ...);
1043 extern __printf(2, 3)
1044 int dev_warn(const struct device *dev, const char *fmt, ...);
1045 extern __printf(2, 3)
1046 int dev_notice(const struct device *dev, const char *fmt, ...);
1047 extern __printf(2, 3)
1048 int _dev_info(const struct device *dev, const char *fmt, ...);
1049
1050 #else
1051
/* CONFIG_PRINTK=n: every dev_* logging helper compiles to a no-op that
 * returns 0, while keeping the same prototypes and printf format checking. */
static inline __printf(3, 0)
int dev_vprintk_emit(int level, const struct device *dev,
		     const char *fmt, va_list args)
{ return 0; }
static inline __printf(3, 4)
int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
{ return 0; }

static inline int __dev_printk(const char *level, const struct device *dev,
			       struct va_format *vaf)
{ return 0; }
static inline __printf(3, 4)
int dev_printk(const char *level, const struct device *dev,
	       const char *fmt, ...)
{ return 0; }

static inline __printf(2, 3)
int dev_emerg(const struct device *dev, const char *fmt, ...)
{ return 0; }
static inline __printf(2, 3)
int dev_crit(const struct device *dev, const char *fmt, ...)
{ return 0; }
static inline __printf(2, 3)
int dev_alert(const struct device *dev, const char *fmt, ...)
{ return 0; }
static inline __printf(2, 3)
int dev_err(const struct device *dev, const char *fmt, ...)
{ return 0; }
static inline __printf(2, 3)
int dev_warn(const struct device *dev, const char *fmt, ...)
{ return 0; }
static inline __printf(2, 3)
int dev_notice(const struct device *dev, const char *fmt, ...)
{ return 0; }
static inline __printf(2, 3)
int _dev_info(const struct device *dev, const char *fmt, ...)
{ return 0; }
1089
1090 #endif
1091
/*
 * Historical wart: "dev_info" was already in use as an identifier, so the
 * real printk helper is named _dev_info and dev_info is provided as a
 * macro wrapper to avoid redefining it.
 */
#define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg)
1100
/*
 * dev_dbg() - per-device debug message; flavour chosen at build time:
 *  - CONFIG_DYNAMIC_DEBUG: routed through dynamic_dev_dbg() so each call
 *    site can be toggled at runtime;
 *  - DEBUG: always printed at KERN_DEBUG;
 *  - otherwise: compiled out, with "if (0)" keeping the arguments
 *    type-checked and referenced without emitting code.
 */
#if defined(CONFIG_DYNAMIC_DEBUG)
#define dev_dbg(dev, format, ...)		     \
do {						     \
	dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
} while (0)
#elif defined(DEBUG)
#define dev_dbg(dev, format, arg...)		\
	dev_printk(KERN_DEBUG, dev, format, ##arg)
#else
#define dev_dbg(dev, format, arg...)				\
({								\
	if (0)							\
		dev_printk(KERN_DEBUG, dev, format, ##arg);	\
	0;							\
})
#endif
1117
/*
 * Rate-limited wrappers: each call site gets its own static ratelimit state,
 * so messages are dropped once the per-interval burst is exhausted.
 */
#define dev_level_ratelimited(dev_level, dev, fmt, ...)			\
do {									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	if (__ratelimit(&_rs))						\
		dev_level(dev, fmt, ##__VA_ARGS__);			\
} while (0)

#define dev_emerg_ratelimited(dev, fmt, ...)				\
	dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__)
#define dev_alert_ratelimited(dev, fmt, ...)				\
	dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__)
#define dev_crit_ratelimited(dev, fmt, ...)				\
	dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__)
#define dev_err_ratelimited(dev, fmt, ...)				\
	dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__)
#define dev_warn_ratelimited(dev, fmt, ...)				\
	dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__)
#define dev_notice_ratelimited(dev, fmt, ...)				\
	dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
#define dev_info_ratelimited(dev, fmt, ...)				\
	dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
#if defined(CONFIG_DYNAMIC_DEBUG)
/* descriptor check is first to prevent flooding with "callbacks suppressed" */
#define dev_dbg_ratelimited(dev, fmt, ...)				\
do {									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
	if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) &&	\
	    __ratelimit(&_rs))						\
		__dynamic_dev_dbg(&descriptor, dev, fmt,		\
				  ##__VA_ARGS__);			\
} while (0)
#elif defined(DEBUG)
/* DEBUG build: rate-limit but print unconditionally at KERN_DEBUG. */
#define dev_dbg_ratelimited(dev, fmt, ...)				\
do {									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	if (__ratelimit(&_rs))						\
		dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__);	\
} while (0)
#else
/* Compiled out: no_printk() keeps format-string type checking. */
#define dev_dbg_ratelimited(dev, fmt, ...)			\
	no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif
1167
/* dev_vdbg() - verbose debug: same as dev_dbg() when VERBOSE_DEBUG is set,
 * otherwise compiled out with arguments still type-checked via "if (0)". */
#ifdef VERBOSE_DEBUG
#define dev_vdbg dev_dbg
#else
#define dev_vdbg(dev, format, arg...)				\
({								\
	if (0)							\
		dev_printk(KERN_DEBUG, dev, format, ##arg);	\
	0;							\
})
#endif
1178
1179 /*
1180 * dev_WARN*() acts like dev_printk(), but with the key difference of
1181 * using WARN/WARN_ONCE to include file/line information and a backtrace.
1182 */
/*
 * dev_WARN() acts like dev_printk(), but uses WARN() so the report carries
 * file/line information and a backtrace.
 *
 * Fix: the expansion must not end in a semicolon.  The old trailing ';'
 * made the macro expand to two statements, so
 *     if (cond) dev_WARN(dev, ...); else ...
 * failed to compile, and statement counts silently changed elsewhere.
 */
#define dev_WARN(dev, format, arg...) \
	WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg)
1185
/* One-shot variant: warns (with backtrace) only the first time @condition
 * evaluates true at this call site. */
#define dev_WARN_ONCE(dev, condition, format, arg...) \
	WARN_ONCE(condition, "%s %s: " format, \
			dev_driver_string(dev), dev_name(dev), ## arg)
1189
1190 /* Create alias, so I can be autoloaded. */
1191 #define MODULE_ALIAS_CHARDEV(major,minor) \
1192 MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
1193 #define MODULE_ALIAS_CHARDEV_MAJOR(major) \
1194 MODULE_ALIAS("char-major-" __stringify(major) "-*")
1195
1196 #ifdef CONFIG_SYSFS_DEPRECATED
1197 extern long sysfs_deprecated;
1198 #else
1199 #define sysfs_deprecated 0
1200 #endif
1201
1202 /**
1203 * module_driver() - Helper macro for drivers that don't do anything
1204 * special in module init/exit. This eliminates a lot of boilerplate.
1205 * Each module may only use this macro once, and calling it replaces
1206 * module_init() and module_exit().
1207 *
1208 * @__driver: driver name
1209 * @__register: register function for this driver type
1210 * @__unregister: unregister function for this driver type
1211 * @...: Additional arguments to be passed to __register and __unregister.
1212 *
1213 * Use this macro to construct bus specific macros for registering
1214 * drivers, and do not use it on its own.
1215 */
/* Generates <driver>_init/<driver>_exit calling __register/__unregister and
 * wires them to module_init()/module_exit(). */
#define module_driver(__driver, __register, __unregister, ...) \
static int __init __driver##_init(void) \
{ \
	return __register(&(__driver) , ##__VA_ARGS__); \
} \
module_init(__driver##_init); \
static void __exit __driver##_exit(void) \
{ \
	__unregister(&(__driver) , ##__VA_ARGS__); \
} \
module_exit(__driver##_exit);
1227
1228 #endif /* _DEVICE_H_ */ 1 #ifndef _LINUX_LIST_H
2 #define _LINUX_LIST_H
3
4 #include <linux/types.h>
5 #include <linux/stddef.h>
6 #include <linux/poison.h>
7 #include <linux/const.h>
8
9 /*
10 * Simple doubly linked list implementation.
11 *
12 * Some of the internal functions ("__xxx") are useful when
13 * manipulating whole lists rather than single entries, as
14 * sometimes we already know the next/prev entries and we can
15 * generate better code by using them directly rather than
16 * using the generic single-entry routines.
17 */
18
/* Initializer for a list head: next and prev both point at the head itself. */
#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* Define and initialize a stand-alone empty list head. */
#define LIST_HEAD(name) \
	struct list_head name = LIST_HEAD_INIT(name)
23
24 static inline void INIT_LIST_HEAD(struct list_head *list)
25 {
26 list->next = list;
27 list->prev = list;
28 }
29
30 /*
31 * Insert a new entry between two known consecutive entries.
32 *
33 * This is only for internal list manipulation where we know
34 * the prev/next entries already!
35 */
36 #ifndef CONFIG_DEBUG_LIST
/* Splice @new between @prev and @next, which the caller knows are adjacent. */
static inline void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}
46 #else
47 extern void __list_add(struct list_head *new,
48 struct list_head *prev,
49 struct list_head *next);
50 #endif
51
52 /**
53 * list_add - add a new entry
54 * @new: new entry to be added
55 * @head: list head to add it after
56 *
57 * Insert a new entry after the specified head.
58 * This is good for implementing stacks.
59 */
60 static inline void list_add(struct list_head *new, struct list_head *head)
61 {
62 __list_add(new, head, head->next);
63 }
64
65
66 /**
67 * list_add_tail - add a new entry
68 * @new: new entry to be added
69 * @head: list head to add it before
70 *
71 * Insert a new entry before the specified head.
72 * This is useful for implementing queues.
73 */
74 static inline void list_add_tail(struct list_head *new, struct list_head *head)
75 {
76 __list_add(new, head->prev, head);
77 }
78
79 /*
80 * Delete a list entry by making the prev/next entries
81 * point to each other.
82 *
83 * This is only for internal list manipulation where we know
84 * the prev/next entries already!
85 */
/* Unlink the entry between @prev and @next by pointing them at each other;
 * the removed entry's own pointers are left untouched. */
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
	next->prev = prev;
	prev->next = next;
}
91
92 /**
93 * list_del - deletes entry from list.
94 * @entry: the element to delete from the list.
95 * Note: list_empty() on entry does not return true after this, the entry is
96 * in an undefined state.
97 */
98 #ifndef CONFIG_DEBUG_LIST
99 static inline void __list_del_entry(struct list_head *entry)
100 {
101 __list_del(entry->prev, entry->next);
102 }
103
104 static inline void list_del(struct list_head *entry)
105 {
106 __list_del(entry->prev, entry->next);
107 entry->next = LIST_POISON1;
108 entry->prev = LIST_POISON2;
109 }
110 #else
111 extern void __list_del_entry(struct list_head *entry);
112 extern void list_del(struct list_head *entry);
113 #endif
114
115 /**
116 * list_replace - replace old entry by new one
117 * @old : the element to be replaced
118 * @new : the new element to insert
119 *
120 * If @old was empty, it will be overwritten.
121 */
/* Put @new where @old is, relinking both neighbours; @old is left dangling. */
static inline void list_replace(struct list_head *old,
				struct list_head *new)
{
	new->next = old->next;
	new->next->prev = new;
	new->prev = old->prev;
	new->prev->next = new;
}
130
/* list_replace() followed by resetting @old to a valid empty list. */
static inline void list_replace_init(struct list_head *old,
					struct list_head *new)
{
	list_replace(old, new);
	INIT_LIST_HEAD(old);
}
137
138 /**
139 * list_del_init - deletes entry from list and reinitialize it.
140 * @entry: the element to delete from the list.
141 */
/*
 * list_del_init - remove @entry from its list and leave it as a valid
 * empty list, so list_empty(entry) is true afterwards.
 */
static inline void list_del_init(struct list_head *entry)
{
	__list_del_entry(entry);
	INIT_LIST_HEAD(entry);
}
147
148 /**
149 * list_move - delete from one list and add as another's head
150 * @list: the entry to move
151 * @head: the head that will precede our entry
152 */
/*
 * list_move - detach @list from wherever it is and re-insert it right
 * after @head.
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
	__list_del_entry(list);
	list_add(list, head);
}
158
159 /**
160 * list_move_tail - delete from one list and add as another's tail
161 * @list: the entry to move
162 * @head: the head that will follow our entry
163 */
/*
 * list_move_tail - detach @list from wherever it is and re-insert it
 * right before @head (i.e. at the tail).
 */
static inline void list_move_tail(struct list_head *list,
				  struct list_head *head)
{
	__list_del_entry(list);
	list_add_tail(list, head);
}
170
171 /**
172 * list_is_last - tests whether @list is the last entry in list @head
173 * @list: the entry to test
174 * @head: the head of the list
175 */
176 static inline int list_is_last(const struct list_head *list,
177 const struct list_head *head)
178 {
179 return list->next == head;
180 }
181
182 /**
183 * list_empty - tests whether a list is empty
184 * @head: the list to test.
185 */
186 static inline int list_empty(const struct list_head *head)
187 {
188 return head->next == head;
189 }
190
191 /**
192 * list_empty_careful - tests whether a list is empty and not being modified
193 * @head: the list to test
194 *
195 * Description:
196 * tests whether a list is empty _and_ checks that no other CPU might be
197 * in the process of modifying either member (next or prev)
198 *
199 * NOTE: using list_empty_careful() without synchronization
200 * can only be safe if the only activity that can happen
201 * to the list entry is list_del_init(). Eg. it cannot be used
202 * if another CPU could re-list_add() it.
203 */
/* Check both next and prev (next is read once) so a concurrent
 * list_del_init() in progress is not mistaken for an empty list; see the
 * kernel-doc above for the synchronization caveats. */
static inline int list_empty_careful(const struct list_head *head)
{
	struct list_head *next = head->next;
	return (next == head) && (next == head->prev);
}
209
210 /**
211 * list_rotate_left - rotate the list to the left
212 * @head: the head of the list
213 */
214 static inline void list_rotate_left(struct list_head *head)
215 {
216 struct list_head *first;
217
218 if (!list_empty(head)) {
219 first = head->next;
220 list_move_tail(first, head);
221 }
222 }
223
224 /**
225 * list_is_singular - tests whether a list has just one entry.
226 * @head: the list to test.
227 */
228 static inline int list_is_singular(const struct list_head *head)
229 {
230 return !list_empty(head) && (head->next == head->prev);
231 }
232
/* Move the run head->next .. entry onto @list, leaving the remainder
 * (entry->next onward) on @head.  Caller guarantees @entry is on @head
 * and is not @head itself. */
static inline void __list_cut_position(struct list_head *list,
		struct list_head *head, struct list_head *entry)
{
	struct list_head *new_first = entry->next; /* first of the remainder */
	list->next = head->next;
	list->next->prev = list;
	list->prev = entry;
	entry->next = list;
	head->next = new_first;
	new_first->prev = head;
}
244
245 /**
246 * list_cut_position - cut a list into two
247 * @list: a new list to add all removed entries
248 * @head: a list with entries
249 * @entry: an entry within head, could be the head itself
250 * and if so we won't cut the list
251 *
252 * This helper moves the initial part of @head, up to and
253 * including @entry, from @head to @list. You should
254 * pass on @entry an element you know is on @head. @list
255 * should be an empty list or a list you do not care about
256 * losing its data.
257 *
258 */
259 static inline void list_cut_position(struct list_head *list,
260 struct list_head *head, struct list_head *entry)
261 {
262 if (list_empty(head))
263 return;
264 if (list_is_singular(head) &&
265 (head->next != entry && head != entry))
266 return;
267 if (entry == head)
268 INIT_LIST_HEAD(list);
269 else
270 __list_cut_position(list, head, entry);
271 }
272
273 static inline void __list_splice(const struct list_head *list,
274 struct list_head *prev,
275 struct list_head *next)
276 {
277 struct list_head *first = list->next;
278 struct list_head *last = list->prev;
279
280 first->prev = prev;
281 prev->next = first;
282
283 last->next = next;
284 next->prev = last;
285 }
286
287 /**
288 * list_splice - join two lists, this is designed for stacks
289 * @list: the new list to add.
290 * @head: the place to add it in the first list.
291 */
292 static inline void list_splice(const struct list_head *list,
293 struct list_head *head)
294 {
295 if (!list_empty(list))
296 __list_splice(list, head, head->next);
297 }
298
299 /**
300 * list_splice_tail - join two lists, each list being a queue
301 * @list: the new list to add.
302 * @head: the place to add it in the first list.
303 */
304 static inline void list_splice_tail(struct list_head *list,
305 struct list_head *head)
306 {
307 if (!list_empty(list))
308 __list_splice(list, head->prev, head);
309 }
310
311 /**
312 * list_splice_init - join two lists and reinitialise the emptied list.
313 * @list: the new list to add.
314 * @head: the place to add it in the first list.
315 *
316 * The list at @list is reinitialised
317 */
318 static inline void list_splice_init(struct list_head *list,
319 struct list_head *head)
320 {
321 if (!list_empty(list)) {
322 __list_splice(list, head, head->next);
323 INIT_LIST_HEAD(list);
324 }
325 }
326
327 /**
328 * list_splice_tail_init - join two lists and reinitialise the emptied list
329 * @list: the new list to add.
330 * @head: the place to add it in the first list.
331 *
332 * Each of the lists is a queue.
333 * The list at @list is reinitialised
334 */
335 static inline void list_splice_tail_init(struct list_head *list,
336 struct list_head *head)
337 {
338 if (!list_empty(list)) {
339 __list_splice(list, head->prev, head);
340 INIT_LIST_HEAD(list);
341 }
342 }
343
344 /**
345 * list_entry - get the struct for this entry
346 * @ptr: the &struct list_head pointer.
347 * @type: the type of the struct this is embedded in.
348 * @member: the name of the list_struct within the struct.
349 */
350 #define list_entry(ptr, type, member) \
351 container_of(ptr, type, member)
352
353 /**
354 * list_first_entry - get the first element from a list
355 * @ptr: the list head to take the element from.
356 * @type: the type of the struct this is embedded in.
357 * @member: the name of the list_struct within the struct.
358 *
359 * Note, that list is expected to be not empty.
360 */
361 #define list_first_entry(ptr, type, member) \
362 list_entry((ptr)->next, type, member)
363
364 /**
365 * list_last_entry - get the last element from a list
366 * @ptr: the list head to take the element from.
367 * @type: the type of the struct this is embedded in.
368 * @member: the name of the list_struct within the struct.
369 *
370 * Note, that list is expected to be not empty.
371 */
372 #define list_last_entry(ptr, type, member) \
373 list_entry((ptr)->prev, type, member)
374
375 /**
376 * list_first_entry_or_null - get the first element from a list
377 * @ptr: the list head to take the element from.
378 * @type: the type of the struct this is embedded in.
379 * @member: the name of the list_struct within the struct.
380 *
381 * Note that if the list is empty, it returns NULL.
 * NOTE(review): @ptr is expanded twice below, so it must be free of
 * side effects.
382 */
383 #define list_first_entry_or_null(ptr, type, member) \
384 (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
385
386 /**
387 * list_next_entry - get the next element in list
388 * @pos: the type * to cursor
389 * @member: the name of the list_struct within the struct.
390 */
391 #define list_next_entry(pos, member) \
392 list_entry((pos)->member.next, typeof(*(pos)), member)
393
394 /**
395 * list_prev_entry - get the prev element in list
396 * @pos: the type * to cursor
397 * @member: the name of the list_struct within the struct.
398 */
399 #define list_prev_entry(pos, member) \
400 list_entry((pos)->member.prev, typeof(*(pos)), member)
401
402 /**
403 * list_for_each - iterate over a list
404 * @pos: the &struct list_head to use as a loop cursor.
405 * @head: the head for your list.
406 */
407 #define list_for_each(pos, head) \
408 for (pos = (head)->next; pos != (head); pos = pos->next)
409
410 /**
411 * list_for_each_prev - iterate over a list backwards
412 * @pos: the &struct list_head to use as a loop cursor.
413 * @head: the head for your list.
414 */
415 #define list_for_each_prev(pos, head) \
416 for (pos = (head)->prev; pos != (head); pos = pos->prev)
417
418 /**
419 * list_for_each_safe - iterate over a list safe against removal of list entry
420 * @pos: the &struct list_head to use as a loop cursor.
421 * @n: another &struct list_head to use as temporary storage
422 * @head: the head for your list.
 *
 * Only removal of @pos itself is safe; the walk breaks if @n is removed
 * without resetting it before the next iteration.
423 */
424 #define list_for_each_safe(pos, n, head) \
425 for (pos = (head)->next, n = pos->next; pos != (head); \
426 pos = n, n = pos->next)
427
428 /**
429 * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
430 * @pos: the &struct list_head to use as a loop cursor.
431 * @n: another &struct list_head to use as temporary storage
432 * @head: the head for your list.
433 */
434 #define list_for_each_prev_safe(pos, n, head) \
435 for (pos = (head)->prev, n = pos->prev; \
436 pos != (head); \
437 pos = n, n = pos->prev)
438
439 /**
440 * list_for_each_entry - iterate over list of given type
441 * @pos: the type * to use as a loop cursor.
442 * @head: the head for your list.
443 * @member: the name of the list_struct within the struct.
444 */
445 #define list_for_each_entry(pos, head, member) \
446 for (pos = list_first_entry(head, typeof(*pos), member); \
447 &pos->member != (head); \
448 pos = list_next_entry(pos, member))
449
450 /**
451 * list_for_each_entry_reverse - iterate backwards over list of given type.
452 * @pos: the type * to use as a loop cursor.
453 * @head: the head for your list.
454 * @member: the name of the list_struct within the struct.
455 */
456 #define list_for_each_entry_reverse(pos, head, member) \
457 for (pos = list_last_entry(head, typeof(*pos), member); \
458 &pos->member != (head); \
459 pos = list_prev_entry(pos, member))
460
461 /**
462 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
463 * @pos: the type * to use as a start point
464 * @head: the head of the list
465 * @member: the name of the list_struct within the struct.
466 *
467 * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
 * Uses the GNU "a ? : b" extension: a non-NULL @pos is returned unchanged;
 * otherwise the pseudo-entry containing @head is returned so that the
 * subsequent list_for_each_entry_continue() advances past it first.
468 */
469 #define list_prepare_entry(pos, head, member) \
470 ((pos) ? : list_entry(head, typeof(*pos), member))
471
472 /**
473 * list_for_each_entry_continue - continue iteration over list of given type
474 * @pos: the type * to use as a loop cursor.
475 * @head: the head for your list.
476 * @member: the name of the list_struct within the struct.
477 *
478 * Continue to iterate over list of given type, continuing after
479 * the current position.
480 */
481 #define list_for_each_entry_continue(pos, head, member) \
482 for (pos = list_next_entry(pos, member); \
483 &pos->member != (head); \
484 pos = list_next_entry(pos, member))
485
486 /**
487 * list_for_each_entry_continue_reverse - iterate backwards from the given point
488 * @pos: the type * to use as a loop cursor.
489 * @head: the head for your list.
490 * @member: the name of the list_struct within the struct.
491 *
492 * Start to iterate over list of given type backwards, continuing after
493 * the current position.
494 */
495 #define list_for_each_entry_continue_reverse(pos, head, member) \
496 for (pos = list_prev_entry(pos, member); \
497 &pos->member != (head); \
498 pos = list_prev_entry(pos, member))
499
500 /**
501 * list_for_each_entry_from - iterate over list of given type from the current point
502 * @pos: the type * to use as a loop cursor.
503 * @head: the head for your list.
504 * @member: the name of the list_struct within the struct.
505 *
506 * Iterate over list of given type, continuing from current position.
507 */
508 #define list_for_each_entry_from(pos, head, member) \
509 for (; &pos->member != (head); \
510 pos = list_next_entry(pos, member))
511
512 /**
513 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
514 * @pos: the type * to use as a loop cursor.
515 * @n: another type * to use as temporary storage
516 * @head: the head for your list.
517 * @member: the name of the list_struct within the struct.
 *
 * Only @pos may be removed inside the loop body; if @n is removed, use
 * list_safe_reset_next() before the next iteration.
518 */
519 #define list_for_each_entry_safe(pos, n, head, member) \
520 for (pos = list_first_entry(head, typeof(*pos), member), \
521 n = list_next_entry(pos, member); \
522 &pos->member != (head); \
523 pos = n, n = list_next_entry(n, member))
524
525 /**
526 * list_for_each_entry_safe_continue - continue list iteration safe against removal
527 * @pos: the type * to use as a loop cursor.
528 * @n: another type * to use as temporary storage
529 * @head: the head for your list.
530 * @member: the name of the list_struct within the struct.
531 *
532 * Iterate over list of given type, continuing after current point,
533 * safe against removal of list entry.
534 */
535 #define list_for_each_entry_safe_continue(pos, n, head, member) \
536 for (pos = list_next_entry(pos, member), \
537 n = list_next_entry(pos, member); \
538 &pos->member != (head); \
539 pos = n, n = list_next_entry(n, member))
540
541 /**
542 * list_for_each_entry_safe_from - iterate over list from current point safe against removal
543 * @pos: the type * to use as a loop cursor.
544 * @n: another type * to use as temporary storage
545 * @head: the head for your list.
546 * @member: the name of the list_struct within the struct.
547 *
548 * Iterate over list of given type from current point, safe against
549 * removal of list entry.
550 */
551 #define list_for_each_entry_safe_from(pos, n, head, member) \
552 for (n = list_next_entry(pos, member); \
553 &pos->member != (head); \
554 pos = n, n = list_next_entry(n, member))
555
556 /**
557 * list_for_each_entry_safe_reverse - iterate backwards over list safe against removal
558 * @pos: the type * to use as a loop cursor.
559 * @n: another type * to use as temporary storage
560 * @head: the head for your list.
561 * @member: the name of the list_struct within the struct.
562 *
563 * Iterate backwards over list of given type, safe against removal
564 * of list entry.
565 */
566 #define list_for_each_entry_safe_reverse(pos, n, head, member) \
567 for (pos = list_last_entry(head, typeof(*pos), member), \
568 n = list_prev_entry(pos, member); \
569 &pos->member != (head); \
570 pos = n, n = list_prev_entry(n, member))
571
572 /**
573 * list_safe_reset_next - reset a stale list_for_each_entry_safe loop
574 * @pos: the loop cursor used in the list_for_each_entry_safe loop
575 * @n: temporary storage used in list_for_each_entry_safe
576 * @member: the name of the list_struct within the struct.
577 *
578 * list_safe_reset_next is not safe to use in general if the list may be
579 * modified concurrently (eg. the lock is dropped in the loop body). An
580 * exception to this is if the cursor element (pos) is pinned in the list,
581 * and list_safe_reset_next is called after re-taking the lock and before
582 * completing the current iteration of the loop body.
583 */
584 #define list_safe_reset_next(pos, n, member) \
585 n = list_next_entry(pos, member)
586
587 /*
588 * Double linked lists with a single pointer list head.
589 * Mostly useful for hash tables where the two pointer list head is
590 * too wasteful.
591 * You lose the ability to access the tail in O(1).
592 */
593
594 #define HLIST_HEAD_INIT { .first = NULL }
595 #define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
596 #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
597 static inline void INIT_HLIST_NODE(struct hlist_node *h)
598 {
599 h->next = NULL;
600 h->pprev = NULL;
601 }
602
603 static inline int hlist_unhashed(const struct hlist_node *h)
604 {
605 return !h->pprev;
606 }
607
608 static inline int hlist_empty(const struct hlist_head *h)
609 {
610 return !h->first;
611 }
612
613 static inline void __hlist_del(struct hlist_node *n)
614 {
615 struct hlist_node *next = n->next;
616 struct hlist_node **pprev = n->pprev;
617 *pprev = next;
618 if (next)
619 next->pprev = pprev;
620 }
621
622 static inline void hlist_del(struct hlist_node *n)
623 {
624 __hlist_del(n);
625 n->next = LIST_POISON1;
626 n->pprev = LIST_POISON2;
627 }
628
/* Unlink @n (if hashed) and leave it reinitialised for reuse. */
static inline void hlist_del_init(struct hlist_node *n)
{
	if (hlist_unhashed(n))
		return;		/* already off-list: nothing to do */
	__hlist_del(n);
	INIT_HLIST_NODE(n);
}
636
637 static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
638 {
639 struct hlist_node *first = h->first;
640 n->next = first;
641 if (first)
642 first->pprev = &n->next;
643 h->first = n;
644 n->pprev = &h->first;
645 }
646
647 /* next must be != NULL */
648 static inline void hlist_add_before(struct hlist_node *n,
649 struct hlist_node *next)
650 {
651 n->pprev = next->pprev;
652 n->next = next;
653 next->pprev = &n->next;
654 *(n->pprev) = n;
655 }
656
657 static inline void hlist_add_after(struct hlist_node *n,
658 struct hlist_node *next)
659 {
660 next->next = n->next;
661 n->next = next;
662 next->pprev = &n->next;
663
664 if(next->next)
665 next->next->pprev = &next->next;
666 }
667
668 /* after that we'll appear to be on some hlist and hlist_del will work */
669 static inline void hlist_add_fake(struct hlist_node *n)
670 {
671 n->pprev = &n->next;
672 }
673
674 /*
675 * Move a list from one list head to another. Fixup the pprev
676 * reference of the first entry if it exists.
677 */
678 static inline void hlist_move_list(struct hlist_head *old,
679 struct hlist_head *new)
680 {
681 new->first = old->first;
682 if (new->first)
683 new->first->pprev = &new->first;
684 old->first = NULL;
685 }
686
687 #define hlist_entry(ptr, type, member) container_of(ptr,type,member)
688
689 #define hlist_for_each(pos, head) \
690 for (pos = (head)->first; pos ; pos = pos->next)
691
692 #define hlist_for_each_safe(pos, n, head) \
693 for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
694 pos = n)
695
/*
 * NOTE(review): the statement expression binds @ptr to ____ptr exactly once,
 * so hlist_entry_safe() tolerates arguments with side effects and maps a
 * NULL node pointer to a NULL entry pointer.
 */
696 #define hlist_entry_safe(ptr, type, member) \
697 ({ typeof(ptr) ____ptr = (ptr); \
698 ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
699 })
700
701 /**
702 * hlist_for_each_entry - iterate over list of given type
703 * @pos: the type * to use as a loop cursor.
704 * @head: the head for your list.
705 * @member: the name of the hlist_node within the struct.
706 */
707 #define hlist_for_each_entry(pos, head, member) \
708 for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
709 pos; \
710 pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
711
712 /**
713 * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
714 * @pos: the type * to use as a loop cursor.
715 * @member: the name of the hlist_node within the struct.
716 */
717 #define hlist_for_each_entry_continue(pos, member) \
718 for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
719 pos; \
720 pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
721
722 /**
723 * hlist_for_each_entry_from - iterate over a hlist continuing from current point
724 * @pos: the type * to use as a loop cursor.
725 * @member: the name of the hlist_node within the struct.
726 */
727 #define hlist_for_each_entry_from(pos, member) \
728 for (; pos; \
729 pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
730
731 /**
732 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
733 * @pos: the type * to use as a loop cursor.
734 * @n: another &struct hlist_node to use as temporary storage
735 * @head: the head for your list.
736 * @member: the name of the hlist_node within the struct.
737 */
738 #define hlist_for_each_entry_safe(pos, n, head, member) \
739 for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
740 pos && ({ n = pos->member.next; 1; }); \
741 pos = hlist_entry_safe(n, typeof(*pos), member))
742
743 #endif 1 /*
2 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
3 *
4 * (C) SGI 2006, Christoph Lameter
5 * Cleaned up and restructured to ease the addition of alternative
6 * implementations of SLAB allocators.
7 * (C) Linux Foundation 2008-2013
8 * Unified interface for all slab allocators
9 */
10
11 #ifndef _LINUX_SLAB_H
12 #define _LINUX_SLAB_H
13
14 #include <linux/gfp.h>
15 #include <linux/types.h>
16 #include <linux/workqueue.h>
17
18
19 /*
20 * Flags to pass to kmem_cache_create().
21 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
22 */
23 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
24 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
25 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
26 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
27 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
28 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
29 #define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
30 /*
31 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
32 *
33 * This delays freeing the SLAB page by a grace period, it does _NOT_
34 * delay object freeing. This means that if you do kmem_cache_free()
35 * that memory location is free to be reused at any time. Thus it may
36 * be possible to see another object there in the same RCU grace period.
37 *
38 * This feature only ensures the memory location backing the object
39 * stays valid, the trick to using this is relying on an independent
40 * object validation pass. Something like:
41 *
42 * rcu_read_lock()
43 * again:
44 * obj = lockless_lookup(key);
45 * if (obj) {
46 * if (!try_get_ref(obj)) // might fail for free objects
47 * goto again;
48 *
49 * if (obj->key != key) { // not the object we expected
50 * put_ref(obj);
51 * goto again;
52 * }
53 * }
54 * rcu_read_unlock();
55 *
56 * This is useful if we need to approach a kernel structure obliquely,
57 * from its address obtained without the usual locking. We can lock
58 * the structure to stabilize it and check it's still at the given address,
59 * only if we can be sure that the memory has not been meanwhile reused
60 * for some other kind of object (which our subsystem's lock might corrupt).
61 *
62 * rcu_read_lock before reading the address, then rcu_read_unlock after
63 * taking the spinlock within the structure expected at that address.
64 */
65 #define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
66 #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
67 #define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */
68
69 /* Flag to prevent checks on free */
70 #ifdef CONFIG_DEBUG_OBJECTS
71 # define SLAB_DEBUG_OBJECTS 0x00400000UL
72 #else
73 # define SLAB_DEBUG_OBJECTS 0x00000000UL
74 #endif
75
76 #define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */
77
78 /* Don't track use of uninitialized memory */
79 #ifdef CONFIG_KMEMCHECK
80 # define SLAB_NOTRACK 0x01000000UL
81 #else
82 # define SLAB_NOTRACK 0x00000000UL
83 #endif
84 #ifdef CONFIG_FAILSLAB
85 # define SLAB_FAILSLAB 0x02000000UL /* Fault injection mark */
86 #else
87 # define SLAB_FAILSLAB 0x00000000UL
88 #endif
89
90 /* The following flags affect the page allocator grouping pages by mobility */
91 #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
92 #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
93 /*
94 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
95 *
96 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
97 *
98 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
99 * Both make kfree a no-op.
100 */
101 #define ZERO_SIZE_PTR ((void *)16)
102
103 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
104 (unsigned long)ZERO_SIZE_PTR)
105
106 #include <linux/kmemleak.h>
107
108 struct mem_cgroup;
109 /*
110 * struct kmem_cache related prototypes
111 */
112 void __init kmem_cache_init(void);
113 int slab_is_available(void);
114
115 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
116 unsigned long,
117 void (*)(void *));
118 #ifdef CONFIG_MEMCG_KMEM
119 struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *,
120 struct kmem_cache *,
121 const char *);
122 #endif
123 void kmem_cache_destroy(struct kmem_cache *);
124 int kmem_cache_shrink(struct kmem_cache *);
125 void kmem_cache_free(struct kmem_cache *, void *);
126
127 /*
128 * Please use this macro to create slab caches. Simply specify the
129 * name of the structure and maybe some flags that are listed above.
130 *
131 * The alignment of the struct determines object alignment. If you
132 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
133 * then the objects will be properly aligned in SMP configurations.
134 */
135 #define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
136 sizeof(struct __struct), __alignof__(struct __struct),\
137 (__flags), NULL)
138
139 /*
140 * Common kmalloc functions provided by all allocators
141 */
142 void * __must_check __krealloc(const void *, size_t, gfp_t);
143 void * __must_check krealloc(const void *, size_t, gfp_t);
144 void kfree(const void *);
145 void kzfree(const void *);
146 size_t ksize(const void *);
147
148 /*
149 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
150 * alignment larger than the alignment of a 64-bit integer.
151 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
152 */
153 #if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
154 #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
155 #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
156 #define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
157 #else
158 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
159 #endif
160
161 #ifdef CONFIG_SLOB
162 /*
163 * Common fields provided in kmem_cache by all slab allocators
164 * This struct is either used directly by the allocator (SLOB)
165 * or the allocator must include definitions for all fields
166 * provided in kmem_cache_common in their definition of kmem_cache.
167 *
168 * Once we can do anonymous structs (C11 standard) we could put a
169 * anonymous struct definition in these allocators so that the
170 * separate allocations in the kmem_cache structure of SLAB and
171 * SLUB is no longer needed.
172 */
173 struct kmem_cache {
174 unsigned int object_size;/* The original size of the object */
175 unsigned int size; /* The aligned/padded/added on size */
176 unsigned int align; /* Alignment as calculated */
177 unsigned long flags; /* Active flags on the slab */
178 const char *name; /* Slab name for sysfs */
179 int refcount; /* Use counter */
180 void (*ctor)(void *); /* Called on object slot creation */
181 struct list_head list; /* List of all slab caches on the system */
182 };
183
184 #endif /* CONFIG_SLOB */
185
186 /*
187 * Kmalloc array related definitions
188 */
189
190 #ifdef CONFIG_SLAB
191 /*
192 * The largest kmalloc size supported by the SLAB allocators is
193 * 32 megabyte (2^25) or the maximum allocatable page order if that is
194 * less than 32 MB.
195 *
196 * WARNING: It's not easy to increase this value since the allocators have
197 * to do various tricks to work around compiler limitations in order to
198 * ensure proper constant folding.
199 */
200 #define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
201 (MAX_ORDER + PAGE_SHIFT - 1) : 25)
202 #define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH
203 #ifndef KMALLOC_SHIFT_LOW
204 #define KMALLOC_SHIFT_LOW 5
205 #endif
206 #endif
207
208 #ifdef CONFIG_SLUB
209 /*
210 * SLUB directly allocates requests fitting in to an order-1 page
211 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
212 */
213 #define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
214 #define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
215 #ifndef KMALLOC_SHIFT_LOW
216 #define KMALLOC_SHIFT_LOW 3
217 #endif
218 #endif
219
220 #ifdef CONFIG_SLOB
221 /*
222 * SLOB passes all requests larger than one page to the page allocator.
223 * No kmalloc array is necessary since objects of different sizes can
224 * be allocated from the same page.
225 */
226 #define KMALLOC_SHIFT_HIGH PAGE_SHIFT
227 #define KMALLOC_SHIFT_MAX 30
228 #ifndef KMALLOC_SHIFT_LOW
229 #define KMALLOC_SHIFT_LOW 3
230 #endif
231 #endif
232
233 /* Maximum allocatable size */
234 #define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
235 /* Maximum size for which we actually use a slab cache */
236 #define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)
237 /* Maximum order allocatable via the slab allocator */
238 #define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT)
239
240 /*
241 * Kmalloc subsystem.
242 */
243 #ifndef KMALLOC_MIN_SIZE
244 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
245 #endif
246
247 /*
248 * This restriction comes from byte sized index implementation.
249 * Page size is normally 2^12 bytes and, in this case, if we want to use
250 * byte sized index which can represent 2^8 entries, the size of the object
251 * should be equal or greater to 2^12 / 2^8 = 2^4 = 16.
252 * If minimum size of kmalloc is less than 16, we use it as minimum object
253 * size and give up to use byte sized index.
254 */
255 #define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \
256 (KMALLOC_MIN_SIZE) : 16)
257
258 #ifndef CONFIG_SLOB
259 extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
260 #ifdef CONFIG_ZONE_DMA
261 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
262 #endif
263
264 /*
265 * Figure out which kmalloc slab an allocation of a certain size
266 * belongs to.
267 * 0 = zero alloc
268 * 1 = 65 .. 96 bytes
269 * 2 = 120 .. 192 bytes
270 * n = 2^(n-1) .. 2^n -1
271 */
272 static __always_inline int kmalloc_index(size_t size)
273 {
274 if (!size)
275 return 0;
276
277 if (size <= KMALLOC_MIN_SIZE)
278 return KMALLOC_SHIFT_LOW;
279
280 if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
281 return 1;
282 if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
283 return 2;
284 if (size <= 8) return 3;
285 if (size <= 16) return 4;
286 if (size <= 32) return 5;
287 if (size <= 64) return 6;
288 if (size <= 128) return 7;
289 if (size <= 256) return 8;
290 if (size <= 512) return 9;
291 if (size <= 1024) return 10;
292 if (size <= 2 * 1024) return 11;
293 if (size <= 4 * 1024) return 12;
294 if (size <= 8 * 1024) return 13;
295 if (size <= 16 * 1024) return 14;
296 if (size <= 32 * 1024) return 15;
297 if (size <= 64 * 1024) return 16;
298 if (size <= 128 * 1024) return 17;
299 if (size <= 256 * 1024) return 18;
300 if (size <= 512 * 1024) return 19;
301 if (size <= 1024 * 1024) return 20;
302 if (size <= 2 * 1024 * 1024) return 21;
303 if (size <= 4 * 1024 * 1024) return 22;
304 if (size <= 8 * 1024 * 1024) return 23;
305 if (size <= 16 * 1024 * 1024) return 24;
306 if (size <= 32 * 1024 * 1024) return 25;
307 if (size <= 64 * 1024 * 1024) return 26;
308 BUG();
309
310 /* Will never be reached. Needed because the compiler may complain */
311 return -1;
312 }
313 #endif /* !CONFIG_SLOB */
314
315 void *__kmalloc(size_t size, gfp_t flags);
316 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
317
318 #ifdef CONFIG_NUMA
319 void *__kmalloc_node(size_t size, gfp_t flags, int node);
320 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
321 #else
322 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
323 {
324 return __kmalloc(size, flags);
325 }
326
327 static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
328 {
329 return kmem_cache_alloc(s, flags);
330 }
331 #endif
332
333 #ifdef CONFIG_TRACING
334 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
335
336 #ifdef CONFIG_NUMA
337 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
338 gfp_t gfpflags,
339 int node, size_t size);
340 #else
341 static __always_inline void *
342 kmem_cache_alloc_node_trace(struct kmem_cache *s,
343 gfp_t gfpflags,
344 int node, size_t size)
345 {
346 return kmem_cache_alloc_trace(s, gfpflags, size);
347 }
348 #endif /* CONFIG_NUMA */
349
350 #else /* CONFIG_TRACING */
351 static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
352 gfp_t flags, size_t size)
353 {
354 return kmem_cache_alloc(s, flags);
355 }
356
357 static __always_inline void *
358 kmem_cache_alloc_node_trace(struct kmem_cache *s,
359 gfp_t gfpflags,
360 int node, size_t size)
361 {
362 return kmem_cache_alloc_node(s, gfpflags, node);
363 }
364 #endif /* CONFIG_TRACING */
365
366 #ifdef CONFIG_SLAB
367 #include <linux/slab_def.h>
368 #endif
369
370 #ifdef CONFIG_SLUB
371 #include <linux/slub_def.h>
372 #endif
373
374 extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);
375
376 #ifdef CONFIG_TRACING
377 extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
378 #else
379 static __always_inline void *
380 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
381 {
382 return kmalloc_order(size, flags, order);
383 }
384 #endif
385
386 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
387 {
388 unsigned int order = get_order(size);
389 return kmalloc_order_trace(size, flags, order);
390 }
391
392 /**
393 * kmalloc - allocate memory
394 * @size: how many bytes of memory are required.
395 * @flags: the type of memory to allocate.
396 *
397 * kmalloc is the normal method of allocating memory
398 * for objects smaller than page size in the kernel.
399 *
400 * The @flags argument may be one of:
401 *
402 * %GFP_USER - Allocate memory on behalf of user. May sleep.
403 *
404 * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
405 *
406 * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
407 * For example, use this inside interrupt handlers.
408 *
409 * %GFP_HIGHUSER - Allocate pages from high memory.
410 *
411 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
412 *
413 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
414 *
415 * %GFP_NOWAIT - Allocation will not sleep.
416 *
417 * %__GFP_THISNODE - Allocate node-local memory only.
418 *
419 * %GFP_DMA - Allocation suitable for DMA.
420 * Should only be used for kmalloc() caches. Otherwise, use a
421 * slab created with SLAB_DMA.
422 *
423 * Also it is possible to set different flags by OR'ing
424 * in one or more of the following additional @flags:
425 *
426 * %__GFP_COLD - Request cache-cold pages instead of
427 * trying to return cache-warm pages.
428 *
429 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
430 *
431 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
432 * (think twice before using).
433 *
434 * %__GFP_NORETRY - If memory is not immediately available,
435 * then give up at once.
436 *
437 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
438 *
439 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
440 *
441 * There are other flags available as well, but these are not intended
442 * for general use, and so are not documented here. For a full list of
443 * potential flags, always refer to linux/gfp.h.
444 */
445 static __always_inline void *kmalloc(size_t size, gfp_t flags)
446 {
447 if (__builtin_constant_p(size)) {
448 if (size > KMALLOC_MAX_CACHE_SIZE)
449 return kmalloc_large(size, flags);
450 #ifndef CONFIG_SLOB
451 if (!(flags & GFP_DMA)) {
452 int index = kmalloc_index(size);
453
454 if (!index)
455 return ZERO_SIZE_PTR;
456
457 return kmem_cache_alloc_trace(kmalloc_caches[index],
458 flags, size);
459 }
460 #endif
461 }
462 return __kmalloc(size, flags);
463 }
464
465 /*
466 * Determine size used for the nth kmalloc cache.
467 * return size or 0 if a kmalloc cache for that
468 * size does not exist
469 */
470 static __always_inline int kmalloc_size(int n)
471 {
472 #ifndef CONFIG_SLOB
473 if (n > 2)
474 return 1 << n;
475
476 if (n == 1 && KMALLOC_MIN_SIZE <= 32)
477 return 96;
478
479 if (n == 2 && KMALLOC_MIN_SIZE <= 64)
480 return 192;
481 #endif
482 return 0;
483 }
484
485 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
486 {
487 #ifndef CONFIG_SLOB
488 if (__builtin_constant_p(size) &&
489 size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
490 int i = kmalloc_index(size);
491
492 if (!i)
493 return ZERO_SIZE_PTR;
494
495 return kmem_cache_alloc_node_trace(kmalloc_caches[i],
496 flags, node, size);
497 }
498 #endif
499 return __kmalloc_node(size, flags, node);
500 }
501
502 /*
503 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
504 * Intended for arches that get misalignment faults even for 64 bit integer
505 * aligned buffers.
506 */
507 #ifndef ARCH_SLAB_MINALIGN
508 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
509 #endif
/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * struct kmem_cache will hold a pointer to it, so the memory cost while
 * disabled is 1 pointer. The runtime cost while enabled, gets bigger than it
 * would otherwise be if that would be bundled in kmem_cache: we'll need an
 * extra pointer chase. But the trade off clearly lays in favor of not
 * penalizing non-users.
 *
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking any locks, on relocation we free the old
 * version only after a grace period.
 *
 * Child caches will hold extra metadata needed for its operation. Fields are:
 *
 * @memcg: pointer to the memcg this cache belongs to
 * @list: list_head for the list of all caches in this memcg
 * @root_cache: pointer to the global, root cache, this cache was derived from
 * @nr_pages: number of pages that belongs to this cache.
 */
struct memcg_cache_params {
	bool is_root_cache;	/* selects which union member is valid */
	union {
		/* root cache only */
		struct {
			struct rcu_head rcu_head;	/* defers freeing of a replaced array */
			struct kmem_cache *memcg_caches[0];	/* per-memcg child caches */
		};
		/* child cache only */
		struct {
			struct mem_cgroup *memcg;
			struct list_head list;
			struct kmem_cache *root_cache;
			atomic_t nr_pages;
		};
	};
};
546
/* Grow every root cache's memcg array to hold @num_memcgs entries. */
int memcg_update_all_caches(int num_memcgs);

struct seq_file;
/* /proc/slabinfo output helpers, implemented by the active slab allocator. */
int cache_show(struct kmem_cache *s, struct seq_file *m);
void print_slabinfo_header(struct seq_file *m);
552
553 /**
554 * kmalloc_array - allocate memory for an array.
555 * @n: number of elements.
556 * @size: element size.
557 * @flags: the type of memory to allocate (see kmalloc).
558 */
559 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
560 {
561 if (size != 0 && n > SIZE_MAX / size)
562 return NULL;
563 return __kmalloc(n * size, flags);
564 }
565
566 /**
567 * kcalloc - allocate memory for an array. The memory is set to zero.
568 * @n: number of elements.
569 * @size: element size.
570 * @flags: the type of memory to allocate (see kmalloc).
571 */
572 static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
573 {
574 return kmalloc_array(n, size, flags | __GFP_ZERO);
575 }
576
/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
#else
/* No caller-tracking support configured: plain __kmalloc() is equivalent. */
#define kmalloc_track_caller(size, flags) \
	__kmalloc(size, flags)
#endif /* DEBUG_SLAB */
595
#ifdef CONFIG_NUMA
/*
 * kmalloc_node_track_caller is a special version of kmalloc_node that
 * records the calling function of the routine calling it for slab leak
 * tracking instead of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc_node comes from a widely-used
 * standard allocator where we care about the real place the memory
 * allocation request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)
#else
/* No caller-tracking support configured: plain __kmalloc_node() is equivalent. */
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node(size, flags, node)
#endif

#else /* CONFIG_NUMA */

/* !NUMA: the node argument is meaningless, fall back to the local variant. */
#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */
623
624 /*
625 * Shortcuts
626 */
627 static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
628 {
629 return kmem_cache_alloc(k, flags | __GFP_ZERO);
630 }
631
632 /**
633 * kzalloc - allocate memory. The memory is set to zero.
634 * @size: how many bytes of memory are required.
635 * @flags: the type of memory to allocate (see kmalloc).
636 */
637 static inline void *kzalloc(size_t size, gfp_t flags)
638 {
639 return kmalloc(size, flags | __GFP_ZERO);
640 }
641
642 /**
643 * kzalloc_node - allocate zeroed memory from a particular memory node.
644 * @size: how many bytes of memory are required.
645 * @flags: the type of memory to allocate (see kmalloc).
646 * @node: memory node from which to allocate
647 */
648 static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
649 {
650 return kmalloc_node(size, flags | __GFP_ZERO, node);
651 }
652
/*
 * Determine the size of a slab object
 */
static inline unsigned int kmem_cache_size(struct kmem_cache *s)
{
	/* Report the object size recorded in the cache descriptor. */
	return s->object_size;
}

/* Late slab-bootstrap hook, called once during kernel init. */
void __init kmem_cache_init_late(void);
663 #endif /* _LINUX_SLAB_H */ 1 #ifndef __LINUX_SPINLOCK_H
2 #define __LINUX_SPINLOCK_H
3
4 /*
5 * include/linux/spinlock.h - generic spinlock/rwlock declarations
6 *
7 * here's the role of the various spinlock/rwlock related include files:
8 *
9 * on SMP builds:
10 *
11 * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
12 * initializers
13 *
14 * linux/spinlock_types.h:
15 * defines the generic type and initializers
16 *
17 * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel
18 * implementations, mostly inline assembly code
19 *
20 * (also included on UP-debug builds:)
21 *
22 * linux/spinlock_api_smp.h:
23 * contains the prototypes for the _spin_*() APIs.
24 *
25 * linux/spinlock.h: builds the final spin_*() APIs.
26 *
27 * on UP builds:
28 *
29 * linux/spinlock_type_up.h:
30 * contains the generic, simplified UP spinlock type.
31 * (which is an empty structure on non-debug builds)
32 *
33 * linux/spinlock_types.h:
34 * defines the generic type and initializers
35 *
36 * linux/spinlock_up.h:
37 * contains the arch_spin_*()/etc. version of UP
38 * builds. (which are NOPs on non-debug, non-preempt
39 * builds)
40 *
41 * (included on UP-non-debug builds:)
42 *
43 * linux/spinlock_api_up.h:
44 * builds the _spin_*() APIs.
45 *
46 * linux/spinlock.h: builds the final spin_*() APIs.
47 */
48
49 #include <linux/typecheck.h>
50 #include <linux/preempt.h>
51 #include <linux/linkage.h>
52 #include <linux/compiler.h>
53 #include <linux/irqflags.h>
54 #include <linux/thread_info.h>
55 #include <linux/kernel.h>
56 #include <linux/stringify.h>
57 #include <linux/bottom_half.h>
58 #include <asm/barrier.h>
59
60
/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

/* Open the out-of-line lock text section (label emitted only once). */
#define LOCK_SECTION_START(extra) \
	".subsection 1\n\t" \
	extra \
	".ifndef " LOCK_SECTION_NAME "\n\t" \
	LOCK_SECTION_NAME ":\n\t" \
	".endif\n"

#define LOCK_SECTION_END \
	".previous\n\t"

/* Place lock slowpath functions in their own text section. */
#define __lockfunc __attribute__((section(".spinlock.text")))
77
78 /*
79 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
80 */
81 #include <linux/spinlock_types.h>
82
83 /*
84 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
85 */
86 #ifdef CONFIG_SMP
87 # include <asm/spinlock.h>
88 #else
89 # include <linux/spinlock_up.h>
90 #endif
91
#ifdef CONFIG_DEBUG_SPINLOCK
extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				struct lock_class_key *key);
/* Debug build: register one static lockdep class key per init call site. */
# define raw_spin_lock_init(lock) \
do { \
	static struct lock_class_key __key; \
	\
	__raw_spin_lock_init((lock), #lock, &__key); \
} while (0)

#else
# define raw_spin_lock_init(lock) \
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
#else
/* No arch support: evaluate @lock (for typing/side effects), report 0. */
#define raw_spin_is_contended(lock) (((void)(lock), 0))
#endif /*arch_spin_is_contended*/
#endif
119
/*
 * Despite its name it doesn't necessarily has to be a full barrier.
 * It should only guarantee that a STORE before the critical section
 * can not be reordered with a LOAD inside this section.
 * spin_lock() is the one-way barrier, this LOAD can not escape out
 * of the region. So the default implementation simply ensures that
 * a STORE can not move into the critical section, smp_wmb() should
 * serialize it with another STORE done by spin_lock().
 */
#ifndef smp_mb__before_spinlock
#define smp_mb__before_spinlock() smp_wmb()
#endif

/*
 * Place this after a lock-acquisition primitive to guarantee that
 * an UNLOCK+LOCK pair act as a full barrier. This guarantee applies
 * if the UNLOCK and LOCK are executed by the same CPU or if the
 * UNLOCK and LOCK operate on the same lock variable.
 */
#ifndef smp_mb__after_unlock_lock
#define smp_mb__after_unlock_lock() do { } while (0)
#endif

/**
 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 */
#define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
148
#ifdef CONFIG_DEBUG_SPINLOCK
/* Debug build: out-of-line implementations with sanity checking. */
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
/* the debug variant ignores the saved flags */
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
/* Non-debug build: thin wrappers around the arch primitives.
 * __acquire()/__release() are sparse annotations only. */
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
}

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif
179
/*
 * Define the various spin_lock methods. Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
 * various methods are defined as nops in the case they are not
 * required.
 */
/* Non-zero when the lock was acquired (see raw_spin_trylock_irq below). */
#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock) _raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock) \
do { \
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
	_raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
} while (0)
#else
/* Without lockdep the nesting annotations degrade to a plain lock. */
# define raw_spin_lock_nested(lock, subclass) _raw_spin_lock(lock)
# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
#endif
203
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

/* SMP/debug: the _raw function returns the saved flags. */
#define raw_spin_lock_irqsave(lock, flags) \
do { \
	typecheck(unsigned long, flags); \
	flags = _raw_spin_lock_irqsave(lock); \
} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
	typecheck(unsigned long, flags); \
	flags = _raw_spin_lock_irqsave_nested(lock, subclass); \
} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
	typecheck(unsigned long, flags); \
	flags = _raw_spin_lock_irqsave(lock); \
} while (0)
#endif

#else

/* UP non-debug: flags are filled in by the macro implementation. */
#define raw_spin_lock_irqsave(lock, flags) \
do { \
	typecheck(unsigned long, flags); \
	_raw_spin_lock_irqsave(lock, flags); \
} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
	raw_spin_lock_irqsave(lock, flags)

#endif
238
#define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock) _raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags) \
do { \
	typecheck(unsigned long, flags); \
	_raw_spin_unlock_irqrestore(lock, flags); \
} while (0)
#define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

/* Disable irqs, try the lock; re-enable irqs again on failure. */
#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0; }); \
})

/* Save+disable irqs, try the lock; restore flags on failure. */
#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})

/**
 * raw_spin_can_lock - would raw_spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
273
274 /* Include rwlock functions */
275 #include <linux/rwlock.h>
276
277 /*
278 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
279 */
280 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
281 # include <linux/spinlock_api_smp.h>
282 #else
283 # include <linux/spinlock_api_up.h>
284 #endif
285
/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

/* Type-checks that @lock is a spinlock_t and yields its raw lock. */
static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#define spin_lock_init(_lock) \
do { \
	spinlock_check(_lock); \
	raw_spin_lock_init(&(_lock)->rlock); \
} while (0)
300
301 static inline void spin_lock(spinlock_t *lock)
302 {
303 raw_spin_lock(&lock->rlock);
304 }
305
306 static inline void spin_lock_bh(spinlock_t *lock)
307 {
308 raw_spin_lock_bh(&lock->rlock);
309 }
310
311 static inline int spin_trylock(spinlock_t *lock)
312 {
313 return raw_spin_trylock(&lock->rlock);
314 }
315
/* Lockdep-annotated variants; spinlock_check() enforces the argument type. */
#define spin_lock_nested(lock, subclass) \
do { \
	raw_spin_lock_nested(spinlock_check(lock), subclass); \
} while (0)

#define spin_lock_nest_lock(lock, nest_lock) \
do { \
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
} while (0)
325
326 static inline void spin_lock_irq(spinlock_t *lock)
327 {
328 raw_spin_lock_irq(&lock->rlock);
329 }
330
/* Acquire @lock, saving the current irq state into @flags. */
#define spin_lock_irqsave(lock, flags) \
do { \
	raw_spin_lock_irqsave(spinlock_check(lock), flags); \
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)
340
/* Release @lock. */
static inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

/* Release @lock and re-enable bottom halves. */
static inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

/* Release @lock and re-enable local interrupts. */
static inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

/* Release @lock and restore the irq state saved in @flags. */
static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

/* Try to acquire @lock with bottom halves disabled. */
static inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

/* Try to acquire @lock with local interrupts disabled. */
static inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

/* Try to acquire @lock, saving irq state into @flags on success. */
#define spin_trylock_irqsave(lock, flags) \
({ \
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

/* Spin until @lock is observed unlocked (does not acquire it). */
static inline void spin_unlock_wait(spinlock_t *lock)
{
	raw_spin_unlock_wait(&lock->rlock);
}

static inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

/* Would spin_trylock() succeed right now? (inherently racy hint) */
static inline int spin_can_lock(spinlock_t *lock)
{
	return raw_spin_can_lock(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
397
/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1. If the result is 0, returns true and locks
 * @lock. Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
414
415 #endif /* __LINUX_SPINLOCK_H */ 1 #ifndef __LINUX_USB_H
2 #define __LINUX_USB_H
3
4 #include <linux/mod_devicetable.h>
5 #include <linux/usb/ch9.h>
6
7 #define USB_MAJOR 180
8 #define USB_DEVICE_MAJOR 189
9
10
11 #ifdef __KERNEL__
12
13 #include <linux/errno.h> /* for -ENODEV */
14 #include <linux/delay.h> /* for mdelay() */
15 #include <linux/interrupt.h> /* for in_interrupt() */
16 #include <linux/list.h> /* for struct list_head */
17 #include <linux/kref.h> /* for struct kref */
18 #include <linux/device.h> /* for struct device */
19 #include <linux/fs.h> /* for struct file_operations */
20 #include <linux/completion.h> /* for struct completion */
21 #include <linux/sched.h> /* for current && schedule_timeout */
22 #include <linux/mutex.h> /* for struct mutex */
23 #include <linux/pm_runtime.h> /* for runtime PM */
24
25 struct usb_device;
26 struct usb_driver;
27 struct wusb_dev;
28
29 /*-------------------------------------------------------------------------*/
30
31 /*
32 * Host-side wrappers for standard USB descriptors ... these are parsed
33 * from the data provided by devices. Parsing turns them from a flat
34 * sequence of descriptors into a hierarchy:
35 *
36 * - devices have one (usually) or more configs;
37 * - configs have one (often) or more interfaces;
38 * - interfaces have one (usually) or more settings;
39 * - each interface setting has zero or (usually) more endpoints.
40 * - a SuperSpeed endpoint has a companion descriptor
41 *
42 * And there might be other descriptors mixed in with those.
43 *
44 * Devices may also have class-specific or vendor-specific descriptors.
45 */
46
47 struct ep_device;
48
/**
 * struct usb_host_endpoint - host-side endpoint descriptor and queue
 * @desc: descriptor for this endpoint, wMaxPacketSize in native byteorder
 * @ss_ep_comp: SuperSpeed companion descriptor for this endpoint
 * @urb_list: urbs queued to this endpoint; maintained by usbcore
 * @hcpriv: for use by HCD; typically holds hardware dma queue head (QH)
 *	with one or more transfer descriptors (TDs) per urb
 * @ep_dev: ep_device for sysfs info
 * @extra: descriptors following this endpoint in the configuration
 * @extralen: how many bytes of "extra" are valid
 * @enabled: URBs may be submitted to this endpoint
 * @streams: number of USB-3 streams allocated on the endpoint
 *
 * USB requests are always queued to a given endpoint, identified by a
 * descriptor within an active interface in a given USB configuration.
 */
struct usb_host_endpoint {
	struct usb_endpoint_descriptor desc;
	struct usb_ss_ep_comp_descriptor ss_ep_comp;
	struct list_head urb_list;
	void *hcpriv;
	struct ep_device *ep_dev;	/* For sysfs info */

	unsigned char *extra;	/* Extra descriptors */
	int extralen;
	int enabled;
	int streams;
};
77
/* host-side wrapper for one interface setting's parsed descriptors */
struct usb_host_interface {
	struct usb_interface_descriptor desc;

	int extralen;
	unsigned char *extra;	/* Extra descriptors */

	/* array of desc.bNumEndpoint endpoints associated with this
	 * interface setting. these will be in no particular order.
	 */
	struct usb_host_endpoint *endpoint;

	char *string;	/* iInterface string, if present */
};

/* Driver-binding lifecycle states of a struct usb_interface. */
enum usb_interface_condition {
	USB_INTERFACE_UNBOUND = 0,	/* no driver bound */
	USB_INTERFACE_BINDING,		/* probe() in progress */
	USB_INTERFACE_BOUND,		/* driver bound */
	USB_INTERFACE_UNBINDING,	/* disconnect() in progress */
};
99
/**
 * struct usb_interface - what usb device drivers talk to
 * @altsetting: array of interface structures, one for each alternate
 *	setting that may be selected. Each one includes a set of
 *	endpoint configurations. They will be in no particular order.
 * @cur_altsetting: the current altsetting.
 * @num_altsetting: number of altsettings defined.
 * @intf_assoc: interface association descriptor
 * @minor: the minor number assigned to this interface, if this
 *	interface is bound to a driver that uses the USB major number.
 *	If this interface does not use the USB major, this field should
 *	be unused. The driver should set this value in the probe()
 *	function of the driver, after it has been assigned a minor
 *	number from the USB core by calling usb_register_dev().
 * @condition: binding state of the interface: not bound, binding
 *	(in probe()), bound to a driver, or unbinding (in disconnect())
 * @sysfs_files_created: sysfs attributes exist
 * @ep_devs_created: endpoint child pseudo-devices exist
 * @unregistering: flag set when the interface is being unregistered
 * @needs_remote_wakeup: flag set when the driver requires remote-wakeup
 *	capability during autosuspend.
 * @needs_altsetting0: flag set when a set-interface request for altsetting 0
 *	has been deferred.
 * @needs_binding: flag set when the driver should be re-probed or unbound
 *	following a reset or suspend operation it doesn't support.
 * @dev: driver model's view of this device
 * @usb_dev: if an interface is bound to the USB major, this will point
 *	to the sysfs representation for that device.
 * @pm_usage_cnt: PM usage counter for this interface
 * @reset_ws: Used for scheduling resets from atomic context.
 * @reset_running: set to 1 if the interface is currently running a
 *	queued reset so that usb_cancel_queued_reset() doesn't try to
 *	remove from the workqueue when running inside the worker
 *	thread. See __usb_queue_reset_device().
 * @resetting_device: USB core reset the device, so use alt setting 0 as
 *	current; needs bandwidth alloc after reset.
 *
 * USB device drivers attach to interfaces on a physical device. Each
 * interface encapsulates a single high level function, such as feeding
 * an audio stream to a speaker or reporting a change in a volume control.
 * Many USB devices only have one interface. The protocol used to talk to
 * an interface's endpoints can be defined in a usb "class" specification,
 * or by a product's vendor. The (default) control endpoint is part of
 * every interface, but is never listed among the interface's descriptors.
 *
 * The driver that is bound to the interface can use standard driver model
 * calls such as dev_get_drvdata() on the dev member of this structure.
 *
 * Each interface may have alternate settings. The initial configuration
 * of a device sets altsetting 0, but the device driver can change
 * that setting using usb_set_interface(). Alternate settings are often
 * used to control the use of periodic endpoints, such as by having
 * different endpoints use different amounts of reserved USB bandwidth.
 * All standards-conformant USB devices that use isochronous endpoints
 * will use them in non-default settings.
 *
 * The USB specification says that alternate setting numbers must run from
 * 0 to one less than the total number of alternate settings. But some
 * devices manage to mess this up, and the structures aren't necessarily
 * stored in numerical order anyhow. Use usb_altnum_to_altsetting() to
 * look up an alternate setting in the altsetting array based on its number.
 */
struct usb_interface {
	/* array of alternate settings for this interface,
	 * stored in no particular order */
	struct usb_host_interface *altsetting;

	struct usb_host_interface *cur_altsetting;	/* the currently
					 * active alternate setting */
	unsigned num_altsetting;	/* number of alternate settings */

	/* If there is an interface association descriptor then it will list
	 * the associated interfaces */
	struct usb_interface_assoc_descriptor *intf_assoc;

	int minor;			/* minor number this interface is
					 * bound to */
	enum usb_interface_condition condition;		/* state of binding */
	unsigned sysfs_files_created:1;	/* the sysfs attributes exist */
	unsigned ep_devs_created:1;	/* endpoint "devices" exist */
	unsigned unregistering:1;	/* unregistration is in progress */
	unsigned needs_remote_wakeup:1;	/* driver requires remote wakeup */
	unsigned needs_altsetting0:1;	/* switch to altsetting 0 is pending */
	unsigned needs_binding:1;	/* needs delayed unbind/rebind */
	unsigned reset_running:1;	/* queued reset is executing */
	unsigned resetting_device:1;	/* true: bandwidth alloc after reset */

	struct device dev;		/* interface specific device info */
	struct device *usb_dev;
	atomic_t pm_usage_cnt;		/* usage counter for autosuspend */
	struct work_struct reset_ws;	/* for resets in atomic context */
};
/* Map an embedded struct device back to its usb_interface. */
#define to_usb_interface(d) container_of(d, struct usb_interface, dev)
193
194 static inline void *usb_get_intfdata(struct usb_interface *intf)
195 {
196 return dev_get_drvdata(&intf->dev);
197 }
198
199 static inline void usb_set_intfdata(struct usb_interface *intf, void *data)
200 {
201 dev_set_drvdata(&intf->dev, data);
202 }
203
/* get/put pair managing the interface's lifetime (implemented in usbcore) */
struct usb_interface *usb_get_intf(struct usb_interface *intf);
void usb_put_intf(struct usb_interface *intf);

/* Hard limit */
#define USB_MAXENDPOINTS	30
/* this maximum is arbitrary */
#define USB_MAXINTERFACES	32
#define USB_MAXIADS		(USB_MAXINTERFACES/2)
212
/**
 * struct usb_interface_cache - long-term representation of a device interface
 * @num_altsetting: number of altsettings defined.
 * @ref: reference counter.
 * @altsetting: variable-length array of interface structures, one for
 *	each alternate setting that may be selected. Each one includes a
 *	set of endpoint configurations. They will be in no particular order.
 *
 * These structures persist for the lifetime of a usb_device, unlike
 * struct usb_interface (which persists only as long as its configuration
 * is installed). The altsetting arrays can be accessed through these
 * structures at any time, permitting comparison of configurations and
 * providing support for the /proc/bus/usb/devices pseudo-file.
 */
struct usb_interface_cache {
	unsigned num_altsetting;	/* number of alternate settings */
	struct kref ref;		/* reference counter */

	/* variable-length array of alternate settings for this interface,
	 * stored in no particular order */
	struct usb_host_interface altsetting[0];
};
/* Map an embedded kref/altsetting pointer back to the containing cache. */
#define ref_to_usb_interface_cache(r) \
	container_of(r, struct usb_interface_cache, ref)
#define altsetting_to_usb_interface_cache(a) \
	container_of(a, struct usb_interface_cache, altsetting[0])
239
/**
 * struct usb_host_config - representation of a device's configuration
 * @desc: the device's configuration descriptor.
 * @string: pointer to the cached version of the iConfiguration string, if
 *	present for this configuration.
 * @intf_assoc: list of any interface association descriptors in this config
 * @interface: array of pointers to usb_interface structures, one for each
 *	interface in the configuration. The number of interfaces is stored
 *	in desc.bNumInterfaces. These pointers are valid only while the
 *	the configuration is active.
 * @intf_cache: array of pointers to usb_interface_cache structures, one
 *	for each interface in the configuration. These structures exist
 *	for the entire life of the device.
 * @extra: pointer to buffer containing all extra descriptors associated
 *	with this configuration (those preceding the first interface
 *	descriptor).
 * @extralen: length of the extra descriptors buffer.
 *
 * USB devices may have multiple configurations, but only one can be active
 * at any time. Each encapsulates a different operational environment;
 * for example, a dual-speed device would have separate configurations for
 * full-speed and high-speed operation. The number of configurations
 * available is stored in the device descriptor as bNumConfigurations.
 *
 * A configuration can contain multiple interfaces. Each corresponds to
 * a different function of the USB device, and all are available whenever
 * the configuration is active. The USB standard says that interfaces
 * are supposed to be numbered from 0 to desc.bNumInterfaces-1, but a lot
 * of devices get this wrong. In addition, the interface array is not
 * guaranteed to be sorted in numerical order. Use usb_ifnum_to_if() to
 * look up an interface entry based on its number.
 *
 * Device drivers should not attempt to activate configurations. The choice
 * of which configuration to install is a policy decision based on such
 * considerations as available power, functionality provided, and the user's
 * desires (expressed through userspace tools). However, drivers can call
 * usb_reset_configuration() to reinitialize the current configuration and
 * all its interfaces.
 */
struct usb_host_config {
	struct usb_config_descriptor desc;

	char *string;		/* iConfiguration string, if present */

	/* List of any Interface Association Descriptors in this
	 * configuration. */
	struct usb_interface_assoc_descriptor *intf_assoc[USB_MAXIADS];

	/* the interfaces associated with this configuration,
	 * stored in no particular order */
	struct usb_interface *interface[USB_MAXINTERFACES];

	/* Interface information available even when this is not the
	 * active configuration */
	struct usb_interface_cache *intf_cache[USB_MAXINTERFACES];

	unsigned char *extra;	/* Extra descriptors */
	int extralen;
};
299
/* USB2.0 and USB3.0 device BOS descriptor set */
struct usb_host_bos {
	struct usb_bos_descriptor *desc;

	/* wireless cap descriptor is handled by wusb */
	struct usb_ext_cap_descriptor *ext_cap;
	struct usb_ss_cap_descriptor *ss_cap;
	struct usb_ss_container_id_descriptor *ss_id;
};

/* Search a raw descriptor buffer for the first descriptor of @type. */
int __usb_get_extra_descriptor(char *buffer, unsigned size,
	unsigned char type, void **ptr);
/* Convenience wrapper taking any struct with extra/extralen members. */
#define usb_get_extra_descriptor(ifpoint, type, ptr) \
	__usb_get_extra_descriptor((ifpoint)->extra, \
				(ifpoint)->extralen, \
				type, (void **)ptr)
316
/* ----------------------------------------------------------------------- */

/* USB device number allocation bitmap */
struct usb_devmap {
	/* one bit per possible device address (128 in total) */
	unsigned long devicemap[128 / (8*sizeof(unsigned long))];
};
323
/*
 * Allocated per bus (tree of devices) we have:
 */
struct usb_bus {
	struct device *controller;	/* host/master side hardware */
	int busnum;			/* Bus number (in order of reg) */
	const char *bus_name;		/* stable id (PCI slot_name etc) */
	u8 uses_dma;			/* Does the host controller use DMA? */
	u8 uses_pio_for_control;	/*
					 * Does the host controller use PIO
					 * for control transfers?
					 */
	u8 otg_port;			/* 0, or number of OTG/HNP port */
	unsigned is_b_host:1;		/* true during some HNP roleswitches */
	unsigned b_hnp_enable:1;	/* OTG: did A-Host enable HNP? */
	unsigned no_stop_on_short:1;	/*
					 * Quirk: some controllers don't stop
					 * the ep queue on a short transfer
					 * with the URB_SHORT_NOT_OK flag set.
					 */
	unsigned no_sg_constraint:1;	/* no sg constraint */
	unsigned sg_tablesize;		/* 0 or largest number of sg list entries */

	int devnum_next;		/* Next open device number in
					 * round-robin allocation */

	struct usb_devmap devmap;	/* device address allocation map */
	struct usb_device *root_hub;	/* Root hub */
	struct usb_bus *hs_companion;	/* Companion EHCI bus, if any */
	struct list_head bus_list;	/* list of busses */

	struct mutex usb_address0_mutex; /* unaddressed device mutex */

	int bandwidth_allocated;	/* on this bus: how much of the time
					 * reserved for periodic (intr/iso)
					 * requests is used, on average?
					 * Units: microseconds/frame.
					 * Limits: Full/low speed reserve 90%,
					 * while high speed reserves 80%.
					 */
	int bandwidth_int_reqs;		/* number of Interrupt requests */
	int bandwidth_isoc_reqs;	/* number of Isoc. requests */

	unsigned resuming_ports;	/* bit array: resuming root-hub ports */

#if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
	struct mon_bus *mon_bus;	/* non-null when associated */
	int monitored;			/* non-zero when monitored */
#endif
};
374
/* Opaque per-open-file state; used by the port claiming functions below */
struct usb_dev_state;

/* ----------------------------------------------------------------------- */

struct usb_tt;

/* Can the device be physically unplugged from its port? */
enum usb_device_removable {
	USB_DEVICE_REMOVABLE_UNKNOWN = 0,
	USB_DEVICE_REMOVABLE,
	USB_DEVICE_FIXED,
};

/* How (or whether) a hub port is physically used */
enum usb_port_connect_type {
	USB_PORT_CONNECT_TYPE_UNKNOWN = 0,
	USB_PORT_CONNECT_TYPE_HOT_PLUG,
	USB_PORT_CONNECT_TYPE_HARD_WIRED,
	USB_PORT_NOT_USED,
};
393
/*
 * USB 2.0 Link Power Management (LPM) parameters.
 */
struct usb2_lpm_parameters {
	/* Best effort service latency indicates how long the host will drive
	 * resume on an exit from L1.
	 */
	unsigned int besl;

	/* Timeout value in microseconds for the L1 inactivity (LPM) timer.
	 * When the timer counts to zero, the parent hub will initiate a LPM
	 * transition to L1.
	 */
	int timeout;
};
409
/*
 * USB 3.0 Link Power Management (LPM) parameters.
 *
 * PEL and SEL are USB 3.0 Link PM latencies for device-initiated LPM exit.
 * MEL is the USB 3.0 Link PM latency for host-initiated LPM exit.
 * All three are stored in nanoseconds.
 */
struct usb3_lpm_parameters {
	/*
	 * Maximum exit latency (MEL) for the host to send a packet to the
	 * device (either a Ping for isoc endpoints, or a data packet for
	 * interrupt endpoints), the hubs to decode the packet, and for all hubs
	 * in the path to transition the links to U0.
	 */
	unsigned int mel;
	/*
	 * Maximum exit latency for a device-initiated LPM transition to bring
	 * all links into U0.  Abbreviated as "PEL" in section 9.4.12 of the USB
	 * 3.0 spec, with no explanation of what "P" stands for.  "Path"?
	 */
	unsigned int pel;

	/*
	 * The System Exit Latency (SEL) includes PEL, and three other
	 * latencies.  After a device initiates a U0 transition, it will take
	 * some time from when the device sends the ERDY to when it will finally
	 * receive the data packet.  Basically, SEL should be the worst-case
	 * latency from when a device starts initiating a U0 transition to when
	 * it will get data.
	 */
	unsigned int sel;
	/*
	 * The idle timeout value that is currently programmed into the parent
	 * hub for this device.  When the timer counts to zero, the parent hub
	 * will initiate an LPM transition to either U1 or U2.
	 */
	int timeout;
};
448
449 /**
450 * struct usb_device - kernel's representation of a USB device
451 * @devnum: device number; address on a USB bus
452 * @devpath: device ID string for use in messages (e.g., /port/...)
453 * @route: tree topology hex string for use with xHCI
454 * @state: device state: configured, not attached, etc.
455 * @speed: device speed: high/full/low (or error)
456 * @tt: Transaction Translator info; used with low/full speed dev, highspeed hub
457 * @ttport: device port on that tt hub
458 * @toggle: one bit for each endpoint, with ([0] = IN, [1] = OUT) endpoints
459 * @parent: our hub, unless we're the root
460 * @bus: bus we're part of
461 * @ep0: endpoint 0 data (default control pipe)
462 * @dev: generic device interface
463 * @descriptor: USB device descriptor
464 * @bos: USB device BOS descriptor set
465 * @config: all of the device's configs
466 * @actconfig: the active configuration
467 * @ep_in: array of IN endpoints
468 * @ep_out: array of OUT endpoints
469 * @rawdescriptors: raw descriptors for each config
470 * @bus_mA: Current available from the bus
471 * @portnum: parent port number (origin 1)
472 * @level: number of USB hub ancestors
473 * @can_submit: URBs may be submitted
474 * @persist_enabled: USB_PERSIST enabled for this device
475 * @have_langid: whether string_langid is valid
476 * @authorized: policy has said we can use it;
477 * (user space) policy determines if we authorize this device to be
478 * used or not. By default, wired USB devices are authorized.
479 * WUSB devices are not, until we authorize them from user space.
480 * FIXME -- complete doc
481 * @authenticated: Crypto authentication passed
482 * @wusb: device is Wireless USB
483 * @lpm_capable: device supports LPM
484 * @usb2_hw_lpm_capable: device can perform USB2 hardware LPM
485 * @usb2_hw_lpm_besl_capable: device can perform USB2 hardware BESL LPM
486 * @usb2_hw_lpm_enabled: USB2 hardware LPM is enabled
487 * @usb2_hw_lpm_allowed: Userspace allows USB 2.0 LPM to be enabled
488 * @usb3_lpm_enabled: USB3 hardware LPM enabled
489 * @string_langid: language ID for strings
490 * @product: iProduct string, if present (static)
491 * @manufacturer: iManufacturer string, if present (static)
492 * @serial: iSerialNumber string, if present (static)
493 * @filelist: usbfs files that are open to this device
494 * @maxchild: number of ports if hub
495 * @quirks: quirks of the whole device
496 * @urbnum: number of URBs submitted for the whole device
497 * @active_duration: total time device is not suspended
498 * @connect_time: time device was first connected
499 * @do_remote_wakeup: remote wakeup should be enabled
500 * @reset_resume: needs reset instead of resume
501 * @port_is_suspended: the upstream port is suspended (L2 or U3)
502 * @wusb_dev: if this is a Wireless USB device, link to the WUSB
503 * specific data for the device.
504 * @slot_id: Slot ID assigned by xHCI
505 * @removable: Device can be physically removed from this port
 * @l1_params: best effort service latency for USB2 L1 LPM state, and L1 timeout.
507 * @u1_params: exit latencies for USB3 U1 LPM state, and hub-initiated timeout.
508 * @u2_params: exit latencies for USB3 U2 LPM state, and hub-initiated timeout.
509 * @lpm_disable_count: Ref count used by usb_disable_lpm() and usb_enable_lpm()
510 * to keep track of the number of functions that require USB 3.0 Link Power
511 * Management to be disabled for this usb_device. This count should only
 *	be manipulated by those functions, while the bandwidth_mutex is held.
513 *
514 * Notes:
515 * Usbcore drivers should not set usbdev->state directly. Instead use
516 * usb_set_device_state().
517 */
struct usb_device {
	/* See the kerneldoc comment above for per-field documentation. */
	int devnum;
	char devpath[16];
	u32 route;
	enum usb_device_state state;
	enum usb_device_speed speed;

	struct usb_tt *tt;
	int ttport;

	unsigned int toggle[2];

	/* topology: parent hub (NULL for a root hub) and owning bus */
	struct usb_device *parent;
	struct usb_bus *bus;
	struct usb_host_endpoint ep0;

	struct device dev;

	/* descriptors, configurations, and endpoint lookup tables */
	struct usb_device_descriptor descriptor;
	struct usb_host_bos *bos;
	struct usb_host_config *config;

	struct usb_host_config *actconfig;
	struct usb_host_endpoint *ep_in[16];
	struct usb_host_endpoint *ep_out[16];

	char **rawdescriptors;

	unsigned short bus_mA;
	u8 portnum;
	u8 level;

	/* single-bit status/capability flags */
	unsigned can_submit:1;
	unsigned persist_enabled:1;
	unsigned have_langid:1;
	unsigned authorized:1;
	unsigned authenticated:1;
	unsigned wusb:1;
	unsigned lpm_capable:1;
	unsigned usb2_hw_lpm_capable:1;
	unsigned usb2_hw_lpm_besl_capable:1;
	unsigned usb2_hw_lpm_enabled:1;
	unsigned usb2_hw_lpm_allowed:1;
	unsigned usb3_lpm_enabled:1;
	int string_langid;

	/* static strings from the device */
	char *product;
	char *manufacturer;
	char *serial;

	struct list_head filelist;

	int maxchild;

	u32 quirks;
	atomic_t urbnum;

	unsigned long active_duration;

#ifdef CONFIG_PM
	unsigned long connect_time;

	unsigned do_remote_wakeup:1;
	unsigned reset_resume:1;
	unsigned port_is_suspended:1;
#endif
	struct wusb_dev *wusb_dev;
	int slot_id;
	enum usb_device_removable removable;
	struct usb2_lpm_parameters l1_params;
	struct usb3_lpm_parameters u1_params;
	struct usb3_lpm_parameters u2_params;
	unsigned lpm_disable_count;
};
/* Convert an embedded struct device back to its enclosing usb_device */
#define to_usb_device(d) container_of(d, struct usb_device, dev)
594
595 static inline struct usb_device *interface_to_usbdev(struct usb_interface *intf)
596 {
597 return to_usb_device(intf->dev.parent);
598 }
599
600 extern struct usb_device *usb_get_dev(struct usb_device *dev);
601 extern void usb_put_dev(struct usb_device *dev);
602 extern struct usb_device *usb_hub_find_child(struct usb_device *hdev,
603 int port1);
604
/**
 * usb_hub_for_each_child - iterate over all child devices on the hub
 * @hdev: USB device belonging to the usb hub
 * @port1: portnum associated with child device
 * @child: child device pointer
 */
/*
 * The trailing "if (!child) continue; else" skips ports with no connected
 * device, while the dangling "else" keeps the macro safe to follow with
 * either a braced or an unbraced loop body.
 */
#define usb_hub_for_each_child(hdev, port1, child) \
	for (port1 = 1,	child =	usb_hub_find_child(hdev, port1); \
			port1 <= hdev->maxchild; \
			child = usb_hub_find_child(hdev, ++port1)) \
		if (!child) continue; else
616
/* USB device locking */
/* Thin wrappers over the driver-model device lock of a usb_device */
#define usb_lock_device(udev)		device_lock(&(udev)->dev)
#define usb_unlock_device(udev)		device_unlock(&(udev)->dev)
#define usb_trylock_device(udev)	device_trylock(&(udev)->dev)
extern int usb_lock_device_for_reset(struct usb_device *udev,
				     const struct usb_interface *iface);

/* USB port reset for device reinitialization */
extern int usb_reset_device(struct usb_device *dev);
extern void usb_queue_reset_device(struct usb_interface *dev);
627
#ifdef CONFIG_ACPI
/* ACPI-backed control/query of per-port power on a hub device */
extern int usb_acpi_set_power_state(struct usb_device *hdev, int index,
	bool enable);
extern bool usb_acpi_power_manageable(struct usb_device *hdev, int index);
#else
/*
 * !CONFIG_ACPI stubs keep callers buildable.  NOTE(review): the stub
 * reports every port as power manageable (true) while set_power_state
 * is a successful no-op -- confirm callers tolerate that combination.
 */
static inline int usb_acpi_set_power_state(struct usb_device *hdev, int index,
	bool enable) { return 0; }
static inline bool usb_acpi_power_manageable(struct usb_device *hdev, int index)
{ return true; }
#endif
638
/* USB autosuspend and autoresume */
#ifdef CONFIG_PM_RUNTIME
extern void usb_enable_autosuspend(struct usb_device *udev);
extern void usb_disable_autosuspend(struct usb_device *udev);

extern int usb_autopm_get_interface(struct usb_interface *intf);
extern void usb_autopm_put_interface(struct usb_interface *intf);
extern int usb_autopm_get_interface_async(struct usb_interface *intf);
extern void usb_autopm_put_interface_async(struct usb_interface *intf);
extern void usb_autopm_get_interface_no_resume(struct usb_interface *intf);
extern void usb_autopm_put_interface_no_suspend(struct usb_interface *intf);

/* Record "now" as the device's last-busy time for runtime-PM idle tracking */
static inline void usb_mark_last_busy(struct usb_device *udev)
{
	pm_runtime_mark_last_busy(&udev->dev);
}

#else

/*
 * !CONFIG_PM_RUNTIME stubs: "get" variants report success so drivers
 * proceed as if the device were always resumed.
 * NOTE(review): the two autosuspend stubs return int while the real
 * declarations above are void -- callers must be ignoring the value;
 * confirm before relying on it.
 */
static inline int usb_enable_autosuspend(struct usb_device *udev)
{ return 0; }
static inline int usb_disable_autosuspend(struct usb_device *udev)
{ return 0; }

static inline int usb_autopm_get_interface(struct usb_interface *intf)
{ return 0; }
static inline int usb_autopm_get_interface_async(struct usb_interface *intf)
{ return 0; }

static inline void usb_autopm_put_interface(struct usb_interface *intf)
{ }
static inline void usb_autopm_put_interface_async(struct usb_interface *intf)
{ }
static inline void usb_autopm_get_interface_no_resume(
		struct usb_interface *intf)
{ }
static inline void usb_autopm_put_interface_no_suspend(
		struct usb_interface *intf)
{ }
static inline void usb_mark_last_busy(struct usb_device *udev)
{ }
#endif
681
682 extern int usb_disable_lpm(struct usb_device *udev);
683 extern void usb_enable_lpm(struct usb_device *udev);
684 /* Same as above, but these functions lock/unlock the bandwidth_mutex. */
685 extern int usb_unlocked_disable_lpm(struct usb_device *udev);
686 extern void usb_unlocked_enable_lpm(struct usb_device *udev);
687
688 extern int usb_disable_ltm(struct usb_device *udev);
689 extern void usb_enable_ltm(struct usb_device *udev);
690
691 static inline bool usb_device_supports_ltm(struct usb_device *udev)
692 {
693 if (udev->speed != USB_SPEED_SUPER || !udev->bos || !udev->bos->ss_cap)
694 return false;
695 return udev->bos->ss_cap->bmAttributes & USB_LTM_SUPPORT;
696 }
697
698 static inline bool usb_device_no_sg_constraint(struct usb_device *udev)
699 {
700 return udev && udev->bus && udev->bus->no_sg_constraint;
701 }
702
703
704 /*-------------------------------------------------------------------------*/
705
706 /* for drivers using iso endpoints */
707 extern int usb_get_current_frame_number(struct usb_device *usb_dev);
708
709 /* Sets up a group of bulk endpoints to support multiple stream IDs. */
710 extern int usb_alloc_streams(struct usb_interface *interface,
711 struct usb_host_endpoint **eps, unsigned int num_eps,
712 unsigned int num_streams, gfp_t mem_flags);
713
714 /* Reverts a group of bulk endpoints back to not using stream IDs. */
715 extern int usb_free_streams(struct usb_interface *interface,
716 struct usb_host_endpoint **eps, unsigned int num_eps,
717 gfp_t mem_flags);
718
/* use these for multi-interface device registration */
720 extern int usb_driver_claim_interface(struct usb_driver *driver,
721 struct usb_interface *iface, void *priv);
722
723 /**
724 * usb_interface_claimed - returns true iff an interface is claimed
725 * @iface: the interface being checked
726 *
727 * Return: %true (nonzero) iff the interface is claimed, else %false
728 * (zero).
729 *
730 * Note:
731 * Callers must own the driver model's usb bus readlock. So driver
732 * probe() entries don't need extra locking, but other call contexts
733 * may need to explicitly claim that lock.
734 *
735 */
736 static inline int usb_interface_claimed(struct usb_interface *iface)
737 {
738 return (iface->dev.driver != NULL);
739 }
740
741 extern void usb_driver_release_interface(struct usb_driver *driver,
742 struct usb_interface *iface);
743 const struct usb_device_id *usb_match_id(struct usb_interface *interface,
744 const struct usb_device_id *id);
745 extern int usb_match_one_id(struct usb_interface *interface,
746 const struct usb_device_id *id);
747
748 extern int usb_for_each_dev(void *data, int (*fn)(struct usb_device *, void *));
749 extern struct usb_interface *usb_find_interface(struct usb_driver *drv,
750 int minor);
751 extern struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev,
752 unsigned ifnum);
753 extern struct usb_host_interface *usb_altnum_to_altsetting(
754 const struct usb_interface *intf, unsigned int altnum);
755 extern struct usb_host_interface *usb_find_alt_setting(
756 struct usb_host_config *config,
757 unsigned int iface_num,
758 unsigned int alt_num);
759
760 /* port claiming functions */
761 int usb_hub_claim_port(struct usb_device *hdev, unsigned port1,
762 struct usb_dev_state *owner);
763 int usb_hub_release_port(struct usb_device *hdev, unsigned port1,
764 struct usb_dev_state *owner);
765
766 /**
767 * usb_make_path - returns stable device path in the usb tree
768 * @dev: the device whose path is being constructed
769 * @buf: where to put the string
770 * @size: how big is "buf"?
771 *
772 * Return: Length of the string (> 0) or negative if size was too small.
773 *
774 * Note:
775 * This identifier is intended to be "stable", reflecting physical paths in
776 * hardware such as physical bus addresses for host controllers or ports on
777 * USB hubs. That makes it stay the same until systems are physically
778 * reconfigured, by re-cabling a tree of USB devices or by moving USB host
779 * controllers. Adding and removing devices, including virtual root hubs
780 * in host controller driver modules, does not change these path identifiers;
781 * neither does rebooting or re-enumerating. These are more useful identifiers
782 * than changeable ("unstable") ones like bus numbers or device addresses.
783 *
784 * With a partial exception for devices connected to USB 2.0 root hubs, these
785 * identifiers are also predictable. So long as the device tree isn't changed,
786 * plugging any USB device into a given hub port always gives it the same path.
787 * Because of the use of "companion" controllers, devices connected to ports on
788 * USB 2.0 root hubs (EHCI host controllers) will get one path ID if they are
789 * high speed, and a different one if they are full or low speed.
790 */
791 static inline int usb_make_path(struct usb_device *dev, char *buf, size_t size)
792 {
793 int actual;
794 actual = snprintf(buf, size, "usb-%s-%s", dev->bus->bus_name,
795 dev->devpath);
796 return (actual >= (int)size) ? -1 : actual;
797 }
798
799 /*-------------------------------------------------------------------------*/
800
/*
 * The macros below expand to designated initializers for struct
 * usb_device_id fields, so they must be used inside the braces of an
 * id-table entry.
 */
#define USB_DEVICE_ID_MATCH_DEVICE \
		(USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT)
#define USB_DEVICE_ID_MATCH_DEV_RANGE \
		(USB_DEVICE_ID_MATCH_DEV_LO | USB_DEVICE_ID_MATCH_DEV_HI)
#define USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION \
		(USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_DEV_RANGE)
#define USB_DEVICE_ID_MATCH_DEV_INFO \
		(USB_DEVICE_ID_MATCH_DEV_CLASS | \
		USB_DEVICE_ID_MATCH_DEV_SUBCLASS | \
		USB_DEVICE_ID_MATCH_DEV_PROTOCOL)
#define USB_DEVICE_ID_MATCH_INT_INFO \
		(USB_DEVICE_ID_MATCH_INT_CLASS | \
		USB_DEVICE_ID_MATCH_INT_SUBCLASS | \
		USB_DEVICE_ID_MATCH_INT_PROTOCOL)

/**
 * USB_DEVICE - macro used to describe a specific usb device
 * @vend: the 16 bit USB Vendor ID
 * @prod: the 16 bit USB Product ID
 *
 * This macro is used to create a struct usb_device_id that matches a
 * specific device.
 */
#define USB_DEVICE(vend, prod) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE, \
	.idVendor = (vend), \
	.idProduct = (prod)
/**
 * USB_DEVICE_VER - describe a specific usb device with a version range
 * @vend: the 16 bit USB Vendor ID
 * @prod: the 16 bit USB Product ID
 * @lo: the bcdDevice_lo value
 * @hi: the bcdDevice_hi value
 *
 * This macro is used to create a struct usb_device_id that matches a
 * specific device, with a version range.
 */
#define USB_DEVICE_VER(vend, prod, lo, hi) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION, \
	.idVendor = (vend), \
	.idProduct = (prod), \
	.bcdDevice_lo = (lo), \
	.bcdDevice_hi = (hi)

/**
 * USB_DEVICE_INTERFACE_CLASS - describe a usb device with a specific interface class
 * @vend: the 16 bit USB Vendor ID
 * @prod: the 16 bit USB Product ID
 * @cl: bInterfaceClass value
 *
 * This macro is used to create a struct usb_device_id that matches a
 * specific interface class of devices.
 */
#define USB_DEVICE_INTERFACE_CLASS(vend, prod, cl) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
		       USB_DEVICE_ID_MATCH_INT_CLASS, \
	.idVendor = (vend), \
	.idProduct = (prod), \
	.bInterfaceClass = (cl)

/**
 * USB_DEVICE_INTERFACE_PROTOCOL - describe a usb device with a specific interface protocol
 * @vend: the 16 bit USB Vendor ID
 * @prod: the 16 bit USB Product ID
 * @pr: bInterfaceProtocol value
 *
 * This macro is used to create a struct usb_device_id that matches a
 * specific interface protocol of devices.
 */
#define USB_DEVICE_INTERFACE_PROTOCOL(vend, prod, pr) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
		       USB_DEVICE_ID_MATCH_INT_PROTOCOL, \
	.idVendor = (vend), \
	.idProduct = (prod), \
	.bInterfaceProtocol = (pr)

/**
 * USB_DEVICE_INTERFACE_NUMBER - describe a usb device with a specific interface number
 * @vend: the 16 bit USB Vendor ID
 * @prod: the 16 bit USB Product ID
 * @num: bInterfaceNumber value
 *
 * This macro is used to create a struct usb_device_id that matches a
 * specific interface number of devices.
 */
#define USB_DEVICE_INTERFACE_NUMBER(vend, prod, num) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
		       USB_DEVICE_ID_MATCH_INT_NUMBER, \
	.idVendor = (vend), \
	.idProduct = (prod), \
	.bInterfaceNumber = (num)

/**
 * USB_DEVICE_INFO - macro used to describe a class of usb devices
 * @cl: bDeviceClass value
 * @sc: bDeviceSubClass value
 * @pr: bDeviceProtocol value
 *
 * This macro is used to create a struct usb_device_id that matches a
 * specific class of devices.
 */
#define USB_DEVICE_INFO(cl, sc, pr) \
	.match_flags = USB_DEVICE_ID_MATCH_DEV_INFO, \
	.bDeviceClass = (cl), \
	.bDeviceSubClass = (sc), \
	.bDeviceProtocol = (pr)

/**
 * USB_INTERFACE_INFO - macro used to describe a class of usb interfaces
 * @cl: bInterfaceClass value
 * @sc: bInterfaceSubClass value
 * @pr: bInterfaceProtocol value
 *
 * This macro is used to create a struct usb_device_id that matches a
 * specific class of interfaces.
 */
#define USB_INTERFACE_INFO(cl, sc, pr) \
	.match_flags = USB_DEVICE_ID_MATCH_INT_INFO, \
	.bInterfaceClass = (cl), \
	.bInterfaceSubClass = (sc), \
	.bInterfaceProtocol = (pr)

/**
 * USB_DEVICE_AND_INTERFACE_INFO - describe a specific usb device with a class of usb interfaces
 * @vend: the 16 bit USB Vendor ID
 * @prod: the 16 bit USB Product ID
 * @cl: bInterfaceClass value
 * @sc: bInterfaceSubClass value
 * @pr: bInterfaceProtocol value
 *
 * This macro is used to create a struct usb_device_id that matches a
 * specific device with a specific class of interfaces.
 *
 * This is especially useful when explicitly matching devices that have
 * vendor specific bDeviceClass values, but standards-compliant interfaces.
 */
#define USB_DEVICE_AND_INTERFACE_INFO(vend, prod, cl, sc, pr) \
	.match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
		| USB_DEVICE_ID_MATCH_DEVICE, \
	.idVendor = (vend), \
	.idProduct = (prod), \
	.bInterfaceClass = (cl), \
	.bInterfaceSubClass = (sc), \
	.bInterfaceProtocol = (pr)

/**
 * USB_VENDOR_AND_INTERFACE_INFO - describe a specific usb vendor with a class of usb interfaces
 * @vend: the 16 bit USB Vendor ID
 * @cl: bInterfaceClass value
 * @sc: bInterfaceSubClass value
 * @pr: bInterfaceProtocol value
 *
 * This macro is used to create a struct usb_device_id that matches a
 * specific vendor with a specific class of interfaces.
 *
 * This is especially useful when explicitly matching devices that have
 * vendor specific bDeviceClass values, but standards-compliant interfaces.
 */
#define USB_VENDOR_AND_INTERFACE_INFO(vend, cl, sc, pr) \
	.match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
		| USB_DEVICE_ID_MATCH_VENDOR, \
	.idVendor = (vend), \
	.bInterfaceClass = (cl), \
	.bInterfaceSubClass = (sc), \
	.bInterfaceProtocol = (pr)
966
967 /* ----------------------------------------------------------------------- */
968
/* Stuff for dynamic usb ids */
struct usb_dynids {
	spinlock_t lock;	/* presumably guards @list -- confirm at use sites */
	struct list_head list;	/* entries are struct usb_dynid, linked via @node */
};

struct usb_dynid {
	struct list_head node;	/* link in usb_dynids.list */
	struct usb_device_id id;	/* the dynamically added match entry */
};
979
980 extern ssize_t usb_store_new_id(struct usb_dynids *dynids,
981 const struct usb_device_id *id_table,
982 struct device_driver *driver,
983 const char *buf, size_t count);
984
985 extern ssize_t usb_show_dynids(struct usb_dynids *dynids, char *buf);
986
/**
 * struct usbdrv_wrap - wrapper for driver-model structure
 * @driver: The driver-model core driver structure.
 * @for_devices: Non-zero for device drivers, 0 for interface drivers.
 *
 * The wrapper lets usbcore tell whether an embedded device_driver
 * belongs to a usb_driver or a usb_device_driver via @for_devices.
 */
struct usbdrv_wrap {
	struct device_driver driver;
	int for_devices;
};
996
997 /**
998 * struct usb_driver - identifies USB interface driver to usbcore
999 * @name: The driver name should be unique among USB drivers,
1000 * and should normally be the same as the module name.
1001 * @probe: Called to see if the driver is willing to manage a particular
1002 * interface on a device. If it is, probe returns zero and uses
1003 * usb_set_intfdata() to associate driver-specific data with the
1004 * interface. It may also use usb_set_interface() to specify the
1005 * appropriate altsetting. If unwilling to manage the interface,
1006 * return -ENODEV, if genuine IO errors occurred, an appropriate
1007 * negative errno value.
1008 * @disconnect: Called when the interface is no longer accessible, usually
1009 * because its device has been (or is being) disconnected or the
1010 * driver module is being unloaded.
1011 * @unlocked_ioctl: Used for drivers that want to talk to userspace through
1012 * the "usbfs" filesystem. This lets devices provide ways to
1013 * expose information to user space regardless of where they
1014 * do (or don't) show up otherwise in the filesystem.
1015 * @suspend: Called when the device is going to be suspended by the
1016 * system either from system sleep or runtime suspend context. The
1017 * return value will be ignored in system sleep context, so do NOT
1018 * try to continue using the device if suspend fails in this case.
1019 * Instead, let the resume or reset-resume routine recover from
1020 * the failure.
1021 * @resume: Called when the device is being resumed by the system.
1022 * @reset_resume: Called when the suspended device has been reset instead
1023 * of being resumed.
1024 * @pre_reset: Called by usb_reset_device() when the device is about to be
1025 * reset. This routine must not return until the driver has no active
1026 * URBs for the device, and no more URBs may be submitted until the
1027 * post_reset method is called.
1028 * @post_reset: Called by usb_reset_device() after the device
1029 * has been reset
1030 * @id_table: USB drivers use ID table to support hotplugging.
1031 * Export this with MODULE_DEVICE_TABLE(usb,...). This must be set
1032 * or your driver's probe function will never get called.
1033 * @dynids: used internally to hold the list of dynamically added device
1034 * ids for this driver.
1035 * @drvwrap: Driver-model core structure wrapper.
1036 * @no_dynamic_id: if set to 1, the USB core will not allow dynamic ids to be
1037 * added to this driver by preventing the sysfs file from being created.
1038 * @supports_autosuspend: if set to 0, the USB core will not allow autosuspend
1039 * for interfaces bound to this driver.
1040 * @soft_unbind: if set to 1, the USB core will not kill URBs and disable
1041 * endpoints before calling the driver's disconnect method.
1042 * @disable_hub_initiated_lpm: if set to 0, the USB core will not allow hubs
1043 * to initiate lower power link state transitions when an idle timeout
1044 * occurs. Device-initiated USB 3.0 link PM will still be allowed.
1045 *
1046 * USB interface drivers must provide a name, probe() and disconnect()
1047 * methods, and an id_table. Other driver fields are optional.
1048 *
1049 * The id_table is used in hotplugging. It holds a set of descriptors,
1050 * and specialized data may be associated with each entry. That table
1051 * is used by both user and kernel mode hotplugging support.
1052 *
1053 * The probe() and disconnect() methods are called in a context where
1054 * they can sleep, but they should avoid abusing the privilege. Most
1055 * work to connect to a device should be done when the device is opened,
1056 * and undone at the last close. The disconnect code needs to address
1057 * concurrency issues with respect to open() and close() methods, as
1058 * well as forcing all pending I/O requests to complete (by unlinking
1059 * them as necessary, and blocking until the unlinks complete).
1060 */
struct usb_driver {
	/* Field semantics are documented in the kerneldoc comment above. */
	const char *name;

	int (*probe) (struct usb_interface *intf,
		      const struct usb_device_id *id);

	void (*disconnect) (struct usb_interface *intf);

	int (*unlocked_ioctl) (struct usb_interface *intf, unsigned int code,
			void *buf);

	/* power management callbacks */
	int (*suspend) (struct usb_interface *intf, pm_message_t message);
	int (*resume) (struct usb_interface *intf);
	int (*reset_resume)(struct usb_interface *intf);

	/* called around usb_reset_device() */
	int (*pre_reset)(struct usb_interface *intf);
	int (*post_reset)(struct usb_interface *intf);

	const struct usb_device_id *id_table;

	struct usb_dynids dynids;
	struct usbdrv_wrap drvwrap;
	unsigned int no_dynamic_id:1;
	unsigned int supports_autosuspend:1;
	unsigned int disable_hub_initiated_lpm:1;
	unsigned int soft_unbind:1;
};
/* Convert an embedded struct device_driver back to its usb_driver */
#define to_usb_driver(d) container_of(d, struct usb_driver, drvwrap.driver)
1089
1090 /**
1091 * struct usb_device_driver - identifies USB device driver to usbcore
1092 * @name: The driver name should be unique among USB drivers,
1093 * and should normally be the same as the module name.
1094 * @probe: Called to see if the driver is willing to manage a particular
1095 * device. If it is, probe returns zero and uses dev_set_drvdata()
1096 * to associate driver-specific data with the device. If unwilling
1097 * to manage the device, return a negative errno value.
1098 * @disconnect: Called when the device is no longer accessible, usually
1099 * because it has been (or is being) disconnected or the driver's
1100 * module is being unloaded.
1101 * @suspend: Called when the device is going to be suspended by the system.
1102 * @resume: Called when the device is being resumed by the system.
1103 * @drvwrap: Driver-model core structure wrapper.
1104 * @supports_autosuspend: if set to 0, the USB core will not allow autosuspend
1105 * for devices bound to this driver.
1106 *
1107 * USB drivers must provide all the fields listed above except drvwrap.
1108 */
struct usb_device_driver {
	/* Field semantics are documented in the kerneldoc comment above. */
	const char *name;

	int (*probe) (struct usb_device *udev);
	void (*disconnect) (struct usb_device *udev);

	int (*suspend) (struct usb_device *udev, pm_message_t message);
	int (*resume) (struct usb_device *udev, pm_message_t message);
	struct usbdrv_wrap drvwrap;
	unsigned int supports_autosuspend:1;
};
/* Convert an embedded struct device_driver back to its usb_device_driver */
#define to_usb_device_driver(d) container_of(d, struct usb_device_driver, \
		drvwrap.driver)
1122
1123 extern struct bus_type usb_bus_type;
1124
1125 /**
1126 * struct usb_class_driver - identifies a USB driver that wants to use the USB major number
1127 * @name: the usb class device name for this driver. Will show up in sysfs.
1128 * @devnode: Callback to provide a naming hint for a possible
1129 * device node to create.
1130 * @fops: pointer to the struct file_operations of this driver.
1131 * @minor_base: the start of the minor range for this driver.
1132 *
1133 * This structure is used for the usb_register_dev() and
1134 * usb_unregister_dev() functions, to consolidate a number of the
1135 * parameters used for them.
1136 */
struct usb_class_driver {
	char *name;		/* class device name; shows up in sysfs */
	char *(*devnode)(struct device *dev, umode_t *mode);
	const struct file_operations *fops;
	int minor_base;		/* start of this driver's minor range */
};
1143
/*
 * use these in module_init()/module_exit()
 * and don't forget MODULE_DEVICE_TABLE(usb, ...)
 */
extern int usb_register_driver(struct usb_driver *, struct module *,
			       const char *);

/* use a define to avoid include chaining to get THIS_MODULE & friends */
#define usb_register(driver) \
	usb_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)

extern void usb_deregister(struct usb_driver *);

/**
 * module_usb_driver() - Helper macro for registering a USB driver
 * @__usb_driver: usb_driver struct
 *
 * Helper macro for USB drivers which do not do anything special in module
 * init/exit. This eliminates a lot of boilerplate. Each module may only
 * use this macro once, and calling it replaces module_init() and module_exit()
 */
#define module_usb_driver(__usb_driver) \
	module_driver(__usb_driver, usb_register, \
		       usb_deregister)
1168
1169 extern int usb_register_device_driver(struct usb_device_driver *,
1170 struct module *);
1171 extern void usb_deregister_device_driver(struct usb_device_driver *);
1172
1173 extern int usb_register_dev(struct usb_interface *intf,
1174 struct usb_class_driver *class_driver);
1175 extern void usb_deregister_dev(struct usb_interface *intf,
1176 struct usb_class_driver *class_driver);
1177
1178 extern int usb_disabled(void);
1179
1180 /* ----------------------------------------------------------------------- */
1181
1182 /*
1183 * URB support, for asynchronous request completions
1184 */
1185
1186 /*
1187 * urb->transfer_flags:
1188 *
1189 * Note: URB_DIR_IN/OUT is automatically set in usb_submit_urb().
1190 */
1191 #define URB_SHORT_NOT_OK 0x0001 /* report short reads as errors */
1192 #define URB_ISO_ASAP 0x0002 /* iso-only; use the first unexpired
1193 * slot in the schedule */
1194 #define URB_NO_TRANSFER_DMA_MAP 0x0004 /* urb->transfer_dma valid on submit */
1195 #define URB_NO_FSBR 0x0020 /* UHCI-specific */
1196 #define URB_ZERO_PACKET 0x0040 /* Finish bulk OUT with short packet */
1197 #define URB_NO_INTERRUPT 0x0080 /* HINT: no non-error interrupt
1198 * needed */
1199 #define URB_FREE_BUFFER 0x0100 /* Free transfer buffer with the URB */
1200
1201 /* The following flags are used internally by usbcore and HCDs */
1202 #define URB_DIR_IN 0x0200 /* Transfer from device to host */
1203 #define URB_DIR_OUT 0
1204 #define URB_DIR_MASK URB_DIR_IN
1205
1206 #define URB_DMA_MAP_SINGLE 0x00010000 /* Non-scatter-gather mapping */
1207 #define URB_DMA_MAP_PAGE 0x00020000 /* HCD-unsupported S-G */
1208 #define URB_DMA_MAP_SG 0x00040000 /* HCD-supported S-G */
1209 #define URB_MAP_LOCAL 0x00080000 /* HCD-local-memory mapping */
1210 #define URB_SETUP_MAP_SINGLE 0x00100000 /* Setup packet DMA mapped */
1211 #define URB_SETUP_MAP_LOCAL 0x00200000 /* HCD-local setup packet */
1212 #define URB_DMA_SG_COMBINED 0x00400000 /* S-G entries were combined */
1213 #define URB_ALIGNED_TEMP_BUFFER 0x00800000 /* Temp buffer was alloc'd */
1214
/* Status/layout of one packet within an isochronous URB. */
struct usb_iso_packet_descriptor {
	unsigned int offset;		/* byte offset of this packet in urb->transfer_buffer */
	unsigned int length;		/* expected length */
	unsigned int actual_length;	/* bytes actually transferred for this packet */
	int status;			/* per-packet completion status */
};
1221
1222 struct urb;
1223
/*
 * Mooring point for URBs: drivers anchor related URBs here so they can be
 * killed/poisoned/unlinked or waited on as a group (see the
 * usb_*_anchored_urbs() helpers below).
 */
struct usb_anchor {
	struct list_head urb_list;	/* the anchored URBs */
	wait_queue_head_t wait;		/* for usb_wait_anchor_empty_timeout() */
	spinlock_t lock;
	atomic_t suspend_wakeups;
	unsigned int poisoned:1;	/* set once the anchor has been poisoned */
};
1231
/* Initialize an anchor before first use. */
static inline void init_usb_anchor(struct usb_anchor *anchor)
{
	/* zero everything first (clears poisoned bit and counters)... */
	memset(anchor, 0, sizeof(*anchor));
	/* ...then set up the members that need real initialization */
	INIT_LIST_HEAD(&anchor->urb_list);
	init_waitqueue_head(&anchor->wait);
	spin_lock_init(&anchor->lock);
}
1239
1240 typedef void (*usb_complete_t)(struct urb *);
1241
1242 /**
1243 * struct urb - USB Request Block
1244 * @urb_list: For use by current owner of the URB.
1245 * @anchor_list: membership in the list of an anchor
1246 * @anchor: to anchor URBs to a common mooring
1247 * @ep: Points to the endpoint's data structure. Will eventually
1248 * replace @pipe.
1249 * @pipe: Holds endpoint number, direction, type, and more.
1250 * Create these values with the eight macros available;
1251 * usb_{snd,rcv}TYPEpipe(dev,endpoint), where the TYPE is "ctrl"
1252 * (control), "bulk", "int" (interrupt), or "iso" (isochronous).
1253 * For example usb_sndbulkpipe() or usb_rcvintpipe(). Endpoint
1254 * numbers range from zero to fifteen. Note that "in" endpoint two
1255 * is a different endpoint (and pipe) from "out" endpoint two.
1256 * The current configuration controls the existence, type, and
1257 * maximum packet size of any given endpoint.
1258 * @stream_id: the endpoint's stream ID for bulk streams
1259 * @dev: Identifies the USB device to perform the request.
1260 * @status: This is read in non-iso completion functions to get the
1261 * status of the particular request. ISO requests only use it
1262 * to tell whether the URB was unlinked; detailed status for
1263 * each frame is in the fields of the iso_frame-desc.
1264 * @transfer_flags: A variety of flags may be used to affect how URB
1265 * submission, unlinking, or operation are handled. Different
1266 * kinds of URB can use different flags.
1267 * @transfer_buffer: This identifies the buffer to (or from) which the I/O
1268 * request will be performed unless URB_NO_TRANSFER_DMA_MAP is set
1269 * (however, do not leave garbage in transfer_buffer even then).
1270 * This buffer must be suitable for DMA; allocate it with
1271 * kmalloc() or equivalent. For transfers to "in" endpoints, contents
1272 * of this buffer will be modified. This buffer is used for the data
1273 * stage of control transfers.
1274 * @transfer_dma: When transfer_flags includes URB_NO_TRANSFER_DMA_MAP,
1275 * the device driver is saying that it provided this DMA address,
1276 * which the host controller driver should use in preference to the
1277 * transfer_buffer.
1278 * @sg: scatter gather buffer list, the buffer size of each element in
1279 * the list (except the last) must be divisible by the endpoint's
1280 * max packet size if no_sg_constraint isn't set in 'struct usb_bus'
1281 * @num_mapped_sgs: (internal) number of mapped sg entries
1282 * @num_sgs: number of entries in the sg list
1283 * @transfer_buffer_length: How big is transfer_buffer. The transfer may
1284 * be broken up into chunks according to the current maximum packet
1285 * size for the endpoint, which is a function of the configuration
1286 * and is encoded in the pipe. When the length is zero, neither
1287 * transfer_buffer nor transfer_dma is used.
1288 * @actual_length: This is read in non-iso completion functions, and
1289 * it tells how many bytes (out of transfer_buffer_length) were
1290 * transferred. It will normally be the same as requested, unless
1291 * either an error was reported or a short read was performed.
1292 * The URB_SHORT_NOT_OK transfer flag may be used to make such
1293 * short reads be reported as errors.
1294 * @setup_packet: Only used for control transfers, this points to eight bytes
1295 * of setup data. Control transfers always start by sending this data
1296 * to the device. Then transfer_buffer is read or written, if needed.
1297 * @setup_dma: DMA pointer for the setup packet. The caller must not use
1298 * this field; setup_packet must point to a valid buffer.
1299 * @start_frame: Returns the initial frame for isochronous transfers.
1300 * @number_of_packets: Lists the number of ISO transfer buffers.
1301 * @interval: Specifies the polling interval for interrupt or isochronous
1302 * transfers. The units are frames (milliseconds) for full and low
1303 * speed devices, and microframes (1/8 millisecond) for highspeed
1304 * and SuperSpeed devices.
1305 * @error_count: Returns the number of ISO transfers that reported errors.
1306 * @context: For use in completion functions. This normally points to
1307 * request-specific driver context.
1308 * @complete: Completion handler. This URB is passed as the parameter to the
1309 * completion function. The completion function may then do what
1310 * it likes with the URB, including resubmitting or freeing it.
1311 * @iso_frame_desc: Used to provide arrays of ISO transfer buffers and to
1312 * collect the transfer status for each buffer.
1313 *
1314 * This structure identifies USB transfer requests. URBs must be allocated by
1315 * calling usb_alloc_urb() and freed with a call to usb_free_urb().
1316 * Initialization may be done using various usb_fill_*_urb() functions. URBs
1317 * are submitted using usb_submit_urb(), and pending requests may be canceled
1318 * using usb_unlink_urb() or usb_kill_urb().
1319 *
1320 * Data Transfer Buffers:
1321 *
1322 * Normally drivers provide I/O buffers allocated with kmalloc() or otherwise
1323 * taken from the general page pool. That is provided by transfer_buffer
1324 * (control requests also use setup_packet), and host controller drivers
1325 * perform a dma mapping (and unmapping) for each buffer transferred. Those
1326 * mapping operations can be expensive on some platforms (perhaps using a dma
1327 * bounce buffer or talking to an IOMMU),
1328 * although they're cheap on commodity x86 and ppc hardware.
1329 *
1330 * Alternatively, drivers may pass the URB_NO_TRANSFER_DMA_MAP transfer flag,
1331 * which tells the host controller driver that no such mapping is needed for
1332 * the transfer_buffer since
1333 * the device driver is DMA-aware. For example, a device driver might
1334 * allocate a DMA buffer with usb_alloc_coherent() or call usb_buffer_map().
1335 * When this transfer flag is provided, host controller drivers will
1336 * attempt to use the dma address found in the transfer_dma
1337 * field rather than determining a dma address themselves.
1338 *
1339 * Note that transfer_buffer must still be set if the controller
1340 * does not support DMA (as indicated by bus.uses_dma) and when talking
1341 * to root hub. If you have to transfer between highmem zone and the device
1342 * on such controller, create a bounce buffer or bail out with an error.
1343 * If transfer_buffer cannot be set (is in highmem) and the controller is DMA
1344 * capable, assign NULL to it, so that usbmon knows not to use the value.
1345 * The setup_packet must always be set, so it cannot be located in highmem.
1346 *
1347 * Initialization:
1348 *
1349 * All URBs submitted must initialize the dev, pipe, transfer_flags (may be
1350 * zero), and complete fields. All URBs must also initialize
1351 * transfer_buffer and transfer_buffer_length. They may provide the
1352 * URB_SHORT_NOT_OK transfer flag, indicating that short reads are
1353 * to be treated as errors; that flag is invalid for write requests.
1354 *
1355 * Bulk URBs may
1356 * use the URB_ZERO_PACKET transfer flag, indicating that bulk OUT transfers
1357 * should always terminate with a short packet, even if it means adding an
1358 * extra zero length packet.
1359 *
1360 * Control URBs must provide a valid pointer in the setup_packet field.
1361 * Unlike the transfer_buffer, the setup_packet may not be mapped for DMA
1362 * beforehand.
1363 *
1364 * Interrupt URBs must provide an interval, saying how often (in milliseconds
1365 * or, for highspeed devices, 125 microsecond units)
1366 * to poll for transfers. After the URB has been submitted, the interval
1367 * field reflects how the transfer was actually scheduled.
1368 * The polling interval may be more frequent than requested.
1369 * For example, some controllers have a maximum interval of 32 milliseconds,
1370 * while others support intervals of up to 1024 milliseconds.
1371 * Isochronous URBs also have transfer intervals. (Note that for isochronous
1372 * endpoints, as well as high speed interrupt endpoints, the encoding of
1373 * the transfer interval in the endpoint descriptor is logarithmic.
1374 * Device drivers must convert that value to linear units themselves.)
1375 *
1376 * If an isochronous endpoint queue isn't already running, the host
1377 * controller will schedule a new URB to start as soon as bandwidth
1378 * utilization allows. If the queue is running then a new URB will be
1379 * scheduled to start in the first transfer slot following the end of the
1380 * preceding URB, if that slot has not already expired. If the slot has
1381 * expired (which can happen when IRQ delivery is delayed for a long time),
1382 * the scheduling behavior depends on the URB_ISO_ASAP flag. If the flag
1383 * is clear then the URB will be scheduled to start in the expired slot,
1384 * implying that some of its packets will not be transferred; if the flag
1385 * is set then the URB will be scheduled in the first unexpired slot,
1386 * breaking the queue's synchronization. Upon URB completion, the
1387 * start_frame field will be set to the (micro)frame number in which the
1388 * transfer was scheduled. Ranges for frame counter values are HC-specific
1389 * and can go from as low as 256 to as high as 65536 frames.
1390 *
1391 * Isochronous URBs have a different data transfer model, in part because
1392 * the quality of service is only "best effort". Callers provide specially
1393 * allocated URBs, with number_of_packets worth of iso_frame_desc structures
1394 * at the end. Each such packet is an individual ISO transfer. Isochronous
1395 * URBs are normally queued, submitted by drivers to arrange that
1396 * transfers are at least double buffered, and then explicitly resubmitted
1397 * in completion handlers, so
1398 * that data (such as audio or video) streams at as constant a rate as the
1399 * host controller scheduler can support.
1400 *
1401 * Completion Callbacks:
1402 *
1403 * The completion callback is made in_interrupt(), and one of the first
1404 * things that a completion handler should do is check the status field.
1405 * The status field is provided for all URBs. It is used to report
1406 * unlinked URBs, and status for all non-ISO transfers. It should not
1407 * be examined before the URB is returned to the completion handler.
1408 *
1409 * The context field is normally used to link URBs back to the relevant
1410 * driver or request state.
1411 *
1412 * When the completion callback is invoked for non-isochronous URBs, the
1413 * actual_length field tells how many bytes were transferred. This field
1414 * is updated even when the URB terminated with an error or was unlinked.
1415 *
1416 * ISO transfer status is reported in the status and actual_length fields
1417 * of the iso_frame_desc array, and the number of errors is reported in
1418 * error_count. Completion callbacks for ISO transfers will normally
1419 * (re)submit URBs to ensure a constant transfer rate.
1420 *
1421 * Note that even fields marked "public" should not be touched by the driver
1422 * when the urb is owned by the hcd, that is, since the call to
1423 * usb_submit_urb() till the entry into the completion routine.
1424 */
struct urb {
	/* private: usb core and host controller only fields in the urb */
	struct kref kref;		/* reference count of the URB */
	void *hcpriv;			/* private data for host controller */
	atomic_t use_count;		/* concurrent submissions counter */
	atomic_t reject;		/* submissions will fail */
	int unlinked;			/* unlink error code */

	/* public: documented fields in the urb that can be used by drivers */
	struct list_head urb_list;	/* list head for use by the urb's
					 * current owner */
	struct list_head anchor_list;	/* the URB may be anchored */
	struct usb_anchor *anchor;
	struct usb_device *dev;		/* (in) pointer to associated device */
	struct usb_host_endpoint *ep;	/* (internal) pointer to endpoint */
	unsigned int pipe;		/* (in) pipe information */
	unsigned int stream_id;		/* (in) stream ID */
	int status;			/* (return) non-ISO status */
	unsigned int transfer_flags;	/* (in) URB_SHORT_NOT_OK | ...*/
	void *transfer_buffer;		/* (in) associated data buffer */
	dma_addr_t transfer_dma;	/* (in) dma addr for transfer_buffer */
	struct scatterlist *sg;		/* (in) scatter gather buffer list */
	int num_mapped_sgs;		/* (internal) mapped sg entries */
	int num_sgs;			/* (in) number of entries in the sg list */
	u32 transfer_buffer_length;	/* (in) data buffer length */
	u32 actual_length;		/* (return) actual transfer length */
	unsigned char *setup_packet;	/* (in) setup packet (control only) */
	dma_addr_t setup_dma;		/* (in) dma addr for setup_packet */
	int start_frame;		/* (modify) start frame (ISO) */
	int number_of_packets;		/* (in) number of ISO packets */
	int interval;			/* (modify) transfer interval
					 * (INT/ISO) */
	int error_count;		/* (return) number of ISO errors */
	void *context;			/* (in) context for completion */
	usb_complete_t complete;	/* (in) completion routine */
	/*
	 * Old-style ([0]) flexible array member: number_of_packets
	 * descriptors are allocated past the end of the struct by
	 * usb_alloc_urb().
	 */
	struct usb_iso_packet_descriptor iso_frame_desc[0];
					/* (in) ISO ONLY */
};
1463
1464 /* ----------------------------------------------------------------------- */
1465
1466 /**
1467 * usb_fill_control_urb - initializes a control urb
1468 * @urb: pointer to the urb to initialize.
1469 * @dev: pointer to the struct usb_device for this urb.
1470 * @pipe: the endpoint pipe
1471 * @setup_packet: pointer to the setup_packet buffer
1472 * @transfer_buffer: pointer to the transfer buffer
1473 * @buffer_length: length of the transfer buffer
1474 * @complete_fn: pointer to the usb_complete_t function
1475 * @context: what to set the urb context to.
1476 *
1477 * Initializes a control urb with the proper information needed to submit
1478 * it to a device.
1479 */
1480 static inline void usb_fill_control_urb(struct urb *urb,
1481 struct usb_device *dev,
1482 unsigned int pipe,
1483 unsigned char *setup_packet,
1484 void *transfer_buffer,
1485 int buffer_length,
1486 usb_complete_t complete_fn,
1487 void *context)
1488 {
1489 urb->dev = dev;
1490 urb->pipe = pipe;
1491 urb->setup_packet = setup_packet;
1492 urb->transfer_buffer = transfer_buffer;
1493 urb->transfer_buffer_length = buffer_length;
1494 urb->complete = complete_fn;
1495 urb->context = context;
1496 }
1497
1498 /**
1499 * usb_fill_bulk_urb - macro to help initialize a bulk urb
1500 * @urb: pointer to the urb to initialize.
1501 * @dev: pointer to the struct usb_device for this urb.
1502 * @pipe: the endpoint pipe
1503 * @transfer_buffer: pointer to the transfer buffer
1504 * @buffer_length: length of the transfer buffer
1505 * @complete_fn: pointer to the usb_complete_t function
1506 * @context: what to set the urb context to.
1507 *
1508 * Initializes a bulk urb with the proper information needed to submit it
1509 * to a device.
1510 */
1511 static inline void usb_fill_bulk_urb(struct urb *urb,
1512 struct usb_device *dev,
1513 unsigned int pipe,
1514 void *transfer_buffer,
1515 int buffer_length,
1516 usb_complete_t complete_fn,
1517 void *context)
1518 {
1519 urb->dev = dev;
1520 urb->pipe = pipe;
1521 urb->transfer_buffer = transfer_buffer;
1522 urb->transfer_buffer_length = buffer_length;
1523 urb->complete = complete_fn;
1524 urb->context = context;
1525 }
1526
1527 /**
1528 * usb_fill_int_urb - macro to help initialize an interrupt urb
1529 * @urb: pointer to the urb to initialize.
1530 * @dev: pointer to the struct usb_device for this urb.
1531 * @pipe: the endpoint pipe
1532 * @transfer_buffer: pointer to the transfer buffer
1533 * @buffer_length: length of the transfer buffer
1534 * @complete_fn: pointer to the usb_complete_t function
1535 * @context: what to set the urb context to.
1536 * @interval: what to set the urb interval to, encoded like
1537 * the endpoint descriptor's bInterval value.
1538 *
1539 * Initializes an interrupt urb with the proper information needed to submit
1540 * it to a device.
1541 *
1542 * Note that High Speed and SuperSpeed interrupt endpoints use a logarithmic
1543 * encoding of the endpoint interval, and express polling intervals in
1544 * microframes (eight per millisecond) rather than in frames (one per
1545 * millisecond).
1546 *
1547 * Wireless USB also uses the logarithmic encoding, but specifies it in units of
1548 * 128us instead of 125us. For Wireless USB devices, the interval is passed
1549 * through to the host controller, rather than being translated into microframe
1550 * units.
1551 */
static inline void usb_fill_int_urb(struct urb *urb,
				    struct usb_device *dev,
				    unsigned int pipe,
				    void *transfer_buffer,
				    int buffer_length,
				    usb_complete_t complete_fn,
				    void *context,
				    int interval)
{
	urb->dev = dev;
	urb->pipe = pipe;
	urb->transfer_buffer = transfer_buffer;
	urb->transfer_buffer_length = buffer_length;
	urb->complete = complete_fn;
	urb->context = context;

	if (dev->speed == USB_SPEED_HIGH || dev->speed == USB_SPEED_SUPER) {
		/* make sure interval is within allowed range */
		interval = clamp(interval, 1, 16);

		/*
		 * HS/SS bInterval is logarithmically encoded: convert to
		 * a linear count of (micro)frames, 2^(bInterval-1).
		 */
		urb->interval = 1 << (interval - 1);
	} else {
		/* full/low speed: bInterval is already in frames */
		urb->interval = interval;
	}

	/* -1: let the host controller choose the start frame */
	urb->start_frame = -1;
}
1579
1580 extern void usb_init_urb(struct urb *urb);
1581 extern struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags);
1582 extern void usb_free_urb(struct urb *urb);
1583 #define usb_put_urb usb_free_urb
1584 extern struct urb *usb_get_urb(struct urb *urb);
1585 extern int usb_submit_urb(struct urb *urb, gfp_t mem_flags);
1586 extern int usb_unlink_urb(struct urb *urb);
1587 extern void usb_kill_urb(struct urb *urb);
1588 extern void usb_poison_urb(struct urb *urb);
1589 extern void usb_unpoison_urb(struct urb *urb);
1590 extern void usb_block_urb(struct urb *urb);
1591 extern void usb_kill_anchored_urbs(struct usb_anchor *anchor);
1592 extern void usb_poison_anchored_urbs(struct usb_anchor *anchor);
1593 extern void usb_unpoison_anchored_urbs(struct usb_anchor *anchor);
1594 extern void usb_unlink_anchored_urbs(struct usb_anchor *anchor);
1595 extern void usb_anchor_suspend_wakeups(struct usb_anchor *anchor);
1596 extern void usb_anchor_resume_wakeups(struct usb_anchor *anchor);
1597 extern void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor);
1598 extern void usb_unanchor_urb(struct urb *urb);
1599 extern int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
1600 unsigned int timeout);
1601 extern struct urb *usb_get_from_anchor(struct usb_anchor *anchor);
1602 extern void usb_scuttle_anchored_urbs(struct usb_anchor *anchor);
1603 extern int usb_anchor_empty(struct usb_anchor *anchor);
1604
1605 #define usb_unblock_urb usb_unpoison_urb
1606
1607 /**
1608 * usb_urb_dir_in - check if an URB describes an IN transfer
1609 * @urb: URB to be checked
1610 *
1611 * Return: 1 if @urb describes an IN transfer (device-to-host),
1612 * otherwise 0.
1613 */
1614 static inline int usb_urb_dir_in(struct urb *urb)
1615 {
1616 return (urb->transfer_flags & URB_DIR_MASK) == URB_DIR_IN;
1617 }
1618
1619 /**
1620 * usb_urb_dir_out - check if an URB describes an OUT transfer
1621 * @urb: URB to be checked
1622 *
1623 * Return: 1 if @urb describes an OUT transfer (host-to-device),
1624 * otherwise 0.
1625 */
1626 static inline int usb_urb_dir_out(struct urb *urb)
1627 {
1628 return (urb->transfer_flags & URB_DIR_MASK) == URB_DIR_OUT;
1629 }
1630
1631 void *usb_alloc_coherent(struct usb_device *dev, size_t size,
1632 gfp_t mem_flags, dma_addr_t *dma);
1633 void usb_free_coherent(struct usb_device *dev, size_t size,
1634 void *addr, dma_addr_t dma);
1635
1636 #if 0
1637 struct urb *usb_buffer_map(struct urb *urb);
1638 void usb_buffer_dmasync(struct urb *urb);
1639 void usb_buffer_unmap(struct urb *urb);
1640 #endif
1641
1642 struct scatterlist;
1643 int usb_buffer_map_sg(const struct usb_device *dev, int is_in,
1644 struct scatterlist *sg, int nents);
1645 #if 0
1646 void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in,
1647 struct scatterlist *sg, int n_hw_ents);
1648 #endif
1649 void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
1650 struct scatterlist *sg, int n_hw_ents);
1651
1652 /*-------------------------------------------------------------------*
1653 * SYNCHRONOUS CALL SUPPORT *
1654 *-------------------------------------------------------------------*/
1655
1656 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
1657 __u8 request, __u8 requesttype, __u16 value, __u16 index,
1658 void *data, __u16 size, int timeout);
1659 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
1660 void *data, int len, int *actual_length, int timeout);
1661 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
1662 void *data, int len, int *actual_length,
1663 int timeout);
1664
1665 /* wrappers around usb_control_msg() for the most common standard requests */
1666 extern int usb_get_descriptor(struct usb_device *dev, unsigned char desctype,
1667 unsigned char descindex, void *buf, int size);
1668 extern int usb_get_status(struct usb_device *dev,
1669 int type, int target, void *data);
1670 extern int usb_string(struct usb_device *dev, int index,
1671 char *buf, size_t size);
1672
1673 /* wrappers that also update important state inside usbcore */
1674 extern int usb_clear_halt(struct usb_device *dev, int pipe);
1675 extern int usb_reset_configuration(struct usb_device *dev);
1676 extern int usb_set_interface(struct usb_device *dev, int ifnum, int alternate);
1677 extern void usb_reset_endpoint(struct usb_device *dev, unsigned int epaddr);
1678
1679 /* this request isn't really synchronous, but it belongs with the others */
1680 extern int usb_driver_set_configuration(struct usb_device *udev, int config);
1681
1682 /* choose and set configuration for device */
1683 extern int usb_choose_configuration(struct usb_device *udev);
1684 extern int usb_set_configuration(struct usb_device *dev, int configuration);
1685
1686 /*
1687 * timeouts, in milliseconds, used for sending/receiving control messages
1688 * they typically complete within a few frames (msec) after they're issued
1689 * USB identifies 5 second timeouts, maybe more in a few cases, and a few
1690 * slow devices (like some MGE Ellipse UPSes) actually push that limit.
1691 */
1692 #define USB_CTRL_GET_TIMEOUT 5000
1693 #define USB_CTRL_SET_TIMEOUT 5000
1694
1695
1696 /**
1697 * struct usb_sg_request - support for scatter/gather I/O
1698 * @status: zero indicates success, else negative errno
1699 * @bytes: counts bytes transferred.
1700 *
1701 * These requests are initialized using usb_sg_init(), and then are used
1702 * as request handles passed to usb_sg_wait() or usb_sg_cancel(). Most
1703 * members of the request object aren't for driver access.
1704 *
1705 * The status and bytecount values are valid only after usb_sg_wait()
1706 * returns. If the status is zero, then the bytecount matches the total
1707 * from the request.
1708 *
1709 * After an error completion, drivers may need to clear a halt condition
1710 * on the endpoint.
1711 */
struct usb_sg_request {
	int status;		/* zero on success, else negative errno;
				 * valid only after usb_sg_wait() returns */
	size_t bytes;		/* counts bytes transferred */

	/* private:
	 * members below are private to usbcore,
	 * and are not provided for driver access!
	 */
	spinlock_t lock;

	struct usb_device *dev;
	int pipe;

	int entries;		/* number of entries in @urbs */
	struct urb **urbs;

	int count;
	struct completion complete;
};
1731
1732 int usb_sg_init(
1733 struct usb_sg_request *io,
1734 struct usb_device *dev,
1735 unsigned pipe,
1736 unsigned period,
1737 struct scatterlist *sg,
1738 int nents,
1739 size_t length,
1740 gfp_t mem_flags
1741 );
1742 void usb_sg_cancel(struct usb_sg_request *io);
1743 void usb_sg_wait(struct usb_sg_request *io);
1744
1745
1746 /* ----------------------------------------------------------------------- */
1747
1748 /*
1749 * For various legacy reasons, Linux has a small cookie that's paired with
1750 * a struct usb_device to identify an endpoint queue. Queue characteristics
1751 * are defined by the endpoint's descriptor. This cookie is called a "pipe",
1752 * an unsigned int encoded as:
1753 *
1754 * - direction: bit 7 (0 = Host-to-Device [Out],
1755 * 1 = Device-to-Host [In] ...
1756 * like endpoint bEndpointAddress)
1757 * - device address: bits 8-14 ... bit positions known to uhci-hcd
1758 * - endpoint: bits 15-18 ... bit positions known to uhci-hcd
1759 * - pipe type: bits 30-31 (00 = isochronous, 01 = interrupt,
1760 * 10 = control, 11 = bulk)
1761 *
1762 * Given the device address and endpoint descriptor, pipes are redundant.
1763 */
1764
1765 /* NOTE: these are not the standard USB_ENDPOINT_XFER_* values!! */
1766 /* (yet ... they're the values used by usbfs) */
1767 #define PIPE_ISOCHRONOUS 0
1768 #define PIPE_INTERRUPT 1
1769 #define PIPE_CONTROL 2
1770 #define PIPE_BULK 3
1771
1772 #define usb_pipein(pipe) ((pipe) & USB_DIR_IN)
1773 #define usb_pipeout(pipe) (!usb_pipein(pipe))
1774
1775 #define usb_pipedevice(pipe) (((pipe) >> 8) & 0x7f)
1776 #define usb_pipeendpoint(pipe) (((pipe) >> 15) & 0xf)
1777
1778 #define usb_pipetype(pipe) (((pipe) >> 30) & 3)
1779 #define usb_pipeisoc(pipe) (usb_pipetype((pipe)) == PIPE_ISOCHRONOUS)
1780 #define usb_pipeint(pipe) (usb_pipetype((pipe)) == PIPE_INTERRUPT)
1781 #define usb_pipecontrol(pipe) (usb_pipetype((pipe)) == PIPE_CONTROL)
1782 #define usb_pipebulk(pipe) (usb_pipetype((pipe)) == PIPE_BULK)
1783
1784 static inline unsigned int __create_pipe(struct usb_device *dev,
1785 unsigned int endpoint)
1786 {
1787 return (dev->devnum << 8) | (endpoint << 15);
1788 }
1789
1790 /* Create various pipes... */
1791 #define usb_sndctrlpipe(dev, endpoint) \
1792 ((PIPE_CONTROL << 30) | __create_pipe(dev, endpoint))
1793 #define usb_rcvctrlpipe(dev, endpoint) \
1794 ((PIPE_CONTROL << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN)
1795 #define usb_sndisocpipe(dev, endpoint) \
1796 ((PIPE_ISOCHRONOUS << 30) | __create_pipe(dev, endpoint))
1797 #define usb_rcvisocpipe(dev, endpoint) \
1798 ((PIPE_ISOCHRONOUS << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN)
1799 #define usb_sndbulkpipe(dev, endpoint) \
1800 ((PIPE_BULK << 30) | __create_pipe(dev, endpoint))
1801 #define usb_rcvbulkpipe(dev, endpoint) \
1802 ((PIPE_BULK << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN)
1803 #define usb_sndintpipe(dev, endpoint) \
1804 ((PIPE_INTERRUPT << 30) | __create_pipe(dev, endpoint))
1805 #define usb_rcvintpipe(dev, endpoint) \
1806 ((PIPE_INTERRUPT << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN)
1807
1808 static inline struct usb_host_endpoint *
1809 usb_pipe_endpoint(struct usb_device *dev, unsigned int pipe)
1810 {
1811 struct usb_host_endpoint **eps;
1812 eps = usb_pipein(pipe) ? dev->ep_in : dev->ep_out;
1813 return eps[usb_pipeendpoint(pipe)];
1814 }
1815
1816 /*-------------------------------------------------------------------------*/
1817
1818 static inline __u16
1819 usb_maxpacket(struct usb_device *udev, int pipe, int is_out)
1820 {
1821 struct usb_host_endpoint *ep;
1822 unsigned epnum = usb_pipeendpoint(pipe);
1823
1824 if (is_out) {
1825 WARN_ON(usb_pipein(pipe));
1826 ep = udev->ep_out[epnum];
1827 } else {
1828 WARN_ON(usb_pipeout(pipe));
1829 ep = udev->ep_in[epnum];
1830 }
1831 if (!ep)
1832 return 0;
1833
1834 /* NOTE: only 0x07ff bits are for packet size... */
1835 return usb_endpoint_maxp(&ep->desc);
1836 }
1837
1838 /* ----------------------------------------------------------------------- */
1839
1840 /* translate USB error codes to codes user space understands */
/*
 * Translate USB error codes to codes user space understands: a small
 * whitelist passes through unchanged, everything else becomes -EIO.
 */
static inline int usb_translate_errors(int error_code)
{
	if (error_code == 0 || error_code == -ENOMEM ||
	    error_code == -ENODEV || error_code == -EOPNOTSUPP)
		return error_code;

	/* collapse all other internal codes to a generic I/O error */
	return -EIO;
}
1853
1854 /* Events from the usb core */
1855 #define USB_DEVICE_ADD 0x0001
1856 #define USB_DEVICE_REMOVE 0x0002
1857 #define USB_BUS_ADD 0x0003
1858 #define USB_BUS_REMOVE 0x0004
1859 extern void usb_register_notify(struct notifier_block *nb);
1860 extern void usb_unregister_notify(struct notifier_block *nb);
1861
1862 /* debugfs stuff */
1863 extern struct dentry *usb_debug_root;
1864
1865 #endif /* __KERNEL__ */
1866
1867 #endif 1 /*
2 *
3 * V 4 L 2 D R I V E R H E L P E R A P I
4 *
5 * Moved from videodev2.h
6 *
7 * Some commonly needed functions for drivers (v4l2-common.o module)
8 */
9 #ifndef _V4L2_DEV_H
10 #define _V4L2_DEV_H
11
12 #include <linux/poll.h>
13 #include <linux/fs.h>
14 #include <linux/device.h>
15 #include <linux/cdev.h>
16 #include <linux/mutex.h>
17 #include <linux/videodev2.h>
18
19 #include <media/media-entity.h>
20
#define VIDEO_MAJOR 81	/* character device major number for video devices */

/* Device node types; stored in the vfl_type field of struct video_device. */
#define VFL_TYPE_GRABBER 0
#define VFL_TYPE_VBI 1
#define VFL_TYPE_RADIO 2
#define VFL_TYPE_SUBDEV 3
#define VFL_TYPE_SDR 4
#define VFL_TYPE_MAX 5	/* one past the highest valid type */

/* Is this a receiver, transmitter or mem-to-mem? */
/* Ignored for VFL_TYPE_SUBDEV. */
#define VFL_DIR_RX 0
#define VFL_DIR_TX 1
#define VFL_DIR_M2M 2
35
36 struct v4l2_ioctl_callbacks;
37 struct video_device;
38 struct v4l2_device;
39 struct v4l2_ctrl_handler;
40
41 /* Flag to mark the video_device struct as registered.
42 Drivers can clear this flag if they want to block all future
43 device access. It is cleared by video_unregister_device. */
44 #define V4L2_FL_REGISTERED (0)
45 /* file->private_data points to struct v4l2_fh */
46 #define V4L2_FL_USES_V4L2_FH (1)
47 /* Use the prio field of v4l2_fh for core priority checking */
48 #define V4L2_FL_USE_FH_PRIO (2)
49
50 /* Priority helper functions */
51
/* Priority bookkeeping used by the v4l2_prio_* helpers below.
 * NOTE(review): prios[] appears to hold one atomic counter per
 * enum v4l2_priority value — confirm against the v4l2-common
 * implementation before relying on the exact indexing. */
struct v4l2_prio_state {
	atomic_t prios[4];
};
55
56 void v4l2_prio_init(struct v4l2_prio_state *global);
57 int v4l2_prio_change(struct v4l2_prio_state *global, enum v4l2_priority *local,
58 enum v4l2_priority new);
59 void v4l2_prio_open(struct v4l2_prio_state *global, enum v4l2_priority *local);
60 void v4l2_prio_close(struct v4l2_prio_state *global, enum v4l2_priority local);
61 enum v4l2_priority v4l2_prio_max(struct v4l2_prio_state *global);
62 int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
63
64
/* File operations for V4L2 device nodes. Analogous in shape to the VFS
 * struct file_operations, but open/release take only the struct file
 * (no inode argument). 'owner' identifies the implementing module. */
struct v4l2_file_operations {
	struct module *owner;
	ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
	ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
	unsigned int (*poll) (struct file *, struct poll_table_struct *);
	long (*ioctl) (struct file *, unsigned int, unsigned long);
	long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
	/* 32-bit ioctl entry point, only built for CONFIG_COMPAT kernels */
	long (*compat_ioctl32) (struct file *, unsigned int, unsigned long);
#endif
	unsigned long (*get_unmapped_area) (struct file *, unsigned long,
				unsigned long, unsigned long, unsigned long);
	int (*mmap) (struct file *, struct vm_area_struct *);
	int (*open) (struct file *);
	int (*release) (struct file *);
};
81
82 /*
83 * Newer version of video_device, handled by videodev2.c
84 * This version moves redundant code from video device code to
85 * the common handler
86 */
87
struct video_device
{
#if defined(CONFIG_MEDIA_CONTROLLER)
	struct media_entity entity;	/* media controller representation */
#endif
	/* device ops */
	const struct v4l2_file_operations *fops;

	/* sysfs */
	struct device dev;		/* v4l device */
	struct cdev *cdev;		/* character device */

	struct v4l2_device *v4l2_dev;	/* v4l2_device parent */
	/* Only set parent if that can't be deduced from v4l2_dev */
	struct device *dev_parent;	/* device parent */

	/* Control handler associated with this device node. May be NULL. */
	struct v4l2_ctrl_handler *ctrl_handler;

	/* vb2_queue associated with this device node. May be NULL. */
	struct vb2_queue *queue;

	/* Priority state. If NULL, then v4l2_dev->prio will be used. */
	struct v4l2_prio_state *prio;

	/* device info */
	char name[32];
	int vfl_type;	/* device type (VFL_TYPE_*) */
	int vfl_dir;	/* receiver, transmitter or m2m (VFL_DIR_*) */
	/* 'minor' is set to -1 if the registration failed */
	int minor;
	u16 num;
	/* use bitops to set/clear/test flags (V4L2_FL_*) */
	unsigned long flags;
	/* attribute to differentiate multiple indices on one physical device */
	int index;

	/* V4L2 file handles */
	spinlock_t fh_lock;		/* Lock for all v4l2_fhs */
	struct list_head fh_list;	/* List of struct v4l2_fh */

	int debug;			/* Activates debug level*/

	/* Video standard vars */
	v4l2_std_id tvnorms;		/* Supported tv norms */

	/* callbacks */
	void (*release)(struct video_device *vdev);

	/* ioctl callbacks */
	const struct v4l2_ioctl_ops *ioctl_ops;
	/* ioctl bitmap indexed by _IOC_NR(cmd); see v4l2_disable_ioctl() */
	DECLARE_BITMAP(valid_ioctls, BASE_VIDIOC_PRIVATE);

	/* serialization lock */
	/* per-ioctl opt-out bitmap; see v4l2_disable_ioctl_locking() */
	DECLARE_BITMAP(disable_locking, BASE_VIDIOC_PRIVATE);
	struct mutex *lock;
};
145
/* Get the video_device that embeds the given media_entity. */
#define media_entity_to_video_device(__e) \
	container_of(__e, struct video_device, entity)
/* dev to video-device: get the video_device embedding a struct device */
#define to_video_device(cd) container_of(cd, struct video_device, dev)
150
151 int __must_check __video_register_device(struct video_device *vdev, int type,
152 int nr, int warn_if_nr_in_use, struct module *owner);
153
154 /* Register video devices. Note that if video_register_device fails,
155 the release() callback of the video_device structure is *not* called, so
156 the caller is responsible for freeing any data. Usually that means that
157 you call video_device_release() on failure. */
158 static inline int __must_check video_register_device(struct video_device *vdev,
159 int type, int nr)
160 {
161 return __video_register_device(vdev, type, nr, 1, vdev->fops->owner);
162 }
163
164 /* Same as video_register_device, but no warning is issued if the desired
165 device node number was already in use. */
166 static inline int __must_check video_register_device_no_warn(
167 struct video_device *vdev, int type, int nr)
168 {
169 return __video_register_device(vdev, type, nr, 0, vdev->fops->owner);
170 }
171
172 /* Unregister video devices. Will do nothing if vdev == NULL or
173 video_is_registered() returns false. */
174 void video_unregister_device(struct video_device *vdev);
175
176 /* helper functions to alloc/release struct video_device, the
177 latter can also be used for video_device->release(). */
178 struct video_device * __must_check video_device_alloc(void);
179
180 /* this release function frees the vdev pointer */
181 void video_device_release(struct video_device *vdev);
182
183 /* this release function does nothing, use when the video_device is a
184 static global struct. Note that having a static video_device is
185 a dubious construction at best. */
186 void video_device_release_empty(struct video_device *vdev);
187
188 /* returns true if cmd is a known V4L2 ioctl */
189 bool v4l2_is_known_ioctl(unsigned int cmd);
190
191 /* mark that this command shouldn't use core locking */
192 static inline void v4l2_disable_ioctl_locking(struct video_device *vdev, unsigned int cmd)
193 {
194 if (_IOC_NR(cmd) < BASE_VIDIOC_PRIVATE)
195 set_bit(_IOC_NR(cmd), vdev->disable_locking);
196 }
197
198 /* Mark that this command isn't implemented. This must be called before
199 video_device_register. See also the comments in determine_valid_ioctls().
200 This function allows drivers to provide just one v4l2_ioctl_ops struct, but
201 disable ioctls based on the specific card that is actually found. */
202 static inline void v4l2_disable_ioctl(struct video_device *vdev, unsigned int cmd)
203 {
204 if (_IOC_NR(cmd) < BASE_VIDIOC_PRIVATE)
205 set_bit(_IOC_NR(cmd), vdev->valid_ioctls);
206 }
207
208 /* helper functions to access driver private data. */
209 static inline void *video_get_drvdata(struct video_device *vdev)
210 {
211 return dev_get_drvdata(&vdev->dev);
212 }
213
214 static inline void video_set_drvdata(struct video_device *vdev, void *data)
215 {
216 dev_set_drvdata(&vdev->dev, data);
217 }
218
219 struct video_device *video_devdata(struct file *file);
220
221 /* Combine video_get_drvdata and video_devdata as this is
222 used very often. */
static inline void *video_drvdata(struct file *file)
{
	/* Resolve the file to its video_device, then fetch its drvdata. */
	struct video_device *vdev = video_devdata(file);

	return video_get_drvdata(vdev);
}
227
228 static inline const char *video_device_node_name(struct video_device *vdev)
229 {
230 return dev_name(&vdev->dev);
231 }
232
233 static inline int video_is_registered(struct video_device *vdev)
234 {
235 return test_bit(V4L2_FL_REGISTERED, &vdev->flags);
236 }
237
238 #endif /* _V4L2_DEV_H */ 1 /*
2 * This file holds USB constants and structures that are needed for
3 * USB device APIs. These are used by the USB device model, which is
4 * defined in chapter 9 of the USB 2.0 specification and in the
5 * Wireless USB 1.0 (spread around). Linux has several APIs in C that
6 * need these:
7 *
8 * - the master/host side Linux-USB kernel driver API;
9 * - the "usbfs" user space API; and
10 * - the Linux "gadget" slave/device/peripheral side driver API.
11 *
12 * USB 2.0 adds an additional "On The Go" (OTG) mode, which lets systems
13 * act either as a USB master/host or as a USB slave/device. That means
14 * the master and slave side APIs benefit from working well together.
15 *
16 * There's also "Wireless USB", using low power short range radios for
17 * peripheral interconnection but otherwise building on the USB framework.
18 *
19 * Note all descriptors are declared '__attribute__((packed))' so that:
20 *
21 * [a] they never get padded, either internally (USB spec writers
22 * probably handled that) or externally;
23 *
24 * [b] so that accessing bigger-than-a-bytes fields will never
25 * generate bus errors on any platform, even when the location of
26 * its descriptor inside a bundle isn't "naturally aligned", and
27 *
28 * [c] for consistency, removing all doubt even when it appears to
29 * someone that the two other points are non-issues for that
30 * particular descriptor type.
31 */
32
33 #ifndef _UAPI__LINUX_USB_CH9_H
34 #define _UAPI__LINUX_USB_CH9_H
35
36 #include <linux/types.h> /* __u8 etc */
37 #include <asm/byteorder.h> /* le16_to_cpu */
38
39 /*-------------------------------------------------------------------------*/
40
41 /* CONTROL REQUEST SUPPORT */
42
43 /*
44 * USB directions
45 *
46 * This bit flag is used in endpoint descriptors' bEndpointAddress field.
47 * It's also one of three fields in control requests bRequestType.
48 */
49 #define USB_DIR_OUT 0 /* to device */
50 #define USB_DIR_IN 0x80 /* to host */
51
52 /*
53 * USB types, the second of three bRequestType fields
54 */
55 #define USB_TYPE_MASK (0x03 << 5)
56 #define USB_TYPE_STANDARD (0x00 << 5)
57 #define USB_TYPE_CLASS (0x01 << 5)
58 #define USB_TYPE_VENDOR (0x02 << 5)
59 #define USB_TYPE_RESERVED (0x03 << 5)
60
61 /*
62 * USB recipients, the third of three bRequestType fields
63 */
64 #define USB_RECIP_MASK 0x1f
65 #define USB_RECIP_DEVICE 0x00
66 #define USB_RECIP_INTERFACE 0x01
67 #define USB_RECIP_ENDPOINT 0x02
68 #define USB_RECIP_OTHER 0x03
69 /* From Wireless USB 1.0 */
70 #define USB_RECIP_PORT 0x04
71 #define USB_RECIP_RPIPE 0x05
72
73 /*
74 * Standard requests, for the bRequest field of a SETUP packet.
75 *
76 * These are qualified by the bRequestType field, so that for example
77 * TYPE_CLASS or TYPE_VENDOR specific feature flags could be retrieved
78 * by a GET_STATUS request.
79 */
80 #define USB_REQ_GET_STATUS 0x00
81 #define USB_REQ_CLEAR_FEATURE 0x01
82 #define USB_REQ_SET_FEATURE 0x03
83 #define USB_REQ_SET_ADDRESS 0x05
84 #define USB_REQ_GET_DESCRIPTOR 0x06
85 #define USB_REQ_SET_DESCRIPTOR 0x07
86 #define USB_REQ_GET_CONFIGURATION 0x08
87 #define USB_REQ_SET_CONFIGURATION 0x09
88 #define USB_REQ_GET_INTERFACE 0x0A
89 #define USB_REQ_SET_INTERFACE 0x0B
90 #define USB_REQ_SYNCH_FRAME 0x0C
91 #define USB_REQ_SET_SEL 0x30
92 #define USB_REQ_SET_ISOCH_DELAY 0x31
93
94 #define USB_REQ_SET_ENCRYPTION 0x0D /* Wireless USB */
95 #define USB_REQ_GET_ENCRYPTION 0x0E
96 #define USB_REQ_RPIPE_ABORT 0x0E
97 #define USB_REQ_SET_HANDSHAKE 0x0F
98 #define USB_REQ_RPIPE_RESET 0x0F
99 #define USB_REQ_GET_HANDSHAKE 0x10
100 #define USB_REQ_SET_CONNECTION 0x11
101 #define USB_REQ_SET_SECURITY_DATA 0x12
102 #define USB_REQ_GET_SECURITY_DATA 0x13
103 #define USB_REQ_SET_WUSB_DATA 0x14
104 #define USB_REQ_LOOPBACK_DATA_WRITE 0x15
105 #define USB_REQ_LOOPBACK_DATA_READ 0x16
106 #define USB_REQ_SET_INTERFACE_DS 0x17
107
108 /* The Link Power Management (LPM) ECN defines USB_REQ_TEST_AND_SET command,
109 * used by hubs to put ports into a new L1 suspend state, except that it
110 * forgot to define its number ...
111 */
112
113 /*
114 * USB feature flags are written using USB_REQ_{CLEAR,SET}_FEATURE, and
115 * are read as a bit array returned by USB_REQ_GET_STATUS. (So there
116 * are at most sixteen features of each type.) Hubs may also support a
117 * new USB_REQ_TEST_AND_SET_FEATURE to put ports into L1 suspend.
118 */
119 #define USB_DEVICE_SELF_POWERED 0 /* (read only) */
120 #define USB_DEVICE_REMOTE_WAKEUP 1 /* dev may initiate wakeup */
121 #define USB_DEVICE_TEST_MODE 2 /* (wired high speed only) */
122 #define USB_DEVICE_BATTERY 2 /* (wireless) */
123 #define USB_DEVICE_B_HNP_ENABLE 3 /* (otg) dev may initiate HNP */
124 #define USB_DEVICE_WUSB_DEVICE 3 /* (wireless)*/
125 #define USB_DEVICE_A_HNP_SUPPORT 4 /* (otg) RH port supports HNP */
126 #define USB_DEVICE_A_ALT_HNP_SUPPORT 5 /* (otg) other RH port does */
127 #define USB_DEVICE_DEBUG_MODE 6 /* (special devices only) */
128
129 /*
130 * Test Mode Selectors
131 * See USB 2.0 spec Table 9-7
132 */
133 #define TEST_J 1
134 #define TEST_K 2
135 #define TEST_SE0_NAK 3
136 #define TEST_PACKET 4
137 #define TEST_FORCE_EN 5
138
139 /*
140 * New Feature Selectors as added by USB 3.0
141 * See USB 3.0 spec Table 9-7
142 */
143 #define USB_DEVICE_U1_ENABLE 48 /* dev may initiate U1 transition */
144 #define USB_DEVICE_U2_ENABLE 49 /* dev may initiate U2 transition */
145 #define USB_DEVICE_LTM_ENABLE 50 /* dev may send LTM */
146 #define USB_INTRF_FUNC_SUSPEND 0 /* function suspend */
147
148 #define USB_INTR_FUNC_SUSPEND_OPT_MASK 0xFF00
149 /*
150 * Suspend Options, Table 9-8 USB 3.0 spec
151 */
152 #define USB_INTRF_FUNC_SUSPEND_LP (1 << (8 + 0))
153 #define USB_INTRF_FUNC_SUSPEND_RW (1 << (8 + 1))
154
155 /*
156 * Interface status, Figure 9-5 USB 3.0 spec
157 */
158 #define USB_INTRF_STAT_FUNC_RW_CAP 1
159 #define USB_INTRF_STAT_FUNC_RW 2
160
161 #define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */
162
163 /* Bit array elements as returned by the USB_REQ_GET_STATUS request. */
164 #define USB_DEV_STAT_U1_ENABLED 2 /* transition into U1 state */
165 #define USB_DEV_STAT_U2_ENABLED 3 /* transition into U2 state */
166 #define USB_DEV_STAT_LTM_ENABLED 4 /* Latency tolerance messages */
167
168 /**
169 * struct usb_ctrlrequest - SETUP data for a USB device control request
170 * @bRequestType: matches the USB bmRequestType field
171 * @bRequest: matches the USB bRequest field
172 * @wValue: matches the USB wValue field (le16 byte order)
173 * @wIndex: matches the USB wIndex field (le16 byte order)
174 * @wLength: matches the USB wLength field (le16 byte order)
175 *
176 * This structure is used to send control requests to a USB device. It matches
177 * the different fields of the USB 2.0 Spec section 9.3, table 9-2. See the
178 * USB spec for a fuller description of the different fields, and what they are
179 * used for.
180 *
181 * Note that the driver for any interface can issue control requests.
182 * For most devices, interfaces don't coordinate with each other, so
183 * such requests may be made at any time.
184 */
185 struct usb_ctrlrequest {
186 __u8 bRequestType;
187 __u8 bRequest;
188 __le16 wValue;
189 __le16 wIndex;
190 __le16 wLength;
191 } __attribute__ ((packed));
192
193 /*-------------------------------------------------------------------------*/
194
195 /*
196 * STANDARD DESCRIPTORS ... as returned by GET_DESCRIPTOR, or
197 * (rarely) accepted by SET_DESCRIPTOR.
198 *
199 * Note that all multi-byte values here are encoded in little endian
200 * byte order "on the wire". Within the kernel and when exposed
201 * through the Linux-USB APIs, they are not converted to cpu byte
202 * order; it is the responsibility of the client code to do this.
203 * The single exception is when device and configuration descriptors (but
204 * not other descriptors) are read from usbfs (i.e. /proc/bus/usb/BBB/DDD);
205 * in this case the fields are converted to host endianness by the kernel.
206 */
207
208 /*
209 * Descriptor types ... USB 2.0 spec table 9.5
210 */
211 #define USB_DT_DEVICE 0x01
212 #define USB_DT_CONFIG 0x02
213 #define USB_DT_STRING 0x03
214 #define USB_DT_INTERFACE 0x04
215 #define USB_DT_ENDPOINT 0x05
216 #define USB_DT_DEVICE_QUALIFIER 0x06
217 #define USB_DT_OTHER_SPEED_CONFIG 0x07
218 #define USB_DT_INTERFACE_POWER 0x08
219 /* these are from a minor usb 2.0 revision (ECN) */
220 #define USB_DT_OTG 0x09
221 #define USB_DT_DEBUG 0x0a
222 #define USB_DT_INTERFACE_ASSOCIATION 0x0b
223 /* these are from the Wireless USB spec */
224 #define USB_DT_SECURITY 0x0c
225 #define USB_DT_KEY 0x0d
226 #define USB_DT_ENCRYPTION_TYPE 0x0e
227 #define USB_DT_BOS 0x0f
228 #define USB_DT_DEVICE_CAPABILITY 0x10
229 #define USB_DT_WIRELESS_ENDPOINT_COMP 0x11
230 #define USB_DT_WIRE_ADAPTER 0x21
231 #define USB_DT_RPIPE 0x22
232 #define USB_DT_CS_RADIO_CONTROL 0x23
233 /* From the T10 UAS specification */
234 #define USB_DT_PIPE_USAGE 0x24
235 /* From the USB 3.0 spec */
236 #define USB_DT_SS_ENDPOINT_COMP 0x30
237
238 /* Conventional codes for class-specific descriptors. The convention is
239 * defined in the USB "Common Class" Spec (3.11). Individual class specs
240 * are authoritative for their usage, not the "common class" writeup.
241 */
242 #define USB_DT_CS_DEVICE (USB_TYPE_CLASS | USB_DT_DEVICE)
243 #define USB_DT_CS_CONFIG (USB_TYPE_CLASS | USB_DT_CONFIG)
244 #define USB_DT_CS_STRING (USB_TYPE_CLASS | USB_DT_STRING)
245 #define USB_DT_CS_INTERFACE (USB_TYPE_CLASS | USB_DT_INTERFACE)
246 #define USB_DT_CS_ENDPOINT (USB_TYPE_CLASS | USB_DT_ENDPOINT)
247
248 /* All standard descriptors have these 2 fields at the beginning */
struct usb_descriptor_header {
	__u8 bLength;		/* size of this descriptor in bytes */
	__u8 bDescriptorType;	/* one of the USB_DT_* values above */
} __attribute__ ((packed));
253
254
255 /*-------------------------------------------------------------------------*/
256
257 /* USB_DT_DEVICE: Device descriptor */
258 struct usb_device_descriptor {
259 __u8 bLength;
260 __u8 bDescriptorType;
261
262 __le16 bcdUSB;
263 __u8 bDeviceClass;
264 __u8 bDeviceSubClass;
265 __u8 bDeviceProtocol;
266 __u8 bMaxPacketSize0;
267 __le16 idVendor;
268 __le16 idProduct;
269 __le16 bcdDevice;
270 __u8 iManufacturer;
271 __u8 iProduct;
272 __u8 iSerialNumber;
273 __u8 bNumConfigurations;
274 } __attribute__ ((packed));
275
276 #define USB_DT_DEVICE_SIZE 18
277
278
279 /*
280 * Device and/or Interface Class codes
281 * as found in bDeviceClass or bInterfaceClass
282 * and defined by www.usb.org documents
283 */
284 #define USB_CLASS_PER_INTERFACE 0 /* for DeviceClass */
285 #define USB_CLASS_AUDIO 1
286 #define USB_CLASS_COMM 2
287 #define USB_CLASS_HID 3
288 #define USB_CLASS_PHYSICAL 5
289 #define USB_CLASS_STILL_IMAGE 6
290 #define USB_CLASS_PRINTER 7
291 #define USB_CLASS_MASS_STORAGE 8
292 #define USB_CLASS_HUB 9
293 #define USB_CLASS_CDC_DATA 0x0a
294 #define USB_CLASS_CSCID 0x0b /* chip+ smart card */
295 #define USB_CLASS_CONTENT_SEC 0x0d /* content security */
296 #define USB_CLASS_VIDEO 0x0e
297 #define USB_CLASS_WIRELESS_CONTROLLER 0xe0
298 #define USB_CLASS_MISC 0xef
299 #define USB_CLASS_APP_SPEC 0xfe
300 #define USB_CLASS_VENDOR_SPEC 0xff
301
302 #define USB_SUBCLASS_VENDOR_SPEC 0xff
303
304 /*-------------------------------------------------------------------------*/
305
306 /* USB_DT_CONFIG: Configuration descriptor information.
307 *
308 * USB_DT_OTHER_SPEED_CONFIG is the same descriptor, except that the
309 * descriptor type is different. Highspeed-capable devices can look
310 * different depending on what speed they're currently running. Only
311 * devices with a USB_DT_DEVICE_QUALIFIER have any OTHER_SPEED_CONFIG
312 * descriptors.
313 */
314 struct usb_config_descriptor {
315 __u8 bLength;
316 __u8 bDescriptorType;
317
318 __le16 wTotalLength;
319 __u8 bNumInterfaces;
320 __u8 bConfigurationValue;
321 __u8 iConfiguration;
322 __u8 bmAttributes;
323 __u8 bMaxPower;
324 } __attribute__ ((packed));
325
326 #define USB_DT_CONFIG_SIZE 9
327
328 /* from config descriptor bmAttributes */
329 #define USB_CONFIG_ATT_ONE (1 << 7) /* must be set */
330 #define USB_CONFIG_ATT_SELFPOWER (1 << 6) /* self powered */
331 #define USB_CONFIG_ATT_WAKEUP (1 << 5) /* can wakeup */
332 #define USB_CONFIG_ATT_BATTERY (1 << 4) /* battery powered */
333
334 /*-------------------------------------------------------------------------*/
335
336 /* USB_DT_STRING: String descriptor */
337 struct usb_string_descriptor {
338 __u8 bLength;
339 __u8 bDescriptorType;
340
341 __le16 wData[1]; /* UTF-16LE encoded */
342 } __attribute__ ((packed));
343
344 /* note that "string" zero is special, it holds language codes that
345 * the device supports, not Unicode characters.
346 */
347
348 /*-------------------------------------------------------------------------*/
349
350 /* USB_DT_INTERFACE: Interface descriptor */
351 struct usb_interface_descriptor {
352 __u8 bLength;
353 __u8 bDescriptorType;
354
355 __u8 bInterfaceNumber;
356 __u8 bAlternateSetting;
357 __u8 bNumEndpoints;
358 __u8 bInterfaceClass;
359 __u8 bInterfaceSubClass;
360 __u8 bInterfaceProtocol;
361 __u8 iInterface;
362 } __attribute__ ((packed));
363
364 #define USB_DT_INTERFACE_SIZE 9
365
366 /*-------------------------------------------------------------------------*/
367
368 /* USB_DT_ENDPOINT: Endpoint descriptor */
369 struct usb_endpoint_descriptor {
370 __u8 bLength;
371 __u8 bDescriptorType;
372
373 __u8 bEndpointAddress;
374 __u8 bmAttributes;
375 __le16 wMaxPacketSize;
376 __u8 bInterval;
377
378 /* NOTE: these two are _only_ in audio endpoints. */
379 /* use USB_DT_ENDPOINT*_SIZE in bLength, not sizeof. */
380 __u8 bRefresh;
381 __u8 bSynchAddress;
382 } __attribute__ ((packed));
383
384 #define USB_DT_ENDPOINT_SIZE 7
385 #define USB_DT_ENDPOINT_AUDIO_SIZE 9 /* Audio extension */
386
387
388 /*
389 * Endpoints
390 */
391 #define USB_ENDPOINT_NUMBER_MASK 0x0f /* in bEndpointAddress */
392 #define USB_ENDPOINT_DIR_MASK 0x80
393
394 #define USB_ENDPOINT_XFERTYPE_MASK 0x03 /* in bmAttributes */
395 #define USB_ENDPOINT_XFER_CONTROL 0
396 #define USB_ENDPOINT_XFER_ISOC 1
397 #define USB_ENDPOINT_XFER_BULK 2
398 #define USB_ENDPOINT_XFER_INT 3
399 #define USB_ENDPOINT_MAX_ADJUSTABLE 0x80
400
401 /* The USB 3.0 spec redefines bits 5:4 of bmAttributes as interrupt ep type. */
402 #define USB_ENDPOINT_INTRTYPE 0x30
403 #define USB_ENDPOINT_INTR_PERIODIC (0 << 4)
404 #define USB_ENDPOINT_INTR_NOTIFICATION (1 << 4)
405
406 #define USB_ENDPOINT_SYNCTYPE 0x0c
407 #define USB_ENDPOINT_SYNC_NONE (0 << 2)
408 #define USB_ENDPOINT_SYNC_ASYNC (1 << 2)
409 #define USB_ENDPOINT_SYNC_ADAPTIVE (2 << 2)
410 #define USB_ENDPOINT_SYNC_SYNC (3 << 2)
411
412 #define USB_ENDPOINT_USAGE_MASK 0x30
413 #define USB_ENDPOINT_USAGE_DATA 0x00
414 #define USB_ENDPOINT_USAGE_FEEDBACK 0x10
415 #define USB_ENDPOINT_USAGE_IMPLICIT_FB 0x20 /* Implicit feedback Data endpoint */
416
417 /*-------------------------------------------------------------------------*/
418
419 /**
420 * usb_endpoint_num - get the endpoint's number
421 * @epd: endpoint to be checked
422 *
423 * Returns @epd's number: 0 to 15.
424 */
425 static inline int usb_endpoint_num(const struct usb_endpoint_descriptor *epd)
426 {
427 return epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
428 }
429
430 /**
431 * usb_endpoint_type - get the endpoint's transfer type
432 * @epd: endpoint to be checked
433 *
434 * Returns one of USB_ENDPOINT_XFER_{CONTROL, ISOC, BULK, INT} according
435 * to @epd's transfer type.
436 */
437 static inline int usb_endpoint_type(const struct usb_endpoint_descriptor *epd)
438 {
439 return epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
440 }
441
442 /**
443 * usb_endpoint_dir_in - check if the endpoint has IN direction
444 * @epd: endpoint to be checked
445 *
446 * Returns true if the endpoint is of type IN, otherwise it returns false.
447 */
448 static inline int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd)
449 {
450 return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN);
451 }
452
453 /**
454 * usb_endpoint_dir_out - check if the endpoint has OUT direction
455 * @epd: endpoint to be checked
456 *
457 * Returns true if the endpoint is of type OUT, otherwise it returns false.
458 */
459 static inline int usb_endpoint_dir_out(
460 const struct usb_endpoint_descriptor *epd)
461 {
462 return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT);
463 }
464
465 /**
466 * usb_endpoint_xfer_bulk - check if the endpoint has bulk transfer type
467 * @epd: endpoint to be checked
468 *
469 * Returns true if the endpoint is of type bulk, otherwise it returns false.
470 */
471 static inline int usb_endpoint_xfer_bulk(
472 const struct usb_endpoint_descriptor *epd)
473 {
474 return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
475 USB_ENDPOINT_XFER_BULK);
476 }
477
478 /**
479 * usb_endpoint_xfer_control - check if the endpoint has control transfer type
480 * @epd: endpoint to be checked
481 *
482 * Returns true if the endpoint is of type control, otherwise it returns false.
483 */
484 static inline int usb_endpoint_xfer_control(
485 const struct usb_endpoint_descriptor *epd)
486 {
487 return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
488 USB_ENDPOINT_XFER_CONTROL);
489 }
490
491 /**
492 * usb_endpoint_xfer_int - check if the endpoint has interrupt transfer type
493 * @epd: endpoint to be checked
494 *
495 * Returns true if the endpoint is of type interrupt, otherwise it returns
496 * false.
497 */
498 static inline int usb_endpoint_xfer_int(
499 const struct usb_endpoint_descriptor *epd)
500 {
501 return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
502 USB_ENDPOINT_XFER_INT);
503 }
504
505 /**
506 * usb_endpoint_xfer_isoc - check if the endpoint has isochronous transfer type
507 * @epd: endpoint to be checked
508 *
509 * Returns true if the endpoint is of type isochronous, otherwise it returns
510 * false.
511 */
512 static inline int usb_endpoint_xfer_isoc(
513 const struct usb_endpoint_descriptor *epd)
514 {
515 return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
516 USB_ENDPOINT_XFER_ISOC);
517 }
518
519 /**
520 * usb_endpoint_is_bulk_in - check if the endpoint is bulk IN
521 * @epd: endpoint to be checked
522 *
523 * Returns true if the endpoint has bulk transfer type and IN direction,
524 * otherwise it returns false.
525 */
static inline int usb_endpoint_is_bulk_in(
				const struct usb_endpoint_descriptor *epd)
{
	/* Bulk transfer type combined with the IN direction. */
	if (!usb_endpoint_xfer_bulk(epd))
		return 0;

	return usb_endpoint_dir_in(epd);
}
531
532 /**
533 * usb_endpoint_is_bulk_out - check if the endpoint is bulk OUT
534 * @epd: endpoint to be checked
535 *
536 * Returns true if the endpoint has bulk transfer type and OUT direction,
537 * otherwise it returns false.
538 */
static inline int usb_endpoint_is_bulk_out(
				const struct usb_endpoint_descriptor *epd)
{
	/* Bulk transfer type combined with the OUT direction. */
	if (!usb_endpoint_xfer_bulk(epd))
		return 0;

	return usb_endpoint_dir_out(epd);
}
544
545 /**
546 * usb_endpoint_is_int_in - check if the endpoint is interrupt IN
547 * @epd: endpoint to be checked
548 *
549 * Returns true if the endpoint has interrupt transfer type and IN direction,
550 * otherwise it returns false.
551 */
static inline int usb_endpoint_is_int_in(
				const struct usb_endpoint_descriptor *epd)
{
	/* Interrupt transfer type combined with the IN direction. */
	if (!usb_endpoint_xfer_int(epd))
		return 0;

	return usb_endpoint_dir_in(epd);
}
557
558 /**
559 * usb_endpoint_is_int_out - check if the endpoint is interrupt OUT
560 * @epd: endpoint to be checked
561 *
562 * Returns true if the endpoint has interrupt transfer type and OUT direction,
563 * otherwise it returns false.
564 */
static inline int usb_endpoint_is_int_out(
				const struct usb_endpoint_descriptor *epd)
{
	/* Interrupt transfer type combined with the OUT direction. */
	if (!usb_endpoint_xfer_int(epd))
		return 0;

	return usb_endpoint_dir_out(epd);
}
570
571 /**
572 * usb_endpoint_is_isoc_in - check if the endpoint is isochronous IN
573 * @epd: endpoint to be checked
574 *
575 * Returns true if the endpoint has isochronous transfer type and IN direction,
576 * otherwise it returns false.
577 */
static inline int usb_endpoint_is_isoc_in(
				const struct usb_endpoint_descriptor *epd)
{
	/* Isochronous transfer type combined with the IN direction. */
	if (!usb_endpoint_xfer_isoc(epd))
		return 0;

	return usb_endpoint_dir_in(epd);
}
583
584 /**
585 * usb_endpoint_is_isoc_out - check if the endpoint is isochronous OUT
586 * @epd: endpoint to be checked
587 *
588 * Returns true if the endpoint has isochronous transfer type and OUT direction,
589 * otherwise it returns false.
590 */
static inline int usb_endpoint_is_isoc_out(
				const struct usb_endpoint_descriptor *epd)
{
	/* Isochronous transfer type combined with the OUT direction. */
	if (!usb_endpoint_xfer_isoc(epd))
		return 0;

	return usb_endpoint_dir_out(epd);
}
596
597 /**
598 * usb_endpoint_maxp - get endpoint's max packet size
599 * @epd: endpoint to be checked
600 *
601 * Returns @epd's max packet
602 */
static inline int usb_endpoint_maxp(const struct usb_endpoint_descriptor *epd)
{
	/* wMaxPacketSize is little-endian on the wire (see the header
	 * comment on byte order); convert to CPU order for the caller. */
	return __le16_to_cpu(epd->wMaxPacketSize);
}
607
608 static inline int usb_endpoint_interrupt_type(
609 const struct usb_endpoint_descriptor *epd)
610 {
611 return epd->bmAttributes & USB_ENDPOINT_INTRTYPE;
612 }
613
614 /*-------------------------------------------------------------------------*/
615
616 /* USB_DT_SS_ENDPOINT_COMP: SuperSpeed Endpoint Companion descriptor */
617 struct usb_ss_ep_comp_descriptor {
618 __u8 bLength;
619 __u8 bDescriptorType;
620
621 __u8 bMaxBurst;
622 __u8 bmAttributes;
623 __le16 wBytesPerInterval;
624 } __attribute__ ((packed));
625
626 #define USB_DT_SS_EP_COMP_SIZE 6
627
628 /* Bits 4:0 of bmAttributes if this is a bulk endpoint */
629 static inline int
630 usb_ss_max_streams(const struct usb_ss_ep_comp_descriptor *comp)
631 {
632 int max_streams;
633
634 if (!comp)
635 return 0;
636
637 max_streams = comp->bmAttributes & 0x1f;
638
639 if (!max_streams)
640 return 0;
641
642 max_streams = 1 << max_streams;
643
644 return max_streams;
645 }
646
647 /* Bits 1:0 of bmAttributes if this is an isoc endpoint */
648 #define USB_SS_MULT(p) (1 + ((p) & 0x3))
649
650 /*-------------------------------------------------------------------------*/
651
652 /* USB_DT_DEVICE_QUALIFIER: Device Qualifier descriptor */
653 struct usb_qualifier_descriptor {
654 __u8 bLength;
655 __u8 bDescriptorType;
656
657 __le16 bcdUSB;
658 __u8 bDeviceClass;
659 __u8 bDeviceSubClass;
660 __u8 bDeviceProtocol;
661 __u8 bMaxPacketSize0;
662 __u8 bNumConfigurations;
663 __u8 bRESERVED;
664 } __attribute__ ((packed));
665
666
667 /*-------------------------------------------------------------------------*/
668
669 /* USB_DT_OTG (from OTG 1.0a supplement) */
670 struct usb_otg_descriptor {
671 __u8 bLength;
672 __u8 bDescriptorType;
673
674 __u8 bmAttributes; /* support for HNP, SRP, etc */
675 } __attribute__ ((packed));
676
677 /* from usb_otg_descriptor.bmAttributes */
678 #define USB_OTG_SRP (1 << 0)
679 #define USB_OTG_HNP (1 << 1) /* swap host/device roles */
680
681 /*-------------------------------------------------------------------------*/
682
683 /* USB_DT_DEBUG: for special highspeed devices, replacing serial console */
684 struct usb_debug_descriptor {
685 __u8 bLength;
686 __u8 bDescriptorType;
687
688 /* bulk endpoints with 8 byte maxpacket */
689 __u8 bDebugInEndpoint;
690 __u8 bDebugOutEndpoint;
691 } __attribute__((packed));
692
693 /*-------------------------------------------------------------------------*/
694
695 /* USB_DT_INTERFACE_ASSOCIATION: groups interfaces */
696 struct usb_interface_assoc_descriptor {
697 __u8 bLength;
698 __u8 bDescriptorType;
699
700 __u8 bFirstInterface;
701 __u8 bInterfaceCount;
702 __u8 bFunctionClass;
703 __u8 bFunctionSubClass;
704 __u8 bFunctionProtocol;
705 __u8 iFunction;
706 } __attribute__ ((packed));
707
708
709 /*-------------------------------------------------------------------------*/
710
711 /* USB_DT_SECURITY: group of wireless security descriptors, including
712 * encryption types available for setting up a CC/association.
713 */
714 struct usb_security_descriptor {
715 __u8 bLength;
716 __u8 bDescriptorType;
717
718 __le16 wTotalLength;
719 __u8 bNumEncryptionTypes;
720 } __attribute__((packed));
721
722 /*-------------------------------------------------------------------------*/
723
724 /* USB_DT_KEY: used with {GET,SET}_SECURITY_DATA; only public keys
725 * may be retrieved.
726 */
727 struct usb_key_descriptor {
728 __u8 bLength;
729 __u8 bDescriptorType;
730
731 __u8 tTKID[3];
732 __u8 bReserved;
733 __u8 bKeyData[0];
734 } __attribute__((packed));
735
736 /*-------------------------------------------------------------------------*/
737
738 /* USB_DT_ENCRYPTION_TYPE: bundled in DT_SECURITY groups */
739 struct usb_encryption_descriptor {
740 __u8 bLength; /* size of this descriptor, in bytes */
741 __u8 bDescriptorType; /* USB_DT_ENCRYPTION_TYPE */
742
743 __u8 bEncryptionType; /* one of USB_ENC_TYPE_* below */
744 #define USB_ENC_TYPE_UNSECURE 0
745 #define USB_ENC_TYPE_WIRED 1 /* non-wireless mode */
746 #define USB_ENC_TYPE_CCM_1 2 /* aes128/cbc session */
747 #define USB_ENC_TYPE_RSA_1 3 /* rsa3072/sha1 auth */
748 __u8 bEncryptionValue; /* use in SET_ENCRYPTION */
749 __u8 bAuthKeyIndex; /* NOTE(review): presumably indexes the device's key descriptors -- confirm against the WUSB spec */
750 } __attribute__((packed));
751
752
753 /*-------------------------------------------------------------------------*/
754
755 /* USB_DT_BOS: group of device-level capabilities */
756 struct usb_bos_descriptor {
757 __u8 bLength; /* size of this descriptor only, in bytes */
758 __u8 bDescriptorType; /* USB_DT_BOS */
759
760 __le16 wTotalLength; /* length of this descriptor plus all its capability descriptors */
761 __u8 bNumDeviceCaps; /* number of capability descriptors that follow */
762 } __attribute__((packed));
763
764 #define USB_DT_BOS_SIZE 5 /* 1+1+2+1 bytes, matches the struct above */
765 /*-------------------------------------------------------------------------*/
766
767 /* USB_DT_DEVICE_CAPABILITY: grouped with BOS */
/* common leading fields shared by all device capability descriptors */
768 struct usb_dev_cap_header {
769 __u8 bLength; /* size of the capability descriptor, in bytes */
770 __u8 bDescriptorType; /* USB_DT_DEVICE_CAPABILITY */
771 __u8 bDevCapabilityType; /* distinguishes the USB_CAP_TYPE_* variants */
772 } __attribute__((packed));
773
774 #define USB_CAP_TYPE_WIRELESS_USB 1
775
776 struct usb_wireless_cap_descriptor { /* Ultra Wide Band */
777 __u8 bLength; /* size of this descriptor, in bytes */
778 __u8 bDescriptorType; /* USB_DT_DEVICE_CAPABILITY */
779 __u8 bDevCapabilityType; /* USB_CAP_TYPE_WIRELESS_USB */
780
781 __u8 bmAttributes; /* see USB_WIRELESS_* flags below */
782 #define USB_WIRELESS_P2P_DRD (1 << 1)
783 #define USB_WIRELESS_BEACON_MASK (3 << 2)
784 #define USB_WIRELESS_BEACON_SELF (1 << 2)
785 #define USB_WIRELESS_BEACON_DIRECTED (2 << 2)
786 #define USB_WIRELESS_BEACON_NONE (3 << 2)
787 __le16 wPHYRates; /* bit rates, Mbps */
788 #define USB_WIRELESS_PHY_53 (1 << 0) /* always set */
789 #define USB_WIRELESS_PHY_80 (1 << 1)
790 #define USB_WIRELESS_PHY_107 (1 << 2) /* always set */
791 #define USB_WIRELESS_PHY_160 (1 << 3)
792 #define USB_WIRELESS_PHY_200 (1 << 4) /* always set */
793 #define USB_WIRELESS_PHY_320 (1 << 5)
794 #define USB_WIRELESS_PHY_400 (1 << 6)
795 #define USB_WIRELESS_PHY_480 (1 << 7)
796 __u8 bmTFITXPowerInfo; /* TFI power levels */
797 __u8 bmFFITXPowerInfo; /* FFI power levels */
798 __le16 bmBandGroup;
799 __u8 bReserved;
800 } __attribute__((packed));
801
802 /* USB 2.0 Extension descriptor */
803 #define USB_CAP_TYPE_EXT 2
804
805 struct usb_ext_cap_descriptor { /* Link Power Management */
806 __u8 bLength; /* size of this descriptor, in bytes */
807 __u8 bDescriptorType; /* USB_DT_DEVICE_CAPABILITY */
808 __u8 bDevCapabilityType; /* USB_CAP_TYPE_EXT */
809 __le32 bmAttributes; /* see LPM/BESL flags and accessors below */
810 #define USB_LPM_SUPPORT (1 << 1) /* supports LPM */
811 #define USB_BESL_SUPPORT (1 << 2) /* supports BESL */
812 #define USB_BESL_BASELINE_VALID (1 << 3) /* Baseline BESL valid*/
813 #define USB_BESL_DEEP_VALID (1 << 4) /* Deep BESL valid */
814 #define USB_GET_BESL_BASELINE(p) (((p) & (0xf << 8)) >> 8)
815 #define USB_GET_BESL_DEEP(p) (((p) & (0xf << 12)) >> 12)
816 } __attribute__((packed));
817
818 #define USB_DT_USB_EXT_CAP_SIZE 7 /* 1+1+1+4 bytes, matches the struct above */
819
820 /*
821 * SuperSpeed USB Capability descriptor: Defines the set of SuperSpeed USB
822 * specific device level capabilities
823 */
824 #define USB_SS_CAP_TYPE 3
825 struct usb_ss_cap_descriptor { /* Link Power Management */
826 __u8 bLength; /* size of this descriptor, in bytes */
827 __u8 bDescriptorType; /* USB_DT_DEVICE_CAPABILITY */
828 __u8 bDevCapabilityType; /* USB_SS_CAP_TYPE */
829 __u8 bmAttributes; /* see USB_LTM_SUPPORT below */
830 #define USB_LTM_SUPPORT (1 << 1) /* supports LTM */
831 __le16 wSpeedSupported; /* bitmap of the USB_*_OPERATION speeds below */
832 #define USB_LOW_SPEED_OPERATION (1) /* Low speed operation */
833 #define USB_FULL_SPEED_OPERATION (1 << 1) /* Full speed operation */
834 #define USB_HIGH_SPEED_OPERATION (1 << 2) /* High speed operation */
835 #define USB_5GBPS_OPERATION (1 << 3) /* Operation at 5Gbps */
836 __u8 bFunctionalitySupport;
837 __u8 bU1devExitLat; /* U1 device exit latency */
838 __le16 bU2DevExitLat; /* U2 device exit latency */
839 } __attribute__((packed));
840
841 #define USB_DT_USB_SS_CAP_SIZE 10 /* 1+1+1+1+2+1+1+2 bytes, matches the struct above */
842
843 /*
844 * Container ID Capability descriptor: Defines the instance unique ID used to
845 * identify the instance across all operating modes
846 */
847 #define CONTAINER_ID_TYPE 4
848 struct usb_ss_container_id_descriptor {
849 __u8 bLength; /* size of this descriptor, in bytes */
850 __u8 bDescriptorType; /* USB_DT_DEVICE_CAPABILITY */
851 __u8 bDevCapabilityType; /* CONTAINER_ID_TYPE */
852 __u8 bReserved;
853 __u8 ContainerID[16]; /* 128-bit number */
854 } __attribute__((packed));
855
856 #define USB_DT_USB_SS_CONTN_ID_SIZE 20 /* 1+1+1+1+16 bytes, matches the struct above */
857 /*-------------------------------------------------------------------------*/
858
859 /* USB_DT_WIRELESS_ENDPOINT_COMP: companion descriptor associated with
860 * each endpoint descriptor for a wireless device
861 */
862 struct usb_wireless_ep_comp_descriptor {
863 __u8 bLength; /* size of this descriptor, in bytes */
864 __u8 bDescriptorType; /* USB_DT_WIRELESS_ENDPOINT_COMP */
865
866 __u8 bMaxBurst;
867 __u8 bMaxSequence;
868 __le16 wMaxStreamDelay;
869 __le16 wOverTheAirPacketSize;
870 __u8 bOverTheAirInterval;
871 __u8 bmCompAttributes; /* endpoint switching behavior, see below */
872 #define USB_ENDPOINT_SWITCH_MASK 0x03 /* in bmCompAttributes */
873 #define USB_ENDPOINT_SWITCH_NO 0
874 #define USB_ENDPOINT_SWITCH_SWITCH 1
875 #define USB_ENDPOINT_SWITCH_SCALE 2
876 } __attribute__((packed));
877
878 /*-------------------------------------------------------------------------*/
879
880 /* USB_REQ_SET_HANDSHAKE is a four-way handshake used between a wireless
881 * host and a device for connection set up, mutual authentication, and
882 * exchanging short lived session keys. The handshake depends on a CC.
883 */
884 struct usb_handshake {
885 __u8 bMessageNumber; /* which step of the four-way handshake this is */
886 __u8 bStatus;
887 __u8 tTKID[3]; /* TKID of the session being established */
888 __u8 bReserved;
889 __u8 CDID[16]; /* connection device id, matches usb_connection_context.CDID */
890 __u8 nonce[16];
891 __u8 MIC[8]; /* message integrity code */
892 } __attribute__((packed));
893
894 /*-------------------------------------------------------------------------*/
895
896 /* USB_REQ_SET_CONNECTION modifies or revokes a connection context (CC).
897 * A CC may also be set up using non-wireless secure channels (including
898 * wired USB!), and some devices may support CCs with multiple hosts.
899 */
900 struct usb_connection_context {
/* all three ids are 128-bit values stored as raw bytes */
901 __u8 CHID[16]; /* persistent host id */
902 __u8 CDID[16]; /* device id (unique w/in host context) */
903 __u8 CK[16]; /* connection key */
904 } __attribute__((packed));
905
906 /*-------------------------------------------------------------------------*/
907
908 /* USB 2.0 defines three speeds, here's how Linux identifies them */
909
/* Bus speeds as Linux identifies them (USB 2.0 defines three of
 * them).  Values are spelled out explicitly so that a reordering
 * can never silently renumber the constants.
 */
enum usb_device_speed {
	USB_SPEED_UNKNOWN	= 0,	/* still enumerating */
	USB_SPEED_LOW		= 1,	/* usb 1.1 */
	USB_SPEED_FULL		= 2,	/* usb 1.1 */
	USB_SPEED_HIGH		= 3,	/* usb 2.0 */
	USB_SPEED_WIRELESS	= 4,	/* wireless (usb 2.5) */
	USB_SPEED_SUPER		= 5,	/* usb 3.0 */
};
917
918
enum usb_device_state {
	/* NOTATTACHED is a Linux-only state; the USB spec treats it
	 * the same as ATTACHED, but naming it separately is clearer.
	 * Values are explicit so reordering cannot renumber them.
	 */
	USB_STATE_NOTATTACHED		= 0,

	/* chapter 9 and authentication (wireless) device states */
	USB_STATE_ATTACHED		= 1,
	USB_STATE_POWERED		= 2,	/* wired */
	USB_STATE_RECONNECTING		= 3,	/* auth */
	USB_STATE_UNAUTHENTICATED	= 4,	/* auth */
	USB_STATE_DEFAULT		= 5,	/* limited function */
	USB_STATE_ADDRESS		= 6,
	USB_STATE_CONFIGURED		= 7,	/* most functions */

	USB_STATE_SUSPENDED		= 8

	/* NOTE: there are really four distinct SUSPENDED states,
	 * resuming to POWERED, DEFAULT, ADDRESS, or CONFIGURED when
	 * SOF tokens flow again.  L1 and L2 suspend are not
	 * distinguished at this level (L2 being the original USB 1.1
	 * suspend).
	 */
};
943
/* USB 3.0 link states used for Link Power Management; values are
 * fixed explicitly (U0 is the active link state).
 */
enum usb3_link_state {
	USB3_LPM_U0 = 0,
	USB3_LPM_U1 = 1,
	USB3_LPM_U2 = 2,
	USB3_LPM_U3 = 3
};
950
951 /*
952 * A U1 timeout of 0x0 means the parent hub will reject any transitions to U1.
953 * 0xff means the parent hub will accept transitions to U1, but will not
954 * initiate a transition.
955 *
956 * A U1 timeout of 0x1 to 0x7F also causes the hub to initiate a transition to
957 * U1 after that many microseconds. Timeouts of 0x80 to 0xFE are reserved
958 * values.
959 *
960 * A U2 timeout of 0x0 means the parent hub will reject any transitions to U2.
961 * 0xff means the parent hub will accept transitions to U2, but will not
962 * initiate a transition.
963 *
964 * A U2 timeout of 0x1 to 0xFE also causes the hub to initiate a transition to
965 * U2 after N*256 microseconds. Therefore a U2 timeout value of 0x1 means a U2
966 * idle timer of 256 microseconds, 0x2 means 512 microseconds, 0xFE means
967 * 65.024ms.
968 */
969 #define USB3_LPM_DISABLED 0x0
970 #define USB3_LPM_U1_MAX_TIMEOUT 0x7F
971 #define USB3_LPM_U2_MAX_TIMEOUT 0xFE
972 #define USB3_LPM_DEVICE_INITIATED 0xFF
973
/* payload of the Set System Exit Latency (SET_SEL) control transfer */
974 struct usb_set_sel_req {
975 __u8 u1_sel; /* U1 System Exit Latency */
976 __u8 u1_pel; /* U1 PEL */
977 __le16 u2_sel; /* U2 System Exit Latency */
978 __le16 u2_pel; /* U2 PEL */
979 } __attribute__ ((packed));
980
981 /*
982 * The Set System Exit Latency control transfer provides one byte each for
983 * U1 SEL and U1 PEL, so the max exit latency is 0xFF. U2 SEL and U2 PEL each
984 * are two bytes long.
985 */
986 #define USB3_LPM_MAX_U1_SEL_PEL 0xFF
987 #define USB3_LPM_MAX_U2_SEL_PEL 0xFFFF
988
989 /*-------------------------------------------------------------------------*/
990
991 /*
992 * As per USB compliance update, a device that is actively drawing
993 * more than 100mA from USB must report itself as bus-powered in
994 * the GetStatus(DEVICE) call.
995 * http://compliance.usb.org/index.asp?UpdateFile=Electrical&Format=Standard#34
996 */
997 #define USB_SELF_POWER_VBUS_MAX_DRAW 100
998
999 #endif /* _UAPI__LINUX_USB_CH9_H */ |
Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to determine whether there is an error in your driver.
The Error trace column contains a path on which the given rule is violated. You can expand/collapse some entity classes by clicking on the corresponding checkboxes in the main menu or in the advanced Others menu. You can also expand/collapse each particular entity by clicking on +/-. By hovering over some entities you can see tips. The error trace is also bound to the related source code. Line numbers may be shown as links on the left; you can click on them to open the corresponding lines in the source code.
The Source code column contains the content of the files related to the error trace. It includes the source code of your driver (note that there are some LDV modifications at the end), kernel headers and the rule model. Tabs show the currently opened file and the other available files. By hovering over a tab you can see the full file name; clicking on it shows the corresponding file content.
Kernel | Module | Rule | Verifier | Verdict | Status | Timestamp | Bug report |
linux-3.16-rc1.tar.xz | drivers/media/usb/usbtv/usbtv.ko | 132_1a | CPAchecker | Bug | Fixed | 2015-03-11 11:07:46 | L0155 |
[Home]