Error Trace

[Home]

Bug # 94

Show/hide error trace
Error trace
Function bodies
Blocks
  • Others...
    Function bodies without model function calls
    Initialization function calls
    Initialization function bodies
    Entry point
    Entry point body
    Function calls
    Skipped function calls
    Formal parameter names
    Declarations
    Assumes
    Assume conditions
    Returns
    Return values
    DEG initialization
    DEG function calls
    Model function calls
    Model function bodies
    Model asserts
    Model state changes
    Model function function calls
    Model function function bodies
    Model returns
    Model others
    Indentation
    Line numbers
    Expand signs
-__CPAchecker_initialize()
{
20 typedef unsigned char __u8;
23 typedef unsigned short __u16;
25 typedef int __s32;
26 typedef unsigned int __u32;
30 typedef unsigned long long __u64;
15 typedef signed char s8;
16 typedef unsigned char u8;
19 typedef unsigned short u16;
21 typedef int s32;
22 typedef unsigned int u32;
24 typedef long long s64;
25 typedef unsigned long long u64;
14 typedef long __kernel_long_t;
15 typedef unsigned long __kernel_ulong_t;
27 typedef int __kernel_pid_t;
48 typedef unsigned int __kernel_uid32_t;
49 typedef unsigned int __kernel_gid32_t;
71 typedef __kernel_ulong_t __kernel_size_t;
72 typedef __kernel_long_t __kernel_ssize_t;
87 typedef long long __kernel_loff_t;
88 typedef __kernel_long_t __kernel_time_t;
89 typedef __kernel_long_t __kernel_clock_t;
90 typedef int __kernel_timer_t;
91 typedef int __kernel_clockid_t;
32 typedef __u16 __le16;
34 typedef __u32 __le32;
229 struct kernel_symbol { unsigned long value; const char *name; } ;
33 struct module ;
12 typedef __u32 __kernel_dev_t;
15 typedef __kernel_dev_t dev_t;
18 typedef unsigned short umode_t;
21 typedef __kernel_pid_t pid_t;
26 typedef __kernel_clockid_t clockid_t;
29 typedef _Bool bool;
31 typedef __kernel_uid32_t uid_t;
32 typedef __kernel_gid32_t gid_t;
45 typedef __kernel_loff_t loff_t;
54 typedef __kernel_size_t size_t;
59 typedef __kernel_ssize_t ssize_t;
69 typedef __kernel_time_t time_t;
102 typedef __s32 int32_t;
108 typedef __u32 uint32_t;
133 typedef unsigned long sector_t;
134 typedef unsigned long blkcnt_t;
152 typedef u64 dma_addr_t;
157 typedef unsigned int gfp_t;
158 typedef unsigned int fmode_t;
159 typedef unsigned int oom_flags_t;
177 struct __anonstruct_atomic_t_6 { int counter; } ;
177 typedef struct __anonstruct_atomic_t_6 atomic_t;
182 struct __anonstruct_atomic64_t_7 { long counter; } ;
182 typedef struct __anonstruct_atomic64_t_7 atomic64_t;
183 struct list_head { struct list_head *next; struct list_head *prev; } ;
188 struct hlist_node ;
188 struct hlist_head { struct hlist_node *first; } ;
192 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ;
203 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ;
67 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ;
66 struct __anonstruct____missing_field_name_9 { unsigned int a; unsigned int b; } ;
66 struct __anonstruct____missing_field_name_10 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ;
66 union __anonunion____missing_field_name_8 { struct __anonstruct____missing_field_name_9 __annonCompField4; struct __anonstruct____missing_field_name_10 __annonCompField5; } ;
66 struct desc_struct { union __anonunion____missing_field_name_8 __annonCompField6; } ;
12 typedef unsigned long pteval_t;
13 typedef unsigned long pmdval_t;
15 typedef unsigned long pgdval_t;
16 typedef unsigned long pgprotval_t;
18 struct __anonstruct_pte_t_11 { pteval_t pte; } ;
18 typedef struct __anonstruct_pte_t_11 pte_t;
20 struct pgprot { pgprotval_t pgprot; } ;
218 typedef struct pgprot pgprot_t;
220 struct __anonstruct_pgd_t_12 { pgdval_t pgd; } ;
220 typedef struct __anonstruct_pgd_t_12 pgd_t;
259 struct __anonstruct_pmd_t_14 { pmdval_t pmd; } ;
259 typedef struct __anonstruct_pmd_t_14 pmd_t;
361 struct page ;
361 typedef struct page *pgtable_t;
372 struct file ;
385 struct seq_file ;
423 struct thread_struct ;
425 struct mm_struct ;
426 struct task_struct ;
427 struct cpumask ;
20 struct qspinlock { atomic_t val; } ;
33 typedef struct qspinlock arch_spinlock_t;
34 struct qrwlock { atomic_t cnts; arch_spinlock_t lock; } ;
14 typedef struct qrwlock arch_rwlock_t;
131 typedef void (*ctor_fn_t)();
234 struct _ddebug { const char *modname; const char *function; const char *filename; const char *format; unsigned int lineno; unsigned char flags; } ;
48 struct device ;
420 struct file_operations ;
432 struct completion ;
692 struct lockdep_map ;
19 struct math_emu_info { long ___orig_eip; struct pt_regs *regs; } ;
328 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ;
102 struct cpumask { unsigned long bits[128U]; } ;
15 typedef struct cpumask cpumask_t;
652 typedef struct cpumask *cpumask_var_t;
260 struct fregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ;
26 struct __anonstruct____missing_field_name_24 { u64 rip; u64 rdp; } ;
26 struct __anonstruct____missing_field_name_25 { u32 fip; u32 fcs; u32 foo; u32 fos; } ;
26 union __anonunion____missing_field_name_23 { struct __anonstruct____missing_field_name_24 __annonCompField10; struct __anonstruct____missing_field_name_25 __annonCompField11; } ;
26 union __anonunion____missing_field_name_26 { u32 padding1[12U]; u32 sw_reserved[12U]; } ;
26 struct fxregs_state { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion____missing_field_name_23 __annonCompField12; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion____missing_field_name_26 __annonCompField13; } ;
66 struct swregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ;
155 struct xstate_header { u64 xfeatures; u64 xcomp_bv; u64 reserved[6U]; } ;
161 struct xregs_state { struct fxregs_state i387; struct xstate_header header; u8 __reserved[464U]; } ;
179 union fpregs_state { struct fregs_state fsave; struct fxregs_state fxsave; struct swregs_state soft; struct xregs_state xsave; u8 __padding[4096U]; } ;
194 struct fpu { unsigned int last_cpu; unsigned char fpstate_active; unsigned char fpregs_active; unsigned char counter; union fpregs_state state; } ;
170 struct seq_operations ;
369 struct perf_event ;
370 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; unsigned long fs; unsigned long gs; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; struct fpu fpu; } ;
23 typedef atomic64_t atomic_long_t;
55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ;
28 struct lockdep_subclass_key { char __one_byte; } ;
53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ;
59 struct lock_class { struct list_head hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ;
144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ;
205 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; unsigned int pin_count; } ;
546 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
32 typedef struct raw_spinlock raw_spinlock_t;
33 struct __anonstruct____missing_field_name_34 { u8 __padding[24U]; struct lockdep_map dep_map; } ;
33 union __anonunion____missing_field_name_33 { struct raw_spinlock rlock; struct __anonstruct____missing_field_name_34 __annonCompField15; } ;
33 struct spinlock { union __anonunion____missing_field_name_33 __annonCompField16; } ;
76 typedef struct spinlock spinlock_t;
23 struct __anonstruct_rwlock_t_35 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
23 typedef struct __anonstruct_rwlock_t_35 rwlock_t;
13 struct optimistic_spin_queue { atomic_t tail; } ;
39 struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; void *magic; struct lockdep_map dep_map; } ;
67 struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; } ;
64 struct usb_device ;
135 struct timespec ;
136 struct compat_timespec ;
137 struct __anonstruct_futex_37 { u32 *uaddr; u32 val; u32 flags; u32 bitset; u64 time; u32 *uaddr2; } ;
137 struct __anonstruct_nanosleep_38 { clockid_t clockid; struct timespec *rmtp; struct compat_timespec *compat_rmtp; u64 expires; } ;
137 struct pollfd ;
137 struct __anonstruct_poll_39 { struct pollfd *ufds; int nfds; int has_timeout; unsigned long tv_sec; unsigned long tv_nsec; } ;
137 union __anonunion____missing_field_name_36 { struct __anonstruct_futex_37 futex; struct __anonstruct_nanosleep_38 nanosleep; struct __anonstruct_poll_39 poll; } ;
137 struct restart_block { long int (*fn)(struct restart_block *); union __anonunion____missing_field_name_36 __annonCompField17; } ;
416 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ;
52 typedef struct seqcount seqcount_t;
404 struct __anonstruct_seqlock_t_52 { struct seqcount seqcount; spinlock_t lock; } ;
404 typedef struct __anonstruct_seqlock_t_52 seqlock_t;
598 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ;
83 struct user_namespace ;
22 struct __anonstruct_kuid_t_53 { uid_t val; } ;
22 typedef struct __anonstruct_kuid_t_53 kuid_t;
27 struct __anonstruct_kgid_t_54 { gid_t val; } ;
27 typedef struct __anonstruct_kgid_t_54 kgid_t;
139 struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; } ;
36 struct vm_area_struct ;
38 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ;
43 typedef struct __wait_queue_head wait_queue_head_t;
95 struct __anonstruct_nodemask_t_55 { unsigned long bits[16U]; } ;
95 typedef struct __anonstruct_nodemask_t_55 nodemask_t;
810 struct rw_semaphore ;
811 struct rw_semaphore { long count; struct list_head wait_list; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct task_struct *owner; struct lockdep_map dep_map; } ;
172 struct completion { unsigned int done; wait_queue_head_t wait; } ;
446 union ktime { s64 tv64; } ;
41 typedef union ktime ktime_t;
1133 struct timer_list { struct hlist_node entry; unsigned long expires; void (*function)(unsigned long); unsigned long data; u32 flags; int slack; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ;
238 struct hrtimer ;
239 enum hrtimer_restart ;
240 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ;
41 struct rb_root { struct rb_node *rb_node; } ;
838 struct nsproxy ;
259 struct workqueue_struct ;
260 struct work_struct ;
54 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ;
107 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ;
58 struct pm_message { int event; } ;
64 typedef struct pm_message pm_message_t;
65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ;
320 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ;
327 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ;
335 struct wakeup_source ;
336 struct wake_irq ;
337 struct pm_domain_data ;
338 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; struct pm_domain_data *domain_data; } ;
556 struct dev_pm_qos ;
556 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool ignore_children; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ;
615 struct dev_pm_domain { struct dev_pm_ops ops; void (*detach)(struct device *, bool ); int (*activate)(struct device *); void (*sync)(struct device *); void (*dismiss)(struct device *); } ;
25 struct ldt_struct ;
25 struct __anonstruct_mm_context_t_124 { struct ldt_struct *ldt; unsigned short ia32_compat; struct mutex lock; void *vdso; atomic_t perf_rdpmc_allowed; } ;
25 typedef struct __anonstruct_mm_context_t_124 mm_context_t;
1296 struct llist_node ;
64 struct llist_node { struct llist_node *next; } ;
37 struct cred ;
19 struct inode ;
58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ;
66 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ;
73 struct __anonstruct____missing_field_name_160 { struct arch_uprobe_task autask; unsigned long vaddr; } ;
73 struct __anonstruct____missing_field_name_161 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ;
73 union __anonunion____missing_field_name_159 { struct __anonstruct____missing_field_name_160 __annonCompField32; struct __anonstruct____missing_field_name_161 __annonCompField33; } ;
73 struct uprobe ;
73 struct return_instance ;
73 struct uprobe_task { enum uprobe_task_state state; union __anonunion____missing_field_name_159 __annonCompField34; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ;
94 struct return_instance { struct uprobe *uprobe; unsigned long func; unsigned long stack; unsigned long orig_ret_vaddr; bool chained; struct return_instance *next; } ;
110 struct xol_area ;
111 struct uprobes_state { struct xol_area *xol_area; } ;
150 struct address_space ;
151 struct mem_cgroup ;
31 typedef void compound_page_dtor(struct page *);
32 union __anonunion____missing_field_name_162 { struct address_space *mapping; void *s_mem; } ;
32 union __anonunion____missing_field_name_164 { unsigned long index; void *freelist; } ;
32 struct __anonstruct____missing_field_name_168 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ;
32 union __anonunion____missing_field_name_167 { atomic_t _mapcount; struct __anonstruct____missing_field_name_168 __annonCompField37; int units; } ;
32 struct __anonstruct____missing_field_name_166 { union __anonunion____missing_field_name_167 __annonCompField38; atomic_t _count; } ;
32 union __anonunion____missing_field_name_165 { unsigned long counters; struct __anonstruct____missing_field_name_166 __annonCompField39; unsigned int active; } ;
32 struct __anonstruct____missing_field_name_163 { union __anonunion____missing_field_name_164 __annonCompField36; union __anonunion____missing_field_name_165 __annonCompField40; } ;
32 struct __anonstruct____missing_field_name_170 { struct page *next; int pages; int pobjects; } ;
32 struct slab ;
32 struct __anonstruct____missing_field_name_171 { compound_page_dtor *compound_dtor; unsigned long compound_order; } ;
32 union __anonunion____missing_field_name_169 { struct list_head lru; struct __anonstruct____missing_field_name_170 __annonCompField42; struct slab *slab_page; struct callback_head callback_head; struct __anonstruct____missing_field_name_171 __annonCompField43; pgtable_t pmd_huge_pte; } ;
32 struct kmem_cache ;
32 union __anonunion____missing_field_name_172 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; struct page *first_page; } ;
32 struct page { unsigned long flags; union __anonunion____missing_field_name_162 __annonCompField35; struct __anonstruct____missing_field_name_163 __annonCompField41; union __anonunion____missing_field_name_169 __annonCompField44; union __anonunion____missing_field_name_172 __annonCompField45; struct mem_cgroup *mem_cgroup; } ;
172 struct page_frag { struct page *page; __u32 offset; __u32 size; } ;
257 struct userfaultfd_ctx ;
257 struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; } ;
264 struct __anonstruct_shared_173 { struct rb_node rb; unsigned long rb_subtree_last; } ;
264 struct anon_vma ;
264 struct vm_operations_struct ;
264 struct mempolicy ;
264 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; struct __anonstruct_shared_173 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ;
337 struct core_thread { struct task_struct *task; struct core_thread *next; } ;
342 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ;
355 struct task_rss_stat { int events; int count[3U]; } ;
363 struct mm_rss_stat { atomic_long_t count[3U]; } ;
368 struct kioctx_table ;
369 struct linux_binfmt ;
369 struct mmu_notifier_mm ;
369 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; atomic_long_t nr_pmds; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long shared_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; void *bd_addr; } ;
15 typedef __u64 Elf64_Addr;
16 typedef __u16 Elf64_Half;
20 typedef __u32 Elf64_Word;
21 typedef __u64 Elf64_Xword;
190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ;
198 typedef struct elf64_sym Elf64_Sym;
53 union __anonunion____missing_field_name_178 { unsigned long bitmap[4U]; struct callback_head callback_head; } ;
53 struct idr_layer { int prefix; int layer; struct idr_layer *ary[256U]; int count; union __anonunion____missing_field_name_178 __annonCompField46; } ;
41 struct idr { struct idr_layer *hint; struct idr_layer *top; int layers; int cur; spinlock_t lock; int id_free_cnt; struct idr_layer *id_free; } ;
124 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ;
153 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ;
185 struct dentry ;
186 struct iattr ;
187 struct super_block ;
188 struct file_system_type ;
189 struct kernfs_open_node ;
190 struct kernfs_iattrs ;
213 struct kernfs_root ;
213 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ;
85 struct kernfs_node ;
85 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ;
89 struct kernfs_ops ;
89 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; struct kernfs_node *notify_next; } ;
96 union __anonunion____missing_field_name_183 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ;
96 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion____missing_field_name_183 __annonCompField47; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ;
138 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); } ;
155 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ;
171 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; void *priv; struct mutex mutex; int event; struct list_head list; char *prealloc_buf; size_t atomic_write_len; bool mmapped; const struct vm_operations_struct *vm_ops; } ;
188 struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; bool prealloc; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ;
481 struct sock ;
482 struct kobject ;
483 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ;
489 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ;
59 struct bin_attribute ;
60 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ;
37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ;
82 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ;
155 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ;
509 struct kref { atomic_t refcount; } ;
52 struct kset ;
52 struct kobj_type ;
52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ;
115 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ;
123 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ;
131 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ;
148 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ;
223 struct kernel_param ;
228 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ;
62 struct kparam_string ;
62 struct kparam_array ;
62 union __anonunion____missing_field_name_184 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ;
62 struct kernel_param { const char *name; struct module *mod; const struct kernel_param_ops *ops; const u16 perm; s8 level; u8 flags; union __anonunion____missing_field_name_184 __annonCompField48; } ;
83 struct kparam_string { unsigned int maxlen; char *string; } ;
89 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ;
469 struct latch_tree_node { struct rb_node node[2U]; } ;
211 struct mod_arch_specific { } ;
38 struct module_param_attrs ;
38 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ;
48 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ;
74 struct exception_table_entry ;
290 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ;
297 struct mod_tree_node { struct module *mod; struct latch_tree_node node; } ;
304 struct module_sect_attrs ;
304 struct module_notes_attrs ;
304 struct tracepoint ;
304 struct trace_event_call ;
304 struct trace_enum_map ;
304 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct mutex param_lock; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; bool async_probe_requested; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); void *module_init; void *module_core; unsigned int init_size; unsigned int core_size; unsigned int init_text_size; unsigned int core_text_size; struct mod_tree_node mtn_core; struct mod_tree_node mtn_init; unsigned int init_ro_size; unsigned int core_ro_size; struct mod_arch_specific arch; unsigned int taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; Elf64_Sym *symtab; Elf64_Sym *core_symtab; unsigned int num_symtab; unsigned int core_num_syms; char *strtab; char *core_strtab; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct trace_event_call **trace_events; unsigned int num_trace_events; struct trace_enum_map **trace_enums; unsigned int num_trace_enums; bool klp_alive; struct list_head source_list; struct list_head target_list; void (*exit)(); atomic_t refcnt; ctor_fn_t 
(**ctors)(); unsigned int num_ctors; } ;
22 struct kernel_cap_struct { __u32 cap[2U]; } ;
25 typedef struct kernel_cap_struct kernel_cap_t;
84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ;
4 typedef unsigned long cputime_t;
25 struct sem_undo_list ;
25 struct sysv_sem { struct sem_undo_list *undo_list; } ;
78 struct user_struct ;
26 struct sysv_shm { struct list_head shm_clist; } ;
24 struct __anonstruct_sigset_t_192 { unsigned long sig[1U]; } ;
24 typedef struct __anonstruct_sigset_t_192 sigset_t;
25 struct siginfo ;
17 typedef void __signalfn_t(int);
18 typedef __signalfn_t *__sighandler_t;
20 typedef void __restorefn_t();
21 typedef __restorefn_t *__sigrestore_t;
34 union sigval { int sival_int; void *sival_ptr; } ;
10 typedef union sigval sigval_t;
11 struct __anonstruct__kill_194 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ;
11 struct __anonstruct__timer_195 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ;
11 struct __anonstruct__rt_196 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ;
11 struct __anonstruct__sigchld_197 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ;
11 struct __anonstruct__addr_bnd_199 { void *_lower; void *_upper; } ;
11 struct __anonstruct__sigfault_198 { void *_addr; short _addr_lsb; struct __anonstruct__addr_bnd_199 _addr_bnd; } ;
11 struct __anonstruct__sigpoll_200 { long _band; int _fd; } ;
11 struct __anonstruct__sigsys_201 { void *_call_addr; int _syscall; unsigned int _arch; } ;
11 union __anonunion__sifields_193 { int _pad[28U]; struct __anonstruct__kill_194 _kill; struct __anonstruct__timer_195 _timer; struct __anonstruct__rt_196 _rt; struct __anonstruct__sigchld_197 _sigchld; struct __anonstruct__sigfault_198 _sigfault; struct __anonstruct__sigpoll_200 _sigpoll; struct __anonstruct__sigsys_201 _sigsys; } ;
11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_193 _sifields; } ;
113 typedef struct siginfo siginfo_t;
22 struct sigpending { struct list_head list; sigset_t signal; } ;
243 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; } ;
257 struct k_sigaction { struct sigaction sa; } ;
443 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ;
450 struct pid_namespace ;
450 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ;
56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ;
68 struct pid_link { struct hlist_node node; struct pid *pid; } ;
53 struct seccomp_filter ;
54 struct seccomp { int mode; struct seccomp_filter *filter; } ;
40 struct rt_mutex_waiter ;
41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ;
11 struct timerqueue_node { struct rb_node node; ktime_t expires; } ;
12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ;
50 struct hrtimer_clock_base ;
51 struct hrtimer_cpu_base ;
60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ;
65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; unsigned long state; int start_pid; void *start_site; char start_comm[16U]; } ;
123 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t (*get_time)(); ktime_t offset; } ;
156 struct hrtimer_cpu_base { raw_spinlock_t lock; seqcount_t seq; struct hrtimer *running; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; bool migration_enabled; bool nohz_active; unsigned char in_hrtirq; unsigned char hres_active; unsigned char hang_detected; ktime_t expires_next; struct hrtimer *next_timer; unsigned int nr_events; unsigned int nr_retries; unsigned int nr_hangs; unsigned int max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ;
466 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ;
45 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ;
39 struct assoc_array_ptr ;
39 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ;
31 typedef int32_t key_serial_t;
34 typedef uint32_t key_perm_t;
35 struct key ;
36 struct signal_struct ;
37 struct key_type ;
41 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ;
123 union __anonunion____missing_field_name_220 { struct list_head graveyard_link; struct rb_node serial_node; } ;
123 struct key_user ;
123 union __anonunion____missing_field_name_221 { time_t expiry; time_t revoked_at; } ;
123 struct __anonstruct____missing_field_name_223 { struct key_type *type; char *description; } ;
123 union __anonunion____missing_field_name_222 { struct keyring_index_key index_key; struct __anonstruct____missing_field_name_223 __annonCompField51; } ;
123 union __anonunion_type_data_224 { struct list_head link; unsigned long x[2U]; void *p[2U]; int reject_error; } ;
123 union __anonunion_payload_226 { unsigned long value; void *rcudata; void *data; void *data2[2U]; } ;
123 union __anonunion____missing_field_name_225 { union __anonunion_payload_226 payload; struct assoc_array keys; } ;
123 struct key { atomic_t usage; key_serial_t serial; union __anonunion____missing_field_name_220 __annonCompField49; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion____missing_field_name_221 __annonCompField50; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion____missing_field_name_222 __annonCompField52; union __anonunion_type_data_224 type_data; union __anonunion____missing_field_name_225 __annonCompField53; } ;
358 struct audit_context ;
27 struct group_info { atomic_t usage; int ngroups; int nblocks; kgid_t small_block[32U]; kgid_t *blocks[0U]; } ;
90 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; kernel_cap_t cap_ambient; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ;
377 struct percpu_ref ;
55 typedef void percpu_ref_func_t(struct percpu_ref *);
68 struct percpu_ref { atomic_long_t count; unsigned long percpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic; struct callback_head rcu; } ;
327 struct percpu_rw_semaphore { unsigned int *fast_read_ctr; atomic_t write_ctr; struct rw_semaphore rw_sem; atomic_t slow_read_ctr; wait_queue_head_t write_waitq; } ;
53 struct cgroup ;
54 struct cgroup_root ;
55 struct cgroup_subsys ;
56 struct cgroup_taskset ;
103 struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; struct percpu_ref refcnt; struct cgroup_subsys_state *parent; struct list_head sibling; struct list_head children; int id; unsigned int flags; u64 serial_nr; struct callback_head callback_head; struct work_struct destroy_work; } ;
129 struct css_set { atomic_t refcount; struct hlist_node hlist; struct list_head tasks; struct list_head mg_tasks; struct list_head cgrp_links; struct cgroup *dfl_cgrp; struct cgroup_subsys_state *subsys[13U]; struct list_head mg_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct css_set *mg_dst_cset; struct list_head e_cset_node[13U]; struct callback_head callback_head; } ;
202 struct cgroup { struct cgroup_subsys_state self; unsigned long flags; int id; int populated_cnt; struct kernfs_node *kn; struct kernfs_node *procs_kn; struct kernfs_node *populated_kn; unsigned int subtree_control; unsigned int child_subsys_mask; struct cgroup_subsys_state *subsys[13U]; struct cgroup_root *root; struct list_head cset_links; struct list_head e_csets[13U]; struct list_head pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; struct work_struct release_agent_work; } ;
275 struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int hierarchy_id; struct cgroup cgrp; atomic_t nr_cgrps; struct list_head root_list; unsigned int flags; struct idr cgroup_idr; char release_agent_path[4096U]; char name[64U]; } ;
311 struct cftype { char name[64U]; unsigned long private; umode_t mode; size_t max_write_len; unsigned int flags; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 ); int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 ); ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); struct lock_class_key lockdep_key; } ;
393 struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int (*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_released)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); void (*css_reset)(struct cgroup_subsys_state *); void (*css_e_css_changed)(struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_subsys_state *, struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_subsys_state *, struct cgroup_taskset *); void (*attach)(struct cgroup_subsys_state *, struct cgroup_taskset *); int (*can_fork)(struct task_struct *, void **); void (*cancel_fork)(struct task_struct *, void *); void (*fork)(struct task_struct *, void *); void (*exit)(struct cgroup_subsys_state *, struct cgroup_subsys_state *, struct task_struct *); void (*bind)(struct cgroup_subsys_state *); int disabled; int early_init; bool broken_hierarchy; bool warned_broken_hierarchy; int id; const char *name; const char *legacy_name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *dfl_cftypes; struct cftype *legacy_cftypes; unsigned int depends_on; } ;
128 struct futex_pi_state ;
129 struct robust_list_head ;
130 struct bio_list ;
131 struct fs_struct ;
132 struct perf_event_context ;
133 struct blk_plug ;
135 struct nameidata ;
188 struct cfs_rq ;
189 struct task_group ;
477 struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ;
516 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ;
524 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ;
531 struct prev_cputime { cputime_t utime; cputime_t stime; raw_spinlock_t lock; } ;
556 struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; } ;
572 struct task_cputime_atomic { atomic64_t utime; atomic64_t stime; atomic64_t sum_exec_runtime; } ;
594 struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; int running; } ;
630 struct autogroup ;
631 struct tty_struct ;
631 struct taskstats ;
631 struct tty_audit_buf ;
631 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; seqlock_t stats_lock; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; unsigned int audit_tty_log_passwd; struct tty_audit_buf *tty_audit_buf; oom_flags_t oom_flags; short oom_score_adj; short oom_score_adj_min; struct mutex cred_guard_mutex; } ;
798 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ;
841 struct backing_dev_info ;
842 struct reclaim_state ;
843 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ;
857 struct task_delay_info { spinlock_t lock; unsigned int flags; u64 blkio_start; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; u64 freepages_start; u64 freepages_delay; u32 freepages_count; } ;
905 struct wake_q_node { struct wake_q_node *next; } ;
1134 struct io_context ;
1168 struct pipe_inode_info ;
1170 struct load_weight { unsigned long weight; u32 inv_weight; } ;
1177 struct sched_avg { u64 last_update_time; u64 load_sum; u32 util_sum; u32 period_contrib; unsigned long load_avg; unsigned long util_avg; } ;
1197 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ;
1232 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ;
1264 struct rt_rq ;
1264 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ;
1280 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_new; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ;
1346 struct tlbflush_unmap_batch { struct cpumask cpumask; bool flush_required; bool writable; } ;
1365 struct memcg_oom_info { struct mem_cgroup *memcg; gfp_t gfp_mask; int order; unsigned char may_oom; } ;
1791 struct sched_class ;
1791 struct files_struct ;
1791 struct compat_robust_list_head ;
1791 struct numa_group ;
1791 struct task_struct { volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; unsigned int wakee_flips; unsigned long wakee_flip_decay_ts; struct task_struct *last_wakee; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; unsigned long rcu_tasks_nvcsw; bool rcu_tasks_holdout; struct list_head rcu_tasks_holdout_list; int rcu_tasks_idle_cpu; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; u32 vmacache_seqnum; struct vm_area_struct *vmacache[4U]; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned long jobctl; unsigned int personality; unsigned char in_execve; unsigned char in_iowait; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; unsigned char sched_migrated; unsigned char memcg_kmem_skip_account; unsigned char brk_randomized; unsigned long atomic_flags; struct restart_block restart_block; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t utimescaled; cputime_t stimescaled; cputime_t gtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; u64 start_time; u64 real_start_time; unsigned 
long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *real_cred; const struct cred *cred; char comm[16U]; struct nameidata *nameidata; struct sysv_sem sysvsem; struct sysv_shm sysvshm; unsigned long last_switch_count; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; int (*notifier)(void *); void *notifier_data; sigset_t *notifier_mask; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct wake_q_node wake_q; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t acct_timexpd; nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set 
*cgroups; struct list_head cg_list; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults; unsigned long total_numa_faults; unsigned long numa_faults_locality[3U]; unsigned long numa_pages_migrated; struct tlbflush_unmap_batch tlb_ubc; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; unsigned long timer_slack_ns; unsigned long default_timer_slack_ns; unsigned int kasan_depth; unsigned long trace; unsigned long trace_recursion; struct memcg_oom_info memcg_oom; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; unsigned long task_state_change; int pagefault_disabled; struct thread_struct thread; } ;
62 struct exception_table_entry { int insn; int fixup; } ;
13 typedef unsigned long kernel_ulong_t;
39 struct usb_device_id { __u16 match_flags; __u16 idVendor; __u16 idProduct; __u16 bcdDevice_lo; __u16 bcdDevice_hi; __u8 bDeviceClass; __u8 bDeviceSubClass; __u8 bDeviceProtocol; __u8 bInterfaceClass; __u8 bInterfaceSubClass; __u8 bInterfaceProtocol; __u8 bInterfaceNumber; kernel_ulong_t driver_info; } ;
186 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; __u32 cls; __u32 cls_msk; } ;
221 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ;
253 struct usb_device_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 bcdUSB; __u8 bDeviceClass; __u8 bDeviceSubClass; __u8 bDeviceProtocol; __u8 bMaxPacketSize0; __le16 idVendor; __le16 idProduct; __le16 bcdDevice; __u8 iManufacturer; __u8 iProduct; __u8 iSerialNumber; __u8 bNumConfigurations; } ;
275 struct usb_config_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 wTotalLength; __u8 bNumInterfaces; __u8 bConfigurationValue; __u8 iConfiguration; __u8 bmAttributes; __u8 bMaxPower; } ;
343 struct usb_interface_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bInterfaceNumber; __u8 bAlternateSetting; __u8 bNumEndpoints; __u8 bInterfaceClass; __u8 bInterfaceSubClass; __u8 bInterfaceProtocol; __u8 iInterface; } ;
363 struct usb_endpoint_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bEndpointAddress; __u8 bmAttributes; __le16 wMaxPacketSize; __u8 bInterval; __u8 bRefresh; __u8 bSynchAddress; } ;
613 struct usb_ss_ep_comp_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bMaxBurst; __u8 bmAttributes; __le16 wBytesPerInterval; } ;
704 struct usb_interface_assoc_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bFirstInterface; __u8 bInterfaceCount; __u8 bFunctionClass; __u8 bFunctionSubClass; __u8 bFunctionProtocol; __u8 iFunction; } ;
763 struct usb_bos_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 wTotalLength; __u8 bNumDeviceCaps; } ;
813 struct usb_ext_cap_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __le32 bmAttributes; } ;
823 struct usb_ss_cap_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __u8 bmAttributes; __le16 wSpeedSupported; __u8 bFunctionalitySupport; __u8 bU1devExitLat; __le16 bU2DevExitLat; } ;
852 struct usb_ss_container_id_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __u8 bReserved; __u8 ContainerID[16U]; } ;
917 enum usb_device_speed { USB_SPEED_UNKNOWN = 0, USB_SPEED_LOW = 1, USB_SPEED_FULL = 2, USB_SPEED_HIGH = 3, USB_SPEED_WIRELESS = 4, USB_SPEED_SUPER = 5 } ;
926 enum usb_device_state { USB_STATE_NOTATTACHED = 0, USB_STATE_ATTACHED = 1, USB_STATE_POWERED = 2, USB_STATE_RECONNECTING = 3, USB_STATE_UNAUTHENTICATED = 4, USB_STATE_DEFAULT = 5, USB_STATE_ADDRESS = 6, USB_STATE_CONFIGURED = 7, USB_STATE_SUSPENDED = 8 } ;
63 struct irq_domain ;
672 struct klist_node ;
37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ;
68 struct path ;
69 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; struct user_namespace *user_ns; void *private; } ;
35 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ;
227 struct pinctrl ;
228 struct pinctrl_state ;
194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ;
48 struct dma_map_ops ;
48 struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; } ;
14 struct device_private ;
15 struct device_driver ;
16 struct driver_private ;
17 struct class ;
18 struct subsys_private ;
19 struct bus_type ;
20 struct device_node ;
21 struct fwnode_handle ;
22 struct iommu_ops ;
23 struct iommu_group ;
61 struct device_attribute ;
61 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ;
139 struct device_type ;
197 enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ;
203 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; enum probe_type probe_type; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ;
353 struct class_attribute ;
353 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ;
446 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ;
514 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ;
542 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ;
675 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ;
684 struct dma_coherent_mem ;
684 struct cma ;
684 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct irq_domain *msi_domain; struct dev_pin_info *pins; struct list_head msi_list; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct fwnode_handle *fwnode; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; bool offline_disabled; bool offline; } ;
838 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ;
93 struct hlist_bl_node ;
93 struct hlist_bl_head { struct hlist_bl_node *first; } ;
36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ;
114 struct __anonstruct____missing_field_name_257 { spinlock_t lock; int count; } ;
114 union __anonunion____missing_field_name_256 { struct __anonstruct____missing_field_name_257 __annonCompField64; } ;
114 struct lockref { union __anonunion____missing_field_name_256 __annonCompField65; } ;
50 struct vfsmount ;
51 struct __anonstruct____missing_field_name_259 { u32 hash; u32 len; } ;
51 union __anonunion____missing_field_name_258 { struct __anonstruct____missing_field_name_259 __annonCompField66; u64 hash_len; } ;
51 struct qstr { union __anonunion____missing_field_name_258 __annonCompField67; const unsigned char *name; } ;
90 struct dentry_operations ;
90 union __anonunion_d_u_260 { struct hlist_node d_alias; struct callback_head d_rcu; } ;
90 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; struct list_head d_lru; struct list_head d_child; struct list_head d_subdirs; union __anonunion_d_u_260 d_u; } ;
142 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(struct dentry *, bool ); struct inode * (*d_select_inode)(struct dentry *, unsigned int); } ;
586 struct path { struct vfsmount *mnt; struct dentry *dentry; } ;
19 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; int nid; struct mem_cgroup *memcg; } ;
27 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ;
80 struct list_lru_one { struct list_head list; long nr_items; } ;
32 struct list_lru_memcg { struct list_lru_one *lru[0U]; } ;
37 struct list_lru_node { spinlock_t lock; struct list_lru_one lru; struct list_lru_memcg *memcg_lrus; } ;
47 struct list_lru { struct list_lru_node *node; struct list_head list; } ;
58 struct __anonstruct____missing_field_name_264 { struct radix_tree_node *parent; void *private_data; } ;
58 union __anonunion____missing_field_name_263 { struct __anonstruct____missing_field_name_264 __annonCompField68; struct callback_head callback_head; } ;
58 struct radix_tree_node { unsigned int path; unsigned int count; union __anonunion____missing_field_name_263 __annonCompField69; struct list_head private_list; void *slots[64U]; unsigned long tags[3U][1U]; } ;
105 struct radix_tree_root { unsigned int height; gfp_t gfp_mask; struct radix_tree_node *rnode; } ;
45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ;
38 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ;
47 struct block_device ;
60 struct bdi_writeback ;
61 struct export_operations ;
64 struct kiocb ;
65 struct poll_table_struct ;
66 struct kstatfs ;
67 struct swap_info_struct ;
68 struct iov_iter ;
75 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ;
212 struct dquot ;
19 typedef __kernel_uid32_t projid_t;
23 struct __anonstruct_kprojid_t_270 { projid_t val; } ;
23 typedef struct __anonstruct_kprojid_t_270 kprojid_t;
166 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ;
66 typedef long long qsize_t;
67 union __anonunion____missing_field_name_271 { kuid_t uid; kgid_t gid; kprojid_t projid; } ;
67 struct kqid { union __anonunion____missing_field_name_271 __annonCompField71; enum quota_type type; } ;
184 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time_t dqb_btime; time_t dqb_itime; } ;
206 struct quota_format_type ;
207 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_max_spc_limit; qsize_t dqi_max_ino_limit; void *dqi_priv; } ;
272 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ;
299 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); } ;
310 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); int (*get_projid)(struct inode *, kprojid_t *); } ;
325 struct qc_dqblk { int d_fieldmask; u64 d_spc_hardlimit; u64 d_spc_softlimit; u64 d_ino_hardlimit; u64 d_ino_softlimit; u64 d_space; u64 d_ino_count; s64 d_ino_timer; s64 d_spc_timer; int d_ino_warns; int d_spc_warns; u64 d_rt_spc_hardlimit; u64 d_rt_spc_softlimit; u64 d_rt_space; s64 d_rt_spc_timer; int d_rt_spc_warns; } ;
348 struct qc_type_state { unsigned int flags; unsigned int spc_timelimit; unsigned int ino_timelimit; unsigned int rt_spc_timelimit; unsigned int spc_warnlimit; unsigned int ino_warnlimit; unsigned int rt_spc_warnlimit; unsigned long long ino; blkcnt_t blocks; blkcnt_t nextents; } ;
394 struct qc_state { unsigned int s_incoredqs; struct qc_type_state s_state[3U]; } ;
405 struct qc_info { int i_fieldmask; unsigned int i_flags; unsigned int i_spc_timelimit; unsigned int i_ino_timelimit; unsigned int i_rt_spc_timelimit; unsigned int i_spc_warnlimit; unsigned int i_ino_warnlimit; unsigned int i_rt_spc_warnlimit; } ;
418 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, struct path *); int (*quota_off)(struct super_block *, int); int (*quota_enable)(struct super_block *, unsigned int); int (*quota_disable)(struct super_block *, unsigned int); int (*quota_sync)(struct super_block *, int); int (*set_info)(struct super_block *, int, struct qc_info *); int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); } ;
432 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ;
496 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct mutex dqonoff_mutex; struct inode *files[3U]; struct mem_dqinfo info[3U]; const struct quota_format_ops *ops[3U]; } ;
526 struct writeback_control ;
527 struct kiocb { struct file *ki_filp; loff_t ki_pos; void (*ki_complete)(struct kiocb *, long, long); void *private; int ki_flags; } ;
366 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *, loff_t ); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ;
423 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; atomic_t i_mmap_writable; struct rb_root i_mmap; struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; unsigned long nrshadows; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; spinlock_t private_lock; struct list_head private_list; void *private_data; } ;
443 struct request_queue ;
444 struct hd_struct ;
444 struct gendisk ;
444 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; struct list_head bd_inodes; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct list_head bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; } ;
560 struct posix_acl ;
561 struct inode_operations ;
561 union __anonunion____missing_field_name_274 { const unsigned int i_nlink; unsigned int __i_nlink; } ;
561 union __anonunion____missing_field_name_275 { struct hlist_head i_dentry; struct callback_head i_rcu; } ;
561 struct file_lock_context ;
561 struct cdev ;
561 union __anonunion____missing_field_name_276 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; } ;
561 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion____missing_field_name_274 __annonCompField72; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct mutex i_mutex; unsigned long dirtied_when; unsigned long dirtied_time_when; struct hlist_node i_hash; struct list_head i_io_list; struct bdi_writeback *i_wb; int i_wb_frn_winner; u16 i_wb_frn_avg_time; u16 i_wb_frn_history; struct list_head i_lru; struct list_head i_sb_list; union __anonunion____missing_field_name_275 __annonCompField73; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; const struct file_operations *i_fop; struct file_lock_context *i_flctx; struct address_space i_data; struct list_head i_devices; union __anonunion____missing_field_name_276 __annonCompField74; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; void *i_private; } ;
807 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ;
815 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ;
838 union __anonunion_f_u_277 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ;
838 struct file { union __anonunion_f_u_277 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; } ;
923 typedef void *fl_owner_t;
924 struct file_lock ;
925 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ;
931 struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); fl_owner_t (*lm_get_owner)(fl_owner_t ); void (*lm_put_owner)(fl_owner_t ); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, int); bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); } ;
958 struct nlm_lockowner ;
959 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ;
14 struct nfs4_lock_state ;
15 struct nfs4_lock_info { struct nfs4_lock_state *owner; } ;
19 struct fasync_struct ;
19 struct __anonstruct_afs_279 { struct list_head link; int state; } ;
19 union __anonunion_fl_u_278 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_279 afs; } ;
19 struct file_lock { struct file_lock *fl_next; struct list_head fl_list; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_278 fl_u; } ;
1011 struct file_lock_context { spinlock_t flc_lock; struct list_head flc_flock; struct list_head flc_posix; struct list_head flc_lease; } ;
1227 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ;
1262 struct sb_writers { int frozen; wait_queue_head_t wait_unfrozen; struct percpu_rw_semaphore rw_sem[3U]; } ;
1288 struct super_operations ;
1288 struct xattr_handler ;
1288 struct mtd_info ;
1288 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_iflags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; unsigned int s_quota_types; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; struct work_struct destroy_work; struct mutex s_sync_lock; int s_stack_depth; spinlock_t s_inode_list_lock; struct list_head s_inodes; } ;
1537 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ;
1551 struct dir_context ;
1576 struct dir_context { int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ;
1583 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iterate)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*aio_fsync)(struct kiocb *, int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **, void **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); void (*show_fdinfo)(struct seq_file *, struct file *); } ;
1643 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); const char * (*follow_link)(struct dentry *, void **); int (*permission)(struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); void (*put_link)(struct inode *, void *); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *); int (*rename2)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *); int (*setxattr)(struct dentry *, const char *, const void *, size_t , int); ssize_t (*getxattr)(struct dentry *, const char *, void *, size_t ); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*removexattr)(struct dentry *, const char *); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ;
1697 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_super)(struct super_block *); int (*freeze_fs)(struct super_block *); int (*thaw_super)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); struct dquot ** (*get_dquots)(struct inode *); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ;
1936 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ;
276 struct usb_driver ;
277 struct wusb_dev ;
278 struct ep_device ;
279 struct usb_host_endpoint { struct usb_endpoint_descriptor desc; struct usb_ss_ep_comp_descriptor ss_ep_comp; struct list_head urb_list; void *hcpriv; struct ep_device *ep_dev; unsigned char *extra; int extralen; int enabled; int streams; } ;
77 struct usb_host_interface { struct usb_interface_descriptor desc; int extralen; unsigned char *extra; struct usb_host_endpoint *endpoint; char *string; } ;
92 enum usb_interface_condition { USB_INTERFACE_UNBOUND = 0, USB_INTERFACE_BINDING = 1, USB_INTERFACE_BOUND = 2, USB_INTERFACE_UNBINDING = 3 } ;
99 struct usb_interface { struct usb_host_interface *altsetting; struct usb_host_interface *cur_altsetting; unsigned int num_altsetting; struct usb_interface_assoc_descriptor *intf_assoc; int minor; enum usb_interface_condition condition; unsigned char sysfs_files_created; unsigned char ep_devs_created; unsigned char unregistering; unsigned char needs_remote_wakeup; unsigned char needs_altsetting0; unsigned char needs_binding; unsigned char resetting_device; struct device dev; struct device *usb_dev; atomic_t pm_usage_cnt; struct work_struct reset_ws; } ;
201 struct usb_interface_cache { unsigned int num_altsetting; struct kref ref; struct usb_host_interface altsetting[0U]; } ;
256 struct usb_host_config { struct usb_config_descriptor desc; char *string; struct usb_interface_assoc_descriptor *intf_assoc[16U]; struct usb_interface *interface[32U]; struct usb_interface_cache *intf_cache[32U]; unsigned char *extra; int extralen; } ;
320 struct usb_host_bos { struct usb_bos_descriptor *desc; struct usb_ext_cap_descriptor *ext_cap; struct usb_ss_cap_descriptor *ss_cap; struct usb_ss_container_id_descriptor *ss_id; } ;
332 struct usb_devmap { unsigned long devicemap[2U]; } ;
344 struct mon_bus ;
344 struct usb_bus { struct device *controller; int busnum; const char *bus_name; u8 uses_dma; u8 uses_pio_for_control; u8 otg_port; unsigned char is_b_host; unsigned char b_hnp_enable; unsigned char no_stop_on_short; unsigned char no_sg_constraint; unsigned int sg_tablesize; int devnum_next; struct usb_devmap devmap; struct usb_device *root_hub; struct usb_bus *hs_companion; struct list_head bus_list; struct mutex usb_address0_mutex; int bandwidth_allocated; int bandwidth_int_reqs; int bandwidth_isoc_reqs; unsigned int resuming_ports; struct mon_bus *mon_bus; int monitored; } ;
395 struct usb_tt ;
396 enum usb_device_removable { USB_DEVICE_REMOVABLE_UNKNOWN = 0, USB_DEVICE_REMOVABLE = 1, USB_DEVICE_FIXED = 2 } ;
409 struct usb2_lpm_parameters { unsigned int besl; int timeout; } ;
430 struct usb3_lpm_parameters { unsigned int mel; unsigned int pel; unsigned int sel; int timeout; } ;
469 struct usb_device { int devnum; char devpath[16U]; u32 route; enum usb_device_state state; enum usb_device_speed speed; struct usb_tt *tt; int ttport; unsigned int toggle[2U]; struct usb_device *parent; struct usb_bus *bus; struct usb_host_endpoint ep0; struct device dev; struct usb_device_descriptor descriptor; struct usb_host_bos *bos; struct usb_host_config *config; struct usb_host_config *actconfig; struct usb_host_endpoint *ep_in[16U]; struct usb_host_endpoint *ep_out[16U]; char **rawdescriptors; unsigned short bus_mA; u8 portnum; u8 level; unsigned char can_submit; unsigned char persist_enabled; unsigned char have_langid; unsigned char authorized; unsigned char authenticated; unsigned char wusb; unsigned char lpm_capable; unsigned char usb2_hw_lpm_capable; unsigned char usb2_hw_lpm_besl_capable; unsigned char usb2_hw_lpm_enabled; unsigned char usb2_hw_lpm_allowed; unsigned char usb3_lpm_enabled; int string_langid; char *product; char *manufacturer; char *serial; struct list_head filelist; int maxchild; u32 quirks; atomic_t urbnum; unsigned long active_duration; unsigned long connect_time; unsigned char do_remote_wakeup; unsigned char reset_resume; unsigned char port_is_suspended; struct wusb_dev *wusb_dev; int slot_id; enum usb_device_removable removable; struct usb2_lpm_parameters l1_params; struct usb3_lpm_parameters u1_params; struct usb3_lpm_parameters u2_params; unsigned int lpm_disable_count; } ;
819 struct usb_dynids { spinlock_t lock; struct list_head list; } ;
1007 struct usbdrv_wrap { struct device_driver driver; int for_devices; } ;
1017 struct usb_driver { const char *name; int (*probe)(struct usb_interface *, const struct usb_device_id *); void (*disconnect)(struct usb_interface *); int (*unlocked_ioctl)(struct usb_interface *, unsigned int, void *); int (*suspend)(struct usb_interface *, pm_message_t ); int (*resume)(struct usb_interface *); int (*reset_resume)(struct usb_interface *); int (*pre_reset)(struct usb_interface *); int (*post_reset)(struct usb_interface *); const struct usb_device_id *id_table; struct usb_dynids dynids; struct usbdrv_wrap drvwrap; unsigned char no_dynamic_id; unsigned char supports_autosuspend; unsigned char disable_hub_initiated_lpm; unsigned char soft_unbind; } ;
1145 struct usb_class_driver { char *name; char * (*devnode)(struct device *, umode_t *); const struct file_operations *fops; int minor_base; } ;
1200 struct usb_iso_packet_descriptor { unsigned int offset; unsigned int length; unsigned int actual_length; int status; } ;
1242 struct urb ;
1243 struct usb_anchor { struct list_head urb_list; wait_queue_head_t wait; spinlock_t lock; atomic_t suspend_wakeups; unsigned char poisoned; } ;
1262 struct scatterlist ;
1262 struct urb { struct kref kref; void *hcpriv; atomic_t use_count; atomic_t reject; int unlinked; struct list_head urb_list; struct list_head anchor_list; struct usb_anchor *anchor; struct usb_device *dev; struct usb_host_endpoint *ep; unsigned int pipe; unsigned int stream_id; int status; unsigned int transfer_flags; void *transfer_buffer; dma_addr_t transfer_dma; struct scatterlist *sg; int num_mapped_sgs; int num_sgs; u32 transfer_buffer_length; u32 actual_length; unsigned char *setup_packet; dma_addr_t setup_dma; int start_frame; int number_of_packets; int interval; int error_count; void *context; void (*complete)(struct urb *); struct usb_iso_packet_descriptor iso_frame_desc[0U]; } ;
1894 struct pollfd { int fd; short events; short revents; } ;
32 struct poll_table_struct { void (*_qproc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); unsigned long _key; } ;
210 struct vm_fault { unsigned int flags; unsigned long pgoff; void *virtual_address; struct page *cow_page; struct page *page; unsigned long max_pgoff; pte_t *pte; } ;
242 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*mremap)(struct vm_area_struct *); int (*fault)(struct vm_area_struct *, struct vm_fault *); int (*pmd_fault)(struct vm_area_struct *, unsigned long, pmd_t *, unsigned int); void (*map_pages)(struct vm_area_struct *, struct vm_fault *); int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ;
2316 struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; } ;
381 struct __kfifo { unsigned int in; unsigned int out; unsigned int mask; unsigned int esize; void *data; } ;
65 union __anonunion____missing_field_name_281 { struct __kfifo kfifo; unsigned char *type; const unsigned char *const_type; char (*rectype)[0U]; void *ptr; const void *ptr_const; } ;
65 struct kfifo { union __anonunion____missing_field_name_281 __annonCompField75; unsigned char buf[0U]; } ;
832 struct lirc_buffer { wait_queue_head_t wait_poll; spinlock_t fifo_lock; unsigned int chunk_size; unsigned int size; struct kfifo fifo; } ;
120 struct rc_dev ;
120 struct lirc_driver { char name[40U]; int minor; __u32 code_length; unsigned int buffer_size; int sample_rate; __u32 features; unsigned int chunk_size; void *data; int min_timeout; int max_timeout; int (*add_to_buf)(void *, struct lirc_buffer *); struct lirc_buffer *rbuf; int (*set_use_inc)(void *); void (*set_use_dec)(void *); struct rc_dev *rdev; const struct file_operations *fops; struct device *dev; struct module *owner; } ;
74 struct rx_data { int count; int prev_bit; int initial_space; } ;
103 struct tx_t { unsigned char data_buf[35U]; struct completion finished; atomic_t busy; int status; } ;
110 struct imon_context { struct usb_device *usbdev; int display; int display_isopen; int ir_isopen; int dev_present; struct mutex ctx_lock; wait_queue_head_t remove_ok; int vfd_proto_6p; struct lirc_driver *driver; struct usb_endpoint_descriptor *rx_endpoint; struct usb_endpoint_descriptor *tx_endpoint; struct urb *rx_urb; struct urb *tx_urb; unsigned char usb_rx_buf[8U]; unsigned char usb_tx_buf[8U]; struct rx_data rx; struct tx_t tx; } ;
135 typedef int ldv_func_ret_type;
1 long int __builtin_expect(long exp, long c);
33 extern struct module __this_module;
142 int printk(const char *, ...);
53 void __dynamic_dev_dbg(struct _ddebug *, const struct device *, const char *, ...);
3 bool ldv_is_err(const void *ptr);
6 long int ldv_ptr_err(const void *ptr);
12 void * memdup_user(const void *, size_t );
30 void * __memcpy(void *, const void *, size_t );
65 char * strcpy(char *, const char *);
66 void warn_slowpath_fmt(const char *, const int, const char *, ...);
32 long int PTR_ERR(const void *ptr);
41 bool IS_ERR(const void *ptr);
25 int atomic_read(const atomic_t *v);
37 void atomic_set(atomic_t *v, int i);
119 void __mutex_init(struct mutex *, const char *, struct lock_class_key *);
173 int mutex_trylock(struct mutex *);
176 int ldv_mutex_trylock_8(struct mutex *ldv_func_arg1);
178 void mutex_unlock(struct mutex *);
181 void ldv_mutex_unlock_6(struct mutex *ldv_func_arg1);
185 void ldv_mutex_unlock_9(struct mutex *ldv_func_arg1);
189 void ldv_mutex_unlock_10(struct mutex *ldv_func_arg1);
193 void ldv_mutex_unlock_14(struct mutex *ldv_func_arg1);
197 void ldv_mutex_unlock_15(struct mutex *ldv_func_arg1);
201 void ldv_mutex_unlock_17(struct mutex *ldv_func_arg1);
205 void ldv_mutex_unlock_18(struct mutex *ldv_func_arg1);
209 void ldv_mutex_unlock_19(struct mutex *ldv_func_arg1);
213 void ldv_mutex_unlock_22(struct mutex *ldv_func_arg1);
217 void ldv_mutex_unlock_24(struct mutex *ldv_func_arg1);
221 void ldv_mutex_unlock_26(struct mutex *ldv_func_arg1);
225 void ldv_mutex_unlock_27(struct mutex *ldv_func_arg1);
229 void ldv_mutex_unlock_30(struct mutex *ldv_func_arg1);
233 void ldv_mutex_unlock_33(struct mutex *ldv_func_arg1);
237 void ldv_mutex_unlock_34(struct mutex *ldv_func_arg1);
241 void ldv_mutex_unlock_35(struct mutex *ldv_func_arg1);
18 void mutex_lock(struct mutex *);
21 void ldv_mutex_lock_5(struct mutex *ldv_func_arg1);
25 void ldv_mutex_lock_7(struct mutex *ldv_func_arg1);
29 void ldv_mutex_lock_11(struct mutex *ldv_func_arg1);
33 void ldv_mutex_lock_12(struct mutex *ldv_func_arg1);
37 void ldv_mutex_lock_13(struct mutex *ldv_func_arg1);
41 void ldv_mutex_lock_16(struct mutex *ldv_func_arg1);
45 void ldv_mutex_lock_20(struct mutex *ldv_func_arg1);
49 void ldv_mutex_lock_21(struct mutex *ldv_func_arg1);
53 void ldv_mutex_lock_23(struct mutex *ldv_func_arg1);
57 void ldv_mutex_lock_25(struct mutex *ldv_func_arg1);
61 void ldv_mutex_lock_28(struct mutex *ldv_func_arg1);
65 void ldv_mutex_lock_29(struct mutex *ldv_func_arg1);
69 void ldv_mutex_lock_31(struct mutex *ldv_func_arg1);
73 void ldv_mutex_lock_32(struct mutex *ldv_func_arg1);
78 void ldv_mutex_lock_ctx_lock_of_imon_context(struct mutex *lock);
82 void ldv_mutex_unlock_ctx_lock_of_imon_context(struct mutex *lock);
91 void ldv_mutex_lock_driver_lock(struct mutex *lock);
95 void ldv_mutex_unlock_driver_lock(struct mutex *lock);
104 void ldv_mutex_lock_i_mutex_of_inode(struct mutex *lock);
108 void ldv_mutex_unlock_i_mutex_of_inode(struct mutex *lock);
117 void ldv_mutex_lock_lock(struct mutex *lock);
121 void ldv_mutex_unlock_lock(struct mutex *lock);
130 void ldv_mutex_lock_mutex_of_device(struct mutex *lock);
131 int ldv_mutex_trylock_mutex_of_device(struct mutex *lock);
134 void ldv_mutex_unlock_mutex_of_device(struct mutex *lock);
93 void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *);
34 unsigned long int _raw_spin_lock_irqsave(raw_spinlock_t *);
45 void _raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long);
289 raw_spinlock_t * spinlock_check(spinlock_t *lock);
360 void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);
72 void __init_waitqueue_head(wait_queue_head_t *, const char *, struct lock_class_key *);
149 void __wake_up(wait_queue_head_t *, unsigned int, int, void *);
73 void init_completion(struct completion *x);
93 int wait_for_completion_interruptible(struct completion *);
106 void complete(struct completion *);
107 void complete_all(struct completion *);
143 void kfree(const void *);
289 void * __kmalloc(size_t , gfp_t );
428 void * kmalloc(size_t size, gfp_t flags);
591 void * kzalloc(size_t size, gfp_t flags);
437 int usb_endpoint_type(const struct usb_endpoint_descriptor *epd);
888 void * dev_get_drvdata(const struct device *dev);
893 void dev_set_drvdata(struct device *dev, void *data);
1109 void dev_err(const struct device *, const char *, ...);
1111 void dev_warn(const struct device *, const char *, ...);
1115 void _dev_info(const struct device *, const char *, ...);
796 unsigned int iminor(const struct inode *inode);
2669 loff_t noop_llseek(struct file *, loff_t , int);
189 void * usb_get_intfdata(struct usb_interface *intf);
194 void usb_set_intfdata(struct usb_interface *intf, void *data);
616 struct usb_device * interface_to_usbdev(struct usb_interface *intf);
621 struct usb_device * usb_get_dev(struct usb_device *);
764 const struct usb_device_id * usb_match_id(struct usb_interface *, const struct usb_device_id *);
770 struct usb_interface * usb_find_interface(struct usb_driver *, int);
1194 int usb_register_dev(struct usb_interface *, struct usb_class_driver *);
1196 void usb_deregister_dev(struct usb_interface *, struct usb_class_driver *);
1573 void usb_fill_int_urb(struct urb *urb, struct usb_device *dev, unsigned int pipe, void *transfer_buffer, int buffer_length, void (*complete_fn)(struct urb *), void *context, int interval);
1602 struct urb * usb_alloc_urb(int, gfp_t );
1603 void usb_free_urb(struct urb *);
1606 int usb_submit_urb(struct urb *, gfp_t );
1608 void usb_kill_urb(struct urb *);
1805 unsigned int __create_pipe(struct usb_device *dev, unsigned int endpoint);
182 int __kfifo_int_must_check_helper(int val);
771 int __kfifo_alloc(struct __kfifo *, unsigned int, size_t , gfp_t );
774 void __kfifo_free(struct __kfifo *);
779 unsigned int __kfifo_in(struct __kfifo *, const void *, unsigned int);
800 unsigned int __kfifo_in_r(struct __kfifo *, const void *, unsigned int, size_t );
47 int lirc_buffer_init(struct lirc_buffer *buf, unsigned int chunk_size, unsigned int size);
62 void lirc_buffer_free(struct lirc_buffer *buf);
110 unsigned int lirc_buffer_write(struct lirc_buffer *buf, unsigned char *orig);
199 int lirc_register_driver(struct lirc_driver *);
203 int lirc_unregister_driver(int);
53 int imon_probe(struct usb_interface *interface, const struct usb_device_id *id);
55 void imon_disconnect(struct usb_interface *interface);
56 void usb_rx_callback(struct urb *urb);
57 void usb_tx_callback(struct urb *urb);
60 int imon_resume(struct usb_interface *intf);
61 int imon_suspend(struct usb_interface *intf, pm_message_t message);
64 int display_open(struct inode *inode, struct file *file);
65 int display_close(struct inode *inode, struct file *file);
68 ssize_t vfd_write(struct file *file, const char *buf, size_t n_bytes, loff_t *pos);
72 int ir_open(void *data);
73 void ir_close(void *data);
112 const struct file_operations display_fops = { &__this_module, &noop_llseek, 0, &vfd_write, 0, 0, 0, 0, 0, 0, 0, &display_open, 0, &display_close, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
130 struct usb_device_id imon_usb_id_table[5U] = { { 3U, 2728U, 32769U, (unsigned short)0, (unsigned short)0, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0UL }, { 3U, 1256U, 65328U, (unsigned short)0, (unsigned short)0, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0UL }, { 3U, 2728U, 65498U, (unsigned short)0, (unsigned short)0, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0UL }, { 3U, 5570U, 65498U, (unsigned short)0, (unsigned short)0, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0UL } };
147 struct usb_device_id vfd_proto_6p_list[2U] = { { 3U, 5570U, 65498U, (unsigned short)0, (unsigned short)0, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0UL } };
153 struct usb_device_id ir_only_list[3U] = { { 3U, 2728U, 32769U, (unsigned short)0, (unsigned short)0, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0UL }, { 3U, 1256U, 65328U, (unsigned short)0, (unsigned short)0, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0UL } };
160 struct usb_driver imon_driver = { "lirc_imon", &imon_probe, &imon_disconnect, 0, &imon_suspend, &imon_resume, 0, 0, 0, (const struct usb_device_id *)(&imon_usb_id_table), { { { { { { 0 } }, 0U, 0U, 0, { 0, { 0, 0 }, 0, 0, 0UL } } } }, { 0, 0 } }, { { 0, 0, 0, 0, (_Bool)0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, 0 }, 0U, 0U, 0U, 0U };
169 struct usb_class_driver imon_class = { (char *)"lcd%d", 0, &display_fops, 144 };
176 struct mutex driver_lock = { { 1 }, { { { { { 0 } }, 3735899821U, 4294967295U, (void *)-1, { 0, { 0, 0 }, "driver_lock.wait_lock", 0, 0UL } } } }, { &(driver_lock.wait_list), &(driver_lock.wait_list) }, 0, (void *)(&driver_lock), { 0, { 0, 0 }, "driver_lock", 0, 0UL } };
178 int debug = 0;
186 const struct usb_device_id __mod_usb__imon_usb_id_table_device_table[5U] = { };
190 void free_imon_context(struct imon_context *context);
204 void deregister_from_lirc(struct imon_context *context);
322 int send_packet(struct imon_context *context);
553 void submit_data(struct imon_context *context);
576 void imon_incoming_packet(struct imon_context *context, struct urb *urb, int intf);
1002 void ldv_check_final_state();
1005 void ldv_check_return_value(int);
1008 void ldv_check_return_value_probe(int);
1011 void ldv_initialize();
1014 void ldv_handler_precall();
1017 int nondet_int();
1020 int LDV_IN_INTERRUPT = 0;
1023 void ldv_main0_sequence_infinite_withcheck_stateful();
10 void ldv_error();
20 void ldv_stop();
25 int ldv_undef_int();
30 int ldv_undef_int_negative();
14 void * ldv_err_ptr(long error);
28 bool ldv_is_err_or_null(const void *ptr);
8 int ldv_mutex_ctx_lock_of_imon_context = 1;
11 int ldv_mutex_lock_interruptible_ctx_lock_of_imon_context(struct mutex *lock);
37 int ldv_mutex_lock_killable_ctx_lock_of_imon_context(struct mutex *lock);
72 int ldv_mutex_trylock_ctx_lock_of_imon_context(struct mutex *lock);
98 int ldv_atomic_dec_and_mutex_lock_ctx_lock_of_imon_context(atomic_t *cnt, struct mutex *lock);
123 int ldv_mutex_is_locked_ctx_lock_of_imon_context(struct mutex *lock);
163 void ldv_usb_lock_device_ctx_lock_of_imon_context();
170 int ldv_usb_trylock_device_ctx_lock_of_imon_context();
176 int ldv_usb_lock_device_for_reset_ctx_lock_of_imon_context();
189 void ldv_usb_unlock_device_ctx_lock_of_imon_context();
194 int ldv_mutex_driver_lock = 1;
197 int ldv_mutex_lock_interruptible_driver_lock(struct mutex *lock);
223 int ldv_mutex_lock_killable_driver_lock(struct mutex *lock);
258 int ldv_mutex_trylock_driver_lock(struct mutex *lock);
284 int ldv_atomic_dec_and_mutex_lock_driver_lock(atomic_t *cnt, struct mutex *lock);
309 int ldv_mutex_is_locked_driver_lock(struct mutex *lock);
349 void ldv_usb_lock_device_driver_lock();
356 int ldv_usb_trylock_device_driver_lock();
362 int ldv_usb_lock_device_for_reset_driver_lock();
375 void ldv_usb_unlock_device_driver_lock();
380 int ldv_mutex_i_mutex_of_inode = 1;
383 int ldv_mutex_lock_interruptible_i_mutex_of_inode(struct mutex *lock);
409 int ldv_mutex_lock_killable_i_mutex_of_inode(struct mutex *lock);
444 int ldv_mutex_trylock_i_mutex_of_inode(struct mutex *lock);
470 int ldv_atomic_dec_and_mutex_lock_i_mutex_of_inode(atomic_t *cnt, struct mutex *lock);
495 int ldv_mutex_is_locked_i_mutex_of_inode(struct mutex *lock);
535 void ldv_usb_lock_device_i_mutex_of_inode();
542 int ldv_usb_trylock_device_i_mutex_of_inode();
548 int ldv_usb_lock_device_for_reset_i_mutex_of_inode();
561 void ldv_usb_unlock_device_i_mutex_of_inode();
566 int ldv_mutex_lock = 1;
569 int ldv_mutex_lock_interruptible_lock(struct mutex *lock);
595 int ldv_mutex_lock_killable_lock(struct mutex *lock);
630 int ldv_mutex_trylock_lock(struct mutex *lock);
656 int ldv_atomic_dec_and_mutex_lock_lock(atomic_t *cnt, struct mutex *lock);
681 int ldv_mutex_is_locked_lock(struct mutex *lock);
721 void ldv_usb_lock_device_lock();
728 int ldv_usb_trylock_device_lock();
734 int ldv_usb_lock_device_for_reset_lock();
747 void ldv_usb_unlock_device_lock();
752 int ldv_mutex_mutex_of_device = 1;
755 int ldv_mutex_lock_interruptible_mutex_of_device(struct mutex *lock);
781 int ldv_mutex_lock_killable_mutex_of_device(struct mutex *lock);
842 int ldv_atomic_dec_and_mutex_lock_mutex_of_device(atomic_t *cnt, struct mutex *lock);
867 int ldv_mutex_is_locked_mutex_of_device(struct mutex *lock);
907 void ldv_usb_lock_device_mutex_of_device();
914 int ldv_usb_trylock_device_mutex_of_device();
920 int ldv_usb_lock_device_for_reset_mutex_of_device();
933 void ldv_usb_unlock_device_mutex_of_device();
return ;
}
-entry_point
{
1025 struct usb_interface *var_group1;
1026 const struct usb_device_id *var_imon_probe_12_p1;
1027 int res_imon_probe_12;
1028 struct pm_message var_imon_suspend_14_p1;
1029 int ldv_s_imon_driver_usb_driver;
1030 int tmp;
1031 int tmp___0;
1111 ldv_s_imon_driver_usb_driver = 0;
1101 LDV_IN_INTERRUPT = 1;
1110 ldv_initialize() { /* Function call is skipped due to function is undefined */}
1114 goto ldv_32240;
1114 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}
1114 assume(tmp___0 != 0);
1117 goto ldv_32239;
1115 ldv_32239:;
1118 tmp = nondet_int() { /* Function call is skipped due to function is undefined */}
1118 switch (tmp)
1119 assume(tmp == 0);
1123 assume(ldv_s_imon_driver_usb_driver == 0);
1140 -imon_probe(var_group1, var_imon_probe_12_p1)
{
684 struct usb_device *usbdev;
685 struct usb_host_interface *iface_desc;
686 struct usb_endpoint_descriptor *rx_endpoint;
687 struct usb_endpoint_descriptor *tx_endpoint;
688 struct urb *rx_urb;
689 struct urb *tx_urb;
690 struct lirc_driver *driver;
691 struct lirc_buffer *rbuf;
692 struct device *dev;
693 int ifnum;
694 int lirc_minor;
695 int num_endpts;
696 int retval;
697 int display_ep_found;
698 int ir_ep_found;
699 int vfd_proto_6p;
700 struct imon_context *context;
701 int i;
702 unsigned short vendor;
703 unsigned short product;
704 void *tmp;
705 const struct usb_device_id *tmp___0;
706 struct usb_device *tmp___1;
707 struct _ddebug descriptor;
708 long tmp___2;
709 struct usb_endpoint_descriptor *ep;
710 int ep_dir;
711 int ep_type;
712 struct _ddebug descriptor___0;
713 long tmp___3;
714 struct _ddebug descriptor___1;
715 long tmp___4;
716 struct _ddebug descriptor___2;
717 long tmp___5;
718 const struct usb_device_id *tmp___6;
719 struct _ddebug descriptor___3;
720 long tmp___7;
721 void *tmp___8;
722 void *tmp___9;
723 int tmp___10;
724 struct lock_class_key __key;
725 unsigned int tmp___11;
726 struct _ddebug descriptor___4;
727 long tmp___12;
728 int tmp___13;
685 usbdev = (struct usb_device *)0;
686 iface_desc = (struct usb_host_interface *)0;
687 rx_endpoint = (struct usb_endpoint_descriptor *)0;
688 tx_endpoint = (struct usb_endpoint_descriptor *)0;
689 rx_urb = (struct urb *)0;
690 tx_urb = (struct urb *)0;
691 driver = (struct lirc_driver *)0;
692 rbuf = (struct lirc_buffer *)0;
693 dev = &(interface->dev);
695 lirc_minor = 0;
697 retval = -12;
698 display_ep_found = 0;
699 ir_ep_found = 0;
700 vfd_proto_6p = 0;
701 context = (struct imon_context *)0;
706 -ldv_mutex_lock_28(&driver_lock)
{
299 -ldv_mutex_lock_driver_lock(ldv_func_arg1)
{
252 assume(!(ldv_mutex_driver_lock != 1));
254 ldv_mutex_driver_lock = 2;
255 return ;;
}
301 mutex_lock(ldv_func_arg1) { /* Function call is skipped due to function is undefined */}
302 return ;;
}
708 -kzalloc(496UL, 208U)
{
593 void *tmp;
593 -kmalloc(size, flags | 32768U)
{
430 void *tmp___2;
445 tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */}
445 return tmp___2;;
}
593 return tmp;;
}
708 context = (struct imon_context *)tmp;
709 assume(!(((unsigned long)context) == ((unsigned long)((struct imon_context *)0))));
716 tmp___0 = usb_match_id(interface, (const struct usb_device_id *)(&ir_only_list)) { /* Function call is skipped due to function is undefined */}
716 assume(!(((unsigned long)tmp___0) != ((unsigned long)((const struct usb_device_id *)0))));
719 context->display = 1;
721 -interface_to_usbdev(interface)
{
618 const struct device *__mptr;
618 __mptr = (const struct device *)(intf->dev.parent);
618 return ((struct usb_device *)__mptr) + 18446744073709551472UL;;
}
721 usbdev = usb_get_dev(tmp___1) { /* Function call is skipped due to function is undefined */}
722 iface_desc = interface->cur_altsetting;
723 num_endpts = (int)(iface_desc->desc.bNumEndpoints);
724 ifnum = (int)(iface_desc->desc.bInterfaceNumber);
725 vendor = usbdev->descriptor.idVendor;
726 product = usbdev->descriptor.idProduct;
728 descriptor.modname = "lirc_imon";
728 descriptor.function = "imon_probe";
728 descriptor.filename = "/work/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.3-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.3-rc1.tar.xz/csd_deg_dscv/7460/dscv_tempdir/dscv/ri/32_7a/drivers/staging/media/lirc/lirc_imon.c";
728 descriptor.format = "%s: found iMON device (%04x:%04x, intf%d)\n";
728 descriptor.lineno = 729U;
728 descriptor.flags = 0U;
728 -__builtin_expect(((long)(descriptor.flags)) & 1L, 0L)
{
52 return exp;;
}
728 assume(!(tmp___2 != 0L));
736 i = 0;
736 goto ldv_32167;
736 assume(i < num_endpts);
736 assume(ir_ep_found == 0);
738 goto ldv_32166;
737 ldv_32166:;
741 ep = &(((iface_desc->endpoint) + ((unsigned long)i))->desc);
742 int __CPAchecker_TMP_0 = (int)(ep->bEndpointAddress);
742 ep_dir = __CPAchecker_TMP_0 & 128;
743 -usb_endpoint_type((const struct usb_endpoint_descriptor *)ep)
{
439 int __CPAchecker_TMP_0 = (int)(epd->bmAttributes);
439 return __CPAchecker_TMP_0 & 3;;
}
745 assume(ir_ep_found == 0);
745 assume(ep_dir == 128);
745 assume(ep_type == 3);
749 rx_endpoint = ep;
750 ir_ep_found = 1;
751 descriptor___0.modname = "lirc_imon";
751 descriptor___0.function = "imon_probe";
751 descriptor___0.filename = "/work/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.3-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.3-rc1.tar.xz/csd_deg_dscv/7460/dscv_tempdir/dscv/ri/32_7a/drivers/staging/media/lirc/lirc_imon.c";
751 descriptor___0.format = "%s: found IR endpoint\n";
751 descriptor___0.lineno = 751U;
751 descriptor___0.flags = 0U;
751 -__builtin_expect(((long)(descriptor___0.flags)) & 1L, 0L)
{
52 return exp;;
}
751 assume(!(tmp___3 != 0L));
736 i = i + 1;
737 ldv_32167:;
736 assume(!(i < num_endpts));
766 assume(!((context->display) == 0));
772 assume(!(ir_ep_found == 0));
780 assume(!(display_ep_found != 0));
788 -kzalloc(144UL, 208U)
{
593 void *tmp;
593 -kmalloc(size, flags | 32768U)
{
430 void *tmp___2;
445 tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */}
445 return tmp___2;;
}
593 return tmp;;
}
788 driver = (struct lirc_driver *)tmp___8;
789 assume(!(((unsigned long)driver) == ((unsigned long)((struct lirc_driver *)0))));
792 -kmalloc(192UL, 208U)
{
430 void *tmp___2;
445 tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */}
445 return tmp___2;;
}
792 rbuf = (struct lirc_buffer *)tmp___9;
793 assume(!(((unsigned long)rbuf) == ((unsigned long)((struct lirc_buffer *)0))));
796 -lirc_buffer_init(rbuf, 4U, 128U)
{
50 int ret;
51 struct lock_class_key __key;
52 struct lock_class_key __key___0;
53 struct kfifo *__tmp;
54 struct __kfifo *__kfifo;
55 int tmp;
53 __init_waitqueue_head(&(buf->wait_poll), "&buf->wait_poll", &__key) { /* Function call is skipped due to function is undefined */}
54 -spinlock_check(&(buf->fifo_lock))
{
291 return &(lock->__annonCompField16.rlock);;
}
54 __raw_spin_lock_init(&(buf->fifo_lock.__annonCompField16.rlock), "&(&buf->fifo_lock)->rlock", &__key___0) { /* Function call is skipped due to function is undefined */}
55 buf->chunk_size = chunk_size;
56 buf->size = size;
57 __tmp = &(buf->fifo);
57 __kfifo = &(__tmp->__annonCompField75.kfifo);
57 tmp = __kfifo_alloc(__kfifo, size * chunk_size, 1UL, 208U) { /* Function call is skipped due to function is undefined */}
57 -__kfifo_int_must_check_helper(tmp)
{
184 return val;;
}
59 return ret;;
}
796 assume(!(tmp___10 != 0));
800 rx_urb = usb_alloc_urb(0, 208U) { /* Function call is skipped due to function is undefined */}
801 assume(!(((unsigned long)rx_urb) == ((unsigned long)((struct urb *)0))));
805 tx_urb = usb_alloc_urb(0, 208U) { /* Function call is skipped due to function is undefined */}
806 assume(!(((unsigned long)tx_urb) == ((unsigned long)((struct urb *)0))));
812 __mutex_init(&(context->ctx_lock), "&context->ctx_lock", &__key) { /* Function call is skipped due to function is undefined */}
813 context->vfd_proto_6p = vfd_proto_6p;
815 strcpy((char *)(&(driver->name)), "lirc_imon") { /* Function call is skipped due to function is undefined */}
816 driver->minor = -1;
817 driver->code_length = 32U;
818 driver->sample_rate = 0;
819 driver->features = 262144U;
820 driver->data = (void *)context;
821 driver->rbuf = rbuf;
822 driver->set_use_inc = &ir_open;
823 driver->set_use_dec = &ir_close;
824 driver->dev = &(interface->dev);
825 driver->owner = &__this_module;
827 -ldv_mutex_lock_29(&(context->ctx_lock))
{
307 -ldv_mutex_lock_ctx_lock_of_imon_context(ldv_func_arg1)
{
66 assume(!(ldv_mutex_ctx_lock_of_imon_context != 1));
68 ldv_mutex_ctx_lock_of_imon_context = 2;
69 return ;;
}
309 mutex_lock(ldv_func_arg1) { /* Function call is skipped due to function is undefined */}
310 return ;;
}
829 context->driver = driver;
832 lirc_minor = lirc_register_driver(driver) { /* Function call is skipped due to function is undefined */}
833 assume(!(lirc_minor < 0));
838 _dev_info((const struct device *)dev, "Registered iMON driver (lirc minor: %d)\n", lirc_minor) { /* Function call is skipped due to function is undefined */}
842 driver->minor = lirc_minor;
844 context->usbdev = usbdev;
845 context->dev_present = 1;
846 context->rx_endpoint = rx_endpoint;
847 context->rx_urb = rx_urb;
853 context->tx_endpoint = tx_endpoint;
854 context->tx_urb = tx_urb;
856 assume(!(display_ep_found != 0));
859 unsigned int __CPAchecker_TMP_1 = (unsigned int)(context->rx_endpoint->bEndpointAddress);
859 -__create_pipe(context->usbdev, __CPAchecker_TMP_1)
{
1808 return ((unsigned int)((dev->devnum) << 8)) | (endpoint << 15);;
}
859 int __CPAchecker_TMP_2 = (int)(context->rx_endpoint->bInterval);
859 -usb_fill_int_urb(context->rx_urb, context->usbdev, tmp___11 | 1073741952U, (void *)(&(context->usb_rx_buf)), 8, &usb_rx_callback, (void *)context, __CPAchecker_TMP_2)
{
1578 int _min1;
1579 int _max1;
1580 int _max2;
1581 int _min2;
1582 urb->dev = dev;
1583 urb->pipe = pipe;
1584 urb->transfer_buffer = transfer_buffer;
1585 urb->transfer_buffer_length = (u32 )buffer_length;
1586 urb->complete = complete_fn;
1587 urb->context = context;
1589 unsigned int __CPAchecker_TMP_0 = (unsigned int)(dev->speed);
1589 assume(!(__CPAchecker_TMP_0 == 3U));
1589 unsigned int __CPAchecker_TMP_1 = (unsigned int)(dev->speed);
1589 assume(!(__CPAchecker_TMP_1 == 5U));
1595 urb->interval = interval;
1598 urb->start_frame = -1;
1599 return ;;
}
866 retval = usb_submit_urb(context->rx_urb, 208U) { /* Function call is skipped due to function is undefined */}
867 assume(!(retval != 0));
872 -usb_set_intfdata(interface, (void *)context)
{
196 -dev_set_drvdata(&(intf->dev), data)
{
895 dev->driver_data = data;
896 return ;;
}
197 return ;;
}
874 assume((context->display) != 0);
874 assume(!(ifnum == 0));
885 _dev_info((const struct device *)dev, "iMON device (%04x:%04x, intf%d) on usb<%d:%d> initialized\n", (int)vendor, (int)product, ifnum, usbdev->bus->busnum, usbdev->devnum) { /* Function call is skipped due to function is undefined */}
889 goto driver_unlock;
913 -ldv_mutex_unlock_30(&driver_lock)
{
315 -ldv_mutex_unlock_driver_lock(ldv_func_arg1)
{
341 assume(!(ldv_mutex_driver_lock != 2));
343 ldv_mutex_driver_lock = 1;
344 return ;;
}
317 mutex_unlock(ldv_func_arg1) { /* Function call is skipped due to function is undefined */}
318 return ;;
}
915 return retval;;
}
1141 ldv_check_return_value(res_imon_probe_12) { /* Function call is skipped due to function is undefined */}
1142 ldv_check_return_value_probe(res_imon_probe_12) { /* Function call is skipped due to function is undefined */}
1143 assume(!(res_imon_probe_12 != 0));
1145 ldv_s_imon_driver_usb_driver = ldv_s_imon_driver_usb_driver + 1;
1151 goto ldv_32234;
1241 ldv_32234:;
1242 ldv_32240:;
1114 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}
1114 assume(!(tmp___0 != 0));
1114 assume(ldv_s_imon_driver_usb_driver != 0);
1117 goto ldv_32239;
1115 ldv_32239:;
1118 tmp = nondet_int() { /* Function call is skipped due to function is undefined */}
1118 switch (tmp)
1119 assume(!(tmp == 0));
1152 assume(tmp == 1);
1155 assume(ldv_s_imon_driver_usb_driver == 1);
1172 ldv_handler_precall() { /* Function call is skipped due to function is undefined */}
1173 -imon_suspend(var_group1, var_imon_suspend_14_p1)
{
963 struct imon_context *context;
964 void *tmp;
963 -usb_get_intfdata(intf)
{
191 void *tmp;
191 -dev_get_drvdata((const struct device *)(&(intf->dev)))
{
890 void *__CPAchecker_TMP_0 = (void *)(dev->driver_data);
890 return __CPAchecker_TMP_0;;
}
191 return tmp;;
}
963 context = (struct imon_context *)tmp;
965 usb_kill_urb(context->rx_urb) { /* Function call is skipped due to function is undefined */}
967 return 0;;
}
1174 ldv_s_imon_driver_usb_driver = ldv_s_imon_driver_usb_driver + 1;
1180 goto ldv_32234;
1241 ldv_32234:;
1242 ldv_32240:;
1114 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}
1114 assume(!(tmp___0 != 0));
1114 assume(ldv_s_imon_driver_usb_driver != 0);
1117 goto ldv_32239;
1115 ldv_32239:;
1118 tmp = nondet_int() { /* Function call is skipped due to function is undefined */}
1118 switch (tmp)
1119 assume(!(tmp == 0));
1152 assume(!(tmp == 1));
1181 assume(tmp == 2);
1184 assume(ldv_s_imon_driver_usb_driver == 2);
1201 ldv_handler_precall() { /* Function call is skipped due to function is undefined */}
1202 -imon_resume(var_group1)
{
972 struct imon_context *context;
973 void *tmp;
974 unsigned int tmp___0;
975 int tmp___1;
972 -usb_get_intfdata(intf)
{
191 void *tmp;
191 -dev_get_drvdata((const struct device *)(&(intf->dev)))
{
890 void *__CPAchecker_TMP_0 = (void *)(dev->driver_data);
890 return __CPAchecker_TMP_0;;
}
191 return tmp;;
}
972 context = (struct imon_context *)tmp;
974 unsigned int __CPAchecker_TMP_0 = (unsigned int)(context->rx_endpoint->bEndpointAddress);
974 -__create_pipe(context->usbdev, __CPAchecker_TMP_0)
{
1808 return ((unsigned int)((dev->devnum) << 8)) | (endpoint << 15);;
}
974 int __CPAchecker_TMP_1 = (int)(context->rx_endpoint->bInterval);
974 -usb_fill_int_urb(context->rx_urb, context->usbdev, tmp___0 | 1073741952U, (void *)(&(context->usb_rx_buf)), 8, &usb_rx_callback, (void *)context, __CPAchecker_TMP_1)
{
1578 int _min1;
1579 int _max1;
1580 int _max2;
1581 int _min2;
1582 urb->dev = dev;
1583 urb->pipe = pipe;
1584 urb->transfer_buffer = transfer_buffer;
1585 urb->transfer_buffer_length = (u32 )buffer_length;
1586 urb->complete = complete_fn;
1587 urb->context = context;
1589 unsigned int __CPAchecker_TMP_0 = (unsigned int)(dev->speed);
1589 assume(!(__CPAchecker_TMP_0 == 3U));
1589 unsigned int __CPAchecker_TMP_1 = (unsigned int)(dev->speed);
1589 assume(!(__CPAchecker_TMP_1 == 5U));
1595 urb->interval = interval;
1598 urb->start_frame = -1;
1599 return ;;
}
981 tmp___1 = usb_submit_urb(context->rx_urb, 32U) { /* Function call is skipped due to function is undefined */}
981 return tmp___1;;
}
1203 ldv_s_imon_driver_usb_driver = ldv_s_imon_driver_usb_driver + 1;
1209 goto ldv_32234;
1241 ldv_32234:;
1242 ldv_32240:;
1114 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}
1114 assume(!(tmp___0 != 0));
1114 assume(ldv_s_imon_driver_usb_driver != 0);
1117 goto ldv_32239;
1115 ldv_32239:;
1118 tmp = nondet_int() { /* Function call is skipped due to function is undefined */}
1118 switch (tmp)
1119 assume(!(tmp == 0));
1152 assume(!(tmp == 1));
1181 assume(!(tmp == 2));
1210 assume(tmp == 3);
1213 assume(ldv_s_imon_driver_usb_driver == 3);
1230 ldv_handler_precall() { /* Function call is skipped due to function is undefined */}
1231 -imon_disconnect(var_group1)
{
923 struct imon_context *context;
924 int ifnum;
925 void *tmp;
926 int tmp___0;
927 -ldv_mutex_lock_31(&driver_lock)
{
323 -ldv_mutex_lock_driver_lock(ldv_func_arg1)
{
252 assume(!(ldv_mutex_driver_lock != 1));
254 ldv_mutex_driver_lock = 2;
255 return ;;
}
325 mutex_lock(ldv_func_arg1) { /* Function call is skipped due to function is undefined */}
326 return ;;
}
929 -usb_get_intfdata(interface)
{
191 void *tmp;
191 -dev_get_drvdata((const struct device *)(&(intf->dev)))
{
890 void *__CPAchecker_TMP_0 = (void *)(dev->driver_data);
890 return __CPAchecker_TMP_0;;
}
191 return tmp;;
}
929 context = (struct imon_context *)tmp;
930 ifnum = (int)(interface->cur_altsetting->desc.bInterfaceNumber);
932 -ldv_mutex_lock_32(&(context->ctx_lock))
{
331 -ldv_mutex_lock_ctx_lock_of_imon_context(ldv_func_arg1)
{
66 assume(ldv_mutex_ctx_lock_of_imon_context != 1);
66 -ldv_error()
{
15 LDV_ERROR:;
}
}
}
}
}
Source code
1 2 /* 3 * lirc_imon.c: LIRC/VFD/LCD driver for SoundGraph iMON IR/VFD/LCD 4 * including the iMON PAD model 5 * 6 * Copyright(C) 2004 Venky Raju(dev@venky.ws) 7 * Copyright(C) 2009 Jarod Wilson <jarod@wilsonet.com> 8 * 9 * lirc_imon is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License as published by 11 * the Free Software Foundation; either version 2 of the License, or 12 * (at your option) any later version. 13 * 14 * This program is distributed in the hope that it will be useful, 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * GNU General Public License for more details. 18 * 19 * You should have received a copy of the GNU General Public License 20 * along with this program; if not, write to the Free Software 21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 22 */ 23 24 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 25 26 #include <linux/errno.h> 27 #include <linux/kernel.h> 28 #include <linux/module.h> 29 #include <linux/slab.h> 30 #include <linux/uaccess.h> 31 #include <linux/usb.h> 32 33 #include <media/lirc.h> 34 #include <media/lirc_dev.h> 35 36 37 #define MOD_AUTHOR "Venky Raju <dev@venky.ws>" 38 #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" 39 #define MOD_NAME "lirc_imon" 40 #define MOD_VERSION "0.8" 41 42 #define DISPLAY_MINOR_BASE 144 43 #define DEVICE_NAME "lcd%d" 44 45 #define BUF_CHUNK_SIZE 4 46 #define BUF_SIZE 128 47 48 #define BIT_DURATION 250 /* each bit received is 250us */ 49 50 /*** P R O T O T Y P E S ***/ 51 52 /* USB Callback prototypes */ 53 static int imon_probe(struct usb_interface *interface, 54 const struct usb_device_id *id); 55 static void imon_disconnect(struct usb_interface *interface); 56 static void usb_rx_callback(struct urb *urb); 57 static void usb_tx_callback(struct urb *urb); 58 59 /* suspend/resume support */ 60 static int imon_resume(struct 
usb_interface *intf); 61 static int imon_suspend(struct usb_interface *intf, pm_message_t message); 62 63 /* Display file_operations function prototypes */ 64 static int display_open(struct inode *inode, struct file *file); 65 static int display_close(struct inode *inode, struct file *file); 66 67 /* VFD write operation */ 68 static ssize_t vfd_write(struct file *file, const char __user *buf, 69 size_t n_bytes, loff_t *pos); 70 71 /* LIRC driver function prototypes */ 72 static int ir_open(void *data); 73 static void ir_close(void *data); 74 75 /*** G L O B A L S ***/ 76 #define IMON_DATA_BUF_SZ 35 77 78 struct imon_context { 79 struct usb_device *usbdev; 80 /* Newer devices have two interfaces */ 81 int display; /* not all controllers do */ 82 int display_isopen; /* display port has been opened */ 83 int ir_isopen; /* IR port open */ 84 int dev_present; /* USB device presence */ 85 struct mutex ctx_lock; /* to lock this object */ 86 wait_queue_head_t remove_ok; /* For unexpected USB disconnects */ 87 88 int vfd_proto_6p; /* some VFD require a 6th packet */ 89 90 struct lirc_driver *driver; 91 struct usb_endpoint_descriptor *rx_endpoint; 92 struct usb_endpoint_descriptor *tx_endpoint; 93 struct urb *rx_urb; 94 struct urb *tx_urb; 95 unsigned char usb_rx_buf[8]; 96 unsigned char usb_tx_buf[8]; 97 98 struct rx_data { 99 int count; /* length of 0 or 1 sequence */ 100 int prev_bit; /* logic level of sequence */ 101 int initial_space; /* initial space flag */ 102 } rx; 103 104 struct tx_t { 105 unsigned char data_buf[IMON_DATA_BUF_SZ]; /* user data buffer */ 106 struct completion finished; /* wait for write to finish */ 107 atomic_t busy; /* write in progress */ 108 int status; /* status of tx completion */ 109 } tx; 110 }; 111 112 static const struct file_operations display_fops = { 113 .owner = THIS_MODULE, 114 .open = &display_open, 115 .write = &vfd_write, 116 .release = &display_close, 117 .llseek = noop_llseek, 118 }; 119 120 /* 121 * USB Device ID for iMON USB 
Control Boards 122 * 123 * The Windows drivers contain 6 different inf files, more or less one for 124 * each new device until the 0x0034-0x0046 devices, which all use the same 125 * driver. Some of the devices in the 34-46 range haven't been definitively 126 * identified yet. Early devices have either a TriGem Computer, Inc. or a 127 * Samsung vendor ID (0x0aa8 and 0x04e8 respectively), while all later 128 * devices use the SoundGraph vendor ID (0x15c2). 129 */ 130 static struct usb_device_id imon_usb_id_table[] = { 131 /* TriGem iMON (IR only) -- TG_iMON.inf */ 132 { USB_DEVICE(0x0aa8, 0x8001) }, 133 134 /* SoundGraph iMON (IR only) -- sg_imon.inf */ 135 { USB_DEVICE(0x04e8, 0xff30) }, 136 137 /* SoundGraph iMON VFD (IR & VFD) -- iMON_VFD.inf */ 138 { USB_DEVICE(0x0aa8, 0xffda) }, 139 140 /* SoundGraph iMON SS (IR & VFD) -- iMON_SS.inf */ 141 { USB_DEVICE(0x15c2, 0xffda) }, 142 143 {} 144 }; 145 146 /* Some iMON VFD models requires a 6th packet for VFD writes */ 147 static struct usb_device_id vfd_proto_6p_list[] = { 148 { USB_DEVICE(0x15c2, 0xffda) }, 149 {} 150 }; 151 152 /* Some iMON devices have no lcd/vfd, don't set one up */ 153 static struct usb_device_id ir_only_list[] = { 154 { USB_DEVICE(0x0aa8, 0x8001) }, 155 { USB_DEVICE(0x04e8, 0xff30) }, 156 {} 157 }; 158 159 /* USB Device data */ 160 static struct usb_driver imon_driver = { 161 .name = MOD_NAME, 162 .probe = imon_probe, 163 .disconnect = imon_disconnect, 164 .suspend = imon_suspend, 165 .resume = imon_resume, 166 .id_table = imon_usb_id_table, 167 }; 168 169 static struct usb_class_driver imon_class = { 170 .name = DEVICE_NAME, 171 .fops = &display_fops, 172 .minor_base = DISPLAY_MINOR_BASE, 173 }; 174 175 /* to prevent races between open() and disconnect(), probing, etc */ 176 static DEFINE_MUTEX(driver_lock); 177 178 static int debug; 179 180 /*** M O D U L E C O D E ***/ 181 182 MODULE_AUTHOR(MOD_AUTHOR); 183 MODULE_DESCRIPTION(MOD_DESC); 184 MODULE_VERSION(MOD_VERSION); 185 
MODULE_LICENSE("GPL"); 186 MODULE_DEVICE_TABLE(usb, imon_usb_id_table); 187 module_param(debug, int, S_IRUGO | S_IWUSR); 188 MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes(default: no)"); 189 190 static void free_imon_context(struct imon_context *context) 191 { 192 struct device *dev = context->driver->dev; 193 194 usb_free_urb(context->tx_urb); 195 usb_free_urb(context->rx_urb); 196 lirc_buffer_free(context->driver->rbuf); 197 kfree(context->driver->rbuf); 198 kfree(context->driver); 199 kfree(context); 200 201 dev_dbg(dev, "%s: iMON context freed\n", __func__); 202 } 203 204 static void deregister_from_lirc(struct imon_context *context) 205 { 206 int retval; 207 int minor = context->driver->minor; 208 209 retval = lirc_unregister_driver(minor); 210 if (retval) 211 dev_err(&context->usbdev->dev, 212 "unable to deregister from lirc(%d)", retval); 213 else 214 dev_info(&context->usbdev->dev, 215 "Deregistered iMON driver (minor:%d)\n", minor); 216 217 } 218 219 /** 220 * Called when the Display device (e.g. /dev/lcd0) 221 * is opened by the application. 
222 */ 223 static int display_open(struct inode *inode, struct file *file) 224 { 225 struct usb_interface *interface; 226 struct imon_context *context = NULL; 227 int subminor; 228 int retval = 0; 229 230 /* prevent races with disconnect */ 231 mutex_lock(&driver_lock); 232 233 subminor = iminor(inode); 234 interface = usb_find_interface(&imon_driver, subminor); 235 if (!interface) { 236 pr_err("%s: could not find interface for minor %d\n", 237 __func__, subminor); 238 retval = -ENODEV; 239 goto exit; 240 } 241 context = usb_get_intfdata(interface); 242 243 if (!context) { 244 dev_err(&interface->dev, "no context found for minor %d\n", 245 subminor); 246 retval = -ENODEV; 247 goto exit; 248 } 249 250 mutex_lock(&context->ctx_lock); 251 252 if (!context->display) { 253 dev_err(&interface->dev, 254 "%s: display not supported by device\n", __func__); 255 retval = -ENODEV; 256 } else if (context->display_isopen) { 257 dev_err(&interface->dev, 258 "%s: display port is already open\n", __func__); 259 retval = -EBUSY; 260 } else { 261 context->display_isopen = 1; 262 file->private_data = context; 263 dev_info(context->driver->dev, "display port opened\n"); 264 } 265 266 mutex_unlock(&context->ctx_lock); 267 268 exit: 269 mutex_unlock(&driver_lock); 270 return retval; 271 } 272 273 /** 274 * Called when the display device (e.g. /dev/lcd0) 275 * is closed by the application. 
276 */ 277 static int display_close(struct inode *inode, struct file *file) 278 { 279 struct imon_context *context = NULL; 280 int retval = 0; 281 282 context = file->private_data; 283 284 if (!context) { 285 pr_err("%s: no context for device\n", __func__); 286 return -ENODEV; 287 } 288 289 mutex_lock(&context->ctx_lock); 290 291 if (!context->display) { 292 dev_err(&context->usbdev->dev, 293 "%s: display not supported by device\n", __func__); 294 retval = -ENODEV; 295 } else if (!context->display_isopen) { 296 dev_err(&context->usbdev->dev, 297 "%s: display is not open\n", __func__); 298 retval = -EIO; 299 } else { 300 context->display_isopen = 0; 301 dev_info(context->driver->dev, "display port closed\n"); 302 if (!context->dev_present && !context->ir_isopen) { 303 /* 304 * Device disconnected before close and IR port is not 305 * open. If IR port is open, context will be deleted by 306 * ir_close. 307 */ 308 mutex_unlock(&context->ctx_lock); 309 free_imon_context(context); 310 return retval; 311 } 312 } 313 314 mutex_unlock(&context->ctx_lock); 315 return retval; 316 } 317 318 /** 319 * Sends a packet to the device -- this function must be called 320 * with context->ctx_lock held. 
321 */ 322 static int send_packet(struct imon_context *context) 323 { 324 unsigned int pipe; 325 int interval = 0; 326 int retval = 0; 327 328 /* Check if we need to use control or interrupt urb */ 329 pipe = usb_sndintpipe(context->usbdev, 330 context->tx_endpoint->bEndpointAddress); 331 interval = context->tx_endpoint->bInterval; 332 333 usb_fill_int_urb(context->tx_urb, context->usbdev, pipe, 334 context->usb_tx_buf, 335 sizeof(context->usb_tx_buf), 336 usb_tx_callback, context, interval); 337 338 context->tx_urb->actual_length = 0; 339 340 init_completion(&context->tx.finished); 341 atomic_set(&context->tx.busy, 1); 342 343 retval = usb_submit_urb(context->tx_urb, GFP_KERNEL); 344 if (retval) { 345 atomic_set(&context->tx.busy, 0); 346 dev_err(&context->usbdev->dev, "error submitting urb(%d)\n", 347 retval); 348 } else { 349 /* Wait for transmission to complete (or abort) */ 350 mutex_unlock(&context->ctx_lock); 351 retval = wait_for_completion_interruptible( 352 &context->tx.finished); 353 if (retval) 354 dev_err(&context->usbdev->dev, 355 "%s: task interrupted\n", __func__); 356 mutex_lock(&context->ctx_lock); 357 358 retval = context->tx.status; 359 if (retval) 360 dev_err(&context->usbdev->dev, 361 "packet tx failed (%d)\n", retval); 362 } 363 364 return retval; 365 } 366 367 /** 368 * Writes data to the VFD. The iMON VFD is 2x16 characters 369 * and requires data in 5 consecutive USB interrupt packets, 370 * each packet but the last carrying 7 bytes. 371 * 372 * I don't know if the VFD board supports features such as 373 * scrolling, clearing rows, blanking, etc. so at 374 * the caller must provide a full screen of data. If fewer 375 * than 32 bytes are provided spaces will be appended to 376 * generate a full screen. 
377 */ 378 static ssize_t vfd_write(struct file *file, const char __user *buf, 379 size_t n_bytes, loff_t *pos) 380 { 381 int i; 382 int offset; 383 int seq; 384 int retval = 0; 385 struct imon_context *context; 386 const unsigned char vfd_packet6[] = { 387 0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF }; 388 int *data_buf = NULL; 389 390 context = file->private_data; 391 if (!context) { 392 pr_err("%s: no context for device\n", __func__); 393 return -ENODEV; 394 } 395 396 mutex_lock(&context->ctx_lock); 397 398 if (!context->dev_present) { 399 dev_err(&context->usbdev->dev, 400 "%s: no iMON device present\n", __func__); 401 retval = -ENODEV; 402 goto exit; 403 } 404 405 if (n_bytes <= 0 || n_bytes > IMON_DATA_BUF_SZ - 3) { 406 dev_err(&context->usbdev->dev, 407 "%s: invalid payload size\n", __func__); 408 retval = -EINVAL; 409 goto exit; 410 } 411 412 data_buf = memdup_user(buf, n_bytes); 413 if (IS_ERR(data_buf)) { 414 retval = PTR_ERR(data_buf); 415 data_buf = NULL; 416 goto exit; 417 } 418 419 memcpy(context->tx.data_buf, data_buf, n_bytes); 420 421 /* Pad with spaces */ 422 for (i = n_bytes; i < IMON_DATA_BUF_SZ - 3; ++i) 423 context->tx.data_buf[i] = ' '; 424 425 for (i = IMON_DATA_BUF_SZ - 3; i < IMON_DATA_BUF_SZ; ++i) 426 context->tx.data_buf[i] = 0xFF; 427 428 offset = 0; 429 seq = 0; 430 431 do { 432 memcpy(context->usb_tx_buf, context->tx.data_buf + offset, 7); 433 context->usb_tx_buf[7] = (unsigned char) seq; 434 435 retval = send_packet(context); 436 if (retval) { 437 dev_err(&context->usbdev->dev, 438 "send packet failed for packet #%d\n", 439 seq / 2); 440 goto exit; 441 } else { 442 seq += 2; 443 offset += 7; 444 } 445 446 } while (offset < IMON_DATA_BUF_SZ); 447 448 if (context->vfd_proto_6p) { 449 /* Send packet #6 */ 450 memcpy(context->usb_tx_buf, &vfd_packet6, sizeof(vfd_packet6)); 451 context->usb_tx_buf[7] = (unsigned char) seq; 452 retval = send_packet(context); 453 if (retval) 454 dev_err(&context->usbdev->dev, 455 "send packet failed for packet 
#%d\n", 456 seq / 2); 457 } 458 459 exit: 460 mutex_unlock(&context->ctx_lock); 461 kfree(data_buf); 462 463 return (!retval) ? n_bytes : retval; 464 } 465 466 /** 467 * Callback function for USB core API: transmit data 468 */ 469 static void usb_tx_callback(struct urb *urb) 470 { 471 struct imon_context *context; 472 473 if (!urb) 474 return; 475 context = (struct imon_context *)urb->context; 476 if (!context) 477 return; 478 479 context->tx.status = urb->status; 480 481 /* notify waiters that write has finished */ 482 atomic_set(&context->tx.busy, 0); 483 complete(&context->tx.finished); 484 } 485 486 /** 487 * Called by lirc_dev when the application opens /dev/lirc 488 */ 489 static int ir_open(void *data) 490 { 491 struct imon_context *context; 492 493 /* prevent races with disconnect */ 494 mutex_lock(&driver_lock); 495 496 context = data; 497 498 /* initial IR protocol decode variables */ 499 context->rx.count = 0; 500 context->rx.initial_space = 1; 501 context->rx.prev_bit = 0; 502 503 context->ir_isopen = 1; 504 dev_info(context->driver->dev, "IR port opened\n"); 505 506 mutex_unlock(&driver_lock); 507 return 0; 508 } 509 510 /** 511 * Called by lirc_dev when the application closes /dev/lirc 512 */ 513 static void ir_close(void *data) 514 { 515 struct imon_context *context; 516 517 context = data; 518 if (!context) { 519 pr_err("%s: no context for device\n", __func__); 520 return; 521 } 522 523 mutex_lock(&context->ctx_lock); 524 525 context->ir_isopen = 0; 526 dev_info(context->driver->dev, "IR port closed\n"); 527 528 if (!context->dev_present) { 529 /* 530 * Device disconnected while IR port was still open. Driver 531 * was not deregistered at disconnect time, so do it now. 
532 */ 533 deregister_from_lirc(context); 534 535 if (!context->display_isopen) { 536 mutex_unlock(&context->ctx_lock); 537 free_imon_context(context); 538 return; 539 } 540 /* 541 * If display port is open, context will be deleted by 542 * display_close 543 */ 544 } 545 546 mutex_unlock(&context->ctx_lock); 547 } 548 549 /** 550 * Convert bit count to time duration (in us) and submit 551 * the value to lirc_dev. 552 */ 553 static void submit_data(struct imon_context *context) 554 { 555 unsigned char buf[4]; 556 int value = context->rx.count; 557 int i; 558 559 dev_dbg(context->driver->dev, "submitting data to LIRC\n"); 560 561 value *= BIT_DURATION; 562 value &= PULSE_MASK; 563 if (context->rx.prev_bit) 564 value |= PULSE_BIT; 565 566 for (i = 0; i < 4; ++i) 567 buf[i] = value>>(i*8); 568 569 lirc_buffer_write(context->driver->rbuf, buf); 570 wake_up(&context->driver->rbuf->wait_poll); 571 } 572 573 /** 574 * Process the incoming packet 575 */ 576 static void imon_incoming_packet(struct imon_context *context, 577 struct urb *urb, int intf) 578 { 579 int len = urb->actual_length; 580 unsigned char *buf = urb->transfer_buffer; 581 struct device *dev = context->driver->dev; 582 int octet, bit; 583 unsigned char mask; 584 585 /* 586 * just bail out if no listening IR client 587 */ 588 if (!context->ir_isopen) 589 return; 590 591 if (len != 8) { 592 dev_warn(dev, "imon %s: invalid incoming packet size (len = %d, intf%d)\n", 593 __func__, len, intf); 594 return; 595 } 596 597 if (debug) 598 dev_info(dev, "raw packet: %*ph\n", len, buf); 599 /* 600 * Translate received data to pulse and space lengths. 601 * Received data is active low, i.e. pulses are 0 and 602 * spaces are 1. 603 * 604 * My original algorithm was essentially similar to 605 * Changwoo Ryu's with the exception that he switched 606 * the incoming bits to active high and also fed an 607 * initial space to LIRC at the start of a new sequence 608 * if the previous bit was a pulse. 
609 * 610 * I've decided to adopt his algorithm. 611 */ 612 613 if (buf[7] == 1 && context->rx.initial_space) { 614 /* LIRC requires a leading space */ 615 context->rx.prev_bit = 0; 616 context->rx.count = 4; 617 submit_data(context); 618 context->rx.count = 0; 619 } 620 621 for (octet = 0; octet < 5; ++octet) { 622 mask = 0x80; 623 for (bit = 0; bit < 8; ++bit) { 624 int curr_bit = !(buf[octet] & mask); 625 626 if (curr_bit != context->rx.prev_bit) { 627 if (context->rx.count) { 628 submit_data(context); 629 context->rx.count = 0; 630 } 631 context->rx.prev_bit = curr_bit; 632 } 633 ++context->rx.count; 634 mask >>= 1; 635 } 636 } 637 638 if (buf[7] == 10) { 639 if (context->rx.count) { 640 submit_data(context); 641 context->rx.count = 0; 642 } 643 context->rx.initial_space = context->rx.prev_bit; 644 } 645 } 646 647 /** 648 * Callback function for USB core API: receive data 649 */ 650 static void usb_rx_callback(struct urb *urb) 651 { 652 struct imon_context *context; 653 int intfnum = 0; 654 655 if (!urb) 656 return; 657 658 context = (struct imon_context *)urb->context; 659 if (!context) 660 return; 661 662 switch (urb->status) { 663 case -ENOENT: /* usbcore unlink successful! 
*/ 664 return; 665 666 case 0: 667 imon_incoming_packet(context, urb, intfnum); 668 break; 669 670 default: 671 dev_warn(context->driver->dev, "imon %s: status(%d): ignored\n", 672 __func__, urb->status); 673 break; 674 } 675 676 usb_submit_urb(context->rx_urb, GFP_ATOMIC); 677 } 678 679 /** 680 * Callback function for USB core API: Probe 681 */ 682 static int imon_probe(struct usb_interface *interface, 683 const struct usb_device_id *id) 684 { 685 struct usb_device *usbdev = NULL; 686 struct usb_host_interface *iface_desc = NULL; 687 struct usb_endpoint_descriptor *rx_endpoint = NULL; 688 struct usb_endpoint_descriptor *tx_endpoint = NULL; 689 struct urb *rx_urb = NULL; 690 struct urb *tx_urb = NULL; 691 struct lirc_driver *driver = NULL; 692 struct lirc_buffer *rbuf = NULL; 693 struct device *dev = &interface->dev; 694 int ifnum; 695 int lirc_minor = 0; 696 int num_endpts; 697 int retval = -ENOMEM; 698 int display_ep_found = 0; 699 int ir_ep_found = 0; 700 int vfd_proto_6p = 0; 701 struct imon_context *context = NULL; 702 int i; 703 u16 vendor, product; 704 705 /* prevent races probing devices w/multiple interfaces */ 706 mutex_lock(&driver_lock); 707 708 context = kzalloc(sizeof(struct imon_context), GFP_KERNEL); 709 if (!context) 710 goto driver_unlock; 711 712 /* 713 * Try to auto-detect the type of display if the user hasn't set 714 * it by hand via the display_type modparam. Default is VFD. 
715 */ 716 if (usb_match_id(interface, ir_only_list)) 717 context->display = 0; 718 else 719 context->display = 1; 720 721 usbdev = usb_get_dev(interface_to_usbdev(interface)); 722 iface_desc = interface->cur_altsetting; 723 num_endpts = iface_desc->desc.bNumEndpoints; 724 ifnum = iface_desc->desc.bInterfaceNumber; 725 vendor = le16_to_cpu(usbdev->descriptor.idVendor); 726 product = le16_to_cpu(usbdev->descriptor.idProduct); 727 728 dev_dbg(dev, "%s: found iMON device (%04x:%04x, intf%d)\n", 729 __func__, vendor, product, ifnum); 730 731 /* 732 * Scan the endpoint list and set: 733 * first input endpoint = IR endpoint 734 * first output endpoint = display endpoint 735 */ 736 for (i = 0; i < num_endpts && !(ir_ep_found && display_ep_found); ++i) { 737 struct usb_endpoint_descriptor *ep; 738 int ep_dir; 739 int ep_type; 740 741 ep = &iface_desc->endpoint[i].desc; 742 ep_dir = ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK; 743 ep_type = usb_endpoint_type(ep); 744 745 if (!ir_ep_found && 746 ep_dir == USB_DIR_IN && 747 ep_type == USB_ENDPOINT_XFER_INT) { 748 749 rx_endpoint = ep; 750 ir_ep_found = 1; 751 dev_dbg(dev, "%s: found IR endpoint\n", __func__); 752 753 } else if (!display_ep_found && ep_dir == USB_DIR_OUT && 754 ep_type == USB_ENDPOINT_XFER_INT) { 755 tx_endpoint = ep; 756 display_ep_found = 1; 757 dev_dbg(dev, "%s: found display endpoint\n", __func__); 758 } 759 } 760 761 /* 762 * Some iMON receivers have no display. Unfortunately, it seems 763 * that SoundGraph recycles device IDs between devices both with 764 * and without... 
:\ 765 */ 766 if (context->display == 0) { 767 display_ep_found = 0; 768 dev_dbg(dev, "%s: device has no display\n", __func__); 769 } 770 771 /* Input endpoint is mandatory */ 772 if (!ir_ep_found) { 773 dev_err(dev, "%s: no valid input (IR) endpoint found.\n", 774 __func__); 775 retval = -ENODEV; 776 goto free_context; 777 } 778 779 /* Determine if display requires 6 packets */ 780 if (display_ep_found) { 781 if (usb_match_id(interface, vfd_proto_6p_list)) 782 vfd_proto_6p = 1; 783 784 dev_dbg(dev, "%s: vfd_proto_6p: %d\n", 785 __func__, vfd_proto_6p); 786 } 787 788 driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL); 789 if (!driver) 790 goto free_context; 791 792 rbuf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL); 793 if (!rbuf) 794 goto free_driver; 795 796 if (lirc_buffer_init(rbuf, BUF_CHUNK_SIZE, BUF_SIZE)) { 797 dev_err(dev, "%s: lirc_buffer_init failed\n", __func__); 798 goto free_rbuf; 799 } 800 rx_urb = usb_alloc_urb(0, GFP_KERNEL); 801 if (!rx_urb) { 802 dev_err(dev, "%s: usb_alloc_urb failed for IR urb\n", __func__); 803 goto free_lirc_buf; 804 } 805 tx_urb = usb_alloc_urb(0, GFP_KERNEL); 806 if (!tx_urb) { 807 dev_err(dev, "%s: usb_alloc_urb failed for display urb\n", 808 __func__); 809 goto free_rx_urb; 810 } 811 812 mutex_init(&context->ctx_lock); 813 context->vfd_proto_6p = vfd_proto_6p; 814 815 strcpy(driver->name, MOD_NAME); 816 driver->minor = -1; 817 driver->code_length = BUF_CHUNK_SIZE * 8; 818 driver->sample_rate = 0; 819 driver->features = LIRC_CAN_REC_MODE2; 820 driver->data = context; 821 driver->rbuf = rbuf; 822 driver->set_use_inc = ir_open; 823 driver->set_use_dec = ir_close; 824 driver->dev = &interface->dev; 825 driver->owner = THIS_MODULE; 826 827 mutex_lock(&context->ctx_lock); 828 829 context->driver = driver; 830 /* start out in keyboard mode */ 831 832 lirc_minor = lirc_register_driver(driver); 833 if (lirc_minor < 0) { 834 dev_err(dev, "%s: lirc_register_driver failed\n", __func__); 835 goto free_tx_urb; 836 } 837 838 
dev_info(dev, "Registered iMON driver (lirc minor: %d)\n", 839 lirc_minor); 840 841 /* Needed while unregistering! */ 842 driver->minor = lirc_minor; 843 844 context->usbdev = usbdev; 845 context->dev_present = 1; 846 context->rx_endpoint = rx_endpoint; 847 context->rx_urb = rx_urb; 848 849 /* 850 * tx is used to send characters to lcd/vfd, associate RF 851 * remotes, set IR protocol, and maybe more... 852 */ 853 context->tx_endpoint = tx_endpoint; 854 context->tx_urb = tx_urb; 855 856 if (display_ep_found) 857 context->display = 1; 858 859 usb_fill_int_urb(context->rx_urb, context->usbdev, 860 usb_rcvintpipe(context->usbdev, 861 context->rx_endpoint->bEndpointAddress), 862 context->usb_rx_buf, sizeof(context->usb_rx_buf), 863 usb_rx_callback, context, 864 context->rx_endpoint->bInterval); 865 866 retval = usb_submit_urb(context->rx_urb, GFP_KERNEL); 867 if (retval) { 868 dev_err(dev, "usb_submit_urb failed for intf0 (%d)\n", retval); 869 goto unregister_lirc; 870 } 871 872 usb_set_intfdata(interface, context); 873 874 if (context->display && ifnum == 0) { 875 dev_dbg(dev, "%s: Registering iMON display with sysfs\n", 876 __func__); 877 878 if (usb_register_dev(interface, &imon_class)) { 879 /* Not a fatal error, so ignore */ 880 dev_info(dev, "%s: could not get a minor number for display\n", 881 __func__); 882 } 883 } 884 885 dev_info(dev, "iMON device (%04x:%04x, intf%d) on usb<%d:%d> initialized\n", 886 vendor, product, ifnum, usbdev->bus->busnum, usbdev->devnum); 887 888 /* Everything went fine. 
Just unlock and return retval (with is 0) */ 889 goto driver_unlock; 890 891 unregister_lirc: 892 lirc_unregister_driver(driver->minor); 893 894 free_tx_urb: 895 usb_free_urb(tx_urb); 896 897 free_rx_urb: 898 usb_free_urb(rx_urb); 899 900 free_lirc_buf: 901 lirc_buffer_free(rbuf); 902 903 free_rbuf: 904 kfree(rbuf); 905 906 free_driver: 907 kfree(driver); 908 free_context: 909 kfree(context); 910 context = NULL; 911 912 driver_unlock: 913 mutex_unlock(&driver_lock); 914 915 return retval; 916 } 917 918 /** 919 * Callback function for USB core API: disconnect 920 */ 921 static void imon_disconnect(struct usb_interface *interface) 922 { 923 struct imon_context *context; 924 int ifnum; 925 926 /* prevent races with ir_open()/display_open() */ 927 mutex_lock(&driver_lock); 928 929 context = usb_get_intfdata(interface); 930 ifnum = interface->cur_altsetting->desc.bInterfaceNumber; 931 932 mutex_lock(&context->ctx_lock); 933 934 usb_set_intfdata(interface, NULL); 935 936 /* Abort ongoing write */ 937 if (atomic_read(&context->tx.busy)) { 938 usb_kill_urb(context->tx_urb); 939 complete_all(&context->tx.finished); 940 } 941 942 context->dev_present = 0; 943 usb_kill_urb(context->rx_urb); 944 if (context->display) 945 usb_deregister_dev(interface, &imon_class); 946 947 if (!context->ir_isopen && !context->dev_present) { 948 deregister_from_lirc(context); 949 mutex_unlock(&context->ctx_lock); 950 if (!context->display_isopen) 951 free_imon_context(context); 952 } else 953 mutex_unlock(&context->ctx_lock); 954 955 mutex_unlock(&driver_lock); 956 957 dev_info(&interface->dev, "%s: iMON device (intf%d) disconnected\n", 958 __func__, ifnum); 959 } 960 961 static int imon_suspend(struct usb_interface *intf, pm_message_t message) 962 { 963 struct imon_context *context = usb_get_intfdata(intf); 964 965 usb_kill_urb(context->rx_urb); 966 967 return 0; 968 } 969 970 static int imon_resume(struct usb_interface *intf) 971 { 972 struct imon_context *context = usb_get_intfdata(intf); 973 
974 usb_fill_int_urb(context->rx_urb, context->usbdev, 975 usb_rcvintpipe(context->usbdev, 976 context->rx_endpoint->bEndpointAddress), 977 context->usb_rx_buf, sizeof(context->usb_rx_buf), 978 usb_rx_callback, context, 979 context->rx_endpoint->bInterval); 980 981 return usb_submit_urb(context->rx_urb, GFP_ATOMIC); 982 } 983 984 module_usb_driver(imon_driver); 985 986 987 988 989 990 /* LDV_COMMENT_BEGIN_MAIN */ 991 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful 992 993 /*###########################################################################*/ 994 995 /*############## Driver Environment Generator 0.2 output ####################*/ 996 997 /*###########################################################################*/ 998 999 1000 1001 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */ 1002 void ldv_check_final_state(void); 1003 1004 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */ 1005 void ldv_check_return_value(int res); 1006 1007 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */ 1008 void ldv_check_return_value_probe(int res); 1009 1010 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */ 1011 void ldv_initialize(void); 1012 1013 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */ 1014 void ldv_handler_precall(void); 1015 1016 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */ 1017 int nondet_int(void); 1018 1019 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */ 1020 int LDV_IN_INTERRUPT; 1021 1022 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. 
*/ 1023 void ldv_main0_sequence_infinite_withcheck_stateful(void) { 1024 1025 1026 1027 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */ 1028 /*============================= VARIABLE DECLARATION PART =============================*/ 1029 /** STRUCT: struct type: usb_driver, struct name: imon_driver **/ 1030 /* content: static int imon_probe(struct usb_interface *interface, const struct usb_device_id *id)*/ 1031 /* LDV_COMMENT_BEGIN_PREP */ 1032 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 1033 #define MOD_AUTHOR "Venky Raju <dev@venky.ws>" 1034 #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" 1035 #define MOD_NAME "lirc_imon" 1036 #define MOD_VERSION "0.8" 1037 #define DISPLAY_MINOR_BASE 144 1038 #define DEVICE_NAME "lcd%d" 1039 #define BUF_CHUNK_SIZE 4 1040 #define BUF_SIZE 128 1041 #define BIT_DURATION 250 1042 #define IMON_DATA_BUF_SZ 35 1043 /* LDV_COMMENT_END_PREP */ 1044 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "imon_probe" */ 1045 struct usb_interface * var_group1; 1046 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "imon_probe" */ 1047 const struct usb_device_id * var_imon_probe_12_p1; 1048 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "imon_probe" */ 1049 static int res_imon_probe_12; 1050 /* content: static void imon_disconnect(struct usb_interface *interface)*/ 1051 /* LDV_COMMENT_BEGIN_PREP */ 1052 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 1053 #define MOD_AUTHOR "Venky Raju <dev@venky.ws>" 1054 #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" 1055 #define MOD_NAME "lirc_imon" 1056 #define MOD_VERSION "0.8" 1057 #define DISPLAY_MINOR_BASE 144 1058 #define DEVICE_NAME "lcd%d" 1059 #define BUF_CHUNK_SIZE 4 1060 #define BUF_SIZE 128 1061 #define BIT_DURATION 250 1062 #define IMON_DATA_BUF_SZ 35 1063 /* LDV_COMMENT_END_PREP */ 1064 /* content: static int imon_suspend(struct usb_interface *intf, pm_message_t message)*/ 1065 /* 
LDV_COMMENT_BEGIN_PREP */ 1066 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 1067 #define MOD_AUTHOR "Venky Raju <dev@venky.ws>" 1068 #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" 1069 #define MOD_NAME "lirc_imon" 1070 #define MOD_VERSION "0.8" 1071 #define DISPLAY_MINOR_BASE 144 1072 #define DEVICE_NAME "lcd%d" 1073 #define BUF_CHUNK_SIZE 4 1074 #define BUF_SIZE 128 1075 #define BIT_DURATION 250 1076 #define IMON_DATA_BUF_SZ 35 1077 /* LDV_COMMENT_END_PREP */ 1078 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "imon_suspend" */ 1079 pm_message_t var_imon_suspend_14_p1; 1080 /* content: static int imon_resume(struct usb_interface *intf)*/ 1081 /* LDV_COMMENT_BEGIN_PREP */ 1082 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 1083 #define MOD_AUTHOR "Venky Raju <dev@venky.ws>" 1084 #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" 1085 #define MOD_NAME "lirc_imon" 1086 #define MOD_VERSION "0.8" 1087 #define DISPLAY_MINOR_BASE 144 1088 #define DEVICE_NAME "lcd%d" 1089 #define BUF_CHUNK_SIZE 4 1090 #define BUF_SIZE 128 1091 #define BIT_DURATION 250 1092 #define IMON_DATA_BUF_SZ 35 1093 /* LDV_COMMENT_END_PREP */ 1094 1095 1096 1097 1098 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */ 1099 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */ 1100 /*============================= VARIABLE INITIALIZING PART =============================*/ 1101 LDV_IN_INTERRUPT=1; 1102 1103 1104 1105 1106 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */ 1107 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */ 1108 /*============================= FUNCTION CALL SECTION =============================*/ 1109 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. 
*/ 1110 ldv_initialize(); 1111 int ldv_s_imon_driver_usb_driver = 0; 1112 1113 1114 while( nondet_int() 1115 || !(ldv_s_imon_driver_usb_driver == 0) 1116 ) { 1117 1118 switch(nondet_int()) { 1119 1120 case 0: { 1121 1122 /** STRUCT: struct type: usb_driver, struct name: imon_driver **/ 1123 if(ldv_s_imon_driver_usb_driver==0) { 1124 1125 /* content: static int imon_probe(struct usb_interface *interface, const struct usb_device_id *id)*/ 1126 /* LDV_COMMENT_BEGIN_PREP */ 1127 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 1128 #define MOD_AUTHOR "Venky Raju <dev@venky.ws>" 1129 #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" 1130 #define MOD_NAME "lirc_imon" 1131 #define MOD_VERSION "0.8" 1132 #define DISPLAY_MINOR_BASE 144 1133 #define DEVICE_NAME "lcd%d" 1134 #define BUF_CHUNK_SIZE 4 1135 #define BUF_SIZE 128 1136 #define BIT_DURATION 250 1137 #define IMON_DATA_BUF_SZ 35 1138 /* LDV_COMMENT_END_PREP */ 1139 /* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "imon_driver". Standart function test for correct return result. 
*/ 1140 res_imon_probe_12 = imon_probe( var_group1, var_imon_probe_12_p1); 1141 ldv_check_return_value(res_imon_probe_12); 1142 ldv_check_return_value_probe(res_imon_probe_12); 1143 if(res_imon_probe_12) 1144 goto ldv_module_exit; 1145 ldv_s_imon_driver_usb_driver++; 1146 1147 } 1148 1149 } 1150 1151 break; 1152 case 1: { 1153 1154 /** STRUCT: struct type: usb_driver, struct name: imon_driver **/ 1155 if(ldv_s_imon_driver_usb_driver==1) { 1156 1157 /* content: static int imon_suspend(struct usb_interface *intf, pm_message_t message)*/ 1158 /* LDV_COMMENT_BEGIN_PREP */ 1159 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 1160 #define MOD_AUTHOR "Venky Raju <dev@venky.ws>" 1161 #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" 1162 #define MOD_NAME "lirc_imon" 1163 #define MOD_VERSION "0.8" 1164 #define DISPLAY_MINOR_BASE 144 1165 #define DEVICE_NAME "lcd%d" 1166 #define BUF_CHUNK_SIZE 4 1167 #define BUF_SIZE 128 1168 #define BIT_DURATION 250 1169 #define IMON_DATA_BUF_SZ 35 1170 /* LDV_COMMENT_END_PREP */ 1171 /* LDV_COMMENT_FUNCTION_CALL Function from field "suspend" from driver structure with callbacks "imon_driver" */ 1172 ldv_handler_precall(); 1173 imon_suspend( var_group1, var_imon_suspend_14_p1); 1174 ldv_s_imon_driver_usb_driver++; 1175 1176 } 1177 1178 } 1179 1180 break; 1181 case 2: { 1182 1183 /** STRUCT: struct type: usb_driver, struct name: imon_driver **/ 1184 if(ldv_s_imon_driver_usb_driver==2) { 1185 1186 /* content: static int imon_resume(struct usb_interface *intf)*/ 1187 /* LDV_COMMENT_BEGIN_PREP */ 1188 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 1189 #define MOD_AUTHOR "Venky Raju <dev@venky.ws>" 1190 #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" 1191 #define MOD_NAME "lirc_imon" 1192 #define MOD_VERSION "0.8" 1193 #define DISPLAY_MINOR_BASE 144 1194 #define DEVICE_NAME "lcd%d" 1195 #define BUF_CHUNK_SIZE 4 1196 #define BUF_SIZE 128 1197 #define BIT_DURATION 250 1198 #define IMON_DATA_BUF_SZ 35 1199 /* 
LDV_COMMENT_END_PREP */ 1200 /* LDV_COMMENT_FUNCTION_CALL Function from field "resume" from driver structure with callbacks "imon_driver" */ 1201 ldv_handler_precall(); 1202 imon_resume( var_group1); 1203 ldv_s_imon_driver_usb_driver++; 1204 1205 } 1206 1207 } 1208 1209 break; 1210 case 3: { 1211 1212 /** STRUCT: struct type: usb_driver, struct name: imon_driver **/ 1213 if(ldv_s_imon_driver_usb_driver==3) { 1214 1215 /* content: static void imon_disconnect(struct usb_interface *interface)*/ 1216 /* LDV_COMMENT_BEGIN_PREP */ 1217 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 1218 #define MOD_AUTHOR "Venky Raju <dev@venky.ws>" 1219 #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" 1220 #define MOD_NAME "lirc_imon" 1221 #define MOD_VERSION "0.8" 1222 #define DISPLAY_MINOR_BASE 144 1223 #define DEVICE_NAME "lcd%d" 1224 #define BUF_CHUNK_SIZE 4 1225 #define BUF_SIZE 128 1226 #define BIT_DURATION 250 1227 #define IMON_DATA_BUF_SZ 35 1228 /* LDV_COMMENT_END_PREP */ 1229 /* LDV_COMMENT_FUNCTION_CALL Function from field "disconnect" from driver structure with callbacks "imon_driver" */ 1230 ldv_handler_precall(); 1231 imon_disconnect( var_group1); 1232 ldv_s_imon_driver_usb_driver=0; 1233 1234 } 1235 1236 } 1237 1238 break; 1239 default: break; 1240 1241 } 1242 1243 } 1244 1245 ldv_module_exit: 1246 1247 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */ 1248 ldv_final: ldv_check_final_state(); 1249 1250 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */ 1251 return; 1252 1253 } 1254 #endif 1255 1256 /* LDV_COMMENT_END_MAIN */
1 2 #include <linux/kernel.h> 3 bool ldv_is_err(const void *ptr); 4 bool ldv_is_err_or_null(const void *ptr); 5 void* ldv_err_ptr(long error); 6 long ldv_ptr_err(const void *ptr); 7 8 #include <linux/mutex.h> 9 #include <verifier/rcv.h> 10 11 struct usb_device; 12 extern void __ldv_usb_lock_device(struct usb_device *udev); 13 extern void __ldv_usb_unlock_device(struct usb_device *udev); 14 extern int __ldv_usb_trylock_device(struct usb_device *udev); 15 16 extern int mutex_lock_interruptible(struct mutex *lock); 17 extern int mutex_lock_killable(struct mutex *lock); 18 extern void mutex_lock(struct mutex *lock); 19 extern int ldv_mutex_lock_interruptible_ctx_lock_of_imon_context(struct mutex *lock); 20 extern int ldv_mutex_lock_killable_ctx_lock_of_imon_context(struct mutex *lock); 21 extern void ldv_mutex_lock_nested_ctx_lock_of_imon_context(struct mutex *lock, unsigned int subclass); 22 extern void ldv_mutex_lock_ctx_lock_of_imon_context(struct mutex *lock); 23 extern int ldv_mutex_trylock_ctx_lock_of_imon_context(struct mutex *lock); 24 extern int ldv_atomic_dec_and_mutex_lock_ctx_lock_of_imon_context(atomic_t *cnt, struct mutex *lock); 25 extern int ldv_mutex_is_locked_ctx_lock_of_imon_context(struct mutex *lock); 26 extern void ldv_mutex_unlock_ctx_lock_of_imon_context(struct mutex *lock); 27 28 extern void ldv_usb_lock_device_ctx_lock_of_imon_context(void); 29 extern void ldv_usb_unlock_device_ctx_lock_of_imon_context(void); 30 extern int ldv_usb_trylock_device_ctx_lock_of_imon_context(void); 31 extern int ldv_usb_lock_device_for_reset_ctx_lock_of_imon_context(void); 32 extern int ldv_mutex_lock_interruptible_driver_lock(struct mutex *lock); 33 extern int ldv_mutex_lock_killable_driver_lock(struct mutex *lock); 34 extern void ldv_mutex_lock_nested_driver_lock(struct mutex *lock, unsigned int subclass); 35 extern void ldv_mutex_lock_driver_lock(struct mutex *lock); 36 extern int ldv_mutex_trylock_driver_lock(struct mutex *lock); 37 extern int 
ldv_atomic_dec_and_mutex_lock_driver_lock(atomic_t *cnt, struct mutex *lock); 38 extern int ldv_mutex_is_locked_driver_lock(struct mutex *lock); 39 extern void ldv_mutex_unlock_driver_lock(struct mutex *lock); 40 41 extern void ldv_usb_lock_device_driver_lock(void); 42 extern void ldv_usb_unlock_device_driver_lock(void); 43 extern int ldv_usb_trylock_device_driver_lock(void); 44 extern int ldv_usb_lock_device_for_reset_driver_lock(void); 45 extern int ldv_mutex_lock_interruptible_i_mutex_of_inode(struct mutex *lock); 46 extern int ldv_mutex_lock_killable_i_mutex_of_inode(struct mutex *lock); 47 extern void ldv_mutex_lock_nested_i_mutex_of_inode(struct mutex *lock, unsigned int subclass); 48 extern void ldv_mutex_lock_i_mutex_of_inode(struct mutex *lock); 49 extern int ldv_mutex_trylock_i_mutex_of_inode(struct mutex *lock); 50 extern int ldv_atomic_dec_and_mutex_lock_i_mutex_of_inode(atomic_t *cnt, struct mutex *lock); 51 extern int ldv_mutex_is_locked_i_mutex_of_inode(struct mutex *lock); 52 extern void ldv_mutex_unlock_i_mutex_of_inode(struct mutex *lock); 53 54 extern void ldv_usb_lock_device_i_mutex_of_inode(void); 55 extern void ldv_usb_unlock_device_i_mutex_of_inode(void); 56 extern int ldv_usb_trylock_device_i_mutex_of_inode(void); 57 extern int ldv_usb_lock_device_for_reset_i_mutex_of_inode(void); 58 extern int ldv_mutex_lock_interruptible_lock(struct mutex *lock); 59 extern int ldv_mutex_lock_killable_lock(struct mutex *lock); 60 extern void ldv_mutex_lock_nested_lock(struct mutex *lock, unsigned int subclass); 61 extern void ldv_mutex_lock_lock(struct mutex *lock); 62 extern int ldv_mutex_trylock_lock(struct mutex *lock); 63 extern int ldv_atomic_dec_and_mutex_lock_lock(atomic_t *cnt, struct mutex *lock); 64 extern int ldv_mutex_is_locked_lock(struct mutex *lock); 65 extern void ldv_mutex_unlock_lock(struct mutex *lock); 66 67 extern void ldv_usb_lock_device_lock(void); 68 extern void ldv_usb_unlock_device_lock(void); 69 extern int 
ldv_usb_trylock_device_lock(void); 70 extern int ldv_usb_lock_device_for_reset_lock(void); 71 extern int ldv_mutex_lock_interruptible_mutex_of_device(struct mutex *lock); 72 extern int ldv_mutex_lock_killable_mutex_of_device(struct mutex *lock); 73 extern void ldv_mutex_lock_nested_mutex_of_device(struct mutex *lock, unsigned int subclass); 74 extern void ldv_mutex_lock_mutex_of_device(struct mutex *lock); 75 extern int ldv_mutex_trylock_mutex_of_device(struct mutex *lock); 76 extern int ldv_atomic_dec_and_mutex_lock_mutex_of_device(atomic_t *cnt, struct mutex *lock); 77 extern int ldv_mutex_is_locked_mutex_of_device(struct mutex *lock); 78 extern void ldv_mutex_unlock_mutex_of_device(struct mutex *lock); 79 80 extern void ldv_usb_lock_device_mutex_of_device(void); 81 extern void ldv_usb_unlock_device_mutex_of_device(void); 82 extern int ldv_usb_trylock_device_mutex_of_device(void); 83 extern int ldv_usb_lock_device_for_reset_mutex_of_device(void); 84 #line 1 "/work/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.3-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.3-rc1.tar.xz/csd_deg_dscv/7460/dscv_tempdir/dscv/ri/32_7a/drivers/staging/media/lirc/lirc_imon.c" 85 86 /* 87 * lirc_imon.c: LIRC/VFD/LCD driver for SoundGraph iMON IR/VFD/LCD 88 * including the iMON PAD model 89 * 90 * Copyright(C) 2004 Venky Raju(dev@venky.ws) 91 * Copyright(C) 2009 Jarod Wilson <jarod@wilsonet.com> 92 * 93 * lirc_imon is free software; you can redistribute it and/or modify 94 * it under the terms of the GNU General Public License as published by 95 * the Free Software Foundation; either version 2 of the License, or 96 * (at your option) any later version. 97 * 98 * This program is distributed in the hope that it will be useful, 99 * but WITHOUT ANY WARRANTY; without even the implied warranty of 100 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 101 * GNU General Public License for more details. 
102 * 103 * You should have received a copy of the GNU General Public License 104 * along with this program; if not, write to the Free Software 105 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 106 */ 107 108 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 109 110 #include <linux/errno.h> 111 #include <linux/kernel.h> 112 #include <linux/module.h> 113 #include <linux/slab.h> 114 #include <linux/uaccess.h> 115 #include <linux/usb.h> 116 117 #include <media/lirc.h> 118 #include <media/lirc_dev.h> 119 120 121 #define MOD_AUTHOR "Venky Raju <dev@venky.ws>" 122 #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" 123 #define MOD_NAME "lirc_imon" 124 #define MOD_VERSION "0.8" 125 126 #define DISPLAY_MINOR_BASE 144 127 #define DEVICE_NAME "lcd%d" 128 129 #define BUF_CHUNK_SIZE 4 130 #define BUF_SIZE 128 131 132 #define BIT_DURATION 250 /* each bit received is 250us */ 133 134 /*** P R O T O T Y P E S ***/ 135 136 /* USB Callback prototypes */ 137 static int imon_probe(struct usb_interface *interface, 138 const struct usb_device_id *id); 139 static void imon_disconnect(struct usb_interface *interface); 140 static void usb_rx_callback(struct urb *urb); 141 static void usb_tx_callback(struct urb *urb); 142 143 /* suspend/resume support */ 144 static int imon_resume(struct usb_interface *intf); 145 static int imon_suspend(struct usb_interface *intf, pm_message_t message); 146 147 /* Display file_operations function prototypes */ 148 static int display_open(struct inode *inode, struct file *file); 149 static int display_close(struct inode *inode, struct file *file); 150 151 /* VFD write operation */ 152 static ssize_t vfd_write(struct file *file, const char __user *buf, 153 size_t n_bytes, loff_t *pos); 154 155 /* LIRC driver function prototypes */ 156 static int ir_open(void *data); 157 static void ir_close(void *data); 158 159 /*** G L O B A L S ***/ 160 #define IMON_DATA_BUF_SZ 35 161 162 struct imon_context { 163 struct usb_device *usbdev; 164 /* 
Newer devices have two interfaces */ 165 int display; /* not all controllers do */ 166 int display_isopen; /* display port has been opened */ 167 int ir_isopen; /* IR port open */ 168 int dev_present; /* USB device presence */ 169 struct mutex ctx_lock; /* to lock this object */ 170 wait_queue_head_t remove_ok; /* For unexpected USB disconnects */ 171 172 int vfd_proto_6p; /* some VFD require a 6th packet */ 173 174 struct lirc_driver *driver; 175 struct usb_endpoint_descriptor *rx_endpoint; 176 struct usb_endpoint_descriptor *tx_endpoint; 177 struct urb *rx_urb; 178 struct urb *tx_urb; 179 unsigned char usb_rx_buf[8]; 180 unsigned char usb_tx_buf[8]; 181 182 struct rx_data { 183 int count; /* length of 0 or 1 sequence */ 184 int prev_bit; /* logic level of sequence */ 185 int initial_space; /* initial space flag */ 186 } rx; 187 188 struct tx_t { 189 unsigned char data_buf[IMON_DATA_BUF_SZ]; /* user data buffer */ 190 struct completion finished; /* wait for write to finish */ 191 atomic_t busy; /* write in progress */ 192 int status; /* status of tx completion */ 193 } tx; 194 }; 195 196 static const struct file_operations display_fops = { 197 .owner = THIS_MODULE, 198 .open = &display_open, 199 .write = &vfd_write, 200 .release = &display_close, 201 .llseek = noop_llseek, 202 }; 203 204 /* 205 * USB Device ID for iMON USB Control Boards 206 * 207 * The Windows drivers contain 6 different inf files, more or less one for 208 * each new device until the 0x0034-0x0046 devices, which all use the same 209 * driver. Some of the devices in the 34-46 range haven't been definitively 210 * identified yet. Early devices have either a TriGem Computer, Inc. or a 211 * Samsung vendor ID (0x0aa8 and 0x04e8 respectively), while all later 212 * devices use the SoundGraph vendor ID (0x15c2). 
213 */ 214 static struct usb_device_id imon_usb_id_table[] = { 215 /* TriGem iMON (IR only) -- TG_iMON.inf */ 216 { USB_DEVICE(0x0aa8, 0x8001) }, 217 218 /* SoundGraph iMON (IR only) -- sg_imon.inf */ 219 { USB_DEVICE(0x04e8, 0xff30) }, 220 221 /* SoundGraph iMON VFD (IR & VFD) -- iMON_VFD.inf */ 222 { USB_DEVICE(0x0aa8, 0xffda) }, 223 224 /* SoundGraph iMON SS (IR & VFD) -- iMON_SS.inf */ 225 { USB_DEVICE(0x15c2, 0xffda) }, 226 227 {} 228 }; 229 230 /* Some iMON VFD models requires a 6th packet for VFD writes */ 231 static struct usb_device_id vfd_proto_6p_list[] = { 232 { USB_DEVICE(0x15c2, 0xffda) }, 233 {} 234 }; 235 236 /* Some iMON devices have no lcd/vfd, don't set one up */ 237 static struct usb_device_id ir_only_list[] = { 238 { USB_DEVICE(0x0aa8, 0x8001) }, 239 { USB_DEVICE(0x04e8, 0xff30) }, 240 {} 241 }; 242 243 /* USB Device data */ 244 static struct usb_driver imon_driver = { 245 .name = MOD_NAME, 246 .probe = imon_probe, 247 .disconnect = imon_disconnect, 248 .suspend = imon_suspend, 249 .resume = imon_resume, 250 .id_table = imon_usb_id_table, 251 }; 252 253 static struct usb_class_driver imon_class = { 254 .name = DEVICE_NAME, 255 .fops = &display_fops, 256 .minor_base = DISPLAY_MINOR_BASE, 257 }; 258 259 /* to prevent races between open() and disconnect(), probing, etc */ 260 static DEFINE_MUTEX(driver_lock); 261 262 static int debug; 263 264 /*** M O D U L E C O D E ***/ 265 266 MODULE_AUTHOR(MOD_AUTHOR); 267 MODULE_DESCRIPTION(MOD_DESC); 268 MODULE_VERSION(MOD_VERSION); 269 MODULE_LICENSE("GPL"); 270 MODULE_DEVICE_TABLE(usb, imon_usb_id_table); 271 module_param(debug, int, S_IRUGO | S_IWUSR); 272 MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes(default: no)"); 273 274 static void free_imon_context(struct imon_context *context) 275 { 276 struct device *dev = context->driver->dev; 277 278 usb_free_urb(context->tx_urb); 279 usb_free_urb(context->rx_urb); 280 lirc_buffer_free(context->driver->rbuf); 281 kfree(context->driver->rbuf); 282 
kfree(context->driver); 283 kfree(context); 284 285 dev_dbg(dev, "%s: iMON context freed\n", __func__); 286 } 287 288 static void deregister_from_lirc(struct imon_context *context) 289 { 290 int retval; 291 int minor = context->driver->minor; 292 293 retval = lirc_unregister_driver(minor); 294 if (retval) 295 dev_err(&context->usbdev->dev, 296 "unable to deregister from lirc(%d)", retval); 297 else 298 dev_info(&context->usbdev->dev, 299 "Deregistered iMON driver (minor:%d)\n", minor); 300 301 } 302 303 /** 304 * Called when the Display device (e.g. /dev/lcd0) 305 * is opened by the application. 306 */ 307 static int display_open(struct inode *inode, struct file *file) 308 { 309 struct usb_interface *interface; 310 struct imon_context *context = NULL; 311 int subminor; 312 int retval = 0; 313 314 /* prevent races with disconnect */ 315 mutex_lock(&driver_lock); 316 317 subminor = iminor(inode); 318 interface = usb_find_interface(&imon_driver, subminor); 319 if (!interface) { 320 pr_err("%s: could not find interface for minor %d\n", 321 __func__, subminor); 322 retval = -ENODEV; 323 goto exit; 324 } 325 context = usb_get_intfdata(interface); 326 327 if (!context) { 328 dev_err(&interface->dev, "no context found for minor %d\n", 329 subminor); 330 retval = -ENODEV; 331 goto exit; 332 } 333 334 mutex_lock(&context->ctx_lock); 335 336 if (!context->display) { 337 dev_err(&interface->dev, 338 "%s: display not supported by device\n", __func__); 339 retval = -ENODEV; 340 } else if (context->display_isopen) { 341 dev_err(&interface->dev, 342 "%s: display port is already open\n", __func__); 343 retval = -EBUSY; 344 } else { 345 context->display_isopen = 1; 346 file->private_data = context; 347 dev_info(context->driver->dev, "display port opened\n"); 348 } 349 350 mutex_unlock(&context->ctx_lock); 351 352 exit: 353 mutex_unlock(&driver_lock); 354 return retval; 355 } 356 357 /** 358 * Called when the display device (e.g. /dev/lcd0) 359 * is closed by the application. 
360 */ 361 static int display_close(struct inode *inode, struct file *file) 362 { 363 struct imon_context *context = NULL; 364 int retval = 0; 365 366 context = file->private_data; 367 368 if (!context) { 369 pr_err("%s: no context for device\n", __func__); 370 return -ENODEV; 371 } 372 373 mutex_lock(&context->ctx_lock); 374 375 if (!context->display) { 376 dev_err(&context->usbdev->dev, 377 "%s: display not supported by device\n", __func__); 378 retval = -ENODEV; 379 } else if (!context->display_isopen) { 380 dev_err(&context->usbdev->dev, 381 "%s: display is not open\n", __func__); 382 retval = -EIO; 383 } else { 384 context->display_isopen = 0; 385 dev_info(context->driver->dev, "display port closed\n"); 386 if (!context->dev_present && !context->ir_isopen) { 387 /* 388 * Device disconnected before close and IR port is not 389 * open. If IR port is open, context will be deleted by 390 * ir_close. 391 */ 392 mutex_unlock(&context->ctx_lock); 393 free_imon_context(context); 394 return retval; 395 } 396 } 397 398 mutex_unlock(&context->ctx_lock); 399 return retval; 400 } 401 402 /** 403 * Sends a packet to the device -- this function must be called 404 * with context->ctx_lock held. 
405 */ 406 static int send_packet(struct imon_context *context) 407 { 408 unsigned int pipe; 409 int interval = 0; 410 int retval = 0; 411 412 /* Check if we need to use control or interrupt urb */ 413 pipe = usb_sndintpipe(context->usbdev, 414 context->tx_endpoint->bEndpointAddress); 415 interval = context->tx_endpoint->bInterval; 416 417 usb_fill_int_urb(context->tx_urb, context->usbdev, pipe, 418 context->usb_tx_buf, 419 sizeof(context->usb_tx_buf), 420 usb_tx_callback, context, interval); 421 422 context->tx_urb->actual_length = 0; 423 424 init_completion(&context->tx.finished); 425 atomic_set(&context->tx.busy, 1); 426 427 retval = usb_submit_urb(context->tx_urb, GFP_KERNEL); 428 if (retval) { 429 atomic_set(&context->tx.busy, 0); 430 dev_err(&context->usbdev->dev, "error submitting urb(%d)\n", 431 retval); 432 } else { 433 /* Wait for transmission to complete (or abort) */ 434 mutex_unlock(&context->ctx_lock); 435 retval = wait_for_completion_interruptible( 436 &context->tx.finished); 437 if (retval) 438 dev_err(&context->usbdev->dev, 439 "%s: task interrupted\n", __func__); 440 mutex_lock(&context->ctx_lock); 441 442 retval = context->tx.status; 443 if (retval) 444 dev_err(&context->usbdev->dev, 445 "packet tx failed (%d)\n", retval); 446 } 447 448 return retval; 449 } 450 451 /** 452 * Writes data to the VFD. The iMON VFD is 2x16 characters 453 * and requires data in 5 consecutive USB interrupt packets, 454 * each packet but the last carrying 7 bytes. 455 * 456 * I don't know if the VFD board supports features such as 457 * scrolling, clearing rows, blanking, etc. so at 458 * the caller must provide a full screen of data. If fewer 459 * than 32 bytes are provided spaces will be appended to 460 * generate a full screen. 
461 */ 462 static ssize_t vfd_write(struct file *file, const char __user *buf, 463 size_t n_bytes, loff_t *pos) 464 { 465 int i; 466 int offset; 467 int seq; 468 int retval = 0; 469 struct imon_context *context; 470 const unsigned char vfd_packet6[] = { 471 0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF }; 472 int *data_buf = NULL; 473 474 context = file->private_data; 475 if (!context) { 476 pr_err("%s: no context for device\n", __func__); 477 return -ENODEV; 478 } 479 480 mutex_lock(&context->ctx_lock); 481 482 if (!context->dev_present) { 483 dev_err(&context->usbdev->dev, 484 "%s: no iMON device present\n", __func__); 485 retval = -ENODEV; 486 goto exit; 487 } 488 489 if (n_bytes <= 0 || n_bytes > IMON_DATA_BUF_SZ - 3) { 490 dev_err(&context->usbdev->dev, 491 "%s: invalid payload size\n", __func__); 492 retval = -EINVAL; 493 goto exit; 494 } 495 496 data_buf = memdup_user(buf, n_bytes); 497 if (IS_ERR(data_buf)) { 498 retval = PTR_ERR(data_buf); 499 data_buf = NULL; 500 goto exit; 501 } 502 503 memcpy(context->tx.data_buf, data_buf, n_bytes); 504 505 /* Pad with spaces */ 506 for (i = n_bytes; i < IMON_DATA_BUF_SZ - 3; ++i) 507 context->tx.data_buf[i] = ' '; 508 509 for (i = IMON_DATA_BUF_SZ - 3; i < IMON_DATA_BUF_SZ; ++i) 510 context->tx.data_buf[i] = 0xFF; 511 512 offset = 0; 513 seq = 0; 514 515 do { 516 memcpy(context->usb_tx_buf, context->tx.data_buf + offset, 7); 517 context->usb_tx_buf[7] = (unsigned char) seq; 518 519 retval = send_packet(context); 520 if (retval) { 521 dev_err(&context->usbdev->dev, 522 "send packet failed for packet #%d\n", 523 seq / 2); 524 goto exit; 525 } else { 526 seq += 2; 527 offset += 7; 528 } 529 530 } while (offset < IMON_DATA_BUF_SZ); 531 532 if (context->vfd_proto_6p) { 533 /* Send packet #6 */ 534 memcpy(context->usb_tx_buf, &vfd_packet6, sizeof(vfd_packet6)); 535 context->usb_tx_buf[7] = (unsigned char) seq; 536 retval = send_packet(context); 537 if (retval) 538 dev_err(&context->usbdev->dev, 539 "send packet failed for packet 
#%d\n", 540 seq / 2); 541 } 542 543 exit: 544 mutex_unlock(&context->ctx_lock); 545 kfree(data_buf); 546 547 return (!retval) ? n_bytes : retval; 548 } 549 550 /** 551 * Callback function for USB core API: transmit data 552 */ 553 static void usb_tx_callback(struct urb *urb) 554 { 555 struct imon_context *context; 556 557 if (!urb) 558 return; 559 context = (struct imon_context *)urb->context; 560 if (!context) 561 return; 562 563 context->tx.status = urb->status; 564 565 /* notify waiters that write has finished */ 566 atomic_set(&context->tx.busy, 0); 567 complete(&context->tx.finished); 568 } 569 570 /** 571 * Called by lirc_dev when the application opens /dev/lirc 572 */ 573 static int ir_open(void *data) 574 { 575 struct imon_context *context; 576 577 /* prevent races with disconnect */ 578 mutex_lock(&driver_lock); 579 580 context = data; 581 582 /* initial IR protocol decode variables */ 583 context->rx.count = 0; 584 context->rx.initial_space = 1; 585 context->rx.prev_bit = 0; 586 587 context->ir_isopen = 1; 588 dev_info(context->driver->dev, "IR port opened\n"); 589 590 mutex_unlock(&driver_lock); 591 return 0; 592 } 593 594 /** 595 * Called by lirc_dev when the application closes /dev/lirc 596 */ 597 static void ir_close(void *data) 598 { 599 struct imon_context *context; 600 601 context = data; 602 if (!context) { 603 pr_err("%s: no context for device\n", __func__); 604 return; 605 } 606 607 mutex_lock(&context->ctx_lock); 608 609 context->ir_isopen = 0; 610 dev_info(context->driver->dev, "IR port closed\n"); 611 612 if (!context->dev_present) { 613 /* 614 * Device disconnected while IR port was still open. Driver 615 * was not deregistered at disconnect time, so do it now. 
616 */ 617 deregister_from_lirc(context); 618 619 if (!context->display_isopen) { 620 mutex_unlock(&context->ctx_lock); 621 free_imon_context(context); 622 return; 623 } 624 /* 625 * If display port is open, context will be deleted by 626 * display_close 627 */ 628 } 629 630 mutex_unlock(&context->ctx_lock); 631 } 632 633 /** 634 * Convert bit count to time duration (in us) and submit 635 * the value to lirc_dev. 636 */ 637 static void submit_data(struct imon_context *context) 638 { 639 unsigned char buf[4]; 640 int value = context->rx.count; 641 int i; 642 643 dev_dbg(context->driver->dev, "submitting data to LIRC\n"); 644 645 value *= BIT_DURATION; 646 value &= PULSE_MASK; 647 if (context->rx.prev_bit) 648 value |= PULSE_BIT; 649 650 for (i = 0; i < 4; ++i) 651 buf[i] = value>>(i*8); 652 653 lirc_buffer_write(context->driver->rbuf, buf); 654 wake_up(&context->driver->rbuf->wait_poll); 655 } 656 657 /** 658 * Process the incoming packet 659 */ 660 static void imon_incoming_packet(struct imon_context *context, 661 struct urb *urb, int intf) 662 { 663 int len = urb->actual_length; 664 unsigned char *buf = urb->transfer_buffer; 665 struct device *dev = context->driver->dev; 666 int octet, bit; 667 unsigned char mask; 668 669 /* 670 * just bail out if no listening IR client 671 */ 672 if (!context->ir_isopen) 673 return; 674 675 if (len != 8) { 676 dev_warn(dev, "imon %s: invalid incoming packet size (len = %d, intf%d)\n", 677 __func__, len, intf); 678 return; 679 } 680 681 if (debug) 682 dev_info(dev, "raw packet: %*ph\n", len, buf); 683 /* 684 * Translate received data to pulse and space lengths. 685 * Received data is active low, i.e. pulses are 0 and 686 * spaces are 1. 687 * 688 * My original algorithm was essentially similar to 689 * Changwoo Ryu's with the exception that he switched 690 * the incoming bits to active high and also fed an 691 * initial space to LIRC at the start of a new sequence 692 * if the previous bit was a pulse. 
693 * 694 * I've decided to adopt his algorithm. 695 */ 696 697 if (buf[7] == 1 && context->rx.initial_space) { 698 /* LIRC requires a leading space */ 699 context->rx.prev_bit = 0; 700 context->rx.count = 4; 701 submit_data(context); 702 context->rx.count = 0; 703 } 704 705 for (octet = 0; octet < 5; ++octet) { 706 mask = 0x80; 707 for (bit = 0; bit < 8; ++bit) { 708 int curr_bit = !(buf[octet] & mask); 709 710 if (curr_bit != context->rx.prev_bit) { 711 if (context->rx.count) { 712 submit_data(context); 713 context->rx.count = 0; 714 } 715 context->rx.prev_bit = curr_bit; 716 } 717 ++context->rx.count; 718 mask >>= 1; 719 } 720 } 721 722 if (buf[7] == 10) { 723 if (context->rx.count) { 724 submit_data(context); 725 context->rx.count = 0; 726 } 727 context->rx.initial_space = context->rx.prev_bit; 728 } 729 } 730 731 /** 732 * Callback function for USB core API: receive data 733 */ 734 static void usb_rx_callback(struct urb *urb) 735 { 736 struct imon_context *context; 737 int intfnum = 0; 738 739 if (!urb) 740 return; 741 742 context = (struct imon_context *)urb->context; 743 if (!context) 744 return; 745 746 switch (urb->status) { 747 case -ENOENT: /* usbcore unlink successful! 
*/ 748 return; 749 750 case 0: 751 imon_incoming_packet(context, urb, intfnum); 752 break; 753 754 default: 755 dev_warn(context->driver->dev, "imon %s: status(%d): ignored\n", 756 __func__, urb->status); 757 break; 758 } 759 760 usb_submit_urb(context->rx_urb, GFP_ATOMIC); 761 } 762 763 /** 764 * Callback function for USB core API: Probe 765 */ 766 static int imon_probe(struct usb_interface *interface, 767 const struct usb_device_id *id) 768 { 769 struct usb_device *usbdev = NULL; 770 struct usb_host_interface *iface_desc = NULL; 771 struct usb_endpoint_descriptor *rx_endpoint = NULL; 772 struct usb_endpoint_descriptor *tx_endpoint = NULL; 773 struct urb *rx_urb = NULL; 774 struct urb *tx_urb = NULL; 775 struct lirc_driver *driver = NULL; 776 struct lirc_buffer *rbuf = NULL; 777 struct device *dev = &interface->dev; 778 int ifnum; 779 int lirc_minor = 0; 780 int num_endpts; 781 int retval = -ENOMEM; 782 int display_ep_found = 0; 783 int ir_ep_found = 0; 784 int vfd_proto_6p = 0; 785 struct imon_context *context = NULL; 786 int i; 787 u16 vendor, product; 788 789 /* prevent races probing devices w/multiple interfaces */ 790 mutex_lock(&driver_lock); 791 792 context = kzalloc(sizeof(struct imon_context), GFP_KERNEL); 793 if (!context) 794 goto driver_unlock; 795 796 /* 797 * Try to auto-detect the type of display if the user hasn't set 798 * it by hand via the display_type modparam. Default is VFD. 
799 */ 800 if (usb_match_id(interface, ir_only_list)) 801 context->display = 0; 802 else 803 context->display = 1; 804 805 usbdev = usb_get_dev(interface_to_usbdev(interface)); 806 iface_desc = interface->cur_altsetting; 807 num_endpts = iface_desc->desc.bNumEndpoints; 808 ifnum = iface_desc->desc.bInterfaceNumber; 809 vendor = le16_to_cpu(usbdev->descriptor.idVendor); 810 product = le16_to_cpu(usbdev->descriptor.idProduct); 811 812 dev_dbg(dev, "%s: found iMON device (%04x:%04x, intf%d)\n", 813 __func__, vendor, product, ifnum); 814 815 /* 816 * Scan the endpoint list and set: 817 * first input endpoint = IR endpoint 818 * first output endpoint = display endpoint 819 */ 820 for (i = 0; i < num_endpts && !(ir_ep_found && display_ep_found); ++i) { 821 struct usb_endpoint_descriptor *ep; 822 int ep_dir; 823 int ep_type; 824 825 ep = &iface_desc->endpoint[i].desc; 826 ep_dir = ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK; 827 ep_type = usb_endpoint_type(ep); 828 829 if (!ir_ep_found && 830 ep_dir == USB_DIR_IN && 831 ep_type == USB_ENDPOINT_XFER_INT) { 832 833 rx_endpoint = ep; 834 ir_ep_found = 1; 835 dev_dbg(dev, "%s: found IR endpoint\n", __func__); 836 837 } else if (!display_ep_found && ep_dir == USB_DIR_OUT && 838 ep_type == USB_ENDPOINT_XFER_INT) { 839 tx_endpoint = ep; 840 display_ep_found = 1; 841 dev_dbg(dev, "%s: found display endpoint\n", __func__); 842 } 843 } 844 845 /* 846 * Some iMON receivers have no display. Unfortunately, it seems 847 * that SoundGraph recycles device IDs between devices both with 848 * and without... 
:\ 849 */ 850 if (context->display == 0) { 851 display_ep_found = 0; 852 dev_dbg(dev, "%s: device has no display\n", __func__); 853 } 854 855 /* Input endpoint is mandatory */ 856 if (!ir_ep_found) { 857 dev_err(dev, "%s: no valid input (IR) endpoint found.\n", 858 __func__); 859 retval = -ENODEV; 860 goto free_context; 861 } 862 863 /* Determine if display requires 6 packets */ 864 if (display_ep_found) { 865 if (usb_match_id(interface, vfd_proto_6p_list)) 866 vfd_proto_6p = 1; 867 868 dev_dbg(dev, "%s: vfd_proto_6p: %d\n", 869 __func__, vfd_proto_6p); 870 } 871 872 driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL); 873 if (!driver) 874 goto free_context; 875 876 rbuf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL); 877 if (!rbuf) 878 goto free_driver; 879 880 if (lirc_buffer_init(rbuf, BUF_CHUNK_SIZE, BUF_SIZE)) { 881 dev_err(dev, "%s: lirc_buffer_init failed\n", __func__); 882 goto free_rbuf; 883 } 884 rx_urb = usb_alloc_urb(0, GFP_KERNEL); 885 if (!rx_urb) { 886 dev_err(dev, "%s: usb_alloc_urb failed for IR urb\n", __func__); 887 goto free_lirc_buf; 888 } 889 tx_urb = usb_alloc_urb(0, GFP_KERNEL); 890 if (!tx_urb) { 891 dev_err(dev, "%s: usb_alloc_urb failed for display urb\n", 892 __func__); 893 goto free_rx_urb; 894 } 895 896 mutex_init(&context->ctx_lock); 897 context->vfd_proto_6p = vfd_proto_6p; 898 899 strcpy(driver->name, MOD_NAME); 900 driver->minor = -1; 901 driver->code_length = BUF_CHUNK_SIZE * 8; 902 driver->sample_rate = 0; 903 driver->features = LIRC_CAN_REC_MODE2; 904 driver->data = context; 905 driver->rbuf = rbuf; 906 driver->set_use_inc = ir_open; 907 driver->set_use_dec = ir_close; 908 driver->dev = &interface->dev; 909 driver->owner = THIS_MODULE; 910 911 mutex_lock(&context->ctx_lock); 912 913 context->driver = driver; 914 /* start out in keyboard mode */ 915 916 lirc_minor = lirc_register_driver(driver); 917 if (lirc_minor < 0) { 918 dev_err(dev, "%s: lirc_register_driver failed\n", __func__); 919 goto free_tx_urb; 920 } 921 922 
dev_info(dev, "Registered iMON driver (lirc minor: %d)\n", 923 lirc_minor); 924 925 /* Needed while unregistering! */ 926 driver->minor = lirc_minor; 927 928 context->usbdev = usbdev; 929 context->dev_present = 1; 930 context->rx_endpoint = rx_endpoint; 931 context->rx_urb = rx_urb; 932 933 /* 934 * tx is used to send characters to lcd/vfd, associate RF 935 * remotes, set IR protocol, and maybe more... 936 */ 937 context->tx_endpoint = tx_endpoint; 938 context->tx_urb = tx_urb; 939 940 if (display_ep_found) 941 context->display = 1; 942 943 usb_fill_int_urb(context->rx_urb, context->usbdev, 944 usb_rcvintpipe(context->usbdev, 945 context->rx_endpoint->bEndpointAddress), 946 context->usb_rx_buf, sizeof(context->usb_rx_buf), 947 usb_rx_callback, context, 948 context->rx_endpoint->bInterval); 949 950 retval = usb_submit_urb(context->rx_urb, GFP_KERNEL); 951 if (retval) { 952 dev_err(dev, "usb_submit_urb failed for intf0 (%d)\n", retval); 953 goto unregister_lirc; 954 } 955 956 usb_set_intfdata(interface, context); 957 958 if (context->display && ifnum == 0) { 959 dev_dbg(dev, "%s: Registering iMON display with sysfs\n", 960 __func__); 961 962 if (usb_register_dev(interface, &imon_class)) { 963 /* Not a fatal error, so ignore */ 964 dev_info(dev, "%s: could not get a minor number for display\n", 965 __func__); 966 } 967 } 968 969 dev_info(dev, "iMON device (%04x:%04x, intf%d) on usb<%d:%d> initialized\n", 970 vendor, product, ifnum, usbdev->bus->busnum, usbdev->devnum); 971 972 /* Everything went fine. 
Just unlock and return retval (with is 0) */ 973 goto driver_unlock; 974 975 unregister_lirc: 976 lirc_unregister_driver(driver->minor); 977 978 free_tx_urb: 979 usb_free_urb(tx_urb); 980 981 free_rx_urb: 982 usb_free_urb(rx_urb); 983 984 free_lirc_buf: 985 lirc_buffer_free(rbuf); 986 987 free_rbuf: 988 kfree(rbuf); 989 990 free_driver: 991 kfree(driver); 992 free_context: 993 kfree(context); 994 context = NULL; 995 996 driver_unlock: 997 mutex_unlock(&driver_lock); 998 999 return retval; 1000 } 1001 1002 /** 1003 * Callback function for USB core API: disconnect 1004 */ 1005 static void imon_disconnect(struct usb_interface *interface) 1006 { 1007 struct imon_context *context; 1008 int ifnum; 1009 1010 /* prevent races with ir_open()/display_open() */ 1011 mutex_lock(&driver_lock); 1012 1013 context = usb_get_intfdata(interface); 1014 ifnum = interface->cur_altsetting->desc.bInterfaceNumber; 1015 1016 mutex_lock(&context->ctx_lock); 1017 1018 usb_set_intfdata(interface, NULL); 1019 1020 /* Abort ongoing write */ 1021 if (atomic_read(&context->tx.busy)) { 1022 usb_kill_urb(context->tx_urb); 1023 complete_all(&context->tx.finished); 1024 } 1025 1026 context->dev_present = 0; 1027 usb_kill_urb(context->rx_urb); 1028 if (context->display) 1029 usb_deregister_dev(interface, &imon_class); 1030 1031 if (!context->ir_isopen && !context->dev_present) { 1032 deregister_from_lirc(context); 1033 mutex_unlock(&context->ctx_lock); 1034 if (!context->display_isopen) 1035 free_imon_context(context); 1036 } else 1037 mutex_unlock(&context->ctx_lock); 1038 1039 mutex_unlock(&driver_lock); 1040 1041 dev_info(&interface->dev, "%s: iMON device (intf%d) disconnected\n", 1042 __func__, ifnum); 1043 } 1044 1045 static int imon_suspend(struct usb_interface *intf, pm_message_t message) 1046 { 1047 struct imon_context *context = usb_get_intfdata(intf); 1048 1049 usb_kill_urb(context->rx_urb); 1050 1051 return 0; 1052 } 1053 1054 static int imon_resume(struct usb_interface *intf) 1055 { 1056 
struct imon_context *context = usb_get_intfdata(intf); 1057 1058 usb_fill_int_urb(context->rx_urb, context->usbdev, 1059 usb_rcvintpipe(context->usbdev, 1060 context->rx_endpoint->bEndpointAddress), 1061 context->usb_rx_buf, sizeof(context->usb_rx_buf), 1062 usb_rx_callback, context, 1063 context->rx_endpoint->bInterval); 1064 1065 return usb_submit_urb(context->rx_urb, GFP_ATOMIC); 1066 } 1067 1068 module_usb_driver(imon_driver); 1069 1070 1071 1072 1073 1074 /* LDV_COMMENT_BEGIN_MAIN */ 1075 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful 1076 1077 /*###########################################################################*/ 1078 1079 /*############## Driver Environment Generator 0.2 output ####################*/ 1080 1081 /*###########################################################################*/ 1082 1083 1084 1085 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */ 1086 void ldv_check_final_state(void); 1087 1088 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */ 1089 void ldv_check_return_value(int res); 1090 1091 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */ 1092 void ldv_check_return_value_probe(int res); 1093 1094 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */ 1095 void ldv_initialize(void); 1096 1097 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */ 1098 void ldv_handler_precall(void); 1099 1100 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */ 1101 int nondet_int(void); 1102 1103 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. 
*/ 1104 int LDV_IN_INTERRUPT; 1105 1106 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */ 1107 void ldv_main0_sequence_infinite_withcheck_stateful(void) { 1108 1109 1110 1111 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */ 1112 /*============================= VARIABLE DECLARATION PART =============================*/ 1113 /** STRUCT: struct type: usb_driver, struct name: imon_driver **/ 1114 /* content: static int imon_probe(struct usb_interface *interface, const struct usb_device_id *id)*/ 1115 /* LDV_COMMENT_BEGIN_PREP */ 1116 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 1117 #define MOD_AUTHOR "Venky Raju <dev@venky.ws>" 1118 #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" 1119 #define MOD_NAME "lirc_imon" 1120 #define MOD_VERSION "0.8" 1121 #define DISPLAY_MINOR_BASE 144 1122 #define DEVICE_NAME "lcd%d" 1123 #define BUF_CHUNK_SIZE 4 1124 #define BUF_SIZE 128 1125 #define BIT_DURATION 250 1126 #define IMON_DATA_BUF_SZ 35 1127 /* LDV_COMMENT_END_PREP */ 1128 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "imon_probe" */ 1129 struct usb_interface * var_group1; 1130 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "imon_probe" */ 1131 const struct usb_device_id * var_imon_probe_12_p1; 1132 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "imon_probe" */ 1133 static int res_imon_probe_12; 1134 /* content: static void imon_disconnect(struct usb_interface *interface)*/ 1135 /* LDV_COMMENT_BEGIN_PREP */ 1136 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 1137 #define MOD_AUTHOR "Venky Raju <dev@venky.ws>" 1138 #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" 1139 #define MOD_NAME "lirc_imon" 1140 #define MOD_VERSION "0.8" 1141 #define DISPLAY_MINOR_BASE 144 1142 #define DEVICE_NAME "lcd%d" 1143 #define BUF_CHUNK_SIZE 4 1144 #define BUF_SIZE 128 1145 #define BIT_DURATION 250 1146 #define IMON_DATA_BUF_SZ 35 1147 /* LDV_COMMENT_END_PREP */ 1148 /* content: 
static int imon_suspend(struct usb_interface *intf, pm_message_t message)*/ 1149 /* LDV_COMMENT_BEGIN_PREP */ 1150 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 1151 #define MOD_AUTHOR "Venky Raju <dev@venky.ws>" 1152 #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" 1153 #define MOD_NAME "lirc_imon" 1154 #define MOD_VERSION "0.8" 1155 #define DISPLAY_MINOR_BASE 144 1156 #define DEVICE_NAME "lcd%d" 1157 #define BUF_CHUNK_SIZE 4 1158 #define BUF_SIZE 128 1159 #define BIT_DURATION 250 1160 #define IMON_DATA_BUF_SZ 35 1161 /* LDV_COMMENT_END_PREP */ 1162 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "imon_suspend" */ 1163 pm_message_t var_imon_suspend_14_p1; 1164 /* content: static int imon_resume(struct usb_interface *intf)*/ 1165 /* LDV_COMMENT_BEGIN_PREP */ 1166 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 1167 #define MOD_AUTHOR "Venky Raju <dev@venky.ws>" 1168 #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" 1169 #define MOD_NAME "lirc_imon" 1170 #define MOD_VERSION "0.8" 1171 #define DISPLAY_MINOR_BASE 144 1172 #define DEVICE_NAME "lcd%d" 1173 #define BUF_CHUNK_SIZE 4 1174 #define BUF_SIZE 128 1175 #define BIT_DURATION 250 1176 #define IMON_DATA_BUF_SZ 35 1177 /* LDV_COMMENT_END_PREP */ 1178 1179 1180 1181 1182 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */ 1183 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */ 1184 /*============================= VARIABLE INITIALIZING PART =============================*/ 1185 LDV_IN_INTERRUPT=1; 1186 1187 1188 1189 1190 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */ 1191 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */ 1192 /*============================= FUNCTION CALL SECTION =============================*/ 1193 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. 
*/ 1194 ldv_initialize(); 1195 int ldv_s_imon_driver_usb_driver = 0; 1196 1197 1198 while( nondet_int() 1199 || !(ldv_s_imon_driver_usb_driver == 0) 1200 ) { 1201 1202 switch(nondet_int()) { 1203 1204 case 0: { 1205 1206 /** STRUCT: struct type: usb_driver, struct name: imon_driver **/ 1207 if(ldv_s_imon_driver_usb_driver==0) { 1208 1209 /* content: static int imon_probe(struct usb_interface *interface, const struct usb_device_id *id)*/ 1210 /* LDV_COMMENT_BEGIN_PREP */ 1211 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 1212 #define MOD_AUTHOR "Venky Raju <dev@venky.ws>" 1213 #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" 1214 #define MOD_NAME "lirc_imon" 1215 #define MOD_VERSION "0.8" 1216 #define DISPLAY_MINOR_BASE 144 1217 #define DEVICE_NAME "lcd%d" 1218 #define BUF_CHUNK_SIZE 4 1219 #define BUF_SIZE 128 1220 #define BIT_DURATION 250 1221 #define IMON_DATA_BUF_SZ 35 1222 /* LDV_COMMENT_END_PREP */ 1223 /* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "imon_driver". Standart function test for correct return result. 
*/ 1224 res_imon_probe_12 = imon_probe( var_group1, var_imon_probe_12_p1); 1225 ldv_check_return_value(res_imon_probe_12); 1226 ldv_check_return_value_probe(res_imon_probe_12); 1227 if(res_imon_probe_12) 1228 goto ldv_module_exit; 1229 ldv_s_imon_driver_usb_driver++; 1230 1231 } 1232 1233 } 1234 1235 break; 1236 case 1: { 1237 1238 /** STRUCT: struct type: usb_driver, struct name: imon_driver **/ 1239 if(ldv_s_imon_driver_usb_driver==1) { 1240 1241 /* content: static int imon_suspend(struct usb_interface *intf, pm_message_t message)*/ 1242 /* LDV_COMMENT_BEGIN_PREP */ 1243 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 1244 #define MOD_AUTHOR "Venky Raju <dev@venky.ws>" 1245 #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" 1246 #define MOD_NAME "lirc_imon" 1247 #define MOD_VERSION "0.8" 1248 #define DISPLAY_MINOR_BASE 144 1249 #define DEVICE_NAME "lcd%d" 1250 #define BUF_CHUNK_SIZE 4 1251 #define BUF_SIZE 128 1252 #define BIT_DURATION 250 1253 #define IMON_DATA_BUF_SZ 35 1254 /* LDV_COMMENT_END_PREP */ 1255 /* LDV_COMMENT_FUNCTION_CALL Function from field "suspend" from driver structure with callbacks "imon_driver" */ 1256 ldv_handler_precall(); 1257 imon_suspend( var_group1, var_imon_suspend_14_p1); 1258 ldv_s_imon_driver_usb_driver++; 1259 1260 } 1261 1262 } 1263 1264 break; 1265 case 2: { 1266 1267 /** STRUCT: struct type: usb_driver, struct name: imon_driver **/ 1268 if(ldv_s_imon_driver_usb_driver==2) { 1269 1270 /* content: static int imon_resume(struct usb_interface *intf)*/ 1271 /* LDV_COMMENT_BEGIN_PREP */ 1272 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 1273 #define MOD_AUTHOR "Venky Raju <dev@venky.ws>" 1274 #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" 1275 #define MOD_NAME "lirc_imon" 1276 #define MOD_VERSION "0.8" 1277 #define DISPLAY_MINOR_BASE 144 1278 #define DEVICE_NAME "lcd%d" 1279 #define BUF_CHUNK_SIZE 4 1280 #define BUF_SIZE 128 1281 #define BIT_DURATION 250 1282 #define IMON_DATA_BUF_SZ 35 1283 /* 
LDV_COMMENT_END_PREP */ 1284 /* LDV_COMMENT_FUNCTION_CALL Function from field "resume" from driver structure with callbacks "imon_driver" */ 1285 ldv_handler_precall(); 1286 imon_resume( var_group1); 1287 ldv_s_imon_driver_usb_driver++; 1288 1289 } 1290 1291 } 1292 1293 break; 1294 case 3: { 1295 1296 /** STRUCT: struct type: usb_driver, struct name: imon_driver **/ 1297 if(ldv_s_imon_driver_usb_driver==3) { 1298 1299 /* content: static void imon_disconnect(struct usb_interface *interface)*/ 1300 /* LDV_COMMENT_BEGIN_PREP */ 1301 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 1302 #define MOD_AUTHOR "Venky Raju <dev@venky.ws>" 1303 #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" 1304 #define MOD_NAME "lirc_imon" 1305 #define MOD_VERSION "0.8" 1306 #define DISPLAY_MINOR_BASE 144 1307 #define DEVICE_NAME "lcd%d" 1308 #define BUF_CHUNK_SIZE 4 1309 #define BUF_SIZE 128 1310 #define BIT_DURATION 250 1311 #define IMON_DATA_BUF_SZ 35 1312 /* LDV_COMMENT_END_PREP */ 1313 /* LDV_COMMENT_FUNCTION_CALL Function from field "disconnect" from driver structure with callbacks "imon_driver" */ 1314 ldv_handler_precall(); 1315 imon_disconnect( var_group1); 1316 ldv_s_imon_driver_usb_driver=0; 1317 1318 } 1319 1320 } 1321 1322 break; 1323 default: break; 1324 1325 } 1326 1327 } 1328 1329 ldv_module_exit: 1330 1331 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */ 1332 ldv_final: ldv_check_final_state(); 1333 1334 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */ 1335 return; 1336 1337 } 1338 #endif 1339 1340 /* LDV_COMMENT_END_MAIN */ 1341 1342 #line 84 "/work/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.3-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.3-rc1.tar.xz/csd_deg_dscv/7460/dscv_tempdir/dscv/ri/32_7a/drivers/staging/media/lirc/lirc_imon.o.c.prepared"
1 2 3 #include <linux/mutex.h> 4 #include <linux/errno.h> 5 #include <verifier/rcv.h> 6 #include <kernel-model/ERR.inc> 7 8 static int ldv_mutex_ctx_lock_of_imon_context = 1; 9 10 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_interruptible_ctx_lock_of_imon_context') Check that mutex 'ctx_lock_of_imon_context' was unlocked and nondeterministically lock it. Return the corresponding error code on fails */ 11 int ldv_mutex_lock_interruptible_ctx_lock_of_imon_context(struct mutex *lock) 12 { 13 int nondetermined; 14 15 /* LDV_COMMENT_ASSERT Mutex 'ctx_lock_of_imon_context' must be unlocked */ 16 ldv_assert(ldv_mutex_ctx_lock_of_imon_context == 1); 17 18 /* LDV_COMMENT_OTHER Construct nondetermined result*/ 19 nondetermined = ldv_undef_int(); 20 21 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'ctx_lock_of_imon_context' */ 22 if (nondetermined) 23 { 24 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'ctx_lock_of_imon_context' */ 25 ldv_mutex_ctx_lock_of_imon_context = 2; 26 /* LDV_COMMENT_RETURN Finish with success */ 27 return 0; 28 } 29 else 30 { 31 /* LDV_COMMENT_RETURN Finish with fail. Mutex 'ctx_lock_of_imon_context' is keeped unlocked */ 32 return -EINTR; 33 } 34 } 35 36 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_killable_ctx_lock_of_imon_context') Check that mutex 'ctx_lock_of_imon_context' wasn unlocked and nondeterministically lock it. 
Return the corresponding error code on fails*/ 37 int ldv_mutex_lock_killable_ctx_lock_of_imon_context(struct mutex *lock) 38 { 39 int nondetermined; 40 41 /* LDV_COMMENT_ASSERT Mutex 'ctx_lock_of_imon_context' must be unlocked */ 42 ldv_assert(ldv_mutex_ctx_lock_of_imon_context == 1); 43 44 /* LDV_COMMENT_OTHER Construct nondetermined result */ 45 nondetermined = ldv_undef_int(); 46 47 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'ctx_lock_of_imon_context' */ 48 if (nondetermined) 49 { 50 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'ctx_lock_of_imon_context' */ 51 ldv_mutex_ctx_lock_of_imon_context = 2; 52 /* LDV_COMMENT_RETURN Finish with success*/ 53 return 0; 54 } 55 else 56 { 57 /* LDV_COMMENT_RETURN Finish with the fail. Mutex 'ctx_lock_of_imon_context' is keeped unlocked */ 58 return -EINTR; 59 } 60 } 61 62 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_ctx_lock_of_imon_context') Check that mutex 'ctx_lock_of_imon_context' was not locked and lock it */ 63 void ldv_mutex_lock_ctx_lock_of_imon_context(struct mutex *lock) 64 { 65 /* LDV_COMMENT_ASSERT Mutex 'ctx_lock_of_imon_context' must be unlocked */ 66 ldv_assert(ldv_mutex_ctx_lock_of_imon_context == 1); 67 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'ctx_lock_of_imon_context' */ 68 ldv_mutex_ctx_lock_of_imon_context = 2; 69 } 70 71 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_trylock_ctx_lock_of_imon_context') Check that mutex 'ctx_lock_of_imon_context' was not locked and nondeterministically lock it. 
Return 0 on fails */ 72 int ldv_mutex_trylock_ctx_lock_of_imon_context(struct mutex *lock) 73 { 74 int is_mutex_held_by_another_thread; 75 76 /* LDV_COMMENT_ASSERT It may be an error if mutex 'ctx_lock_of_imon_context' is locked at this point */ 77 ldv_assert(ldv_mutex_ctx_lock_of_imon_context == 1); 78 79 /* LDV_COMMENT_OTHER Construct nondetermined result */ 80 is_mutex_held_by_another_thread = ldv_undef_int(); 81 82 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'ctx_lock_of_imon_context' */ 83 if (is_mutex_held_by_another_thread) 84 { 85 /* LDV_COMMENT_RETURN Finish with fail */ 86 return 0; 87 } 88 else 89 { 90 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'ctx_lock_of_imon_context' */ 91 ldv_mutex_ctx_lock_of_imon_context = 2; 92 /* LDV_COMMENT_RETURN Finish with success */ 93 return 1; 94 } 95 } 96 97 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_mutex_lock_ctx_lock_of_imon_context') Lock mutex 'ctx_lock_of_imon_context' if atomic decrement result is zero */ 98 int ldv_atomic_dec_and_mutex_lock_ctx_lock_of_imon_context(atomic_t *cnt, struct mutex *lock) 99 { 100 int atomic_value_after_dec; 101 102 /* LDV_COMMENT_ASSERT Mutex 'ctx_lock_of_imon_context' must be unlocked (since we may lock it in this function) */ 103 ldv_assert(ldv_mutex_ctx_lock_of_imon_context == 1); 104 105 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */ 106 atomic_value_after_dec = ldv_undef_int(); 107 108 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */ 109 if (atomic_value_after_dec == 0) 110 { 111 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'ctx_lock_of_imon_context', as atomic has decremented to zero */ 112 ldv_mutex_ctx_lock_of_imon_context = 2; 113 /* LDV_COMMENT_RETURN Return 1 with locked mutex 'ctx_lock_of_imon_context' */ 114 return 1; 115 } 116 117 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking mutex 'ctx_lock_of_imon_context' */ 118 return 0; 119 } 120 121 /* TODO Syncronize with 39_7a 
ldv_spin_is_locked! */ 122 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_is_locked_ctx_lock_of_imon_context') Check whether mutex 'ctx_lock_of_imon_context' was locked */ 123 int ldv_mutex_is_locked_ctx_lock_of_imon_context(struct mutex *lock) 124 { 125 int nondetermined; 126 127 if(ldv_mutex_ctx_lock_of_imon_context == 1) 128 { 129 /* LDV_COMMENT_OTHER Construct nondetermined result */ 130 nondetermined = ldv_undef_int(); 131 132 /* LDV_COMMENT_ASSERT Nondeterministically understand whether mutex 'ctx_lock_of_imon_context' was locked */ 133 if(nondetermined) 134 { 135 /* LDV_COMMENT_RETURN Mutex 'ctx_lock_of_imon_context' was unlocked */ 136 return 0; 137 } 138 else 139 { 140 /* LDV_COMMENT_RETURN Mutex 'ctx_lock_of_imon_context' was locked */ 141 return 1; 142 } 143 } 144 else 145 { 146 /* LDV_COMMENT_RETURN Mutex 'ctx_lock_of_imon_context' was locked */ 147 return 1; 148 } 149 } 150 151 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_unlock_ctx_lock_of_imon_context') Check that mutex 'ctx_lock_of_imon_context' was locked and unlock it */ 152 void ldv_mutex_unlock_ctx_lock_of_imon_context(struct mutex *lock) 153 { 154 /* LDV_COMMENT_ASSERT Mutex 'ctx_lock_of_imon_context' must be locked */ 155 ldv_assert(ldv_mutex_ctx_lock_of_imon_context == 2); 156 /* LDV_COMMENT_CHANGE_STATE Unlock mutex 'ctx_lock_of_imon_context' */ 157 ldv_mutex_ctx_lock_of_imon_context = 1; 158 } 159 160 161 162 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device') Acquires the usb lock and checks for double usb lock*/ 163 void ldv_usb_lock_device_ctx_lock_of_imon_context(void) 164 { 165 /* LDV_COMMENT_CHANGE_STATE Lock usb_lock 'ctx_lock_of_imon_context' */ 166 ldv_mutex_lock_ctx_lock_of_imon_context(NULL); 167 } 168 169 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_trylock_device') Tries to acquire the usb lock and returns 1 if successful*/ 170 int ldv_usb_trylock_device_ctx_lock_of_imon_context(void) 171 { 172 return 
ldv_mutex_trylock_ctx_lock_of_imon_context(NULL); 173 } 174 175 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device_for_reset') Tries to acquire the usb lock and returns 0 if successful*/ 176 int ldv_usb_lock_device_for_reset_ctx_lock_of_imon_context(void) 177 { 178 if(ldv_undef_int()) { 179 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'ctx_lock_of_imon_context' */ 180 ldv_mutex_lock_ctx_lock_of_imon_context(NULL); 181 /* LDV_COMMENT_RETURN Finish with success */ 182 return 0; 183 } else 184 /* LDV_COMMENT_RETURN Usb lock is not acquired*/ 185 return ldv_undef_int_negative(); 186 } 187 188 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_unlock_device') Releases the usb lock and checks that usb lock was acquired before*/ 189 void ldv_usb_unlock_device_ctx_lock_of_imon_context(void) { 190 /* LDV_COMMENT_CHANGE_STATE Unlock usb_lock 'ctx_lock_of_imon_context' */ 191 ldv_mutex_unlock_ctx_lock_of_imon_context(NULL); 192 } 193 194 static int ldv_mutex_driver_lock = 1; 195 196 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_interruptible_driver_lock') Check that mutex 'driver_lock' was unlocked and nondeterministically lock it. Return the corresponding error code on fails */ 197 int ldv_mutex_lock_interruptible_driver_lock(struct mutex *lock) 198 { 199 int nondetermined; 200 201 /* LDV_COMMENT_ASSERT Mutex 'driver_lock' must be unlocked */ 202 ldv_assert(ldv_mutex_driver_lock == 1); 203 204 /* LDV_COMMENT_OTHER Construct nondetermined result*/ 205 nondetermined = ldv_undef_int(); 206 207 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'driver_lock' */ 208 if (nondetermined) 209 { 210 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'driver_lock' */ 211 ldv_mutex_driver_lock = 2; 212 /* LDV_COMMENT_RETURN Finish with success */ 213 return 0; 214 } 215 else 216 { 217 /* LDV_COMMENT_RETURN Finish with fail. 
Mutex 'driver_lock' is keeped unlocked */ 218 return -EINTR; 219 } 220 } 221 222 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_killable_driver_lock') Check that mutex 'driver_lock' wasn unlocked and nondeterministically lock it. Return the corresponding error code on fails*/ 223 int ldv_mutex_lock_killable_driver_lock(struct mutex *lock) 224 { 225 int nondetermined; 226 227 /* LDV_COMMENT_ASSERT Mutex 'driver_lock' must be unlocked */ 228 ldv_assert(ldv_mutex_driver_lock == 1); 229 230 /* LDV_COMMENT_OTHER Construct nondetermined result */ 231 nondetermined = ldv_undef_int(); 232 233 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'driver_lock' */ 234 if (nondetermined) 235 { 236 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'driver_lock' */ 237 ldv_mutex_driver_lock = 2; 238 /* LDV_COMMENT_RETURN Finish with success*/ 239 return 0; 240 } 241 else 242 { 243 /* LDV_COMMENT_RETURN Finish with the fail. Mutex 'driver_lock' is keeped unlocked */ 244 return -EINTR; 245 } 246 } 247 248 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_driver_lock') Check that mutex 'driver_lock' was not locked and lock it */ 249 void ldv_mutex_lock_driver_lock(struct mutex *lock) 250 { 251 /* LDV_COMMENT_ASSERT Mutex 'driver_lock' must be unlocked */ 252 ldv_assert(ldv_mutex_driver_lock == 1); 253 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'driver_lock' */ 254 ldv_mutex_driver_lock = 2; 255 } 256 257 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_trylock_driver_lock') Check that mutex 'driver_lock' was not locked and nondeterministically lock it. 
Return 0 on fails */ 258 int ldv_mutex_trylock_driver_lock(struct mutex *lock) 259 { 260 int is_mutex_held_by_another_thread; 261 262 /* LDV_COMMENT_ASSERT It may be an error if mutex 'driver_lock' is locked at this point */ 263 ldv_assert(ldv_mutex_driver_lock == 1); 264 265 /* LDV_COMMENT_OTHER Construct nondetermined result */ 266 is_mutex_held_by_another_thread = ldv_undef_int(); 267 268 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'driver_lock' */ 269 if (is_mutex_held_by_another_thread) 270 { 271 /* LDV_COMMENT_RETURN Finish with fail */ 272 return 0; 273 } 274 else 275 { 276 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'driver_lock' */ 277 ldv_mutex_driver_lock = 2; 278 /* LDV_COMMENT_RETURN Finish with success */ 279 return 1; 280 } 281 } 282 283 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_mutex_lock_driver_lock') Lock mutex 'driver_lock' if atomic decrement result is zero */ 284 int ldv_atomic_dec_and_mutex_lock_driver_lock(atomic_t *cnt, struct mutex *lock) 285 { 286 int atomic_value_after_dec; 287 288 /* LDV_COMMENT_ASSERT Mutex 'driver_lock' must be unlocked (since we may lock it in this function) */ 289 ldv_assert(ldv_mutex_driver_lock == 1); 290 291 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */ 292 atomic_value_after_dec = ldv_undef_int(); 293 294 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */ 295 if (atomic_value_after_dec == 0) 296 { 297 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'driver_lock', as atomic has decremented to zero */ 298 ldv_mutex_driver_lock = 2; 299 /* LDV_COMMENT_RETURN Return 1 with locked mutex 'driver_lock' */ 300 return 1; 301 } 302 303 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking mutex 'driver_lock' */ 304 return 0; 305 } 306 307 /* TODO Syncronize with 39_7a ldv_spin_is_locked! 
*/ 308 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_is_locked_driver_lock') Check whether mutex 'driver_lock' was locked */ 309 int ldv_mutex_is_locked_driver_lock(struct mutex *lock) 310 { 311 int nondetermined; 312 313 if(ldv_mutex_driver_lock == 1) 314 { 315 /* LDV_COMMENT_OTHER Construct nondetermined result */ 316 nondetermined = ldv_undef_int(); 317 318 /* LDV_COMMENT_ASSERT Nondeterministically understand whether mutex 'driver_lock' was locked */ 319 if(nondetermined) 320 { 321 /* LDV_COMMENT_RETURN Mutex 'driver_lock' was unlocked */ 322 return 0; 323 } 324 else 325 { 326 /* LDV_COMMENT_RETURN Mutex 'driver_lock' was locked */ 327 return 1; 328 } 329 } 330 else 331 { 332 /* LDV_COMMENT_RETURN Mutex 'driver_lock' was locked */ 333 return 1; 334 } 335 } 336 337 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_unlock_driver_lock') Check that mutex 'driver_lock' was locked and unlock it */ 338 void ldv_mutex_unlock_driver_lock(struct mutex *lock) 339 { 340 /* LDV_COMMENT_ASSERT Mutex 'driver_lock' must be locked */ 341 ldv_assert(ldv_mutex_driver_lock == 2); 342 /* LDV_COMMENT_CHANGE_STATE Unlock mutex 'driver_lock' */ 343 ldv_mutex_driver_lock = 1; 344 } 345 346 347 348 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device') Acquires the usb lock and checks for double usb lock*/ 349 void ldv_usb_lock_device_driver_lock(void) 350 { 351 /* LDV_COMMENT_CHANGE_STATE Lock usb_lock 'driver_lock' */ 352 ldv_mutex_lock_driver_lock(NULL); 353 } 354 355 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_trylock_device') Tries to acquire the usb lock and returns 1 if successful*/ 356 int ldv_usb_trylock_device_driver_lock(void) 357 { 358 return ldv_mutex_trylock_driver_lock(NULL); 359 } 360 361 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device_for_reset') Tries to acquire the usb lock and returns 0 if successful*/ 362 int ldv_usb_lock_device_for_reset_driver_lock(void) 363 { 364 if(ldv_undef_int()) { 365 /* 
LDV_COMMENT_CHANGE_STATE Lock mutex 'driver_lock' */ 366 ldv_mutex_lock_driver_lock(NULL); 367 /* LDV_COMMENT_RETURN Finish with success */ 368 return 0; 369 } else 370 /* LDV_COMMENT_RETURN Usb lock is not acquired*/ 371 return ldv_undef_int_negative(); 372 } 373 374 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_unlock_device') Releases the usb lock and checks that usb lock was acquired before*/ 375 void ldv_usb_unlock_device_driver_lock(void) { 376 /* LDV_COMMENT_CHANGE_STATE Unlock usb_lock 'driver_lock' */ 377 ldv_mutex_unlock_driver_lock(NULL); 378 } 379 380 static int ldv_mutex_i_mutex_of_inode = 1; 381 382 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_interruptible_i_mutex_of_inode') Check that mutex 'i_mutex_of_inode' was unlocked and nondeterministically lock it. Return the corresponding error code on fails */ 383 int ldv_mutex_lock_interruptible_i_mutex_of_inode(struct mutex *lock) 384 { 385 int nondetermined; 386 387 /* LDV_COMMENT_ASSERT Mutex 'i_mutex_of_inode' must be unlocked */ 388 ldv_assert(ldv_mutex_i_mutex_of_inode == 1); 389 390 /* LDV_COMMENT_OTHER Construct nondetermined result*/ 391 nondetermined = ldv_undef_int(); 392 393 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'i_mutex_of_inode' */ 394 if (nondetermined) 395 { 396 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'i_mutex_of_inode' */ 397 ldv_mutex_i_mutex_of_inode = 2; 398 /* LDV_COMMENT_RETURN Finish with success */ 399 return 0; 400 } 401 else 402 { 403 /* LDV_COMMENT_RETURN Finish with fail. Mutex 'i_mutex_of_inode' is keeped unlocked */ 404 return -EINTR; 405 } 406 } 407 408 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_killable_i_mutex_of_inode') Check that mutex 'i_mutex_of_inode' wasn unlocked and nondeterministically lock it. 
Return the corresponding error code on fails*/ 409 int ldv_mutex_lock_killable_i_mutex_of_inode(struct mutex *lock) 410 { 411 int nondetermined; 412 413 /* LDV_COMMENT_ASSERT Mutex 'i_mutex_of_inode' must be unlocked */ 414 ldv_assert(ldv_mutex_i_mutex_of_inode == 1); 415 416 /* LDV_COMMENT_OTHER Construct nondetermined result */ 417 nondetermined = ldv_undef_int(); 418 419 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'i_mutex_of_inode' */ 420 if (nondetermined) 421 { 422 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'i_mutex_of_inode' */ 423 ldv_mutex_i_mutex_of_inode = 2; 424 /* LDV_COMMENT_RETURN Finish with success*/ 425 return 0; 426 } 427 else 428 { 429 /* LDV_COMMENT_RETURN Finish with the fail. Mutex 'i_mutex_of_inode' is keeped unlocked */ 430 return -EINTR; 431 } 432 } 433 434 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_i_mutex_of_inode') Check that mutex 'i_mutex_of_inode' was not locked and lock it */ 435 void ldv_mutex_lock_i_mutex_of_inode(struct mutex *lock) 436 { 437 /* LDV_COMMENT_ASSERT Mutex 'i_mutex_of_inode' must be unlocked */ 438 ldv_assert(ldv_mutex_i_mutex_of_inode == 1); 439 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'i_mutex_of_inode' */ 440 ldv_mutex_i_mutex_of_inode = 2; 441 } 442 443 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_trylock_i_mutex_of_inode') Check that mutex 'i_mutex_of_inode' was not locked and nondeterministically lock it. 
Return 0 on fails */ 444 int ldv_mutex_trylock_i_mutex_of_inode(struct mutex *lock) 445 { 446 int is_mutex_held_by_another_thread; 447 448 /* LDV_COMMENT_ASSERT It may be an error if mutex 'i_mutex_of_inode' is locked at this point */ 449 ldv_assert(ldv_mutex_i_mutex_of_inode == 1); 450 451 /* LDV_COMMENT_OTHER Construct nondetermined result */ 452 is_mutex_held_by_another_thread = ldv_undef_int(); 453 454 /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'i_mutex_of_inode' */ 455 if (is_mutex_held_by_another_thread) 456 { 457 /* LDV_COMMENT_RETURN Finish with fail */ 458 return 0; 459 } 460 else 461 { 462 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'i_mutex_of_inode' */ 463 ldv_mutex_i_mutex_of_inode = 2; 464 /* LDV_COMMENT_RETURN Finish with success */ 465 return 1; 466 } 467 } 468 469 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_mutex_lock_i_mutex_of_inode') Lock mutex 'i_mutex_of_inode' if atomic decrement result is zero */ 470 int ldv_atomic_dec_and_mutex_lock_i_mutex_of_inode(atomic_t *cnt, struct mutex *lock) 471 { 472 int atomic_value_after_dec; 473 474 /* LDV_COMMENT_ASSERT Mutex 'i_mutex_of_inode' must be unlocked (since we may lock it in this function) */ 475 ldv_assert(ldv_mutex_i_mutex_of_inode == 1); 476 477 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */ 478 atomic_value_after_dec = ldv_undef_int(); 479 480 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */ 481 if (atomic_value_after_dec == 0) 482 { 483 /* LDV_COMMENT_CHANGE_STATE Lock mutex 'i_mutex_of_inode', as atomic has decremented to zero */ 484 ldv_mutex_i_mutex_of_inode = 2; 485 /* LDV_COMMENT_RETURN Return 1 with locked mutex 'i_mutex_of_inode' */ 486 return 1; 487 } 488 489 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking mutex 'i_mutex_of_inode' */ 490 return 0; 491 } 492 493 /* TODO Syncronize with 39_7a ldv_spin_is_locked! 
*/ 494 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_is_locked_i_mutex_of_inode') Check whether mutex 'i_mutex_of_inode' was locked */ 495 int ldv_mutex_is_locked_i_mutex_of_inode(struct mutex *lock) 496 { 497 int nondetermined; 498 499 if(ldv_mutex_i_mutex_of_inode == 1) 500 { 501 /* LDV_COMMENT_OTHER Construct nondetermined result */ 502 nondetermined = ldv_undef_int(); 503 504 /* LDV_COMMENT_ASSERT Nondeterministically understand whether mutex 'i_mutex_of_inode' was locked */ 505 if(nondetermined) 506 { 507 /* LDV_COMMENT_RETURN Mutex 'i_mutex_of_inode' was unlocked */ 508 return 0; 509 } 510 else 511 { 512 /* LDV_COMMENT_RETURN Mutex 'i_mutex_of_inode' was locked */ 513 return 1; 514 } 515 } 516 else 517 { 518 /* LDV_COMMENT_RETURN Mutex 'i_mutex_of_inode' was locked */ 519 return 1; 520 } 521 } 522 523 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_unlock_i_mutex_of_inode') Check that mutex 'i_mutex_of_inode' was locked and unlock it */ 524 void ldv_mutex_unlock_i_mutex_of_inode(struct mutex *lock) 525 { 526 /* LDV_COMMENT_ASSERT Mutex 'i_mutex_of_inode' must be locked */ 527 ldv_assert(ldv_mutex_i_mutex_of_inode == 2); 528 /* LDV_COMMENT_CHANGE_STATE Unlock mutex 'i_mutex_of_inode' */ 529 ldv_mutex_i_mutex_of_inode = 1; 530 } 531 532 533 534 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device') Acquires the usb lock and checks for double usb lock*/ 535 void ldv_usb_lock_device_i_mutex_of_inode(void) 536 { 537 /* LDV_COMMENT_CHANGE_STATE Lock usb_lock 'i_mutex_of_inode' */ 538 ldv_mutex_lock_i_mutex_of_inode(NULL); 539 } 540 541 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_trylock_device') Tries to acquire the usb lock and returns 1 if successful*/ 542 int ldv_usb_trylock_device_i_mutex_of_inode(void) 543 { 544 return ldv_mutex_trylock_i_mutex_of_inode(NULL); 545 } 546 547 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device_for_reset') Tries to acquire the usb lock and returns 0 if successful*/ 548 
int ldv_usb_lock_device_for_reset_i_mutex_of_inode(void)
{
	/* Nondeterministically succeed (0, mutex locked) or fail with an
	   arbitrary negative error code (mutex left unlocked). */
	if(ldv_undef_int()) {
		/* LDV_COMMENT_CHANGE_STATE Lock mutex 'i_mutex_of_inode' */
		ldv_mutex_lock_i_mutex_of_inode(NULL);
		/* LDV_COMMENT_RETURN Finish with success */
		return 0;
	} else
		/* LDV_COMMENT_RETURN Usb lock is not acquired*/
		return ldv_undef_int_negative();
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_unlock_device') Releases the usb lock and checks that usb lock was acquired before*/
void ldv_usb_unlock_device_i_mutex_of_inode(void) {
	/* LDV_COMMENT_CHANGE_STATE Unlock usb_lock 'i_mutex_of_inode' */
	ldv_mutex_unlock_i_mutex_of_inode(NULL);
}

/* Model state for the 'lock' mutex class: 1 = unlocked, 2 = locked. */
static int ldv_mutex_lock = 1;

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_interruptible_lock') Check that mutex 'lock' was unlocked and nondeterministically lock it. Return the corresponding error code on fails */
int ldv_mutex_lock_interruptible_lock(struct mutex *lock)
{
	int nondetermined;

	/* LDV_COMMENT_ASSERT Mutex 'lock' must be unlocked */
	ldv_assert(ldv_mutex_lock == 1);

	/* LDV_COMMENT_OTHER Construct nondetermined result*/
	nondetermined = ldv_undef_int();

	/* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'lock' */
	if (nondetermined)
	{
		/* LDV_COMMENT_CHANGE_STATE Lock mutex 'lock' */
		ldv_mutex_lock = 2;
		/* LDV_COMMENT_RETURN Finish with success */
		return 0;
	}
	else
	{
		/* LDV_COMMENT_RETURN Finish with fail. Mutex 'lock' is kept unlocked */
		return -EINTR;
	}
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_killable_lock') Check that mutex 'lock' was unlocked and nondeterministically lock it. Return the corresponding error code on fails*/
int ldv_mutex_lock_killable_lock(struct mutex *lock)
{
	int nondetermined;

	/* LDV_COMMENT_ASSERT Mutex 'lock' must be unlocked */
	ldv_assert(ldv_mutex_lock == 1);

	/* LDV_COMMENT_OTHER Construct nondetermined result */
	nondetermined = ldv_undef_int();

	/* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'lock' */
	if (nondetermined)
	{
		/* LDV_COMMENT_CHANGE_STATE Lock mutex 'lock' */
		ldv_mutex_lock = 2;
		/* LDV_COMMENT_RETURN Finish with success*/
		return 0;
	}
	else
	{
		/* LDV_COMMENT_RETURN Finish with fail. Mutex 'lock' is kept unlocked */
		return -EINTR;
	}
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_lock') Check that mutex 'lock' was not locked and lock it */
void ldv_mutex_lock_lock(struct mutex *lock)
{
	/* LDV_COMMENT_ASSERT Mutex 'lock' must be unlocked */
	ldv_assert(ldv_mutex_lock == 1);
	/* LDV_COMMENT_CHANGE_STATE Lock mutex 'lock' */
	ldv_mutex_lock = 2;
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_trylock_lock') Check that mutex 'lock' was not locked and nondeterministically lock it. Return 0 on fails */
int ldv_mutex_trylock_lock(struct mutex *lock)
{
	int is_mutex_held_by_another_thread;

	/* LDV_COMMENT_ASSERT It may be an error if mutex 'lock' is locked at this point */
	ldv_assert(ldv_mutex_lock == 1);

	/* LDV_COMMENT_OTHER Construct nondetermined result */
	is_mutex_held_by_another_thread = ldv_undef_int();

	/* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'lock' */
	if (is_mutex_held_by_another_thread)
	{
		/* LDV_COMMENT_RETURN Finish with fail */
		return 0;
	}
	else
	{
		/* LDV_COMMENT_CHANGE_STATE Lock mutex 'lock' */
		ldv_mutex_lock = 2;
		/* LDV_COMMENT_RETURN Finish with success */
		return 1;
	}
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_mutex_lock_lock') Lock mutex 'lock' if atomic decrement result is zero */
int ldv_atomic_dec_and_mutex_lock_lock(atomic_t *cnt, struct mutex *lock)
{
	int atomic_value_after_dec;

	/* LDV_COMMENT_ASSERT Mutex 'lock' must be unlocked (since we may lock it in this function) */
	ldv_assert(ldv_mutex_lock == 1);

	/* LDV_COMMENT_OTHER Assign the result of atomic decrement */
	/* The decrement itself is not modeled; its result is nondeterministic. */
	atomic_value_after_dec = ldv_undef_int();

	/* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
	if (atomic_value_after_dec == 0)
	{
		/* LDV_COMMENT_CHANGE_STATE Lock mutex 'lock', as atomic has decremented to zero */
		ldv_mutex_lock = 2;
		/* LDV_COMMENT_RETURN Return 1 with locked mutex 'lock' */
		return 1;
	}

	/* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking mutex 'lock' */
	return 0;
}

/* TODO Synchronize with 39_7a ldv_spin_is_locked! */
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_is_locked_lock') Check whether mutex 'lock' was locked */
int ldv_mutex_is_locked_lock(struct mutex *lock)
{
	int nondetermined;

	if(ldv_mutex_lock == 1)
	{
		/* LDV_COMMENT_OTHER Construct nondetermined result */
		nondetermined = ldv_undef_int();

		/* LDV_COMMENT_ASSERT Nondeterministically understand whether mutex 'lock' was locked */
		if(nondetermined)
		{
			/* LDV_COMMENT_RETURN Mutex 'lock' was unlocked */
			return 0;
		}
		else
		{
			/* LDV_COMMENT_RETURN Mutex 'lock' was locked */
			return 1;
		}
	}
	else
	{
		/* LDV_COMMENT_RETURN Mutex 'lock' was locked */
		return 1;
	}
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_unlock_lock') Check that mutex 'lock' was locked and unlock it */
void ldv_mutex_unlock_lock(struct mutex *lock)
{
	/* LDV_COMMENT_ASSERT Mutex 'lock' must be locked */
	ldv_assert(ldv_mutex_lock == 2);
	/* LDV_COMMENT_CHANGE_STATE Unlock mutex 'lock' */
	ldv_mutex_lock = 1;
}



/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device') Acquires the usb lock and checks for double usb lock*/
void ldv_usb_lock_device_lock(void)
{
	/* LDV_COMMENT_CHANGE_STATE Lock usb_lock 'lock' */
	ldv_mutex_lock_lock(NULL);
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_trylock_device') Tries to acquire the usb lock and returns 1 if successful*/
int ldv_usb_trylock_device_lock(void)
{
	return ldv_mutex_trylock_lock(NULL);
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device_for_reset') Tries to acquire the usb lock and returns 0 if successful*/
int ldv_usb_lock_device_for_reset_lock(void)
{
	if(ldv_undef_int()) {
		/* LDV_COMMENT_CHANGE_STATE Lock mutex 'lock' */
		ldv_mutex_lock_lock(NULL);
		/* LDV_COMMENT_RETURN Finish with success */
		return 0;
	} else
		/* LDV_COMMENT_RETURN Usb lock is not acquired*/
		return ldv_undef_int_negative();
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_unlock_device') Releases the usb lock and checks that usb lock was acquired before*/
void ldv_usb_unlock_device_lock(void) {
	/* LDV_COMMENT_CHANGE_STATE Unlock usb_lock 'lock' */
	ldv_mutex_unlock_lock(NULL);
}

/* Model state for the 'mutex_of_device' mutex class: 1 = unlocked, 2 = locked. */
static int ldv_mutex_mutex_of_device = 1;

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_interruptible_mutex_of_device') Check that mutex 'mutex_of_device' was unlocked and nondeterministically lock it. Return the corresponding error code on fails */
int ldv_mutex_lock_interruptible_mutex_of_device(struct mutex *lock)
{
	int nondetermined;

	/* LDV_COMMENT_ASSERT Mutex 'mutex_of_device' must be unlocked */
	ldv_assert(ldv_mutex_mutex_of_device == 1);

	/* LDV_COMMENT_OTHER Construct nondetermined result*/
	nondetermined = ldv_undef_int();

	/* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'mutex_of_device' */
	if (nondetermined)
	{
		/* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_device' */
		ldv_mutex_mutex_of_device = 2;
		/* LDV_COMMENT_RETURN Finish with success */
		return 0;
	}
	else
	{
		/* LDV_COMMENT_RETURN Finish with fail. Mutex 'mutex_of_device' is kept unlocked */
		return -EINTR;
	}
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_killable_mutex_of_device') Check that mutex 'mutex_of_device' was unlocked and nondeterministically lock it. Return the corresponding error code on fails*/
int ldv_mutex_lock_killable_mutex_of_device(struct mutex *lock)
{
	int nondetermined;

	/* LDV_COMMENT_ASSERT Mutex 'mutex_of_device' must be unlocked */
	ldv_assert(ldv_mutex_mutex_of_device == 1);

	/* LDV_COMMENT_OTHER Construct nondetermined result */
	nondetermined = ldv_undef_int();

	/* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'mutex_of_device' */
	if (nondetermined)
	{
		/* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_device' */
		ldv_mutex_mutex_of_device = 2;
		/* LDV_COMMENT_RETURN Finish with success*/
		return 0;
	}
	else
	{
		/* LDV_COMMENT_RETURN Finish with fail. Mutex 'mutex_of_device' is kept unlocked */
		return -EINTR;
	}
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_mutex_of_device') Check that mutex 'mutex_of_device' was not locked and lock it */
void ldv_mutex_lock_mutex_of_device(struct mutex *lock)
{
	/* LDV_COMMENT_ASSERT Mutex 'mutex_of_device' must be unlocked */
	ldv_assert(ldv_mutex_mutex_of_device == 1);
	/* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_device' */
	ldv_mutex_mutex_of_device = 2;
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_trylock_mutex_of_device') Check that mutex 'mutex_of_device' was not locked and nondeterministically lock it. Return 0 on fails */
int ldv_mutex_trylock_mutex_of_device(struct mutex *lock)
{
	int is_mutex_held_by_another_thread;

	/* LDV_COMMENT_ASSERT It may be an error if mutex 'mutex_of_device' is locked at this point */
	ldv_assert(ldv_mutex_mutex_of_device == 1);

	/* LDV_COMMENT_OTHER Construct nondetermined result */
	is_mutex_held_by_another_thread = ldv_undef_int();

	/* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'mutex_of_device' */
	if (is_mutex_held_by_another_thread)
	{
		/* LDV_COMMENT_RETURN Finish with fail */
		return 0;
	}
	else
	{
		/* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_device' */
		ldv_mutex_mutex_of_device = 2;
		/* LDV_COMMENT_RETURN Finish with success */
		return 1;
	}
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_mutex_lock_mutex_of_device') Lock mutex 'mutex_of_device' if atomic decrement result is zero */
int ldv_atomic_dec_and_mutex_lock_mutex_of_device(atomic_t *cnt, struct mutex *lock)
{
	int atomic_value_after_dec;

	/* LDV_COMMENT_ASSERT Mutex 'mutex_of_device' must be unlocked (since we may lock it in this function) */
	ldv_assert(ldv_mutex_mutex_of_device == 1);

	/* LDV_COMMENT_OTHER Assign the result of atomic decrement */
	/* The decrement itself is not modeled; its result is nondeterministic. */
	atomic_value_after_dec = ldv_undef_int();

	/* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
	if (atomic_value_after_dec == 0)
	{
		/* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_device', as atomic has decremented to zero */
		ldv_mutex_mutex_of_device = 2;
		/* LDV_COMMENT_RETURN Return 1 with locked mutex 'mutex_of_device' */
		return 1;
	}

	/* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking mutex 'mutex_of_device' */
	return 0;
}

/* TODO Synchronize with 39_7a ldv_spin_is_locked! */
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_is_locked_mutex_of_device') Check whether mutex 'mutex_of_device' was locked */
int ldv_mutex_is_locked_mutex_of_device(struct mutex *lock)
{
	int nondetermined;

	if(ldv_mutex_mutex_of_device == 1)
	{
		/* LDV_COMMENT_OTHER Construct nondetermined result */
		nondetermined = ldv_undef_int();

		/* LDV_COMMENT_ASSERT Nondeterministically understand whether mutex 'mutex_of_device' was locked */
		if(nondetermined)
		{
			/* LDV_COMMENT_RETURN Mutex 'mutex_of_device' was unlocked */
			return 0;
		}
		else
		{
			/* LDV_COMMENT_RETURN Mutex 'mutex_of_device' was locked */
			return 1;
		}
	}
	else
	{
		/* LDV_COMMENT_RETURN Mutex 'mutex_of_device' was locked */
		return 1;
	}
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_unlock_mutex_of_device') Check that mutex 'mutex_of_device' was locked and unlock it */
void ldv_mutex_unlock_mutex_of_device(struct mutex *lock)
{
	/* LDV_COMMENT_ASSERT Mutex 'mutex_of_device' must be locked */
	ldv_assert(ldv_mutex_mutex_of_device == 2);
	/* LDV_COMMENT_CHANGE_STATE Unlock mutex 'mutex_of_device' */
	ldv_mutex_mutex_of_device = 1;
}



/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device') Acquires the usb lock and checks for double usb lock*/
void ldv_usb_lock_device_mutex_of_device(void)
{
	/* LDV_COMMENT_CHANGE_STATE Lock usb_lock 'mutex_of_device' */
	ldv_mutex_lock_mutex_of_device(NULL);
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_trylock_device') Tries to acquire the usb lock and returns 1 if successful*/
int ldv_usb_trylock_device_mutex_of_device(void)
{
	return ldv_mutex_trylock_mutex_of_device(NULL);
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device_for_reset') Tries to acquire the usb lock and returns 0 if successful*/
int ldv_usb_lock_device_for_reset_mutex_of_device(void)
{
	if(ldv_undef_int()) {
		/* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_device' */
		ldv_mutex_lock_mutex_of_device(NULL);
		/* LDV_COMMENT_RETURN Finish with success */
		return 0;
	} else
		/* LDV_COMMENT_RETURN Usb lock is not acquired*/
		return ldv_undef_int_negative();
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_unlock_device') Releases the usb lock and checks that usb lock was acquired before*/
void ldv_usb_unlock_device_mutex_of_device(void) {
	/* LDV_COMMENT_CHANGE_STATE Unlock usb_lock 'mutex_of_device' */
	ldv_mutex_unlock_mutex_of_device(NULL);
}


/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that all mutexes are unlocked at the end */
/* ldv_mutex_ctx_lock_of_imon_context and ldv_mutex_driver_lock are declared
   earlier in this file, alongside the other per-mutex model state variables. */
void ldv_check_final_state(void)
{
	/* LDV_COMMENT_ASSERT Mutex 'ctx_lock_of_imon_context' must be unlocked at the end */
	ldv_assert(ldv_mutex_ctx_lock_of_imon_context == 1);
	/* LDV_COMMENT_ASSERT Mutex 'driver_lock' must be unlocked at the end */
	ldv_assert(ldv_mutex_driver_lock == 1);
	/* LDV_COMMENT_ASSERT Mutex 'i_mutex_of_inode' must be unlocked at the end */
	ldv_assert(ldv_mutex_i_mutex_of_inode == 1);
	/* LDV_COMMENT_ASSERT Mutex 'lock' must be unlocked at the end */
	ldv_assert(ldv_mutex_lock == 1);
	/* LDV_COMMENT_ASSERT Mutex 'mutex_of_device' must be unlocked at the end */
	ldv_assert(ldv_mutex_mutex_of_device == 1);
}
#ifndef _LDV_RCV_H_
#define _LDV_RCV_H_

/* If expr evaluates to zero, ldv_assert() causes a program to reach the error
   label like the standard assert().
   Both arms of the conditional are of type void: the original '0' arm made
   the operands of ?: have mismatched types (int vs. the void returned by
   ldv_error()), which violates C11 6.5.15 and is rejected by conforming
   compilers. The macro is used only in statement context in this file. */
#define ldv_assert(expr) ((expr) ? (void) 0 : ldv_error())

/* The error label wrapper. It is used because of some static verifiers (like
   BLAST) don't accept multiple error labels through a program. */
static inline void ldv_error(void)
{
	LDV_ERROR: goto LDV_ERROR;
}

/* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
   avoided by verifiers. Same void-typed arms as ldv_assert() above. */
#define ldv_assume(expr) ((expr) ? (void) 0 : ldv_stop())

/* Infinite loop, that causes verifiers to skip such paths. */
static inline void ldv_stop(void) {
	LDV_STOP: goto LDV_STOP;
}

/* Special nondeterministic functions (left undefined on purpose; verifiers
   treat their results as unconstrained values). */
int ldv_undef_int(void);
void *ldv_undef_ptr(void);
unsigned long ldv_undef_ulong(void);
long ldv_undef_long(void);

/* Return nondeterministic negative integer number. */
static inline int ldv_undef_int_negative(void)
{
	int ret = ldv_undef_int();

	ldv_assume(ret < 0);

	return ret;
}

/* Return nondeterministic nonpositive integer number. */
static inline int ldv_undef_int_nonpositive(void)
{
	int ret = ldv_undef_int();

	ldv_assume(ret <= 0);

	return ret;
}

/* Add explicit model for the __builtin_expect GCC function. Without the model
   a return value will be treated as nondetermined by verifiers.
   NOTE(review): this redefines a compiler builtin and is only accepted by
   verifier frontends, not by ordinary compilers. */
static inline long __builtin_expect(long exp, long c)
{
	return exp;
}

/* This function causes the program to exit abnormally. GCC implements this
   function by using a target-dependent mechanism (such as intentionally executing
   an illegal instruction) or by calling abort. The mechanism used may vary from
   release to release so you should not rely on any particular implementation.
   http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
static inline void __builtin_trap(void)
{
	ldv_assert(0);
}

/* The constant is for simulating an error of ldv_undef_ptr() function. */
#define LDV_PTR_MAX 2012

#endif /* _LDV_RCV_H_ */
1 /* 2 * device.h - generic, centralized driver model 3 * 4 * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org> 5 * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de> 6 * Copyright (c) 2008-2009 Novell Inc. 7 * 8 * This file is released under the GPLv2 9 * 10 * See Documentation/driver-model/ for more information. 11 */ 12 13 #ifndef _DEVICE_H_ 14 #define _DEVICE_H_ 15 16 #include <linux/ioport.h> 17 #include <linux/kobject.h> 18 #include <linux/klist.h> 19 #include <linux/list.h> 20 #include <linux/lockdep.h> 21 #include <linux/compiler.h> 22 #include <linux/types.h> 23 #include <linux/mutex.h> 24 #include <linux/pinctrl/devinfo.h> 25 #include <linux/pm.h> 26 #include <linux/atomic.h> 27 #include <linux/ratelimit.h> 28 #include <linux/uidgid.h> 29 #include <linux/gfp.h> 30 #include <asm/device.h> 31 32 struct device; 33 struct device_private; 34 struct device_driver; 35 struct driver_private; 36 struct module; 37 struct class; 38 struct subsys_private; 39 struct bus_type; 40 struct device_node; 41 struct fwnode_handle; 42 struct iommu_ops; 43 struct iommu_group; 44 45 struct bus_attribute { 46 struct attribute attr; 47 ssize_t (*show)(struct bus_type *bus, char *buf); 48 ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count); 49 }; 50 51 #define BUS_ATTR(_name, _mode, _show, _store) \ 52 struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store) 53 #define BUS_ATTR_RW(_name) \ 54 struct bus_attribute bus_attr_##_name = __ATTR_RW(_name) 55 #define BUS_ATTR_RO(_name) \ 56 struct bus_attribute bus_attr_##_name = __ATTR_RO(_name) 57 58 extern int __must_check bus_create_file(struct bus_type *, 59 struct bus_attribute *); 60 extern void bus_remove_file(struct bus_type *, struct bus_attribute *); 61 62 /** 63 * struct bus_type - The bus type of the device 64 * 65 * @name: The name of the bus. 66 * @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id). 
67 * @dev_root: Default device to use as the parent. 68 * @dev_attrs: Default attributes of the devices on the bus. 69 * @bus_groups: Default attributes of the bus. 70 * @dev_groups: Default attributes of the devices on the bus. 71 * @drv_groups: Default attributes of the device drivers on the bus. 72 * @match: Called, perhaps multiple times, whenever a new device or driver 73 * is added for this bus. It should return a nonzero value if the 74 * given device can be handled by the given driver. 75 * @uevent: Called when a device is added, removed, or a few other things 76 * that generate uevents to add the environment variables. 77 * @probe: Called when a new device or driver add to this bus, and callback 78 * the specific driver's probe to initial the matched device. 79 * @remove: Called when a device removed from this bus. 80 * @shutdown: Called at shut-down time to quiesce the device. 81 * 82 * @online: Called to put the device back online (after offlining it). 83 * @offline: Called to put the device offline for hot-removal. May fail. 84 * 85 * @suspend: Called when a device on this bus wants to go to sleep mode. 86 * @resume: Called to bring a device on this bus out of sleep mode. 87 * @pm: Power management operations of this bus, callback the specific 88 * device driver's pm-ops. 89 * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU 90 * driver implementations to a bus and allow the driver to do 91 * bus-specific setup 92 * @p: The private data of the driver core, only the driver core can 93 * touch this. 94 * @lock_key: Lock class key for use by the lock validator 95 * 96 * A bus is a channel between the processor and one or more devices. For the 97 * purposes of the device model, all devices are connected via a bus, even if 98 * it is an internal, virtual, "platform" bus. Buses can plug into each other. 99 * A USB controller is usually a PCI device, for example. 
The device model 100 * represents the actual connections between buses and the devices they control. 101 * A bus is represented by the bus_type structure. It contains the name, the 102 * default attributes, the bus' methods, PM operations, and the driver core's 103 * private data. 104 */ 105 struct bus_type { 106 const char *name; 107 const char *dev_name; 108 struct device *dev_root; 109 struct device_attribute *dev_attrs; /* use dev_groups instead */ 110 const struct attribute_group **bus_groups; 111 const struct attribute_group **dev_groups; 112 const struct attribute_group **drv_groups; 113 114 int (*match)(struct device *dev, struct device_driver *drv); 115 int (*uevent)(struct device *dev, struct kobj_uevent_env *env); 116 int (*probe)(struct device *dev); 117 int (*remove)(struct device *dev); 118 void (*shutdown)(struct device *dev); 119 120 int (*online)(struct device *dev); 121 int (*offline)(struct device *dev); 122 123 int (*suspend)(struct device *dev, pm_message_t state); 124 int (*resume)(struct device *dev); 125 126 const struct dev_pm_ops *pm; 127 128 const struct iommu_ops *iommu_ops; 129 130 struct subsys_private *p; 131 struct lock_class_key lock_key; 132 }; 133 134 extern int __must_check bus_register(struct bus_type *bus); 135 136 extern void bus_unregister(struct bus_type *bus); 137 138 extern int __must_check bus_rescan_devices(struct bus_type *bus); 139 140 /* iterator helpers for buses */ 141 struct subsys_dev_iter { 142 struct klist_iter ki; 143 const struct device_type *type; 144 }; 145 void subsys_dev_iter_init(struct subsys_dev_iter *iter, 146 struct bus_type *subsys, 147 struct device *start, 148 const struct device_type *type); 149 struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter); 150 void subsys_dev_iter_exit(struct subsys_dev_iter *iter); 151 152 int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data, 153 int (*fn)(struct device *dev, void *data)); 154 struct device *bus_find_device(struct 
bus_type *bus, struct device *start, 155 void *data, 156 int (*match)(struct device *dev, void *data)); 157 struct device *bus_find_device_by_name(struct bus_type *bus, 158 struct device *start, 159 const char *name); 160 struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id, 161 struct device *hint); 162 int bus_for_each_drv(struct bus_type *bus, struct device_driver *start, 163 void *data, int (*fn)(struct device_driver *, void *)); 164 void bus_sort_breadthfirst(struct bus_type *bus, 165 int (*compare)(const struct device *a, 166 const struct device *b)); 167 /* 168 * Bus notifiers: Get notified of addition/removal of devices 169 * and binding/unbinding of drivers to devices. 170 * In the long run, it should be a replacement for the platform 171 * notify hooks. 172 */ 173 struct notifier_block; 174 175 extern int bus_register_notifier(struct bus_type *bus, 176 struct notifier_block *nb); 177 extern int bus_unregister_notifier(struct bus_type *bus, 178 struct notifier_block *nb); 179 180 /* All 4 notifers below get called with the target struct device * 181 * as an argument. Note that those functions are likely to be called 182 * with the device lock held in the core, so be careful. 
183 */ 184 #define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */ 185 #define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device to be removed */ 186 #define BUS_NOTIFY_REMOVED_DEVICE 0x00000003 /* device removed */ 187 #define BUS_NOTIFY_BIND_DRIVER 0x00000004 /* driver about to be 188 bound */ 189 #define BUS_NOTIFY_BOUND_DRIVER 0x00000005 /* driver bound to device */ 190 #define BUS_NOTIFY_UNBIND_DRIVER 0x00000006 /* driver about to be 191 unbound */ 192 #define BUS_NOTIFY_UNBOUND_DRIVER 0x00000007 /* driver is unbound 193 from the device */ 194 195 extern struct kset *bus_get_kset(struct bus_type *bus); 196 extern struct klist *bus_get_device_klist(struct bus_type *bus); 197 198 /** 199 * enum probe_type - device driver probe type to try 200 * Device drivers may opt in for special handling of their 201 * respective probe routines. This tells the core what to 202 * expect and prefer. 203 * 204 * @PROBE_DEFAULT_STRATEGY: Used by drivers that work equally well 205 * whether probed synchronously or asynchronously. 206 * @PROBE_PREFER_ASYNCHRONOUS: Drivers for "slow" devices which 207 * probing order is not essential for booting the system may 208 * opt into executing their probes asynchronously. 209 * @PROBE_FORCE_SYNCHRONOUS: Use this to annotate drivers that need 210 * their probe routines to run synchronously with driver and 211 * device registration (with the exception of -EPROBE_DEFER 212 * handling - re-probing always ends up being done asynchronously). 213 * 214 * Note that the end goal is to switch the kernel to use asynchronous 215 * probing by default, so annotating drivers with 216 * %PROBE_PREFER_ASYNCHRONOUS is a temporary measure that allows us 217 * to speed up boot process while we are validating the rest of the 218 * drivers. 
219 */ 220 enum probe_type { 221 PROBE_DEFAULT_STRATEGY, 222 PROBE_PREFER_ASYNCHRONOUS, 223 PROBE_FORCE_SYNCHRONOUS, 224 }; 225 226 /** 227 * struct device_driver - The basic device driver structure 228 * @name: Name of the device driver. 229 * @bus: The bus which the device of this driver belongs to. 230 * @owner: The module owner. 231 * @mod_name: Used for built-in modules. 232 * @suppress_bind_attrs: Disables bind/unbind via sysfs. 233 * @probe_type: Type of the probe (synchronous or asynchronous) to use. 234 * @of_match_table: The open firmware table. 235 * @acpi_match_table: The ACPI match table. 236 * @probe: Called to query the existence of a specific device, 237 * whether this driver can work with it, and bind the driver 238 * to a specific device. 239 * @remove: Called when the device is removed from the system to 240 * unbind a device from this driver. 241 * @shutdown: Called at shut-down time to quiesce the device. 242 * @suspend: Called to put the device to sleep mode. Usually to a 243 * low power state. 244 * @resume: Called to bring a device from sleep mode. 245 * @groups: Default attributes that get created by the driver core 246 * automatically. 247 * @pm: Power management operations of the device which matched 248 * this driver. 249 * @p: Driver core's private data, no one other than the driver 250 * core can touch this. 251 * 252 * The device driver-model tracks all of the drivers known to the system. 253 * The main reason for this tracking is to enable the driver core to match 254 * up drivers with new devices. Once drivers are known objects within the 255 * system, however, a number of other things become possible. Device drivers 256 * can export information and configuration variables that are independent 257 * of any specific device. 
258 */ 259 struct device_driver { 260 const char *name; 261 struct bus_type *bus; 262 263 struct module *owner; 264 const char *mod_name; /* used for built-in modules */ 265 266 bool suppress_bind_attrs; /* disables bind/unbind via sysfs */ 267 enum probe_type probe_type; 268 269 const struct of_device_id *of_match_table; 270 const struct acpi_device_id *acpi_match_table; 271 272 int (*probe) (struct device *dev); 273 int (*remove) (struct device *dev); 274 void (*shutdown) (struct device *dev); 275 int (*suspend) (struct device *dev, pm_message_t state); 276 int (*resume) (struct device *dev); 277 const struct attribute_group **groups; 278 279 const struct dev_pm_ops *pm; 280 281 struct driver_private *p; 282 }; 283 284 285 extern int __must_check driver_register(struct device_driver *drv); 286 extern void driver_unregister(struct device_driver *drv); 287 288 extern struct device_driver *driver_find(const char *name, 289 struct bus_type *bus); 290 extern int driver_probe_done(void); 291 extern void wait_for_device_probe(void); 292 293 294 /* sysfs interface for exporting driver attributes */ 295 296 struct driver_attribute { 297 struct attribute attr; 298 ssize_t (*show)(struct device_driver *driver, char *buf); 299 ssize_t (*store)(struct device_driver *driver, const char *buf, 300 size_t count); 301 }; 302 303 #define DRIVER_ATTR(_name, _mode, _show, _store) \ 304 struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store) 305 #define DRIVER_ATTR_RW(_name) \ 306 struct driver_attribute driver_attr_##_name = __ATTR_RW(_name) 307 #define DRIVER_ATTR_RO(_name) \ 308 struct driver_attribute driver_attr_##_name = __ATTR_RO(_name) 309 #define DRIVER_ATTR_WO(_name) \ 310 struct driver_attribute driver_attr_##_name = __ATTR_WO(_name) 311 312 extern int __must_check driver_create_file(struct device_driver *driver, 313 const struct driver_attribute *attr); 314 extern void driver_remove_file(struct device_driver *driver, 315 const struct 
driver_attribute *attr); 316 317 extern int __must_check driver_for_each_device(struct device_driver *drv, 318 struct device *start, 319 void *data, 320 int (*fn)(struct device *dev, 321 void *)); 322 struct device *driver_find_device(struct device_driver *drv, 323 struct device *start, void *data, 324 int (*match)(struct device *dev, void *data)); 325 326 /** 327 * struct subsys_interface - interfaces to device functions 328 * @name: name of the device function 329 * @subsys: subsytem of the devices to attach to 330 * @node: the list of functions registered at the subsystem 331 * @add_dev: device hookup to device function handler 332 * @remove_dev: device hookup to device function handler 333 * 334 * Simple interfaces attached to a subsystem. Multiple interfaces can 335 * attach to a subsystem and its devices. Unlike drivers, they do not 336 * exclusively claim or control devices. Interfaces usually represent 337 * a specific functionality of a subsystem/class of devices. 338 */ 339 struct subsys_interface { 340 const char *name; 341 struct bus_type *subsys; 342 struct list_head node; 343 int (*add_dev)(struct device *dev, struct subsys_interface *sif); 344 void (*remove_dev)(struct device *dev, struct subsys_interface *sif); 345 }; 346 347 int subsys_interface_register(struct subsys_interface *sif); 348 void subsys_interface_unregister(struct subsys_interface *sif); 349 350 int subsys_system_register(struct bus_type *subsys, 351 const struct attribute_group **groups); 352 int subsys_virtual_register(struct bus_type *subsys, 353 const struct attribute_group **groups); 354 355 /** 356 * struct class - device classes 357 * @name: Name of the class. 358 * @owner: The module owner. 359 * @class_attrs: Default attributes of this class. 360 * @dev_groups: Default attributes of the devices that belong to the class. 361 * @dev_kobj: The kobject that represents this class and links it into the hierarchy. 
362 * @dev_uevent: Called when a device is added, removed from this class, or a 363 * few other things that generate uevents to add the environment 364 * variables. 365 * @devnode: Callback to provide the devtmpfs. 366 * @class_release: Called to release this class. 367 * @dev_release: Called to release the device. 368 * @suspend: Used to put the device to sleep mode, usually to a low power 369 * state. 370 * @resume: Used to bring the device from the sleep mode. 371 * @ns_type: Callbacks so sysfs can detemine namespaces. 372 * @namespace: Namespace of the device belongs to this class. 373 * @pm: The default device power management operations of this class. 374 * @p: The private data of the driver core, no one other than the 375 * driver core can touch this. 376 * 377 * A class is a higher-level view of a device that abstracts out low-level 378 * implementation details. Drivers may see a SCSI disk or an ATA disk, but, 379 * at the class level, they are all simply disks. Classes allow user space 380 * to work with devices based on what they do, rather than how they are 381 * connected or how they work. 
382 */ 383 struct class { 384 const char *name; 385 struct module *owner; 386 387 struct class_attribute *class_attrs; 388 const struct attribute_group **dev_groups; 389 struct kobject *dev_kobj; 390 391 int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env); 392 char *(*devnode)(struct device *dev, umode_t *mode); 393 394 void (*class_release)(struct class *class); 395 void (*dev_release)(struct device *dev); 396 397 int (*suspend)(struct device *dev, pm_message_t state); 398 int (*resume)(struct device *dev); 399 400 const struct kobj_ns_type_operations *ns_type; 401 const void *(*namespace)(struct device *dev); 402 403 const struct dev_pm_ops *pm; 404 405 struct subsys_private *p; 406 }; 407 408 struct class_dev_iter { 409 struct klist_iter ki; 410 const struct device_type *type; 411 }; 412 413 extern struct kobject *sysfs_dev_block_kobj; 414 extern struct kobject *sysfs_dev_char_kobj; 415 extern int __must_check __class_register(struct class *class, 416 struct lock_class_key *key); 417 extern void class_unregister(struct class *class); 418 419 /* This is a #define to keep the compiler from merging different 420 * instances of the __key variable */ 421 #define class_register(class) \ 422 ({ \ 423 static struct lock_class_key __key; \ 424 __class_register(class, &__key); \ 425 }) 426 427 struct class_compat; 428 struct class_compat *class_compat_register(const char *name); 429 void class_compat_unregister(struct class_compat *cls); 430 int class_compat_create_link(struct class_compat *cls, struct device *dev, 431 struct device *device_link); 432 void class_compat_remove_link(struct class_compat *cls, struct device *dev, 433 struct device *device_link); 434 435 extern void class_dev_iter_init(struct class_dev_iter *iter, 436 struct class *class, 437 struct device *start, 438 const struct device_type *type); 439 extern struct device *class_dev_iter_next(struct class_dev_iter *iter); 440 extern void class_dev_iter_exit(struct class_dev_iter *iter); 441 
442 extern int class_for_each_device(struct class *class, struct device *start, 443 void *data, 444 int (*fn)(struct device *dev, void *data)); 445 extern struct device *class_find_device(struct class *class, 446 struct device *start, const void *data, 447 int (*match)(struct device *, const void *)); 448 449 struct class_attribute { 450 struct attribute attr; 451 ssize_t (*show)(struct class *class, struct class_attribute *attr, 452 char *buf); 453 ssize_t (*store)(struct class *class, struct class_attribute *attr, 454 const char *buf, size_t count); 455 }; 456 457 #define CLASS_ATTR(_name, _mode, _show, _store) \ 458 struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store) 459 #define CLASS_ATTR_RW(_name) \ 460 struct class_attribute class_attr_##_name = __ATTR_RW(_name) 461 #define CLASS_ATTR_RO(_name) \ 462 struct class_attribute class_attr_##_name = __ATTR_RO(_name) 463 464 extern int __must_check class_create_file_ns(struct class *class, 465 const struct class_attribute *attr, 466 const void *ns); 467 extern void class_remove_file_ns(struct class *class, 468 const struct class_attribute *attr, 469 const void *ns); 470 471 static inline int __must_check class_create_file(struct class *class, 472 const struct class_attribute *attr) 473 { 474 return class_create_file_ns(class, attr, NULL); 475 } 476 477 static inline void class_remove_file(struct class *class, 478 const struct class_attribute *attr) 479 { 480 return class_remove_file_ns(class, attr, NULL); 481 } 482 483 /* Simple class attribute that is just a static string */ 484 struct class_attribute_string { 485 struct class_attribute attr; 486 char *str; 487 }; 488 489 /* Currently read-only only */ 490 #define _CLASS_ATTR_STRING(_name, _mode, _str) \ 491 { __ATTR(_name, _mode, show_class_attr_string, NULL), _str } 492 #define CLASS_ATTR_STRING(_name, _mode, _str) \ 493 struct class_attribute_string class_attr_##_name = \ 494 _CLASS_ATTR_STRING(_name, _mode, _str) 495 496 extern 
ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr, 497 char *buf); 498 499 struct class_interface { 500 struct list_head node; 501 struct class *class; 502 503 int (*add_dev) (struct device *, struct class_interface *); 504 void (*remove_dev) (struct device *, struct class_interface *); 505 }; 506 507 extern int __must_check class_interface_register(struct class_interface *); 508 extern void class_interface_unregister(struct class_interface *); 509 510 extern struct class * __must_check __class_create(struct module *owner, 511 const char *name, 512 struct lock_class_key *key); 513 extern void class_destroy(struct class *cls); 514 515 /* This is a #define to keep the compiler from merging different 516 * instances of the __key variable */ 517 #define class_create(owner, name) \ 518 ({ \ 519 static struct lock_class_key __key; \ 520 __class_create(owner, name, &__key); \ 521 }) 522 523 /* 524 * The type of device, "struct device" is embedded in. A class 525 * or bus can contain devices of different types 526 * like "partitions" and "disks", "mouse" and "event". 527 * This identifies the device type and carries type-specific 528 * information, equivalent to the kobj_type of a kobject. 529 * If "name" is specified, the uevent will contain it in 530 * the DEVTYPE variable. 
531 */ 532 struct device_type { 533 const char *name; 534 const struct attribute_group **groups; 535 int (*uevent)(struct device *dev, struct kobj_uevent_env *env); 536 char *(*devnode)(struct device *dev, umode_t *mode, 537 kuid_t *uid, kgid_t *gid); 538 void (*release)(struct device *dev); 539 540 const struct dev_pm_ops *pm; 541 }; 542 543 /* interface for exporting device attributes */ 544 struct device_attribute { 545 struct attribute attr; 546 ssize_t (*show)(struct device *dev, struct device_attribute *attr, 547 char *buf); 548 ssize_t (*store)(struct device *dev, struct device_attribute *attr, 549 const char *buf, size_t count); 550 }; 551 552 struct dev_ext_attribute { 553 struct device_attribute attr; 554 void *var; 555 }; 556 557 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr, 558 char *buf); 559 ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr, 560 const char *buf, size_t count); 561 ssize_t device_show_int(struct device *dev, struct device_attribute *attr, 562 char *buf); 563 ssize_t device_store_int(struct device *dev, struct device_attribute *attr, 564 const char *buf, size_t count); 565 ssize_t device_show_bool(struct device *dev, struct device_attribute *attr, 566 char *buf); 567 ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, 568 const char *buf, size_t count); 569 570 #define DEVICE_ATTR(_name, _mode, _show, _store) \ 571 struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store) 572 #define DEVICE_ATTR_RW(_name) \ 573 struct device_attribute dev_attr_##_name = __ATTR_RW(_name) 574 #define DEVICE_ATTR_RO(_name) \ 575 struct device_attribute dev_attr_##_name = __ATTR_RO(_name) 576 #define DEVICE_ATTR_WO(_name) \ 577 struct device_attribute dev_attr_##_name = __ATTR_WO(_name) 578 #define DEVICE_ULONG_ATTR(_name, _mode, _var) \ 579 struct dev_ext_attribute dev_attr_##_name = \ 580 { __ATTR(_name, _mode, device_show_ulong, device_store_ulong), 
&(_var) } 581 #define DEVICE_INT_ATTR(_name, _mode, _var) \ 582 struct dev_ext_attribute dev_attr_##_name = \ 583 { __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) } 584 #define DEVICE_BOOL_ATTR(_name, _mode, _var) \ 585 struct dev_ext_attribute dev_attr_##_name = \ 586 { __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) } 587 #define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \ 588 struct device_attribute dev_attr_##_name = \ 589 __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) 590 591 extern int device_create_file(struct device *device, 592 const struct device_attribute *entry); 593 extern void device_remove_file(struct device *dev, 594 const struct device_attribute *attr); 595 extern bool device_remove_file_self(struct device *dev, 596 const struct device_attribute *attr); 597 extern int __must_check device_create_bin_file(struct device *dev, 598 const struct bin_attribute *attr); 599 extern void device_remove_bin_file(struct device *dev, 600 const struct bin_attribute *attr); 601 602 /* device resource management */ 603 typedef void (*dr_release_t)(struct device *dev, void *res); 604 typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); 605 606 #ifdef CONFIG_DEBUG_DEVRES 607 extern void *__devres_alloc(dr_release_t release, size_t size, gfp_t gfp, 608 const char *name); 609 #define devres_alloc(release, size, gfp) \ 610 __devres_alloc(release, size, gfp, #release) 611 #else 612 extern void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp); 613 #endif 614 extern void devres_for_each_res(struct device *dev, dr_release_t release, 615 dr_match_t match, void *match_data, 616 void (*fn)(struct device *, void *, void *), 617 void *data); 618 extern void devres_free(void *res); 619 extern void devres_add(struct device *dev, void *res); 620 extern void *devres_find(struct device *dev, dr_release_t release, 621 dr_match_t match, void *match_data); 622 extern void *devres_get(struct device 
*dev, void *new_res, 623 dr_match_t match, void *match_data); 624 extern void *devres_remove(struct device *dev, dr_release_t release, 625 dr_match_t match, void *match_data); 626 extern int devres_destroy(struct device *dev, dr_release_t release, 627 dr_match_t match, void *match_data); 628 extern int devres_release(struct device *dev, dr_release_t release, 629 dr_match_t match, void *match_data); 630 631 /* devres group */ 632 extern void * __must_check devres_open_group(struct device *dev, void *id, 633 gfp_t gfp); 634 extern void devres_close_group(struct device *dev, void *id); 635 extern void devres_remove_group(struct device *dev, void *id); 636 extern int devres_release_group(struct device *dev, void *id); 637 638 /* managed devm_k.alloc/kfree for device drivers */ 639 extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp); 640 extern __printf(3, 0) 641 char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, 642 va_list ap); 643 extern __printf(3, 4) 644 char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...); 645 static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) 646 { 647 return devm_kmalloc(dev, size, gfp | __GFP_ZERO); 648 } 649 static inline void *devm_kmalloc_array(struct device *dev, 650 size_t n, size_t size, gfp_t flags) 651 { 652 if (size != 0 && n > SIZE_MAX / size) 653 return NULL; 654 return devm_kmalloc(dev, n * size, flags); 655 } 656 static inline void *devm_kcalloc(struct device *dev, 657 size_t n, size_t size, gfp_t flags) 658 { 659 return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO); 660 } 661 extern void devm_kfree(struct device *dev, void *p); 662 extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp); 663 extern void *devm_kmemdup(struct device *dev, const void *src, size_t len, 664 gfp_t gfp); 665 666 extern unsigned long devm_get_free_pages(struct device *dev, 667 gfp_t gfp_mask, unsigned int order); 668 extern void 
devm_free_pages(struct device *dev, unsigned long addr); 669 670 void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res); 671 672 /* allows to add/remove a custom action to devres stack */ 673 int devm_add_action(struct device *dev, void (*action)(void *), void *data); 674 void devm_remove_action(struct device *dev, void (*action)(void *), void *data); 675 676 struct device_dma_parameters { 677 /* 678 * a low level driver may set these to teach IOMMU code about 679 * sg limitations. 680 */ 681 unsigned int max_segment_size; 682 unsigned long segment_boundary_mask; 683 }; 684 685 /** 686 * struct device - The basic device structure 687 * @parent: The device's "parent" device, the device to which it is attached. 688 * In most cases, a parent device is some sort of bus or host 689 * controller. If parent is NULL, the device, is a top-level device, 690 * which is not usually what you want. 691 * @p: Holds the private data of the driver core portions of the device. 692 * See the comment of the struct device_private for detail. 693 * @kobj: A top-level, abstract class from which other classes are derived. 694 * @init_name: Initial name of the device. 695 * @type: The type of device. 696 * This identifies the device type and carries type-specific 697 * information. 698 * @mutex: Mutex to synchronize calls to its driver. 699 * @bus: Type of bus device is on. 700 * @driver: Which driver has allocated this 701 * @platform_data: Platform data specific to the device. 702 * Example: For devices on custom boards, as typical of embedded 703 * and SOC based hardware, Linux often uses platform_data to point 704 * to board-specific structures describing devices and how they 705 * are wired. That can include what ports are available, chip 706 * variants, which GPIO pins act in what additional roles, and so 707 * on. This shrinks the "Board Support Packages" (BSPs) and 708 * minimizes board-specific #ifdefs in drivers. 
709 * @driver_data: Private pointer for driver specific info. 710 * @power: For device power management. 711 * See Documentation/power/devices.txt for details. 712 * @pm_domain: Provide callbacks that are executed during system suspend, 713 * hibernation, system resume and during runtime PM transitions 714 * along with subsystem-level and driver-level callbacks. 715 * @pins: For device pin management. 716 * See Documentation/pinctrl.txt for details. 717 * @msi_list: Hosts MSI descriptors 718 * @msi_domain: The generic MSI domain this device is using. 719 * @numa_node: NUMA node this device is close to. 720 * @dma_mask: Dma mask (if dma'ble device). 721 * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all 722 * hardware supports 64-bit addresses for consistent allocations 723 * such descriptors. 724 * @dma_pfn_offset: offset of DMA memory range relatively of RAM 725 * @dma_parms: A low level driver may set these to teach IOMMU code about 726 * segment limitations. 727 * @dma_pools: Dma pools (if dma'ble device). 728 * @dma_mem: Internal for coherent mem override. 729 * @cma_area: Contiguous memory area for dma allocations 730 * @archdata: For arch-specific additions. 731 * @of_node: Associated device tree node. 732 * @fwnode: Associated device node supplied by platform firmware. 733 * @devt: For creating the sysfs "dev". 734 * @id: device instance 735 * @devres_lock: Spinlock to protect the resource of the device. 736 * @devres_head: The resources list of the device. 737 * @knode_class: The node used to add the device to the class list. 738 * @class: The class of the device. 739 * @groups: Optional attribute groups. 740 * @release: Callback to free the device after all references have 741 * gone away. This should be set by the allocator of the 742 * device (i.e. the bus driver that discovered the device). 743 * @iommu_group: IOMMU group the device belongs to. 744 * 745 * @offline_disabled: If set, the device is permanently online. 
746 * @offline: Set after successful invocation of bus type's .offline(). 747 * 748 * At the lowest level, every device in a Linux system is represented by an 749 * instance of struct device. The device structure contains the information 750 * that the device model core needs to model the system. Most subsystems, 751 * however, track additional information about the devices they host. As a 752 * result, it is rare for devices to be represented by bare device structures; 753 * instead, that structure, like kobject structures, is usually embedded within 754 * a higher-level representation of the device. 755 */ 756 struct device { 757 struct device *parent; 758 759 struct device_private *p; 760 761 struct kobject kobj; 762 const char *init_name; /* initial name of the device */ 763 const struct device_type *type; 764 765 struct mutex mutex; /* mutex to synchronize calls to 766 * its driver. 767 */ 768 769 struct bus_type *bus; /* type of bus device is on */ 770 struct device_driver *driver; /* which driver has allocated this 771 device */ 772 void *platform_data; /* Platform specific data, device 773 core doesn't touch it */ 774 void *driver_data; /* Driver data, set and get with 775 dev_set/get_drvdata */ 776 struct dev_pm_info power; 777 struct dev_pm_domain *pm_domain; 778 779 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN 780 struct irq_domain *msi_domain; 781 #endif 782 #ifdef CONFIG_PINCTRL 783 struct dev_pin_info *pins; 784 #endif 785 #ifdef CONFIG_GENERIC_MSI_IRQ 786 struct list_head msi_list; 787 #endif 788 789 #ifdef CONFIG_NUMA 790 int numa_node; /* NUMA node this device is close to */ 791 #endif 792 u64 *dma_mask; /* dma mask (if dma'able device) */ 793 u64 coherent_dma_mask;/* Like dma_mask, but for 794 alloc_coherent mappings as 795 not all hardware supports 796 64 bit addresses for consistent 797 allocations such descriptors. 
*/ 798 unsigned long dma_pfn_offset; 799 800 struct device_dma_parameters *dma_parms; 801 802 struct list_head dma_pools; /* dma pools (if dma'ble) */ 803 804 struct dma_coherent_mem *dma_mem; /* internal for coherent mem 805 override */ 806 #ifdef CONFIG_DMA_CMA 807 struct cma *cma_area; /* contiguous memory area for dma 808 allocations */ 809 #endif 810 /* arch specific additions */ 811 struct dev_archdata archdata; 812 813 struct device_node *of_node; /* associated device tree node */ 814 struct fwnode_handle *fwnode; /* firmware device node */ 815 816 dev_t devt; /* dev_t, creates the sysfs "dev" */ 817 u32 id; /* device instance */ 818 819 spinlock_t devres_lock; 820 struct list_head devres_head; 821 822 struct klist_node knode_class; 823 struct class *class; 824 const struct attribute_group **groups; /* optional groups */ 825 826 void (*release)(struct device *dev); 827 struct iommu_group *iommu_group; 828 829 bool offline_disabled:1; 830 bool offline:1; 831 }; 832 833 static inline struct device *kobj_to_dev(struct kobject *kobj) 834 { 835 return container_of(kobj, struct device, kobj); 836 } 837 838 /* Get the wakeup routines, which depend on struct device */ 839 #include <linux/pm_wakeup.h> 840 841 static inline const char *dev_name(const struct device *dev) 842 { 843 /* Use the init name until the kobject becomes available */ 844 if (dev->init_name) 845 return dev->init_name; 846 847 return kobject_name(&dev->kobj); 848 } 849 850 extern __printf(2, 3) 851 int dev_set_name(struct device *dev, const char *name, ...); 852 853 #ifdef CONFIG_NUMA 854 static inline int dev_to_node(struct device *dev) 855 { 856 return dev->numa_node; 857 } 858 static inline void set_dev_node(struct device *dev, int node) 859 { 860 dev->numa_node = node; 861 } 862 #else 863 static inline int dev_to_node(struct device *dev) 864 { 865 return -1; 866 } 867 static inline void set_dev_node(struct device *dev, int node) 868 { 869 } 870 #endif 871 872 static inline struct irq_domain 
*dev_get_msi_domain(const struct device *dev) 873 { 874 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN 875 return dev->msi_domain; 876 #else 877 return NULL; 878 #endif 879 } 880 881 static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d) 882 { 883 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN 884 dev->msi_domain = d; 885 #endif 886 } 887 888 static inline void *dev_get_drvdata(const struct device *dev) 889 { 890 return dev->driver_data; 891 } 892 893 static inline void dev_set_drvdata(struct device *dev, void *data) 894 { 895 dev->driver_data = data; 896 } 897 898 static inline struct pm_subsys_data *dev_to_psd(struct device *dev) 899 { 900 return dev ? dev->power.subsys_data : NULL; 901 } 902 903 static inline unsigned int dev_get_uevent_suppress(const struct device *dev) 904 { 905 return dev->kobj.uevent_suppress; 906 } 907 908 static inline void dev_set_uevent_suppress(struct device *dev, int val) 909 { 910 dev->kobj.uevent_suppress = val; 911 } 912 913 static inline int device_is_registered(struct device *dev) 914 { 915 return dev->kobj.state_in_sysfs; 916 } 917 918 static inline void device_enable_async_suspend(struct device *dev) 919 { 920 if (!dev->power.is_prepared) 921 dev->power.async_suspend = true; 922 } 923 924 static inline void device_disable_async_suspend(struct device *dev) 925 { 926 if (!dev->power.is_prepared) 927 dev->power.async_suspend = false; 928 } 929 930 static inline bool device_async_suspend_enabled(struct device *dev) 931 { 932 return !!dev->power.async_suspend; 933 } 934 935 static inline void pm_suspend_ignore_children(struct device *dev, bool enable) 936 { 937 dev->power.ignore_children = enable; 938 } 939 940 static inline void dev_pm_syscore_device(struct device *dev, bool val) 941 { 942 #ifdef CONFIG_PM_SLEEP 943 dev->power.syscore = val; 944 #endif 945 } 946 947 static inline void device_lock(struct device *dev) 948 { 949 mutex_lock(&dev->mutex); 950 } 951 952 static inline int device_trylock(struct device *dev) 953 { 954 
return mutex_trylock(&dev->mutex); 955 } 956 957 static inline void device_unlock(struct device *dev) 958 { 959 mutex_unlock(&dev->mutex); 960 } 961 962 static inline void device_lock_assert(struct device *dev) 963 { 964 lockdep_assert_held(&dev->mutex); 965 } 966 967 static inline struct device_node *dev_of_node(struct device *dev) 968 { 969 if (!IS_ENABLED(CONFIG_OF)) 970 return NULL; 971 return dev->of_node; 972 } 973 974 void driver_init(void); 975 976 /* 977 * High level routines for use by the bus drivers 978 */ 979 extern int __must_check device_register(struct device *dev); 980 extern void device_unregister(struct device *dev); 981 extern void device_initialize(struct device *dev); 982 extern int __must_check device_add(struct device *dev); 983 extern void device_del(struct device *dev); 984 extern int device_for_each_child(struct device *dev, void *data, 985 int (*fn)(struct device *dev, void *data)); 986 extern int device_for_each_child_reverse(struct device *dev, void *data, 987 int (*fn)(struct device *dev, void *data)); 988 extern struct device *device_find_child(struct device *dev, void *data, 989 int (*match)(struct device *dev, void *data)); 990 extern int device_rename(struct device *dev, const char *new_name); 991 extern int device_move(struct device *dev, struct device *new_parent, 992 enum dpm_order dpm_order); 993 extern const char *device_get_devnode(struct device *dev, 994 umode_t *mode, kuid_t *uid, kgid_t *gid, 995 const char **tmp); 996 997 static inline bool device_supports_offline(struct device *dev) 998 { 999 return dev->bus && dev->bus->offline && dev->bus->online; 1000 } 1001 1002 extern void lock_device_hotplug(void); 1003 extern void unlock_device_hotplug(void); 1004 extern int lock_device_hotplug_sysfs(void); 1005 extern int device_offline(struct device *dev); 1006 extern int device_online(struct device *dev); 1007 extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); 1008 extern void 
set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode); 1009 1010 /* 1011 * Root device objects for grouping under /sys/devices 1012 */ 1013 extern struct device *__root_device_register(const char *name, 1014 struct module *owner); 1015 1016 /* This is a macro to avoid include problems with THIS_MODULE */ 1017 #define root_device_register(name) \ 1018 __root_device_register(name, THIS_MODULE) 1019 1020 extern void root_device_unregister(struct device *root); 1021 1022 static inline void *dev_get_platdata(const struct device *dev) 1023 { 1024 return dev->platform_data; 1025 } 1026 1027 /* 1028 * Manual binding of a device to driver. See drivers/base/bus.c 1029 * for information on use. 1030 */ 1031 extern int __must_check device_bind_driver(struct device *dev); 1032 extern void device_release_driver(struct device *dev); 1033 extern int __must_check device_attach(struct device *dev); 1034 extern int __must_check driver_attach(struct device_driver *drv); 1035 extern void device_initial_probe(struct device *dev); 1036 extern int __must_check device_reprobe(struct device *dev); 1037 1038 /* 1039 * Easy functions for dynamically creating devices on the fly 1040 */ 1041 extern __printf(5, 0) 1042 struct device *device_create_vargs(struct class *cls, struct device *parent, 1043 dev_t devt, void *drvdata, 1044 const char *fmt, va_list vargs); 1045 extern __printf(5, 6) 1046 struct device *device_create(struct class *cls, struct device *parent, 1047 dev_t devt, void *drvdata, 1048 const char *fmt, ...); 1049 extern __printf(6, 7) 1050 struct device *device_create_with_groups(struct class *cls, 1051 struct device *parent, dev_t devt, void *drvdata, 1052 const struct attribute_group **groups, 1053 const char *fmt, ...); 1054 extern void device_destroy(struct class *cls, dev_t devt); 1055 1056 /* 1057 * Platform "fixup" functions - allow the platform to have their say 1058 * about devices and actions that the general device layer doesn't 1059 * know about. 
1060 */ 1061 /* Notify platform of device discovery */ 1062 extern int (*platform_notify)(struct device *dev); 1063 1064 extern int (*platform_notify_remove)(struct device *dev); 1065 1066 1067 /* 1068 * get_device - atomically increment the reference count for the device. 1069 * 1070 */ 1071 extern struct device *get_device(struct device *dev); 1072 extern void put_device(struct device *dev); 1073 1074 #ifdef CONFIG_DEVTMPFS 1075 extern int devtmpfs_create_node(struct device *dev); 1076 extern int devtmpfs_delete_node(struct device *dev); 1077 extern int devtmpfs_mount(const char *mntdir); 1078 #else 1079 static inline int devtmpfs_create_node(struct device *dev) { return 0; } 1080 static inline int devtmpfs_delete_node(struct device *dev) { return 0; } 1081 static inline int devtmpfs_mount(const char *mountpoint) { return 0; } 1082 #endif 1083 1084 /* drivers/base/power/shutdown.c */ 1085 extern void device_shutdown(void); 1086 1087 /* debugging and troubleshooting/diagnostic helpers. 
*/ 1088 extern const char *dev_driver_string(const struct device *dev); 1089 1090 1091 #ifdef CONFIG_PRINTK 1092 1093 extern __printf(3, 0) 1094 int dev_vprintk_emit(int level, const struct device *dev, 1095 const char *fmt, va_list args); 1096 extern __printf(3, 4) 1097 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...); 1098 1099 extern __printf(3, 4) 1100 void dev_printk(const char *level, const struct device *dev, 1101 const char *fmt, ...); 1102 extern __printf(2, 3) 1103 void dev_emerg(const struct device *dev, const char *fmt, ...); 1104 extern __printf(2, 3) 1105 void dev_alert(const struct device *dev, const char *fmt, ...); 1106 extern __printf(2, 3) 1107 void dev_crit(const struct device *dev, const char *fmt, ...); 1108 extern __printf(2, 3) 1109 void dev_err(const struct device *dev, const char *fmt, ...); 1110 extern __printf(2, 3) 1111 void dev_warn(const struct device *dev, const char *fmt, ...); 1112 extern __printf(2, 3) 1113 void dev_notice(const struct device *dev, const char *fmt, ...); 1114 extern __printf(2, 3) 1115 void _dev_info(const struct device *dev, const char *fmt, ...); 1116 1117 #else 1118 1119 static inline __printf(3, 0) 1120 int dev_vprintk_emit(int level, const struct device *dev, 1121 const char *fmt, va_list args) 1122 { return 0; } 1123 static inline __printf(3, 4) 1124 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...) 1125 { return 0; } 1126 1127 static inline void __dev_printk(const char *level, const struct device *dev, 1128 struct va_format *vaf) 1129 {} 1130 static inline __printf(3, 4) 1131 void dev_printk(const char *level, const struct device *dev, 1132 const char *fmt, ...) 1133 {} 1134 1135 static inline __printf(2, 3) 1136 void dev_emerg(const struct device *dev, const char *fmt, ...) 1137 {} 1138 static inline __printf(2, 3) 1139 void dev_crit(const struct device *dev, const char *fmt, ...) 
1140 {} 1141 static inline __printf(2, 3) 1142 void dev_alert(const struct device *dev, const char *fmt, ...) 1143 {} 1144 static inline __printf(2, 3) 1145 void dev_err(const struct device *dev, const char *fmt, ...) 1146 {} 1147 static inline __printf(2, 3) 1148 void dev_warn(const struct device *dev, const char *fmt, ...) 1149 {} 1150 static inline __printf(2, 3) 1151 void dev_notice(const struct device *dev, const char *fmt, ...) 1152 {} 1153 static inline __printf(2, 3) 1154 void _dev_info(const struct device *dev, const char *fmt, ...) 1155 {} 1156 1157 #endif 1158 1159 /* 1160 * Stupid hackaround for existing uses of non-printk uses dev_info 1161 * 1162 * Note that the definition of dev_info below is actually _dev_info 1163 * and a macro is used to avoid redefining dev_info 1164 */ 1165 1166 #define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg) 1167 1168 #if defined(CONFIG_DYNAMIC_DEBUG) 1169 #define dev_dbg(dev, format, ...) \ 1170 do { \ 1171 dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \ 1172 } while (0) 1173 #elif defined(DEBUG) 1174 #define dev_dbg(dev, format, arg...) \ 1175 dev_printk(KERN_DEBUG, dev, format, ##arg) 1176 #else 1177 #define dev_dbg(dev, format, arg...) \ 1178 ({ \ 1179 if (0) \ 1180 dev_printk(KERN_DEBUG, dev, format, ##arg); \ 1181 }) 1182 #endif 1183 1184 #ifdef CONFIG_PRINTK 1185 #define dev_level_once(dev_level, dev, fmt, ...) \ 1186 do { \ 1187 static bool __print_once __read_mostly; \ 1188 \ 1189 if (!__print_once) { \ 1190 __print_once = true; \ 1191 dev_level(dev, fmt, ##__VA_ARGS__); \ 1192 } \ 1193 } while (0) 1194 #else 1195 #define dev_level_once(dev_level, dev, fmt, ...) \ 1196 do { \ 1197 if (0) \ 1198 dev_level(dev, fmt, ##__VA_ARGS__); \ 1199 } while (0) 1200 #endif 1201 1202 #define dev_emerg_once(dev, fmt, ...) \ 1203 dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__) 1204 #define dev_alert_once(dev, fmt, ...) \ 1205 dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__) 1206 #define dev_crit_once(dev, fmt, ...) 
\ 1207 dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__) 1208 #define dev_err_once(dev, fmt, ...) \ 1209 dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__) 1210 #define dev_warn_once(dev, fmt, ...) \ 1211 dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__) 1212 #define dev_notice_once(dev, fmt, ...) \ 1213 dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__) 1214 #define dev_info_once(dev, fmt, ...) \ 1215 dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__) 1216 #define dev_dbg_once(dev, fmt, ...) \ 1217 dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__) 1218 1219 #define dev_level_ratelimited(dev_level, dev, fmt, ...) \ 1220 do { \ 1221 static DEFINE_RATELIMIT_STATE(_rs, \ 1222 DEFAULT_RATELIMIT_INTERVAL, \ 1223 DEFAULT_RATELIMIT_BURST); \ 1224 if (__ratelimit(&_rs)) \ 1225 dev_level(dev, fmt, ##__VA_ARGS__); \ 1226 } while (0) 1227 1228 #define dev_emerg_ratelimited(dev, fmt, ...) \ 1229 dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__) 1230 #define dev_alert_ratelimited(dev, fmt, ...) \ 1231 dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__) 1232 #define dev_crit_ratelimited(dev, fmt, ...) \ 1233 dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__) 1234 #define dev_err_ratelimited(dev, fmt, ...) \ 1235 dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__) 1236 #define dev_warn_ratelimited(dev, fmt, ...) \ 1237 dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__) 1238 #define dev_notice_ratelimited(dev, fmt, ...) \ 1239 dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__) 1240 #define dev_info_ratelimited(dev, fmt, ...) \ 1241 dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__) 1242 #if defined(CONFIG_DYNAMIC_DEBUG) 1243 /* descriptor check is first to prevent flooding with "callbacks suppressed" */ 1244 #define dev_dbg_ratelimited(dev, fmt, ...) 
\ 1245 do { \ 1246 static DEFINE_RATELIMIT_STATE(_rs, \ 1247 DEFAULT_RATELIMIT_INTERVAL, \ 1248 DEFAULT_RATELIMIT_BURST); \ 1249 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ 1250 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ 1251 __ratelimit(&_rs)) \ 1252 __dynamic_dev_dbg(&descriptor, dev, fmt, \ 1253 ##__VA_ARGS__); \ 1254 } while (0) 1255 #elif defined(DEBUG) 1256 #define dev_dbg_ratelimited(dev, fmt, ...) \ 1257 do { \ 1258 static DEFINE_RATELIMIT_STATE(_rs, \ 1259 DEFAULT_RATELIMIT_INTERVAL, \ 1260 DEFAULT_RATELIMIT_BURST); \ 1261 if (__ratelimit(&_rs)) \ 1262 dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \ 1263 } while (0) 1264 #else 1265 #define dev_dbg_ratelimited(dev, fmt, ...) \ 1266 no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) 1267 #endif 1268 1269 #ifdef VERBOSE_DEBUG 1270 #define dev_vdbg dev_dbg 1271 #else 1272 #define dev_vdbg(dev, format, arg...) \ 1273 ({ \ 1274 if (0) \ 1275 dev_printk(KERN_DEBUG, dev, format, ##arg); \ 1276 }) 1277 #endif 1278 1279 /* 1280 * dev_WARN*() acts like dev_printk(), but with the key difference of 1281 * using WARN/WARN_ONCE to include file/line information and a backtrace. 1282 */ 1283 #define dev_WARN(dev, format, arg...) \ 1284 WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg); 1285 1286 #define dev_WARN_ONCE(dev, condition, format, arg...) \ 1287 WARN_ONCE(condition, "%s %s: " format, \ 1288 dev_driver_string(dev), dev_name(dev), ## arg) 1289 1290 /* Create alias, so I can be autoloaded. */ 1291 #define MODULE_ALIAS_CHARDEV(major,minor) \ 1292 MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor)) 1293 #define MODULE_ALIAS_CHARDEV_MAJOR(major) \ 1294 MODULE_ALIAS("char-major-" __stringify(major) "-*") 1295 1296 #ifdef CONFIG_SYSFS_DEPRECATED 1297 extern long sysfs_deprecated; 1298 #else 1299 #define sysfs_deprecated 0 1300 #endif 1301 1302 /** 1303 * module_driver() - Helper macro for drivers that don't do anything 1304 * special in module init/exit. 
This eliminates a lot of boilerplate. 1305 * Each module may only use this macro once, and calling it replaces 1306 * module_init() and module_exit(). 1307 * 1308 * @__driver: driver name 1309 * @__register: register function for this driver type 1310 * @__unregister: unregister function for this driver type 1311 * @...: Additional arguments to be passed to __register and __unregister. 1312 * 1313 * Use this macro to construct bus specific macros for registering 1314 * drivers, and do not use it on its own. 1315 */ 1316 #define module_driver(__driver, __register, __unregister, ...) \ 1317 static int __init __driver##_init(void) \ 1318 { \ 1319 return __register(&(__driver) , ##__VA_ARGS__); \ 1320 } \ 1321 module_init(__driver##_init); \ 1322 static void __exit __driver##_exit(void) \ 1323 { \ 1324 __unregister(&(__driver) , ##__VA_ARGS__); \ 1325 } \ 1326 module_exit(__driver##_exit); 1327 1328 /** 1329 * builtin_driver() - Helper macro for drivers that don't do anything 1330 * special in init and have no exit. This eliminates some boilerplate. 1331 * Each driver may only use this macro once, and calling it replaces 1332 * device_initcall (or in some cases, the legacy __initcall). This is 1333 * meant to be a direct parallel of module_driver() above but without 1334 * the __exit stuff that is not used for builtin cases. 1335 * 1336 * @__driver: driver name 1337 * @__register: register function for this driver type 1338 * @...: Additional arguments to be passed to __register 1339 * 1340 * Use this macro to construct bus specific macros for registering 1341 * drivers, and do not use it on its own. 1342 */ 1343 #define builtin_driver(__driver, __register, ...) \ 1344 static int __init __driver##_init(void) \ 1345 { \ 1346 return __register(&(__driver) , ##__VA_ARGS__); \ 1347 } \ 1348 device_initcall(__driver##_init); 1349 1350 #endif /* _DEVICE_H_ */
1 /* 2 * A generic kernel FIFO implementation 3 * 4 * Copyright (C) 2013 Stefani Seibold <stefani@seibold.net> 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, write to the Free Software 18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 * 20 */ 21 22 #ifndef _LINUX_KFIFO_H 23 #define _LINUX_KFIFO_H 24 25 /* 26 * How to porting drivers to the new generic FIFO API: 27 * 28 * - Modify the declaration of the "struct kfifo *" object into a 29 * in-place "struct kfifo" object 30 * - Init the in-place object with kfifo_alloc() or kfifo_init() 31 * Note: The address of the in-place "struct kfifo" object must be 32 * passed as the first argument to this functions 33 * - Replace the use of __kfifo_put into kfifo_in and __kfifo_get 34 * into kfifo_out 35 * - Replace the use of kfifo_put into kfifo_in_spinlocked and kfifo_get 36 * into kfifo_out_spinlocked 37 * Note: the spinlock pointer formerly passed to kfifo_init/kfifo_alloc 38 * must be passed now to the kfifo_in_spinlocked and kfifo_out_spinlocked 39 * as the last parameter 40 * - The formerly __kfifo_* functions are renamed into kfifo_* 41 */ 42 43 /* 44 * Note about locking : There is no locking required until only * one reader 45 * and one writer is using the fifo and no kfifo_reset() will be * called 46 * kfifo_reset_out() can be safely used, until it will be only called 47 * in the reader thread. 
48 * For multiple writer and one reader there is only a need to lock the writer. 49 * And vice versa for only one writer and multiple reader there is only a need 50 * to lock the reader. 51 */ 52 53 #include <linux/kernel.h> 54 #include <linux/spinlock.h> 55 #include <linux/stddef.h> 56 #include <linux/scatterlist.h> 57 58 struct __kfifo { 59 unsigned int in; 60 unsigned int out; 61 unsigned int mask; 62 unsigned int esize; 63 void *data; 64 }; 65 66 #define __STRUCT_KFIFO_COMMON(datatype, recsize, ptrtype) \ 67 union { \ 68 struct __kfifo kfifo; \ 69 datatype *type; \ 70 const datatype *const_type; \ 71 char (*rectype)[recsize]; \ 72 ptrtype *ptr; \ 73 ptrtype const *ptr_const; \ 74 } 75 76 #define __STRUCT_KFIFO(type, size, recsize, ptrtype) \ 77 { \ 78 __STRUCT_KFIFO_COMMON(type, recsize, ptrtype); \ 79 type buf[((size < 2) || (size & (size - 1))) ? -1 : size]; \ 80 } 81 82 #define STRUCT_KFIFO(type, size) \ 83 struct __STRUCT_KFIFO(type, size, 0, type) 84 85 #define __STRUCT_KFIFO_PTR(type, recsize, ptrtype) \ 86 { \ 87 __STRUCT_KFIFO_COMMON(type, recsize, ptrtype); \ 88 type buf[0]; \ 89 } 90 91 #define STRUCT_KFIFO_PTR(type) \ 92 struct __STRUCT_KFIFO_PTR(type, 0, type) 93 94 /* 95 * define compatibility "struct kfifo" for dynamic allocated fifos 96 */ 97 struct kfifo __STRUCT_KFIFO_PTR(unsigned char, 0, void); 98 99 #define STRUCT_KFIFO_REC_1(size) \ 100 struct __STRUCT_KFIFO(unsigned char, size, 1, void) 101 102 #define STRUCT_KFIFO_REC_2(size) \ 103 struct __STRUCT_KFIFO(unsigned char, size, 2, void) 104 105 /* 106 * define kfifo_rec types 107 */ 108 struct kfifo_rec_ptr_1 __STRUCT_KFIFO_PTR(unsigned char, 1, void); 109 struct kfifo_rec_ptr_2 __STRUCT_KFIFO_PTR(unsigned char, 2, void); 110 111 /* 112 * helper macro to distinguish between real in place fifo where the fifo 113 * array is a part of the structure and the fifo type where the array is 114 * outside of the fifo structure. 
115 */ 116 #define __is_kfifo_ptr(fifo) (sizeof(*fifo) == sizeof(struct __kfifo)) 117 118 /** 119 * DECLARE_KFIFO_PTR - macro to declare a fifo pointer object 120 * @fifo: name of the declared fifo 121 * @type: type of the fifo elements 122 */ 123 #define DECLARE_KFIFO_PTR(fifo, type) STRUCT_KFIFO_PTR(type) fifo 124 125 /** 126 * DECLARE_KFIFO - macro to declare a fifo object 127 * @fifo: name of the declared fifo 128 * @type: type of the fifo elements 129 * @size: the number of elements in the fifo, this must be a power of 2 130 */ 131 #define DECLARE_KFIFO(fifo, type, size) STRUCT_KFIFO(type, size) fifo 132 133 /** 134 * INIT_KFIFO - Initialize a fifo declared by DECLARE_KFIFO 135 * @fifo: name of the declared fifo datatype 136 */ 137 #define INIT_KFIFO(fifo) \ 138 (void)({ \ 139 typeof(&(fifo)) __tmp = &(fifo); \ 140 struct __kfifo *__kfifo = &__tmp->kfifo; \ 141 __kfifo->in = 0; \ 142 __kfifo->out = 0; \ 143 __kfifo->mask = __is_kfifo_ptr(__tmp) ? 0 : ARRAY_SIZE(__tmp->buf) - 1;\ 144 __kfifo->esize = sizeof(*__tmp->buf); \ 145 __kfifo->data = __is_kfifo_ptr(__tmp) ? NULL : __tmp->buf; \ 146 }) 147 148 /** 149 * DEFINE_KFIFO - macro to define and initialize a fifo 150 * @fifo: name of the declared fifo datatype 151 * @type: type of the fifo elements 152 * @size: the number of elements in the fifo, this must be a power of 2 153 * 154 * Note: the macro can be used for global and local fifo data type variables. 155 */ 156 #define DEFINE_KFIFO(fifo, type, size) \ 157 DECLARE_KFIFO(fifo, type, size) = \ 158 (typeof(fifo)) { \ 159 { \ 160 { \ 161 .in = 0, \ 162 .out = 0, \ 163 .mask = __is_kfifo_ptr(&(fifo)) ? \ 164 0 : \ 165 ARRAY_SIZE((fifo).buf) - 1, \ 166 .esize = sizeof(*(fifo).buf), \ 167 .data = __is_kfifo_ptr(&(fifo)) ? 
\ 168 NULL : \ 169 (fifo).buf, \ 170 } \ 171 } \ 172 } 173 174 175 static inline unsigned int __must_check 176 __kfifo_uint_must_check_helper(unsigned int val) 177 { 178 return val; 179 } 180 181 static inline int __must_check 182 __kfifo_int_must_check_helper(int val) 183 { 184 return val; 185 } 186 187 /** 188 * kfifo_initialized - Check if the fifo is initialized 189 * @fifo: address of the fifo to check 190 * 191 * Return %true if fifo is initialized, otherwise %false. 192 * Assumes the fifo was 0 before. 193 */ 194 #define kfifo_initialized(fifo) ((fifo)->kfifo.mask) 195 196 /** 197 * kfifo_esize - returns the size of the element managed by the fifo 198 * @fifo: address of the fifo to be used 199 */ 200 #define kfifo_esize(fifo) ((fifo)->kfifo.esize) 201 202 /** 203 * kfifo_recsize - returns the size of the record length field 204 * @fifo: address of the fifo to be used 205 */ 206 #define kfifo_recsize(fifo) (sizeof(*(fifo)->rectype)) 207 208 /** 209 * kfifo_size - returns the size of the fifo in elements 210 * @fifo: address of the fifo to be used 211 */ 212 #define kfifo_size(fifo) ((fifo)->kfifo.mask + 1) 213 214 /** 215 * kfifo_reset - removes the entire fifo content 216 * @fifo: address of the fifo to be used 217 * 218 * Note: usage of kfifo_reset() is dangerous. It should be only called when the 219 * fifo is exclusived locked or when it is secured that no other thread is 220 * accessing the fifo. 221 */ 222 #define kfifo_reset(fifo) \ 223 (void)({ \ 224 typeof((fifo) + 1) __tmp = (fifo); \ 225 __tmp->kfifo.in = __tmp->kfifo.out = 0; \ 226 }) 227 228 /** 229 * kfifo_reset_out - skip fifo content 230 * @fifo: address of the fifo to be used 231 * 232 * Note: The usage of kfifo_reset_out() is safe until it will be only called 233 * from the reader thread and there is only one concurrent reader. Otherwise 234 * it is dangerous and must be handled in the same way as kfifo_reset(). 
/**
 * kfifo_reset_out - skip (discard) the fifo content
 * @fifo: address of the fifo to be used
 *
 * Safe only when called from the single reader thread with one
 * concurrent reader; otherwise treat it like kfifo_reset().
 */
#define kfifo_reset_out(fifo) \
(void)({ \
	typeof((fifo) + 1) __tmp = (fifo); \
	__tmp->kfifo.out = __tmp->kfifo.in; \
})

/**
 * kfifo_len - number of used elements in the fifo
 * @fifo: address of the fifo to be used
 *
 * The free-running indices make this a plain unsigned difference.
 * (Distinct local names __tmpl/__tmpq below keep these macros safe to
 * nest inside one another.)
 */
#define kfifo_len(fifo) \
({ \
	typeof((fifo) + 1) __tmpl = (fifo); \
	__tmpl->kfifo.in - __tmpl->kfifo.out; \
})

/**
 * kfifo_is_empty - true if the fifo holds no elements
 * @fifo: address of the fifo to be used
 */
#define kfifo_is_empty(fifo) \
({ \
	typeof((fifo) + 1) __tmpq = (fifo); \
	__tmpq->kfifo.in == __tmpq->kfifo.out; \
})

/**
 * kfifo_is_full - true if the fifo has no free space left
 * @fifo: address of the fifo to be used
 */
#define kfifo_is_full(fifo) \
({ \
	typeof((fifo) + 1) __tmpq = (fifo); \
	kfifo_len(__tmpq) > __tmpq->kfifo.mask; \
})

/**
 * kfifo_avail - number of unused elements in the fifo
 * @fifo: address of the fifo to be used
 *
 * For record fifos the record-length header is accounted for.
 */
#define kfifo_avail(fifo) \
__kfifo_uint_must_check_helper( \
({ \
	typeof((fifo) + 1) __tmpq = (fifo); \
	const size_t __recsize = sizeof(*__tmpq->rectype); \
	unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \
	(__recsize) ? ((__avail <= __recsize) ? 0 : \
	__kfifo_max_r(__avail - __recsize, __recsize)) : \
	__avail; \
}) \
)

/**
 * kfifo_skip - skip one element (or one record) of output data
 * @fifo: address of the fifo to be used
 */
#define kfifo_skip(fifo) \
(void)({ \
	typeof((fifo) + 1) __tmp = (fifo); \
	const size_t __recsize = sizeof(*__tmp->rectype); \
	struct __kfifo *__kfifo = &__tmp->kfifo; \
	if (__recsize) \
		__kfifo_skip_r(__kfifo, __recsize); \
	else \
		__kfifo->out++; \
})

/**
 * kfifo_peek_len - size in bytes of the next fifo record
 * @fifo: address of the fifo to be used
 */
#define kfifo_peek_len(fifo) \
__kfifo_uint_must_check_helper( \
({ \
	typeof((fifo) + 1) __tmp = (fifo); \
	const size_t __recsize = sizeof(*__tmp->rectype); \
	struct __kfifo *__kfifo = &__tmp->kfifo; \
	(!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \
	__kfifo_len_r(__kfifo, __recsize); \
}) \
)

/**
 * kfifo_alloc - dynamically allocate a new fifo buffer
 * @fifo: pointer to the fifo
 * @size: number of elements in the fifo; rounded up to a power of 2
 * @gfp_mask: get_free_pages mask, passed to kmalloc()
 *
 * Release the buffer with kfifo_free().  Returns 0 on success, a
 * negative error code otherwise (-EINVAL for in-place fifos).
 */
#define kfifo_alloc(fifo, size, gfp_mask) \
__kfifo_int_must_check_helper( \
({ \
	typeof((fifo) + 1) __tmp = (fifo); \
	struct __kfifo *__kfifo = &__tmp->kfifo; \
	__is_kfifo_ptr(__tmp) ? \
	__kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \
	-EINVAL; \
}) \
)

/**
 * kfifo_free - free a fifo allocated with kfifo_alloc()
 * @fifo: the fifo to be freed
 */
#define kfifo_free(fifo) \
({ \
	typeof((fifo) + 1) __tmp = (fifo); \
	struct __kfifo *__kfifo = &__tmp->kfifo; \
	if (__is_kfifo_ptr(__tmp)) \
		__kfifo_free(__kfifo); \
})

/**
 * kfifo_init - initialize a fifo from a preallocated buffer
 * @fifo: the fifo to assign the buffer
 * @buffer: the preallocated buffer to be used
 * @size: size of the internal buffer; rounded up to a power of 2
 *
 * Returns 0 on success, otherwise an error code (-EINVAL for
 * in-place fifos).
 */
#define kfifo_init(fifo, buffer, size) \
({ \
	typeof((fifo) + 1) __tmp = (fifo); \
	struct __kfifo *__kfifo = &__tmp->kfifo; \
	__is_kfifo_ptr(__tmp) ? \
	__kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \
	-EINVAL; \
})

/**
 * kfifo_put - put one element into the fifo
 * @fifo: address of the fifo to be used
 * @val: the value to be added
 *
 * Returns 0 if the fifo was full, otherwise the number of elements
 * processed.  No extra locking is needed for a single concurrent
 * reader plus a single concurrent writer.
 */
#define kfifo_put(fifo, val) \
({ \
	typeof((fifo) + 1) __tmp = (fifo); \
	typeof(*__tmp->const_type) __val = (val); \
	unsigned int __ret; \
	size_t __recsize = sizeof(*__tmp->rectype); \
	struct __kfifo *__kfifo = &__tmp->kfifo; \
	if (__recsize) \
		__ret = __kfifo_in_r(__kfifo, &__val, sizeof(__val), \
			__recsize); \
	else { \
		__ret = !kfifo_is_full(__tmp); \
		if (__ret) { \
			(__is_kfifo_ptr(__tmp) ? \
			((typeof(__tmp->type))__kfifo->data) : \
			(__tmp->buf) \
			)[__kfifo->in & __tmp->kfifo.mask] = \
				(typeof(*__tmp->type))__val; \
			smp_wmb(); \
			__kfifo->in++; \
		} \
	} \
	__ret; \
})

/**
 * kfifo_get - get one element from the fifo
 * @fifo: address of the fifo to be used
 * @val: address where to store the element
 *
 * Returns 0 if the fifo was empty, otherwise the number of elements
 * processed.  No extra locking is needed for a single concurrent
 * reader plus a single concurrent writer.
 */
#define kfifo_get(fifo, val) \
__kfifo_uint_must_check_helper( \
({ \
	typeof((fifo) + 1) __tmp = (fifo); \
	typeof(__tmp->ptr) __val = (val); \
	unsigned int __ret; \
	const size_t __recsize = sizeof(*__tmp->rectype); \
	struct __kfifo *__kfifo = &__tmp->kfifo; \
	if (__recsize) \
		__ret = __kfifo_out_r(__kfifo, __val, sizeof(*__val), \
			__recsize); \
	else { \
		__ret = !kfifo_is_empty(__tmp); \
		if (__ret) { \
			*(typeof(__tmp->type))__val = \
				(__is_kfifo_ptr(__tmp) ? \
				((typeof(__tmp->type))__kfifo->data) : \
				(__tmp->buf) \
				)[__kfifo->out & __tmp->kfifo.mask]; \
			smp_wmb(); \
			__kfifo->out++; \
		} \
	} \
	__ret; \
}) \
)
462 */ 463 #define kfifo_peek(fifo, val) \ 464 __kfifo_uint_must_check_helper( \ 465 ({ \ 466 typeof((fifo) + 1) __tmp = (fifo); \ 467 typeof(__tmp->ptr) __val = (val); \ 468 unsigned int __ret; \ 469 const size_t __recsize = sizeof(*__tmp->rectype); \ 470 struct __kfifo *__kfifo = &__tmp->kfifo; \ 471 if (__recsize) \ 472 __ret = __kfifo_out_peek_r(__kfifo, __val, sizeof(*__val), \ 473 __recsize); \ 474 else { \ 475 __ret = !kfifo_is_empty(__tmp); \ 476 if (__ret) { \ 477 *(typeof(__tmp->type))__val = \ 478 (__is_kfifo_ptr(__tmp) ? \ 479 ((typeof(__tmp->type))__kfifo->data) : \ 480 (__tmp->buf) \ 481 )[__kfifo->out & __tmp->kfifo.mask]; \ 482 smp_wmb(); \ 483 } \ 484 } \ 485 __ret; \ 486 }) \ 487 ) 488 489 /** 490 * kfifo_in - put data into the fifo 491 * @fifo: address of the fifo to be used 492 * @buf: the data to be added 493 * @n: number of elements to be added 494 * 495 * This macro copies the given buffer into the fifo and returns the 496 * number of copied elements. 497 * 498 * Note that with only one concurrent reader and one concurrent 499 * writer, you don't need extra locking to use these macro. 500 */ 501 #define kfifo_in(fifo, buf, n) \ 502 ({ \ 503 typeof((fifo) + 1) __tmp = (fifo); \ 504 typeof(__tmp->ptr_const) __buf = (buf); \ 505 unsigned long __n = (n); \ 506 const size_t __recsize = sizeof(*__tmp->rectype); \ 507 struct __kfifo *__kfifo = &__tmp->kfifo; \ 508 (__recsize) ?\ 509 __kfifo_in_r(__kfifo, __buf, __n, __recsize) : \ 510 __kfifo_in(__kfifo, __buf, __n); \ 511 }) 512 513 /** 514 * kfifo_in_spinlocked - put data into the fifo using a spinlock for locking 515 * @fifo: address of the fifo to be used 516 * @buf: the data to be added 517 * @n: number of elements to be added 518 * @lock: pointer to the spinlock to use for locking 519 * 520 * This macro copies the given values buffer into the fifo and returns the 521 * number of copied elements. 
522 */ 523 #define kfifo_in_spinlocked(fifo, buf, n, lock) \ 524 ({ \ 525 unsigned long __flags; \ 526 unsigned int __ret; \ 527 spin_lock_irqsave(lock, __flags); \ 528 __ret = kfifo_in(fifo, buf, n); \ 529 spin_unlock_irqrestore(lock, __flags); \ 530 __ret; \ 531 }) 532 533 /* alias for kfifo_in_spinlocked, will be removed in a future release */ 534 #define kfifo_in_locked(fifo, buf, n, lock) \ 535 kfifo_in_spinlocked(fifo, buf, n, lock) 536 537 /** 538 * kfifo_out - get data from the fifo 539 * @fifo: address of the fifo to be used 540 * @buf: pointer to the storage buffer 541 * @n: max. number of elements to get 542 * 543 * This macro get some data from the fifo and return the numbers of elements 544 * copied. 545 * 546 * Note that with only one concurrent reader and one concurrent 547 * writer, you don't need extra locking to use these macro. 548 */ 549 #define kfifo_out(fifo, buf, n) \ 550 __kfifo_uint_must_check_helper( \ 551 ({ \ 552 typeof((fifo) + 1) __tmp = (fifo); \ 553 typeof(__tmp->ptr) __buf = (buf); \ 554 unsigned long __n = (n); \ 555 const size_t __recsize = sizeof(*__tmp->rectype); \ 556 struct __kfifo *__kfifo = &__tmp->kfifo; \ 557 (__recsize) ?\ 558 __kfifo_out_r(__kfifo, __buf, __n, __recsize) : \ 559 __kfifo_out(__kfifo, __buf, __n); \ 560 }) \ 561 ) 562 563 /** 564 * kfifo_out_spinlocked - get data from the fifo using a spinlock for locking 565 * @fifo: address of the fifo to be used 566 * @buf: pointer to the storage buffer 567 * @n: max. number of elements to get 568 * @lock: pointer to the spinlock to use for locking 569 * 570 * This macro get the data from the fifo and return the numbers of elements 571 * copied. 
/**
 * kfifo_out_spinlocked - get data from the fifo under a spinlock
 * @fifo: address of the fifo to be used
 * @buf: pointer to the storage buffer
 * @n: max. number of elements to get
 * @lock: pointer to the spinlock to use for locking
 *
 * Returns the number of copied elements.  The lock is taken with
 * spin_lock_irqsave(), so this is usable from any context.
 */
#define kfifo_out_spinlocked(fifo, buf, n, lock) \
__kfifo_uint_must_check_helper( \
({ \
	unsigned long __flags; \
	unsigned int __ret; \
	spin_lock_irqsave(lock, __flags); \
	__ret = kfifo_out(fifo, buf, n); \
	spin_unlock_irqrestore(lock, __flags); \
	__ret; \
}) \
)

/* alias for kfifo_out_spinlocked, will be removed in a future release */
#define kfifo_out_locked(fifo, buf, n, lock) \
		kfifo_out_spinlocked(fifo, buf, n, lock)

/**
 * kfifo_from_user - copy data from user space into the fifo
 * @fifo: address of the fifo to be used
 * @from: pointer to the data to be added
 * @len: length of the data to be added, in bytes
 * @copied: pointer to output variable to store the number of copied bytes
 *
 * Copies at most @len bytes from @from into the fifo, depending on the
 * available space, and returns -EFAULT/0.  No extra locking is needed
 * for a single concurrent reader plus a single concurrent writer.
 */
#define kfifo_from_user(fifo, from, len, copied) \
__kfifo_uint_must_check_helper( \
({ \
	typeof((fifo) + 1) __tmp = (fifo); \
	const void __user *__from = (from); \
	unsigned int __len = (len); \
	unsigned int *__copied = (copied); \
	const size_t __recsize = sizeof(*__tmp->rectype); \
	struct __kfifo *__kfifo = &__tmp->kfifo; \
	(__recsize) ? \
	__kfifo_from_user_r(__kfifo, __from, __len, __copied, __recsize) : \
	__kfifo_from_user(__kfifo, __from, __len, __copied); \
}) \
)

/**
 * kfifo_to_user - copy data from the fifo into user space
 * @fifo: address of the fifo to be used
 * @to: where the data must be copied
 * @len: size of the destination buffer, in bytes
 * @copied: pointer to output variable to store the number of copied bytes
 *
 * Copies at most @len bytes from the fifo into the @to buffer and
 * returns -EFAULT/0.  No extra locking is needed for a single
 * concurrent reader plus a single concurrent writer.
 */
#define kfifo_to_user(fifo, to, len, copied) \
__kfifo_uint_must_check_helper( \
({ \
	typeof((fifo) + 1) __tmp = (fifo); \
	void __user *__to = (to); \
	unsigned int __len = (len); \
	unsigned int *__copied = (copied); \
	const size_t __recsize = sizeof(*__tmp->rectype); \
	struct __kfifo *__kfifo = &__tmp->kfifo; \
	(__recsize) ? \
	__kfifo_to_user_r(__kfifo, __to, __len, __copied, __recsize) : \
	__kfifo_to_user(__kfifo, __to, __len, __copied); \
}) \
)
657 */ 658 #define kfifo_dma_in_prepare(fifo, sgl, nents, len) \ 659 ({ \ 660 typeof((fifo) + 1) __tmp = (fifo); \ 661 struct scatterlist *__sgl = (sgl); \ 662 int __nents = (nents); \ 663 unsigned int __len = (len); \ 664 const size_t __recsize = sizeof(*__tmp->rectype); \ 665 struct __kfifo *__kfifo = &__tmp->kfifo; \ 666 (__recsize) ? \ 667 __kfifo_dma_in_prepare_r(__kfifo, __sgl, __nents, __len, __recsize) : \ 668 __kfifo_dma_in_prepare(__kfifo, __sgl, __nents, __len); \ 669 }) 670 671 /** 672 * kfifo_dma_in_finish - finish a DMA IN operation 673 * @fifo: address of the fifo to be used 674 * @len: number of bytes to received 675 * 676 * This macro finish a DMA IN operation. The in counter will be updated by 677 * the len parameter. No error checking will be done. 678 * 679 * Note that with only one concurrent reader and one concurrent 680 * writer, you don't need extra locking to use these macros. 681 */ 682 #define kfifo_dma_in_finish(fifo, len) \ 683 (void)({ \ 684 typeof((fifo) + 1) __tmp = (fifo); \ 685 unsigned int __len = (len); \ 686 const size_t __recsize = sizeof(*__tmp->rectype); \ 687 struct __kfifo *__kfifo = &__tmp->kfifo; \ 688 if (__recsize) \ 689 __kfifo_dma_in_finish_r(__kfifo, __len, __recsize); \ 690 else \ 691 __kfifo->in += __len / sizeof(*__tmp->type); \ 692 }) 693 694 /** 695 * kfifo_dma_out_prepare - setup a scatterlist for DMA output 696 * @fifo: address of the fifo to be used 697 * @sgl: pointer to the scatterlist array 698 * @nents: number of entries in the scatterlist array 699 * @len: number of elements to transfer 700 * 701 * This macro fills a scatterlist for DMA output which at most @len bytes 702 * to transfer. 703 * It returns the number entries in the scatterlist array. 704 * A zero means there is no space available and the scatterlist is not filled. 705 * 706 * Note that with only one concurrent reader and one concurrent 707 * writer, you don't need extra locking to use these macros. 
708 */ 709 #define kfifo_dma_out_prepare(fifo, sgl, nents, len) \ 710 ({ \ 711 typeof((fifo) + 1) __tmp = (fifo); \ 712 struct scatterlist *__sgl = (sgl); \ 713 int __nents = (nents); \ 714 unsigned int __len = (len); \ 715 const size_t __recsize = sizeof(*__tmp->rectype); \ 716 struct __kfifo *__kfifo = &__tmp->kfifo; \ 717 (__recsize) ? \ 718 __kfifo_dma_out_prepare_r(__kfifo, __sgl, __nents, __len, __recsize) : \ 719 __kfifo_dma_out_prepare(__kfifo, __sgl, __nents, __len); \ 720 }) 721 722 /** 723 * kfifo_dma_out_finish - finish a DMA OUT operation 724 * @fifo: address of the fifo to be used 725 * @len: number of bytes transferred 726 * 727 * This macro finish a DMA OUT operation. The out counter will be updated by 728 * the len parameter. No error checking will be done. 729 * 730 * Note that with only one concurrent reader and one concurrent 731 * writer, you don't need extra locking to use these macros. 732 */ 733 #define kfifo_dma_out_finish(fifo, len) \ 734 (void)({ \ 735 typeof((fifo) + 1) __tmp = (fifo); \ 736 unsigned int __len = (len); \ 737 const size_t __recsize = sizeof(*__tmp->rectype); \ 738 struct __kfifo *__kfifo = &__tmp->kfifo; \ 739 if (__recsize) \ 740 __kfifo_dma_out_finish_r(__kfifo, __recsize); \ 741 else \ 742 __kfifo->out += __len / sizeof(*__tmp->type); \ 743 }) 744 745 /** 746 * kfifo_out_peek - gets some data from the fifo 747 * @fifo: address of the fifo to be used 748 * @buf: pointer to the storage buffer 749 * @n: max. number of elements to get 750 * 751 * This macro get the data from the fifo and return the numbers of elements 752 * copied. The data is not removed from the fifo. 753 * 754 * Note that with only one concurrent reader and one concurrent 755 * writer, you don't need extra locking to use these macro. 
756 */ 757 #define kfifo_out_peek(fifo, buf, n) \ 758 __kfifo_uint_must_check_helper( \ 759 ({ \ 760 typeof((fifo) + 1) __tmp = (fifo); \ 761 typeof(__tmp->ptr) __buf = (buf); \ 762 unsigned long __n = (n); \ 763 const size_t __recsize = sizeof(*__tmp->rectype); \ 764 struct __kfifo *__kfifo = &__tmp->kfifo; \ 765 (__recsize) ? \ 766 __kfifo_out_peek_r(__kfifo, __buf, __n, __recsize) : \ 767 __kfifo_out_peek(__kfifo, __buf, __n); \ 768 }) \ 769 ) 770 771 extern int __kfifo_alloc(struct __kfifo *fifo, unsigned int size, 772 size_t esize, gfp_t gfp_mask); 773 774 extern void __kfifo_free(struct __kfifo *fifo); 775 776 extern int __kfifo_init(struct __kfifo *fifo, void *buffer, 777 unsigned int size, size_t esize); 778 779 extern unsigned int __kfifo_in(struct __kfifo *fifo, 780 const void *buf, unsigned int len); 781 782 extern unsigned int __kfifo_out(struct __kfifo *fifo, 783 void *buf, unsigned int len); 784 785 extern int __kfifo_from_user(struct __kfifo *fifo, 786 const void __user *from, unsigned long len, unsigned int *copied); 787 788 extern int __kfifo_to_user(struct __kfifo *fifo, 789 void __user *to, unsigned long len, unsigned int *copied); 790 791 extern unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo, 792 struct scatterlist *sgl, int nents, unsigned int len); 793 794 extern unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo, 795 struct scatterlist *sgl, int nents, unsigned int len); 796 797 extern unsigned int __kfifo_out_peek(struct __kfifo *fifo, 798 void *buf, unsigned int len); 799 800 extern unsigned int __kfifo_in_r(struct __kfifo *fifo, 801 const void *buf, unsigned int len, size_t recsize); 802 803 extern unsigned int __kfifo_out_r(struct __kfifo *fifo, 804 void *buf, unsigned int len, size_t recsize); 805 806 extern int __kfifo_from_user_r(struct __kfifo *fifo, 807 const void __user *from, unsigned long len, unsigned int *copied, 808 size_t recsize); 809 810 extern int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to, 811 
unsigned long len, unsigned int *copied, size_t recsize); 812 813 extern unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo, 814 struct scatterlist *sgl, int nents, unsigned int len, size_t recsize); 815 816 extern void __kfifo_dma_in_finish_r(struct __kfifo *fifo, 817 unsigned int len, size_t recsize); 818 819 extern unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo, 820 struct scatterlist *sgl, int nents, unsigned int len, size_t recsize); 821 822 extern void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize); 823 824 extern unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize); 825 826 extern void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize); 827 828 extern unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, 829 void *buf, unsigned int len, size_t recsize); 830 831 extern unsigned int __kfifo_max_r(unsigned int len, size_t recsize); 832 833 #endif
1 /* 2 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk). 3 * 4 * (C) SGI 2006, Christoph Lameter 5 * Cleaned up and restructured to ease the addition of alternative 6 * implementations of SLAB allocators. 7 * (C) Linux Foundation 2008-2013 8 * Unified interface for all slab allocators 9 */ 10 11 #ifndef _LINUX_SLAB_H 12 #define _LINUX_SLAB_H 13 14 #include <linux/gfp.h> 15 #include <linux/types.h> 16 #include <linux/workqueue.h> 17 18 19 /* 20 * Flags to pass to kmem_cache_create(). 21 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set. 22 */ 23 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */ 24 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */ 25 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */ 26 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */ 27 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */ 28 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */ 29 #define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */ 30 /* 31 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS! 32 * 33 * This delays freeing the SLAB page by a grace period, it does _NOT_ 34 * delay object freeing. This means that if you do kmem_cache_free() 35 * that memory location is free to be reused at any time. Thus it may 36 * be possible to see another object there in the same RCU grace period. 37 * 38 * This feature only ensures the memory location backing the object 39 * stays valid, the trick to using this is relying on an independent 40 * object validation pass. 
Something like: 41 * 42 * rcu_read_lock() 43 * again: 44 * obj = lockless_lookup(key); 45 * if (obj) { 46 * if (!try_get_ref(obj)) // might fail for free objects 47 * goto again; 48 * 49 * if (obj->key != key) { // not the object we expected 50 * put_ref(obj); 51 * goto again; 52 * } 53 * } 54 * rcu_read_unlock(); 55 * 56 * This is useful if we need to approach a kernel structure obliquely, 57 * from its address obtained without the usual locking. We can lock 58 * the structure to stabilize it and check it's still at the given address, 59 * only if we can be sure that the memory has not been meanwhile reused 60 * for some other kind of object (which our subsystem's lock might corrupt). 61 * 62 * rcu_read_lock before reading the address, then rcu_read_unlock after 63 * taking the spinlock within the structure expected at that address. 64 */ 65 #define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */ 66 #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ 67 #define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */ 68 69 /* Flag to prevent checks on free */ 70 #ifdef CONFIG_DEBUG_OBJECTS 71 # define SLAB_DEBUG_OBJECTS 0x00400000UL 72 #else 73 # define SLAB_DEBUG_OBJECTS 0x00000000UL 74 #endif 75 76 #define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */ 77 78 /* Don't track use of uninitialized memory */ 79 #ifdef CONFIG_KMEMCHECK 80 # define SLAB_NOTRACK 0x01000000UL 81 #else 82 # define SLAB_NOTRACK 0x00000000UL 83 #endif 84 #ifdef CONFIG_FAILSLAB 85 # define SLAB_FAILSLAB 0x02000000UL /* Fault injection mark */ 86 #else 87 # define SLAB_FAILSLAB 0x00000000UL 88 #endif 89 90 /* The following flags affect the page allocator grouping pages by mobility */ 91 #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ 92 #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ 93 /* 94 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests. 
95 * 96 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault. 97 * 98 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can. 99 * Both make kfree a no-op. 100 */ 101 #define ZERO_SIZE_PTR ((void *)16) 102 103 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \ 104 (unsigned long)ZERO_SIZE_PTR) 105 106 #include <linux/kmemleak.h> 107 #include <linux/kasan.h> 108 109 struct mem_cgroup; 110 /* 111 * struct kmem_cache related prototypes 112 */ 113 void __init kmem_cache_init(void); 114 int slab_is_available(void); 115 116 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, 117 unsigned long, 118 void (*)(void *)); 119 void kmem_cache_destroy(struct kmem_cache *); 120 int kmem_cache_shrink(struct kmem_cache *); 121 122 void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *); 123 void memcg_deactivate_kmem_caches(struct mem_cgroup *); 124 void memcg_destroy_kmem_caches(struct mem_cgroup *); 125 126 /* 127 * Please use this macro to create slab caches. Simply specify the 128 * name of the structure and maybe some flags that are listed above. 129 * 130 * The alignment of the struct determines object alignment. If you 131 * f.e. add ____cacheline_aligned_in_smp to the struct declaration 132 * then the objects will be properly aligned in SMP configurations. 133 */ 134 #define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\ 135 sizeof(struct __struct), __alignof__(struct __struct),\ 136 (__flags), NULL) 137 138 /* 139 * Common kmalloc functions provided by all allocators 140 */ 141 void * __must_check __krealloc(const void *, size_t, gfp_t); 142 void * __must_check krealloc(const void *, size_t, gfp_t); 143 void kfree(const void *); 144 void kzfree(const void *); 145 size_t ksize(const void *); 146 147 /* 148 * Some archs want to perform DMA into kmalloc caches and need a guaranteed 149 * alignment larger than the alignment of a 64-bit integer. 
150 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that. 151 */ 152 #if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8 153 #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN 154 #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN 155 #define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN) 156 #else 157 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) 158 #endif 159 160 /* 161 * Kmalloc array related definitions 162 */ 163 164 #ifdef CONFIG_SLAB 165 /* 166 * The largest kmalloc size supported by the SLAB allocators is 167 * 32 megabyte (2^25) or the maximum allocatable page order if that is 168 * less than 32 MB. 169 * 170 * WARNING: Its not easy to increase this value since the allocators have 171 * to do various tricks to work around compiler limitations in order to 172 * ensure proper constant folding. 173 */ 174 #define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \ 175 (MAX_ORDER + PAGE_SHIFT - 1) : 25) 176 #define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH 177 #ifndef KMALLOC_SHIFT_LOW 178 #define KMALLOC_SHIFT_LOW 5 179 #endif 180 #endif 181 182 #ifdef CONFIG_SLUB 183 /* 184 * SLUB directly allocates requests fitting in to an order-1 page 185 * (PAGE_SIZE*2). Larger requests are passed to the page allocator. 186 */ 187 #define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1) 188 #define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT) 189 #ifndef KMALLOC_SHIFT_LOW 190 #define KMALLOC_SHIFT_LOW 3 191 #endif 192 #endif 193 194 #ifdef CONFIG_SLOB 195 /* 196 * SLOB passes all requests larger than one page to the page allocator. 197 * No kmalloc array is necessary since objects of different sizes can 198 * be allocated from the same page. 199 */ 200 #define KMALLOC_SHIFT_HIGH PAGE_SHIFT 201 #define KMALLOC_SHIFT_MAX 30 202 #ifndef KMALLOC_SHIFT_LOW 203 #define KMALLOC_SHIFT_LOW 3 204 #endif 205 #endif 206 207