Error Trace
[Home]
Bug # 54
Show/hide error trace Error trace
{ 95 struct kernel_symbol { unsigned long value; const char *name; } ; 33 struct module ; 19 typedef signed char __s8; 20 typedef unsigned char __u8; 22 typedef short __s16; 23 typedef unsigned short __u16; 25 typedef int __s32; 26 typedef unsigned int __u32; 29 typedef long long __s64; 30 typedef unsigned long long __u64; 16 typedef unsigned char u8; 18 typedef short s16; 19 typedef unsigned short u16; 21 typedef int s32; 22 typedef unsigned int u32; 24 typedef long long s64; 25 typedef unsigned long long u64; 14 typedef long __kernel_long_t; 15 typedef unsigned long __kernel_ulong_t; 27 typedef int __kernel_pid_t; 40 typedef __kernel_long_t __kernel_suseconds_t; 48 typedef unsigned int __kernel_uid32_t; 49 typedef unsigned int __kernel_gid32_t; 71 typedef __kernel_ulong_t __kernel_size_t; 72 typedef __kernel_long_t __kernel_ssize_t; 87 typedef long long __kernel_loff_t; 88 typedef __kernel_long_t __kernel_time_t; 89 typedef __kernel_long_t __kernel_clock_t; 90 typedef int __kernel_timer_t; 91 typedef int __kernel_clockid_t; 12 typedef __u32 __kernel_dev_t; 15 typedef __kernel_dev_t dev_t; 18 typedef unsigned short umode_t; 21 typedef __kernel_pid_t pid_t; 26 typedef __kernel_clockid_t clockid_t; 29 typedef _Bool bool; 31 typedef __kernel_uid32_t uid_t; 32 typedef __kernel_gid32_t gid_t; 45 typedef __kernel_loff_t loff_t; 54 typedef __kernel_size_t size_t; 59 typedef __kernel_ssize_t ssize_t; 69 typedef __kernel_time_t time_t; 102 typedef __s32 int32_t; 108 typedef __u32 uint32_t; 133 typedef unsigned long sector_t; 134 typedef unsigned long blkcnt_t; 147 typedef u64 dma_addr_t; 158 typedef unsigned int gfp_t; 159 typedef unsigned int fmode_t; 160 typedef unsigned int oom_flags_t; 163 typedef u64 phys_addr_t; 168 typedef phys_addr_t resource_size_t; 178 struct __anonstruct_atomic_t_6 { int counter; } ; 178 typedef struct __anonstruct_atomic_t_6 atomic_t; 183 struct __anonstruct_atomic64_t_7 { long counter; } ; 183 typedef struct __anonstruct_atomic64_t_7 
atomic64_t; 184 struct list_head { struct list_head *next; struct list_head *prev; } ; 189 struct hlist_node ; 189 struct hlist_head { struct hlist_node *first; } ; 193 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ; 204 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ; 65 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ; 59 struct __anonstruct_ldv_1016_9 { unsigned int a; unsigned int b; } ; 59 struct __anonstruct_ldv_1031_10 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ; 59 union __anonunion_ldv_1032_8 { struct __anonstruct_ldv_1016_9 ldv_1016; struct __anonstruct_ldv_1031_10 ldv_1031; } ; 59 struct desc_struct { union __anonunion_ldv_1032_8 ldv_1032; } ; 12 typedef unsigned long pteval_t; 15 typedef unsigned long pgdval_t; 16 typedef unsigned long pgprotval_t; 18 struct __anonstruct_pte_t_11 { pteval_t pte; } ; 18 typedef struct __anonstruct_pte_t_11 pte_t; 20 struct pgprot { pgprotval_t pgprot; } ; 242 typedef struct pgprot pgprot_t; 244 struct __anonstruct_pgd_t_12 { pgdval_t pgd; } ; 244 typedef struct __anonstruct_pgd_t_12 pgd_t; 332 struct page ; 332 typedef struct page *pgtable_t; 340 struct file ; 353 struct seq_file ; 390 struct thread_struct ; 392 struct mm_struct ; 393 struct task_struct ; 394 struct cpumask ; 327 struct arch_spinlock ; 18 typedef u16 __ticket_t; 19 typedef u32 __ticketpair_t; 20 struct __raw_tickets { __ticket_t head; 
__ticket_t tail; } ; 32 union __anonunion_ldv_1452_15 { __ticketpair_t head_tail; struct __raw_tickets tickets; } ; 32 struct arch_spinlock { union __anonunion_ldv_1452_15 ldv_1452; } ; 33 typedef struct arch_spinlock arch_spinlock_t; 34 struct qrwlock { atomic_t cnts; arch_spinlock_t lock; } ; 14 typedef struct qrwlock arch_rwlock_t; 142 typedef void (*ctor_fn_t)(); 48 struct device ; 403 struct file_operations ; 415 struct completion ; 416 struct pid ; 526 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ; 102 struct timespec ; 127 struct kernel_vm86_regs { struct pt_regs pt; unsigned short es; unsigned short __esh; unsigned short ds; unsigned short __dsh; unsigned short fs; unsigned short __fsh; unsigned short gs; unsigned short __gsh; } ; 79 union __anonunion_ldv_3051_20 { struct pt_regs *regs; struct kernel_vm86_regs *vm86; } ; 79 struct math_emu_info { long ___orig_eip; union __anonunion_ldv_3051_20 ldv_3051; } ; 306 struct cpumask { unsigned long bits[128U]; } ; 14 typedef struct cpumask cpumask_t; 671 typedef struct cpumask *cpumask_var_t; 161 struct seq_operations ; 293 struct i387_fsave_struct { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ; 311 struct __anonstruct_ldv_5357_25 { u64 rip; u64 rdp; } ; 311 struct __anonstruct_ldv_5363_26 { u32 fip; u32 fcs; u32 foo; u32 fos; } ; 311 union __anonunion_ldv_5364_24 { struct __anonstruct_ldv_5357_25 ldv_5357; struct __anonstruct_ldv_5363_26 ldv_5363; } ; 311 union __anonunion_ldv_5373_27 { u32 padding1[12U]; u32 sw_reserved[12U]; } ; 311 struct i387_fxsave_struct { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion_ldv_5364_24 ldv_5364; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion_ldv_5373_27 ldv_5373; } ; 345 struct i387_soft_struct { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 
no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ; 366 struct ymmh_struct { u32 ymmh_space[64U]; } ; 371 struct lwp_struct { u8 reserved[128U]; } ; 376 struct bndregs_struct { u64 bndregs[8U]; } ; 380 struct bndcsr_struct { u64 cfg_reg_u; u64 status_reg; } ; 385 struct xsave_hdr_struct { u64 xstate_bv; u64 xcomp_bv; u64 reserved[6U]; } ; 391 struct xsave_struct { struct i387_fxsave_struct i387; struct xsave_hdr_struct xsave_hdr; struct ymmh_struct ymmh; struct lwp_struct lwp; struct bndregs_struct bndregs; struct bndcsr_struct bndcsr; } ; 400 union thread_xstate { struct i387_fsave_struct fsave; struct i387_fxsave_struct fxsave; struct i387_soft_struct soft; struct xsave_struct xsave; } ; 408 struct fpu { unsigned int last_cpu; unsigned int has_fpu; union thread_xstate *state; } ; 464 struct kmem_cache ; 465 struct perf_event ; 466 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned long usersp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; unsigned long fs; unsigned long gs; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; struct fpu fpu; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; unsigned char fpu_counter; } ; 23 typedef atomic64_t atomic_long_t; 34 struct lockdep_map ; 55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ; 26 struct lockdep_subclass_key { char __one_byte; } ; 53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ; 59 struct lock_class { struct list_head hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; 
unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ; 144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ; 205 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; } ; 530 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 32 typedef struct raw_spinlock raw_spinlock_t; 33 struct __anonstruct_ldv_6410_31 { u8 __padding[24U]; struct lockdep_map dep_map; } ; 33 union __anonunion_ldv_6411_30 { struct raw_spinlock rlock; struct __anonstruct_ldv_6410_31 ldv_6410; } ; 33 struct spinlock { union __anonunion_ldv_6411_30 ldv_6411; } ; 76 typedef struct spinlock spinlock_t; 23 struct __anonstruct_rwlock_t_32 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 23 typedef struct __anonstruct_rwlock_t_32 rwlock_t; 12 struct __wait_queue ; 12 typedef struct __wait_queue wait_queue_t; 15 struct __wait_queue { unsigned int flags; void *private; int (*func)(wait_queue_t *, unsigned int, int, void *); struct list_head task_list; } ; 35 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ; 40 typedef struct __wait_queue_head wait_queue_head_t; 1029 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ; 51 typedef struct seqcount seqcount_t; 98 struct __anonstruct_nodemask_t_34 { unsigned long bits[16U]; } ; 98 typedef struct __anonstruct_nodemask_t_34 nodemask_t; 825 struct optimistic_spin_queue { atomic_t tail; } ; 26 struct mutex { atomic_t count; spinlock_t 
wait_lock; struct list_head wait_list; struct task_struct *owner; const char *name; void *magic; struct lockdep_map dep_map; } ; 68 struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; } ; 178 struct rw_semaphore ; 179 struct rw_semaphore { long count; struct list_head wait_list; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct task_struct *owner; struct lockdep_map dep_map; } ; 172 struct completion { unsigned int done; wait_queue_head_t wait; } ; 1112 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ; 13 struct timeval { __kernel_time_t tv_sec; __kernel_suseconds_t tv_usec; } ; 323 union ktime { s64 tv64; } ; 41 typedef union ktime ktime_t; 207 struct tvec_base ; 208 struct timer_list { struct list_head entry; unsigned long expires; struct tvec_base *base; void (*function)(unsigned long); unsigned long data; int slack; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ; 254 struct hrtimer ; 255 enum hrtimer_restart ; 266 struct workqueue_struct ; 267 struct work_struct ; 53 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ; 106 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ; 72 struct resource { resource_size_t start; resource_size_t end; const char *name; unsigned long flags; struct resource *parent; struct resource *sibling; struct resource *child; } ; 172 struct pci_dev ; 58 struct pm_message { int event; } ; 64 typedef struct pm_message pm_message_t; 65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int 
(*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ; 320 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ; 327 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ; 335 struct wakeup_source ; 546 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; } ; 553 struct dev_pm_qos ; 553 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool ignore_children; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data 
*subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ; 614 struct dev_pm_domain { struct dev_pm_ops ops; } ; 133 struct pci_bus ; 22 struct __anonstruct_mm_context_t_99 { void *ldt; int size; unsigned short ia32_compat; struct mutex lock; void *vdso; } ; 22 typedef struct __anonstruct_mm_context_t_99 mm_context_t; 18 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ; 40 struct rb_root { struct rb_node *rb_node; } ; 87 struct vm_area_struct ; 169 struct device_node ; 1304 struct llist_node ; 64 struct llist_node { struct llist_node *next; } ; 57 struct mem_cgroup ; 338 union __anonunion_ldv_13039_126 { unsigned long bitmap[4U]; struct callback_head callback_head; } ; 338 struct idr_layer { int prefix; int layer; struct idr_layer *ary[256U]; int count; union __anonunion_ldv_13039_126 ldv_13039; } ; 41 struct idr { struct idr_layer *hint; struct idr_layer *top; int layers; int cur; spinlock_t lock; int id_free_cnt; struct idr_layer *id_free; } ; 124 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ; 153 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ; 185 struct dentry ; 186 struct iattr ; 187 struct super_block ; 188 struct file_system_type ; 189 struct kernfs_open_node ; 190 struct kernfs_iattrs ; 213 struct kernfs_root ; 213 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ; 85 struct kernfs_node ; 85 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ; 89 struct kernfs_ops ; 89 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; struct kernfs_node *notify_next; } ; 96 union __anonunion_ldv_13184_127 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ; 96 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct 
rb_node rb; const void *ns; unsigned int hash; union __anonunion_ldv_13184_127 ldv_13184; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ; 138 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); } ; 155 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ; 171 struct vm_operations_struct ; 171 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; void *priv; struct mutex mutex; int event; struct list_head list; size_t atomic_write_len; bool mmapped; const struct vm_operations_struct *vm_ops; } ; 187 struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ; 464 struct sock ; 465 struct kobject ; 466 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ; 472 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ; 83 struct user_namespace ; 22 struct __anonstruct_kuid_t_128 { uid_t val; } ; 22 typedef struct __anonstruct_kuid_t_128 kuid_t; 27 struct __anonstruct_kgid_t_129 { gid_t val; } ; 27 typedef 
struct __anonstruct_kgid_t_129 kgid_t; 127 struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; } ; 36 struct bin_attribute ; 37 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ; 37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ; 67 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ; 131 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ; 470 struct kref { atomic_t refcount; } ; 52 struct kset ; 52 struct kobj_type ; 52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ; 114 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ; 122 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ; 130 struct 
kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ; 147 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ; 252 struct kmem_cache_cpu { void **freelist; unsigned long tid; struct page *page; struct page *partial; unsigned int stat[26U]; } ; 48 struct kmem_cache_order_objects { unsigned long x; } ; 58 struct memcg_cache_params ; 58 struct kmem_cache_node ; 58 struct kmem_cache { struct kmem_cache_cpu *cpu_slab; unsigned long flags; unsigned long min_partial; int size; int object_size; int offset; int cpu_partial; struct kmem_cache_order_objects oo; struct kmem_cache_order_objects max; struct kmem_cache_order_objects min; gfp_t allocflags; int refcount; void (*ctor)(void *); int inuse; int align; int reserved; const char *name; struct list_head list; struct kobject kobj; struct memcg_cache_params *memcg_params; int max_attr_size; struct kset *memcg_kset; int remote_node_defrag_ratio; struct kmem_cache_node *node[1024U]; } ; 505 struct __anonstruct_ldv_14089_131 { struct callback_head callback_head; struct kmem_cache *memcg_caches[0U]; } ; 505 struct __anonstruct_ldv_14095_132 { struct mem_cgroup *memcg; struct list_head list; struct kmem_cache *root_cache; atomic_t nr_pages; } ; 505 union __anonunion_ldv_14096_130 { struct __anonstruct_ldv_14089_131 ldv_14089; struct __anonstruct_ldv_14095_132 ldv_14095; } ; 505 struct memcg_cache_params { bool is_root_cache; union __anonunion_ldv_14096_130 ldv_14096; } ; 33 struct v4l2_streamparm ; 36 struct i2c_adapter ; 38 struct v4l2_format ; 41 struct vb2_queue ; 45 struct vb2_buffer ; 53 struct v4l2_buffer ; 836 struct nsproxy ; 37 struct cred ; 24 struct inode ; 58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ; 66 enum 
uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ; 73 struct __anonstruct_ldv_15236_141 { struct arch_uprobe_task autask; unsigned long vaddr; } ; 73 struct __anonstruct_ldv_15240_142 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ; 73 union __anonunion_ldv_15241_140 { struct __anonstruct_ldv_15236_141 ldv_15236; struct __anonstruct_ldv_15240_142 ldv_15240; } ; 73 struct uprobe ; 73 struct return_instance ; 73 struct uprobe_task { enum uprobe_task_state state; union __anonunion_ldv_15241_140 ldv_15241; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ; 94 struct xol_area ; 95 struct uprobes_state { struct xol_area *xol_area; } ; 133 struct address_space ; 134 union __anonunion_ldv_15350_143 { struct address_space *mapping; void *s_mem; } ; 134 union __anonunion_ldv_15356_145 { unsigned long index; void *freelist; bool pfmemalloc; } ; 134 struct __anonstruct_ldv_15366_149 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ; 134 union __anonunion_ldv_15368_148 { atomic_t _mapcount; struct __anonstruct_ldv_15366_149 ldv_15366; int units; } ; 134 struct __anonstruct_ldv_15370_147 { union __anonunion_ldv_15368_148 ldv_15368; atomic_t _count; } ; 134 union __anonunion_ldv_15372_146 { unsigned long counters; struct __anonstruct_ldv_15370_147 ldv_15370; unsigned int active; } ; 134 struct __anonstruct_ldv_15373_144 { union __anonunion_ldv_15356_145 ldv_15356; union __anonunion_ldv_15372_146 ldv_15372; } ; 134 struct __anonstruct_ldv_15380_151 { struct page *next; int pages; int pobjects; } ; 134 struct slab ; 134 union __anonunion_ldv_15385_150 { struct list_head lru; struct __anonstruct_ldv_15380_151 ldv_15380; struct slab *slab_page; struct callback_head callback_head; pgtable_t pmd_huge_pte; } ; 134 union __anonunion_ldv_15391_152 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; 
struct page *first_page; } ; 134 struct page { unsigned long flags; union __anonunion_ldv_15350_143 ldv_15350; struct __anonstruct_ldv_15373_144 ldv_15373; union __anonunion_ldv_15385_150 ldv_15385; union __anonunion_ldv_15391_152 ldv_15391; unsigned long debug_flags; } ; 187 struct page_frag { struct page *page; __u32 offset; __u32 size; } ; 239 struct __anonstruct_linear_154 { struct rb_node rb; unsigned long rb_subtree_last; } ; 239 union __anonunion_shared_153 { struct __anonstruct_linear_154 linear; struct list_head nonlinear; } ; 239 struct anon_vma ; 239 struct mempolicy ; 239 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; union __anonunion_shared_153 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; } ; 311 struct core_thread { struct task_struct *task; struct core_thread *next; } ; 317 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ; 330 struct task_rss_stat { int events; int count[3U]; } ; 338 struct mm_rss_stat { atomic_long_t count[3U]; } ; 343 struct kioctx_table ; 344 struct linux_binfmt ; 344 struct mmu_notifier_mm ; 344 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned 
long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long shared_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; } ; 15 typedef __u64 Elf64_Addr; 16 typedef __u16 Elf64_Half; 20 typedef __u32 Elf64_Word; 21 typedef __u64 Elf64_Xword; 190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ; 198 typedef struct elf64_sym Elf64_Sym; 48 struct kernel_param ; 53 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ; 58 struct kparam_string ; 58 struct kparam_array ; 58 union __anonunion_ldv_15774_158 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ; 58 struct kernel_param { const char *name; const struct kernel_param_ops *ops; u16 perm; s16 level; union __anonunion_ldv_15774_158 ldv_15774; } ; 70 struct kparam_string { unsigned int maxlen; char *string; } ; 76 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; 
void *elem; } ; 466 struct mod_arch_specific { } ; 36 struct module_param_attrs ; 36 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ; 46 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ; 72 struct exception_table_entry ; 205 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ; 212 struct module_ref { unsigned long incs; unsigned long decs; } ; 226 struct module_sect_attrs ; 226 struct module_notes_attrs ; 226 struct tracepoint ; 226 struct ftrace_event_call ; 226 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); void *module_init; void *module_core; unsigned int init_size; unsigned int core_size; unsigned int init_text_size; unsigned int core_text_size; unsigned int init_ro_size; unsigned int 
core_ro_size; struct mod_arch_specific arch; unsigned int taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; Elf64_Sym *symtab; Elf64_Sym *core_symtab; unsigned int num_symtab; unsigned int core_num_syms; char *strtab; char *core_strtab; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct ftrace_event_call **trace_events; unsigned int num_trace_events; struct list_head source_list; struct list_head target_list; void (*exit)(); struct module_ref *refptr; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ; 13 typedef unsigned long kernel_ulong_t; 14 struct pci_device_id { __u32 vendor; __u32 device; __u32 subvendor; __u32 subdevice; __u32 class; __u32 class_mask; kernel_ulong_t driver_data; } ; 186 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; } ; 219 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ; 628 struct klist_node ; 37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ; 67 struct path ; 68 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; struct user_namespace *user_ns; void *private; } ; 35 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ; 196 struct pinctrl ; 197 struct pinctrl_state ; 194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ; 42 struct dma_map_ops ; 42 struct dev_archdata { struct 
dma_map_ops *dma_ops; void *iommu; } ; 14 struct device_private ; 15 struct device_driver ; 16 struct driver_private ; 17 struct class ; 18 struct subsys_private ; 19 struct bus_type ; 20 struct iommu_ops ; 21 struct iommu_group ; 60 struct device_attribute ; 60 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ; 138 struct device_type ; 195 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ; 321 struct class_attribute ; 321 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct 
kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ; 414 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ; 482 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ; 510 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ; 642 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ; 651 struct acpi_device ; 652 struct acpi_dev_node { struct acpi_device *companion; } ; 658 struct dma_coherent_mem ; 658 struct cma ; 658 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct dev_pin_info *pins; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct acpi_dev_node acpi_node; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; bool offline_disabled; bool offline; } ; 805 struct 
wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ; 71 struct hotplug_slot ; 71 struct pci_slot { struct pci_bus *bus; struct list_head list; struct hotplug_slot *hotplug; unsigned char number; struct kobject kobj; } ; 109 typedef int pci_power_t; 136 typedef unsigned int pci_channel_state_t; 137 enum pci_channel_state { pci_channel_io_normal = 1, pci_channel_io_frozen = 2, pci_channel_io_perm_failure = 3 } ; 162 typedef unsigned short pci_dev_flags_t; 185 typedef unsigned short pci_bus_flags_t; 242 struct pcie_link_state ; 243 struct pci_vpd ; 244 struct pci_sriov ; 245 struct pci_ats ; 246 struct proc_dir_entry ; 246 struct pci_driver ; 246 union __anonunion_ldv_17799_162 { struct pci_sriov *sriov; struct pci_dev *physfn; } ; 246 struct pci_dev { struct list_head bus_list; struct pci_bus *bus; struct pci_bus *subordinate; void *sysdata; struct proc_dir_entry *procent; struct pci_slot *slot; unsigned int devfn; unsigned short vendor; unsigned short device; unsigned short subsystem_vendor; unsigned short subsystem_device; unsigned int class; u8 revision; u8 hdr_type; u8 pcie_cap; u8 msi_cap; u8 msix_cap; unsigned char pcie_mpss; u8 rom_base_reg; u8 pin; u16 pcie_flags_reg; u8 dma_alias_devfn; struct pci_driver *driver; u64 dma_mask; struct device_dma_parameters dma_parms; pci_power_t current_state; u8 pm_cap; unsigned char pme_support; unsigned char pme_interrupt; unsigned char pme_poll; unsigned char d1_support; unsigned char d2_support; unsigned char no_d1d2; unsigned char no_d3cold; unsigned char d3cold_allowed; unsigned char mmio_always_on; unsigned char wakeup_prepared; unsigned char 
runtime_d3cold; unsigned int d3_delay; unsigned int d3cold_delay; struct pcie_link_state *link_state; pci_channel_state_t error_state; struct device dev; int cfg_size; unsigned int irq; struct resource resource[17U]; bool match_driver; unsigned char transparent; unsigned char multifunction; unsigned char is_added; unsigned char is_busmaster; unsigned char no_msi; unsigned char block_cfg_access; unsigned char broken_parity_status; unsigned char irq_reroute_variant; unsigned char msi_enabled; unsigned char msix_enabled; unsigned char ari_enabled; unsigned char is_managed; unsigned char needs_freset; unsigned char state_saved; unsigned char is_physfn; unsigned char is_virtfn; unsigned char reset_fn; unsigned char is_hotplug_bridge; unsigned char __aer_firmware_first_valid; unsigned char __aer_firmware_first; unsigned char broken_intx_masking; unsigned char io_window_1k; pci_dev_flags_t dev_flags; atomic_t enable_cnt; u32 saved_config_space[16U]; struct hlist_head saved_cap_space; struct bin_attribute *rom_attr; int rom_attr_enabled; struct bin_attribute *res_attr[17U]; struct bin_attribute *res_attr_wc[17U]; struct list_head msi_list; const struct attribute_group **msi_irq_groups; struct pci_vpd *vpd; union __anonunion_ldv_17799_162 ldv_17799; struct pci_ats *ats; phys_addr_t rom; size_t romlen; char *driver_override; } ; 436 struct pci_ops ; 436 struct msi_chip ; 436 struct pci_bus { struct list_head node; struct pci_bus *parent; struct list_head children; struct list_head devices; struct pci_dev *self; struct list_head slots; struct resource *resource[4U]; struct list_head resources; struct resource busn_res; struct pci_ops *ops; struct msi_chip *msi; void *sysdata; struct proc_dir_entry *procdir; unsigned char number; unsigned char primary; unsigned char max_bus_speed; unsigned char cur_bus_speed; char name[48U]; unsigned short bridge_ctl; pci_bus_flags_t bus_flags; struct device *bridge; struct device dev; struct bin_attribute *legacy_io; struct bin_attribute 
*legacy_mem; unsigned char is_added; } ; 553 struct pci_ops { int (*read)(struct pci_bus *, unsigned int, int, int, u32 *); int (*write)(struct pci_bus *, unsigned int, int, int, u32 ); } ; 574 struct pci_dynids { spinlock_t lock; struct list_head list; } ; 588 typedef unsigned int pci_ers_result_t; 598 struct pci_error_handlers { pci_ers_result_t (*error_detected)(struct pci_dev *, enum pci_channel_state ); pci_ers_result_t (*mmio_enabled)(struct pci_dev *); pci_ers_result_t (*link_reset)(struct pci_dev *); pci_ers_result_t (*slot_reset)(struct pci_dev *); void (*reset_notify)(struct pci_dev *, bool ); void (*resume)(struct pci_dev *); } ; 631 struct pci_driver { struct list_head node; const char *name; const struct pci_device_id *id_table; int (*probe)(struct pci_dev *, const struct pci_device_id *); void (*remove)(struct pci_dev *); int (*suspend)(struct pci_dev *, pm_message_t ); int (*suspend_late)(struct pci_dev *, pm_message_t ); int (*resume_early)(struct pci_dev *); int (*resume)(struct pci_dev *); void (*shutdown)(struct pci_dev *); int (*sriov_configure)(struct pci_dev *, int); const struct pci_error_handlers *err_handler; struct device_driver driver; struct pci_dynids dynids; } ; 1155 struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; } ; 93 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; nodemask_t nodes_to_scan; int nid; } ; 26 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ; 71 struct file_ra_state ; 72 struct user_struct ; 73 struct writeback_control ; 188 struct vm_fault { unsigned int flags; unsigned long pgoff; void *virtual_address; struct page *page; unsigned long max_pgoff; pte_t *pte; } ; 221 
struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*fault)(struct vm_area_struct *, struct vm_fault *); void (*map_pages)(struct vm_area_struct *, struct vm_fault *); int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); int (*migrate)(struct vm_area_struct *, const nodemask_t *, const nodemask_t *, unsigned long); int (*remap_pages)(struct vm_area_struct *, unsigned long, unsigned long, unsigned long); } ; 34 struct dma_attrs { unsigned long flags[1U]; } ; 70 enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ; 77 struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; } ; 351 struct dma_map_ops { void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , struct dma_attrs *); void (*free)(struct device *, size_t , void *, dma_addr_t , struct dma_attrs *); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , struct dma_attrs *); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , struct dma_attrs *); dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , struct dma_attrs *); void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs *); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *); void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_single_for_device)(struct device *, dma_addr_t , 
size_t , enum dma_data_direction ); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction ); int (*mapping_error)(struct device *, dma_addr_t ); int (*dma_supported)(struct device *, u64 ); int (*set_dma_mask)(struct device *, u64 ); int is_phys; } ; 22 struct kernel_cap_struct { __u32 cap[2U]; } ; 25 typedef struct kernel_cap_struct kernel_cap_t; 84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ; 4 typedef unsigned long cputime_t; 25 struct sem_undo_list ; 25 struct sysv_sem { struct sem_undo_list *undo_list; } ; 26 struct sysv_shm { struct list_head shm_clist; } ; 24 struct __anonstruct_sigset_t_164 { unsigned long sig[1U]; } ; 24 typedef struct __anonstruct_sigset_t_164 sigset_t; 25 struct siginfo ; 17 typedef void __signalfn_t(int); 18 typedef __signalfn_t *__sighandler_t; 20 typedef void __restorefn_t(); 21 typedef __restorefn_t *__sigrestore_t; 34 union sigval { int sival_int; void *sival_ptr; } ; 10 typedef union sigval sigval_t; 11 struct __anonstruct__kill_166 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ; 11 struct __anonstruct__timer_167 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ; 11 struct __anonstruct__rt_168 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ; 11 struct __anonstruct__sigchld_169 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ; 11 struct __anonstruct__sigfault_170 { void *_addr; short _addr_lsb; } ; 11 struct __anonstruct__sigpoll_171 { long _band; int _fd; } ; 11 struct __anonstruct__sigsys_172 { void *_call_addr; int _syscall; unsigned int _arch; } ; 11 union __anonunion__sifields_165 { int _pad[28U]; struct __anonstruct__kill_166 _kill; struct __anonstruct__timer_167 _timer; struct __anonstruct__rt_168 _rt; struct 
__anonstruct__sigchld_169 _sigchld; struct __anonstruct__sigfault_170 _sigfault; struct __anonstruct__sigpoll_171 _sigpoll; struct __anonstruct__sigsys_172 _sigsys; } ; 11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_165 _sifields; } ; 109 typedef struct siginfo siginfo_t; 21 struct sigpending { struct list_head list; sigset_t signal; } ; 246 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; } ; 260 struct k_sigaction { struct sigaction sa; } ; 446 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ; 453 struct pid_namespace ; 453 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ; 56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ; 68 struct pid_link { struct hlist_node node; struct pid *pid; } ; 174 struct percpu_counter { raw_spinlock_t lock; s64 count; struct list_head list; s32 *counters; } ; 53 struct seccomp_filter ; 54 struct seccomp { int mode; struct seccomp_filter *filter; } ; 20 struct rt_mutex { raw_spinlock_t wait_lock; struct rb_root waiters; struct rb_node *waiters_leftmost; struct task_struct *owner; int save_state; const char *name; const char *file; int line; void *magic; } ; 40 struct rt_mutex_waiter ; 41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ; 11 struct timerqueue_node { struct rb_node node; ktime_t expires; } ; 12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ; 50 struct hrtimer_clock_base ; 51 struct hrtimer_cpu_base ; 60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; 65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; unsigned long state; int start_pid; void *start_site; char start_comm[16U]; } ; 132 
struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t resolution; ktime_t (*get_time)(); ktime_t softirq_time; ktime_t offset; } ; 163 struct hrtimer_cpu_base { raw_spinlock_t lock; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set; ktime_t expires_next; int hres_active; int hang_detected; unsigned long nr_events; unsigned long nr_retries; unsigned long nr_hangs; ktime_t max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ; 451 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ; 45 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ; 39 struct assoc_array_ptr ; 39 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ; 31 typedef int32_t key_serial_t; 34 typedef uint32_t key_perm_t; 35 struct key ; 36 struct signal_struct ; 37 struct key_type ; 41 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ; 123 union __anonunion_ldv_24335_175 { struct list_head graveyard_link; struct rb_node serial_node; } ; 123 struct key_user ; 123 union __anonunion_ldv_24343_176 { time_t expiry; time_t revoked_at; } ; 123 struct __anonstruct_ldv_24356_178 { struct key_type *type; char *description; } ; 123 union __anonunion_ldv_24357_177 { struct keyring_index_key index_key; struct __anonstruct_ldv_24356_178 ldv_24356; } ; 123 union __anonunion_type_data_179 { struct list_head link; unsigned long x[2U]; void *p[2U]; int reject_error; } ; 123 union __anonunion_payload_181 { unsigned long value; void *rcudata; void *data; void *data2[2U]; } ; 123 union __anonunion_ldv_24372_180 { union __anonunion_payload_181 payload; struct assoc_array keys; } ; 123 struct key { atomic_t usage; key_serial_t serial; union __anonunion_ldv_24335_175 ldv_24335; struct rw_semaphore 
sem; struct key_user *user; void *security; union __anonunion_ldv_24343_176 ldv_24343; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion_ldv_24357_177 ldv_24357; union __anonunion_type_data_179 type_data; union __anonunion_ldv_24372_180 ldv_24372; } ; 358 struct audit_context ; 27 struct group_info { atomic_t usage; int ngroups; int nblocks; kgid_t small_block[32U]; kgid_t *blocks[0U]; } ; 78 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ; 126 struct futex_pi_state ; 127 struct robust_list_head ; 128 struct bio_list ; 129 struct fs_struct ; 130 struct perf_event_context ; 131 struct blk_plug ; 180 struct cfs_rq ; 181 struct task_group ; 426 struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ; 465 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ; 473 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ; 480 struct cputime { cputime_t utime; cputime_t stime; } ; 492 struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; } ; 512 struct thread_group_cputimer { struct task_cputime cputime; int running; raw_spinlock_t lock; } ; 554 struct autogroup ; 555 struct 
tty_struct ; 555 struct taskstats ; 555 struct tty_audit_buf ; 555 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; unsigned int audit_tty_log_passwd; struct tty_audit_buf *tty_audit_buf; struct rw_semaphore group_rwsem; oom_flags_t oom_flags; short oom_score_adj; short oom_score_adj_min; struct mutex cred_guard_mutex; } ; 735 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t 
locked_vm; } ; 778 struct backing_dev_info ; 779 struct reclaim_state ; 780 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ; 794 struct task_delay_info { spinlock_t lock; unsigned int flags; u64 blkio_start; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; u64 freepages_start; u64 freepages_delay; u32 freepages_count; } ; 1026 struct io_context ; 1060 struct pipe_inode_info ; 1062 struct load_weight { unsigned long weight; u32 inv_weight; } ; 1069 struct sched_avg { u32 runnable_avg_sum; u32 runnable_avg_period; u64 last_runnable_update; s64 decay_count; unsigned long load_avg_contrib; } ; 1081 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ; 1116 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ; 1148 struct rt_rq ; 1148 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ; 1164 struct sched_dl_entity { struct rb_node rb_node; u64 
dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_new; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ; 1222 struct memcg_oom_info { struct mem_cgroup *memcg; gfp_t gfp_mask; int order; unsigned char may_oom; } ; 1639 struct sched_class ; 1639 struct files_struct ; 1639 struct css_set ; 1639 struct compat_robust_list_head ; 1639 struct numa_group ; 1639 struct task_struct { volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; struct task_struct *last_wakee; unsigned long wakee_flips; unsigned long wakee_flip_decay_ts; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; unsigned char brk_randomized; u32 vmacache_seqnum; struct vm_area_struct *vmacache[4U]; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned int jobctl; unsigned int personality; unsigned char in_execve; unsigned char in_iowait; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; unsigned long atomic_flags; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; 
cputime_t utime; cputime_t stime; cputime_t utimescaled; cputime_t stimescaled; cputime_t gtime; struct cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; u64 start_time; u64 real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *real_cred; const struct cred *cred; char comm[16U]; int link_count; int total_link_count; struct sysv_sem sysvsem; struct sysv_shm sysvshm; unsigned long last_switch_count; struct thread_struct thread; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; int (*notifier)(void *); void *notifier_data; sigset_t *notifier_mask; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; 
struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t acct_timexpd; nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults_memory; unsigned long total_numa_faults; unsigned long *numa_faults_buffer_memory; unsigned long *numa_faults_cpu; unsigned long *numa_faults_buffer_cpu; unsigned long numa_faults_locality[2U]; unsigned long numa_pages_migrated; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; unsigned long timer_slack_ns; unsigned long default_timer_slack_ns; unsigned long trace; unsigned long trace_recursion; unsigned int memcg_kmem_skip_account; struct memcg_oom_info memcg_oom; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; } ; 30 typedef u32 phandle; 32 struct property { char *name; int length; void *value; struct property *next; unsigned long _flags; unsigned int unique_id; struct bin_attribute attr; } ; 42 struct device_node { const char *name; const char *type; phandle phandle; const char 
*full_name; struct property *properties; struct property *deadprops; struct device_node *parent; struct device_node *child; struct device_node *sibling; struct device_node *next; struct device_node *allnext; struct kobject kobj; unsigned long _flags; void *data; } ; 825 struct i2c_msg { __u16 addr; __u16 flags; __u16 len; __u8 *buf; } ; 82 union i2c_smbus_data { __u8 byte; __u16 word; __u8 block[34U]; } ; 39 struct i2c_algorithm ; 335 struct i2c_algorithm { int (*master_xfer)(struct i2c_adapter *, struct i2c_msg *, int); int (*smbus_xfer)(struct i2c_adapter *, u16 , unsigned short, char, u8 , int, union i2c_smbus_data *); u32 (*functionality)(struct i2c_adapter *); } ; 381 struct i2c_bus_recovery_info { int (*recover_bus)(struct i2c_adapter *); int (*get_scl)(struct i2c_adapter *); void (*set_scl)(struct i2c_adapter *, int); int (*get_sda)(struct i2c_adapter *); void (*prepare_recovery)(struct i2c_bus_recovery_info *); void (*unprepare_recovery)(struct i2c_bus_recovery_info *); int scl_gpio; int sda_gpio; } ; 420 struct i2c_adapter { struct module *owner; unsigned int class; const struct i2c_algorithm *algo; void *algo_data; struct rt_mutex bus_lock; int timeout; int retries; struct device dev; int nr; char name[48U]; struct completion dev_released; struct mutex userspace_clients_lock; struct list_head userspace_clients; struct i2c_bus_recovery_info *bus_recovery_info; } ; 584 enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 } ; 16 typedef enum irqreturn irqreturn_t; 62 struct exception_table_entry { int insn; int fixup; } ; 450 struct tasklet_struct { struct tasklet_struct *next; unsigned long state; atomic_t count; void (*func)(unsigned long); unsigned long data; } ; 663 struct v4l2_edid { __u32 pad; __u32 start_block; __u32 blocks; __u32 reserved[5U]; __u8 *edid; } ; 569 enum v4l2_buf_type { V4L2_BUF_TYPE_VIDEO_CAPTURE = 1, V4L2_BUF_TYPE_VIDEO_OUTPUT = 2, V4L2_BUF_TYPE_VIDEO_OVERLAY = 3, V4L2_BUF_TYPE_VBI_CAPTURE = 4, V4L2_BUF_TYPE_VBI_OUTPUT 
= 5, V4L2_BUF_TYPE_SLICED_VBI_CAPTURE = 6, V4L2_BUF_TYPE_SLICED_VBI_OUTPUT = 7, V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY = 8, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE = 9, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE = 10, V4L2_BUF_TYPE_SDR_CAPTURE = 11, V4L2_BUF_TYPE_PRIVATE = 128 } ; 592 enum v4l2_memory { V4L2_MEMORY_MMAP = 1, V4L2_MEMORY_USERPTR = 2, V4L2_MEMORY_OVERLAY = 3, V4L2_MEMORY_DMABUF = 4 } ; 610 enum v4l2_priority { V4L2_PRIORITY_UNSET = 0, V4L2_PRIORITY_BACKGROUND = 1, V4L2_PRIORITY_INTERACTIVE = 2, V4L2_PRIORITY_RECORD = 3, V4L2_PRIORITY_DEFAULT = 2 } ; 618 struct v4l2_rect { __s32 left; __s32 top; __u32 width; __u32 height; } ; 220 struct v4l2_fract { __u32 numerator; __u32 denominator; } ; 246 struct v4l2_pix_format { __u32 width; __u32 height; __u32 pixelformat; __u32 field; __u32 bytesperline; __u32 sizeimage; __u32 colorspace; __u32 priv; __u32 flags; } ; 483 struct v4l2_frmsize_discrete { __u32 width; __u32 height; } ; 496 struct v4l2_frmsize_stepwise { __u32 min_width; __u32 max_width; __u32 step_width; __u32 min_height; __u32 max_height; __u32 step_height; } ; 505 union __anonunion_ldv_28765_185 { struct v4l2_frmsize_discrete discrete; struct v4l2_frmsize_stepwise stepwise; } ; 505 struct v4l2_frmsizeenum { __u32 index; __u32 pixel_format; __u32 type; union __anonunion_ldv_28765_185 ldv_28765; __u32 reserved[2U]; } ; 524 struct v4l2_frmival_stepwise { struct v4l2_fract min; struct v4l2_fract max; struct v4l2_fract step; } ; 533 union __anonunion_ldv_28784_186 { struct v4l2_fract discrete; struct v4l2_frmival_stepwise stepwise; } ; 533 struct v4l2_frmivalenum { __u32 index; __u32 pixel_format; __u32 width; __u32 height; __u32 type; union __anonunion_ldv_28784_186 ldv_28784; __u32 reserved[2U]; } ; 548 struct v4l2_timecode { __u32 type; __u32 flags; __u8 frames; __u8 seconds; __u8 minutes; __u8 hours; __u8 userbits[4U]; } ; 616 union __anonunion_m_187 { __u32 mem_offset; unsigned long userptr; __s32 fd; } ; 616 struct v4l2_plane { __u32 bytesused; __u32 length; 
union __anonunion_m_187 m; __u32 data_offset; __u32 reserved[11U]; } ; 648 union __anonunion_m_188 { __u32 offset; unsigned long userptr; struct v4l2_plane *planes; __s32 fd; } ; 648 struct v4l2_buffer { __u32 index; __u32 type; __u32 bytesused; __u32 flags; __u32 field; struct timeval timestamp; struct v4l2_timecode timecode; __u32 sequence; __u32 memory; union __anonunion_m_188 m; __u32 length; __u32 reserved2; __u32 reserved; } ; 781 struct v4l2_clip { struct v4l2_rect c; struct v4l2_clip *next; } ; 803 struct v4l2_window { struct v4l2_rect w; __u32 field; __u32 chromakey; struct v4l2_clip *clips; __u32 clipcount; void *bitmap; __u8 global_alpha; } ; 813 struct v4l2_captureparm { __u32 capability; __u32 capturemode; struct v4l2_fract timeperframe; __u32 extendedmode; __u32 readbuffers; __u32 reserved[4U]; } ; 825 struct v4l2_outputparm { __u32 capability; __u32 outputmode; struct v4l2_fract timeperframe; __u32 extendedmode; __u32 writebuffers; __u32 reserved[4U]; } ; 838 struct v4l2_cropcap { __u32 type; struct v4l2_rect bounds; struct v4l2_rect defrect; struct v4l2_fract pixelaspect; } ; 848 struct v4l2_crop { __u32 type; struct v4l2_rect c; } ; 880 typedef __u64 v4l2_std_id; 1016 struct v4l2_bt_timings { __u32 width; __u32 height; __u32 interlaced; __u32 polarities; __u64 pixelclock; __u32 hfrontporch; __u32 hsync; __u32 hbackporch; __u32 vfrontporch; __u32 vsync; __u32 vbackporch; __u32 il_vfrontporch; __u32 il_vsync; __u32 il_vbackporch; __u32 standards; __u32 flags; __u32 reserved[14U]; } ; 1072 union __anonunion_ldv_28926_190 { struct v4l2_bt_timings bt; __u32 reserved[32U]; } ; 1072 struct v4l2_dv_timings { __u32 type; union __anonunion_ldv_28926_190 ldv_28926; } ; 1134 struct v4l2_enum_dv_timings { __u32 index; __u32 pad; __u32 reserved[2U]; struct v4l2_dv_timings timings; } ; 1152 struct v4l2_bt_timings_cap { __u32 min_width; __u32 max_width; __u32 min_height; __u32 max_height; __u64 min_pixelclock; __u64 max_pixelclock; __u32 standards; __u32 
capabilities; __u32 reserved[16U]; } ; 1175 union __anonunion_ldv_28949_191 { struct v4l2_bt_timings_cap bt; __u32 raw_data[32U]; } ; 1175 struct v4l2_dv_timings_cap { __u32 type; __u32 pad; __u32 reserved[2U]; union __anonunion_ldv_28949_191 ldv_28949; } ; 1263 struct v4l2_control { __u32 id; __s32 value; } ; 1280 union __anonunion_ldv_28984_192 { __s32 value; __s64 value64; char *string; __u8 *p_u8; __u16 *p_u16; __u32 *p_u32; void *ptr; } ; 1280 struct v4l2_ext_control { __u32 id; __u32 size; __u32 reserved2[1U]; union __anonunion_ldv_28984_192 ldv_28984; } ; 1295 struct v4l2_ext_controls { __u32 ctrl_class; __u32 count; __u32 error_idx; __u32 reserved[2U]; struct v4l2_ext_control *controls; } ; 1303 enum v4l2_ctrl_type { V4L2_CTRL_TYPE_INTEGER = 1, V4L2_CTRL_TYPE_BOOLEAN = 2, V4L2_CTRL_TYPE_MENU = 3, V4L2_CTRL_TYPE_BUTTON = 4, V4L2_CTRL_TYPE_INTEGER64 = 5, V4L2_CTRL_TYPE_CTRL_CLASS = 6, V4L2_CTRL_TYPE_STRING = 7, V4L2_CTRL_TYPE_BITMASK = 8, V4L2_CTRL_TYPE_INTEGER_MENU = 9, V4L2_CTRL_COMPOUND_TYPES = 256, V4L2_CTRL_TYPE_U8 = 256, V4L2_CTRL_TYPE_U16 = 257, V4L2_CTRL_TYPE_U32 = 258 } ; 1319 struct v4l2_queryctrl { __u32 id; __u32 type; __u8 name[32U]; __s32 minimum; __s32 maximum; __s32 step; __s32 default_value; __u32 flags; __u32 reserved[2U]; } ; 1356 union __anonunion_ldv_29035_193 { __u8 name[32U]; __s64 value; } ; 1356 struct v4l2_querymenu { __u32 id; __u32 index; union __anonunion_ldv_29035_193 ldv_29035; __u32 reserved; } ; 1367 struct v4l2_tuner { __u32 index; __u8 name[32U]; __u32 type; __u32 capability; __u32 rangelow; __u32 rangehigh; __u32 rxsubchans; __u32 audmode; __s32 signal; __s32 afc; __u32 reserved[4U]; } ; 1405 struct v4l2_modulator { __u32 index; __u8 name[32U]; __u32 capability; __u32 rangelow; __u32 rangehigh; __u32 txsubchans; __u32 reserved[4U]; } ; 1415 struct v4l2_frequency { __u32 tuner; __u32 type; __u32 frequency; __u32 reserved[8U]; } ; 1454 struct v4l2_frequency_band { __u32 tuner; __u32 type; __u32 index; __u32 capability; __u32 
rangelow; __u32 rangehigh; __u32 modulation; __u32 reserved[9U]; } ; 1622 struct v4l2_vbi_format { __u32 sampling_rate; __u32 offset; __u32 samples_per_line; __u32 sample_format; __s32 start[2U]; __u32 count[2U]; __u32 flags; __u32 reserved[2U]; } ; 1642 struct v4l2_sliced_vbi_format { __u16 service_set; __u16 service_lines[2U][24U]; __u32 io_size; __u32 reserved[2U]; } ; 1670 struct v4l2_sliced_vbi_cap { __u16 service_set; __u16 service_lines[2U][24U]; __u32 type; __u32 reserved[3U]; } ; 1694 struct v4l2_sliced_vbi_data { __u32 id; __u32 field; __u32 line; __u32 reserved; __u8 data[48U]; } ; 1749 struct v4l2_plane_pix_format { __u32 sizeimage; __u16 bytesperline; __u16 reserved[7U]; } ; 1766 struct v4l2_pix_format_mplane { __u32 width; __u32 height; __u32 pixelformat; __u32 field; __u32 colorspace; struct v4l2_plane_pix_format plane_fmt[8U]; __u8 num_planes; __u8 flags; __u8 reserved[10U]; } ; 1790 struct v4l2_sdr_format { __u32 pixelformat; __u32 buffersize; __u8 reserved[24U]; } ; 1801 union __anonunion_fmt_201 { struct v4l2_pix_format pix; struct v4l2_pix_format_mplane pix_mp; struct v4l2_window win; struct v4l2_vbi_format vbi; struct v4l2_sliced_vbi_format sliced; struct v4l2_sdr_format sdr; __u8 raw_data[200U]; } ; 1801 struct v4l2_format { __u32 type; union __anonunion_fmt_201 fmt; } ; 1824 union __anonunion_parm_202 { struct v4l2_captureparm capture; struct v4l2_outputparm output; __u8 raw_data[200U]; } ; 1824 struct v4l2_streamparm { __u32 type; union __anonunion_parm_202 parm; } ; 1915 struct v4l2_event_subscription { __u32 type; __u32 id; __u32 flags; __u32 reserved[5U]; } ; 1925 union __anonunion_ldv_29250_205 { __u32 addr; char name[32U]; } ; 1925 struct v4l2_dbg_match { __u32 type; union __anonunion_ldv_29250_205 ldv_29250; } ; 1951 struct v4l2_dbg_register { struct v4l2_dbg_match match; __u32 size; __u64 reg; __u64 val; } ; 91 struct hlist_bl_node ; 91 struct hlist_bl_head { struct hlist_bl_node *first; } ; 36 struct hlist_bl_node { struct 
hlist_bl_node *next; struct hlist_bl_node **pprev; } ; 114 struct __anonstruct_ldv_29391_207 { spinlock_t lock; unsigned int count; } ; 114 union __anonunion_ldv_29392_206 { struct __anonstruct_ldv_29391_207 ldv_29391; } ; 114 struct lockref { union __anonunion_ldv_29392_206 ldv_29392; } ; 49 struct nameidata ; 50 struct vfsmount ; 51 struct __anonstruct_ldv_29415_209 { u32 hash; u32 len; } ; 51 union __anonunion_ldv_29417_208 { struct __anonstruct_ldv_29415_209 ldv_29415; u64 hash_len; } ; 51 struct qstr { union __anonunion_ldv_29417_208 ldv_29417; const unsigned char *name; } ; 90 struct dentry_operations ; 90 union __anonunion_d_u_210 { struct list_head d_child; struct callback_head d_rcu; } ; 90 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; struct list_head d_lru; union __anonunion_d_u_210 d_u; struct list_head d_subdirs; struct hlist_node d_alias; } ; 142 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(struct dentry *, bool ); } ; 478 struct path { struct vfsmount *mnt; struct dentry *dentry; } ; 27 struct list_lru_node { spinlock_t lock; struct list_head list; long nr_items; } ; 30 struct list_lru { struct list_lru_node *node; nodemask_t active_nodes; } ; 58 struct 
__anonstruct_ldv_29780_212 { struct radix_tree_node *parent; void *private_data; } ; 58 union __anonunion_ldv_29782_211 { struct __anonstruct_ldv_29780_212 ldv_29780; struct callback_head callback_head; } ; 58 struct radix_tree_node { unsigned int path; unsigned int count; union __anonunion_ldv_29782_211 ldv_29782; struct list_head private_list; void *slots[64U]; unsigned long tags[3U][1U]; } ; 105 struct radix_tree_root { unsigned int height; gfp_t gfp_mask; struct radix_tree_node *rnode; } ; 45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ; 38 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ; 30 struct block_device ; 59 struct export_operations ; 61 struct iovec ; 62 struct kiocb ; 63 struct poll_table_struct ; 64 struct kstatfs ; 65 struct swap_info_struct ; 66 struct iov_iter ; 69 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ; 253 struct fs_disk_quota { __s8 d_version; __s8 d_flags; __u16 d_fieldmask; __u32 d_id; __u64 d_blk_hardlimit; __u64 d_blk_softlimit; __u64 d_ino_hardlimit; __u64 d_ino_softlimit; __u64 d_bcount; __u64 d_icount; __s32 d_itimer; __s32 d_btimer; __u16 d_iwarns; __u16 d_bwarns; __s32 d_padding2; __u64 d_rtb_hardlimit; __u64 d_rtb_softlimit; __u64 d_rtbcount; __s32 d_rtbtimer; __u16 d_rtbwarns; __s16 d_padding3; char d_padding4[8U]; } ; 76 struct fs_qfilestat { __u64 qfs_ino; __u64 qfs_nblks; __u32 qfs_nextents; } ; 151 typedef struct fs_qfilestat fs_qfilestat_t; 152 struct fs_quota_stat { __s8 qs_version; __u16 qs_flags; __s8 qs_pad; fs_qfilestat_t qs_uquota; fs_qfilestat_t qs_gquota; __u32 qs_incoredqs; __s32 qs_btimelimit; __s32 qs_itimelimit; __s32 qs_rtbtimelimit; __u16 qs_bwarnlimit; __u16 qs_iwarnlimit; } ; 166 struct fs_qfilestatv { __u64 qfs_ino; 
__u64 qfs_nblks; __u32 qfs_nextents; __u32 qfs_pad; } ; 196 struct fs_quota_statv { __s8 qs_version; __u8 qs_pad1; __u16 qs_flags; __u32 qs_incoredqs; struct fs_qfilestatv qs_uquota; struct fs_qfilestatv qs_gquota; struct fs_qfilestatv qs_pquota; __s32 qs_btimelimit; __s32 qs_itimelimit; __s32 qs_rtbtimelimit; __u16 qs_bwarnlimit; __u16 qs_iwarnlimit; __u64 qs_pad2[8U]; } ; 212 struct dquot ; 19 typedef __kernel_uid32_t projid_t; 23 struct __anonstruct_kprojid_t_213 { projid_t val; } ; 23 typedef struct __anonstruct_kprojid_t_213 kprojid_t; 119 struct if_dqinfo { __u64 dqi_bgrace; __u64 dqi_igrace; __u32 dqi_flags; __u32 dqi_valid; } ; 152 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ; 60 typedef long long qsize_t; 61 union __anonunion_ldv_30311_214 { kuid_t uid; kgid_t gid; kprojid_t projid; } ; 61 struct kqid { union __anonunion_ldv_30311_214 ldv_30311; enum quota_type type; } ; 178 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time_t dqb_btime; time_t dqb_itime; } ; 200 struct quota_format_type ; 201 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_maxblimit; qsize_t dqi_maxilimit; void *dqi_priv; } ; 264 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ; 291 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot 
*); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); } ; 302 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); } ; 316 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, struct path *); int (*quota_on_meta)(struct super_block *, int, int); int (*quota_off)(struct super_block *, int); int (*quota_sync)(struct super_block *, int); int (*get_info)(struct super_block *, int, struct if_dqinfo *); int (*set_info)(struct super_block *, int, struct if_dqinfo *); int (*get_dqblk)(struct super_block *, struct kqid , struct fs_disk_quota *); int (*set_dqblk)(struct super_block *, struct kqid , struct fs_disk_quota *); int (*get_xstate)(struct super_block *, struct fs_quota_stat *); int (*set_xstate)(struct super_block *, unsigned int, int); int (*get_xstatev)(struct super_block *, struct fs_quota_statv *); int (*rm_xquota)(struct super_block *, unsigned int); } ; 334 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ; 380 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct mutex dqonoff_mutex; struct inode *files[2U]; struct mem_dqinfo info[2U]; const struct quota_format_ops *ops[2U]; } ; 323 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, 
unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *, loff_t ); int (*get_xip_mem)(struct address_space *, unsigned long, int, void **, unsigned long *); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ; 382 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; atomic_t i_mmap_writable; struct rb_root i_mmap; struct list_head i_mmap_nonlinear; struct mutex i_mmap_mutex; unsigned long nrpages; unsigned long nrshadows; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; struct backing_dev_info *backing_dev_info; spinlock_t private_lock; struct list_head private_list; void *private_data; } ; 405 struct request_queue ; 406 struct hd_struct ; 406 struct gendisk ; 406 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; struct list_head bd_inodes; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct list_head 
bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; } ; 503 struct posix_acl ; 504 struct inode_operations ; 504 union __anonunion_ldv_30736_217 { const unsigned int i_nlink; unsigned int __i_nlink; } ; 504 union __anonunion_ldv_30756_218 { struct hlist_head i_dentry; struct callback_head i_rcu; } ; 504 struct file_lock ; 504 struct cdev ; 504 union __anonunion_ldv_30773_219 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; } ; 504 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion_ldv_30736_217 ldv_30736; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct mutex i_mutex; unsigned long dirtied_when; struct hlist_node i_hash; struct list_head i_wb_list; struct list_head i_lru; struct list_head i_sb_list; union __anonunion_ldv_30756_218 ldv_30756; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; const struct file_operations *i_fop; struct file_lock *i_flock; struct address_space i_data; struct dquot *i_dquot[2U]; struct list_head i_devices; union __anonunion_ldv_30773_219 ldv_30773; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; void *i_private; } ; 740 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ; 748 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ; 771 union __anonunion_f_u_220 { struct llist_node fu_llist; 
struct callback_head fu_rcuhead; } ; 771 struct file { union __anonunion_f_u_220 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; } ; 861 typedef void *fl_owner_t; 862 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ; 867 struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, struct file_lock *, int); void (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock **, int); } ; 885 struct nlm_lockowner ; 886 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ; 14 struct nfs4_lock_state ; 15 struct nfs4_lock_info { struct nfs4_lock_state *owner; } ; 19 struct fasync_struct ; 19 struct __anonstruct_afs_222 { struct list_head link; int state; } ; 19 union __anonunion_fl_u_221 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_222 afs; } ; 19 struct file_lock { struct file_lock *fl_next; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_221 
fl_u; } ; 988 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ; 1182 struct sb_writers { struct percpu_counter counter[3U]; wait_queue_head_t wait; int frozen; wait_queue_head_t wait_unfrozen; struct lockdep_map lock_map[3U]; } ; 1198 struct super_operations ; 1198 struct xattr_handler ; 1198 struct mtd_info ; 1198 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; struct list_head s_inodes; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; } ; 1429 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ; 1467 struct dir_context { int (*actor)(void *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ; 1472 struct file_operations { struct module *owner; loff_t 
(*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*aio_read)(struct kiocb *, const struct iovec *, unsigned long, loff_t ); ssize_t (*aio_write)(struct kiocb *, const struct iovec *, unsigned long, loff_t ); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iterate)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*aio_fsync)(struct kiocb *, int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); int (*show_fdinfo)(struct seq_file *, struct file *); } ; 1514 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); void * (*follow_link)(struct dentry *, struct nameidata *); int (*permission)(struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int 
(*readlink)(struct dentry *, char *, int); void (*put_link)(struct dentry *, struct nameidata *, void *); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *); int (*rename2)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *); int (*setxattr)(struct dentry *, const char *, const void *, size_t , int); ssize_t (*getxattr)(struct dentry *, const char *, void *, size_t ); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*removexattr)(struct dentry *, const char *); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ; 1561 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_fs)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void 
(*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, int); long int (*free_cached_objects)(struct super_block *, long, int); } ; 1775 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ; 32 struct poll_table_struct { void (*_qproc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); unsigned long _key; } ; 163 struct cdev { struct kobject kobj; struct module *owner; const struct file_operations *ops; struct list_head list; dev_t dev; unsigned int count; } ; 34 struct media_file_operations { struct module *owner; ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*open)(struct file *); int (*release)(struct file *); } ; 53 struct media_devnode { const struct media_file_operations *fops; struct device dev; struct cdev cdev; struct device 
*parent; int minor; unsigned long flags; void (*release)(struct media_devnode *); } ; 129 struct media_pipeline { } ; 132 struct media_pad ; 132 struct media_link { struct media_pad *source; struct media_pad *sink; struct media_link *reverse; unsigned long flags; } ; 40 struct media_entity ; 40 struct media_pad { struct media_entity *entity; u16 index; unsigned long flags; } ; 46 struct media_entity_operations { int (*link_setup)(struct media_entity *, const struct media_pad *, const struct media_pad *, u32 ); int (*link_validate)(struct media_link *); } ; 53 struct media_device ; 53 struct __anonstruct_v4l_229 { u32 major; u32 minor; } ; 53 struct __anonstruct_fb_230 { u32 major; u32 minor; } ; 53 struct __anonstruct_alsa_231 { u32 card; u32 device; u32 subdevice; } ; 53 union __anonunion_info_228 { struct __anonstruct_v4l_229 v4l; struct __anonstruct_fb_230 fb; struct __anonstruct_alsa_231 alsa; int dvb; } ; 53 struct media_entity { struct list_head list; struct media_device *parent; u32 id; const char *name; u32 type; u32 revision; unsigned long flags; u32 group_id; u16 num_pads; u16 num_links; u16 num_backlinks; u16 max_links; struct media_pad *pads; struct media_link *links; const struct media_entity_operations *ops; int stream_count; int use_count; struct media_pipeline *pipe; union __anonunion_info_228 info; } ; 155 struct media_device { struct device *dev; struct media_devnode devnode; char model[32U]; char serial[40U]; char bus_info[32U]; u32 hw_revision; u32 driver_version; u32 entity_id; struct list_head entities; spinlock_t lock; struct mutex graph_mutex; int (*link_notify)(struct media_link *, u32 , unsigned int); } ; 98 enum v4l2_mbus_pixelcode { V4L2_MBUS_FMT_FIXED = 1, V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE = 4097, V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE = 4098, V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE = 4099, V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE = 4100, V4L2_MBUS_FMT_BGR565_2X8_BE = 4101, V4L2_MBUS_FMT_BGR565_2X8_LE = 4102, V4L2_MBUS_FMT_RGB565_2X8_BE = 4103, 
V4L2_MBUS_FMT_RGB565_2X8_LE = 4104, V4L2_MBUS_FMT_RGB666_1X18 = 4105, V4L2_MBUS_FMT_RGB888_1X24 = 4106, V4L2_MBUS_FMT_RGB888_2X12_BE = 4107, V4L2_MBUS_FMT_RGB888_2X12_LE = 4108, V4L2_MBUS_FMT_ARGB8888_1X32 = 4109, V4L2_MBUS_FMT_Y8_1X8 = 8193, V4L2_MBUS_FMT_UV8_1X8 = 8213, V4L2_MBUS_FMT_UYVY8_1_5X8 = 8194, V4L2_MBUS_FMT_VYUY8_1_5X8 = 8195, V4L2_MBUS_FMT_YUYV8_1_5X8 = 8196, V4L2_MBUS_FMT_YVYU8_1_5X8 = 8197, V4L2_MBUS_FMT_UYVY8_2X8 = 8198, V4L2_MBUS_FMT_VYUY8_2X8 = 8199, V4L2_MBUS_FMT_YUYV8_2X8 = 8200, V4L2_MBUS_FMT_YVYU8_2X8 = 8201, V4L2_MBUS_FMT_Y10_1X10 = 8202, V4L2_MBUS_FMT_UYVY10_2X10 = 8216, V4L2_MBUS_FMT_VYUY10_2X10 = 8217, V4L2_MBUS_FMT_YUYV10_2X10 = 8203, V4L2_MBUS_FMT_YVYU10_2X10 = 8204, V4L2_MBUS_FMT_Y12_1X12 = 8211, V4L2_MBUS_FMT_UYVY8_1X16 = 8207, V4L2_MBUS_FMT_VYUY8_1X16 = 8208, V4L2_MBUS_FMT_YUYV8_1X16 = 8209, V4L2_MBUS_FMT_YVYU8_1X16 = 8210, V4L2_MBUS_FMT_YDYUYDYV8_1X16 = 8212, V4L2_MBUS_FMT_UYVY10_1X20 = 8218, V4L2_MBUS_FMT_VYUY10_1X20 = 8219, V4L2_MBUS_FMT_YUYV10_1X20 = 8205, V4L2_MBUS_FMT_YVYU10_1X20 = 8206, V4L2_MBUS_FMT_YUV10_1X30 = 8214, V4L2_MBUS_FMT_AYUV8_1X32 = 8215, V4L2_MBUS_FMT_UYVY12_2X12 = 8220, V4L2_MBUS_FMT_VYUY12_2X12 = 8221, V4L2_MBUS_FMT_YUYV12_2X12 = 8222, V4L2_MBUS_FMT_YVYU12_2X12 = 8223, V4L2_MBUS_FMT_UYVY12_1X24 = 8224, V4L2_MBUS_FMT_VYUY12_1X24 = 8225, V4L2_MBUS_FMT_YUYV12_1X24 = 8226, V4L2_MBUS_FMT_YVYU12_1X24 = 8227, V4L2_MBUS_FMT_SBGGR8_1X8 = 12289, V4L2_MBUS_FMT_SGBRG8_1X8 = 12307, V4L2_MBUS_FMT_SGRBG8_1X8 = 12290, V4L2_MBUS_FMT_SRGGB8_1X8 = 12308, V4L2_MBUS_FMT_SBGGR10_ALAW8_1X8 = 12309, V4L2_MBUS_FMT_SGBRG10_ALAW8_1X8 = 12310, V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8 = 12311, V4L2_MBUS_FMT_SRGGB10_ALAW8_1X8 = 12312, V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8 = 12299, V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8 = 12300, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8 = 12297, V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8 = 12301, V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE = 12291, V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE = 12292, V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE = 12293, 
V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE = 12294, V4L2_MBUS_FMT_SBGGR10_1X10 = 12295, V4L2_MBUS_FMT_SGBRG10_1X10 = 12302, V4L2_MBUS_FMT_SGRBG10_1X10 = 12298, V4L2_MBUS_FMT_SRGGB10_1X10 = 12303, V4L2_MBUS_FMT_SBGGR12_1X12 = 12296, V4L2_MBUS_FMT_SGBRG12_1X12 = 12304, V4L2_MBUS_FMT_SGRBG12_1X12 = 12305, V4L2_MBUS_FMT_SRGGB12_1X12 = 12306, V4L2_MBUS_FMT_JPEG_1X8 = 16385, V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8 = 20481, V4L2_MBUS_FMT_AHSV8888_1X32 = 24577 } ; 177 struct v4l2_mbus_framefmt { __u32 width; __u32 height; __u32 code; __u32 field; __u32 colorspace; __u32 reserved[7U]; } ; 151 struct v4l2_subdev_format { __u32 which; __u32 pad; struct v4l2_mbus_framefmt format; __u32 reserved[8U]; } ; 53 struct v4l2_subdev_crop { __u32 which; __u32 pad; struct v4l2_rect rect; __u32 reserved[8U]; } ; 66 struct v4l2_subdev_mbus_code_enum { __u32 pad; __u32 index; __u32 code; __u32 reserved[9U]; } ; 79 struct v4l2_subdev_frame_size_enum { __u32 index; __u32 pad; __u32 code; __u32 min_width; __u32 max_width; __u32 min_height; __u32 max_height; __u32 reserved[9U]; } ; 96 struct v4l2_subdev_frame_interval { __u32 pad; struct v4l2_fract interval; __u32 reserved[9U]; } ; 107 struct v4l2_subdev_frame_interval_enum { __u32 index; __u32 pad; __u32 code; __u32 width; __u32 height; struct v4l2_fract interval; __u32 reserved[9U]; } ; 126 struct v4l2_subdev_selection { __u32 which; __u32 pad; __u32 target; __u32 flags; struct v4l2_rect r; __u32 reserved[8U]; } ; 150 struct v4l2_device ; 151 struct v4l2_subdev ; 152 struct v4l2_async_notifier ; 153 enum v4l2_async_match_type { V4L2_ASYNC_MATCH_CUSTOM = 0, V4L2_ASYNC_MATCH_DEVNAME = 1, V4L2_ASYNC_MATCH_I2C = 2, V4L2_ASYNC_MATCH_OF = 3 } ; 160 struct __anonstruct_of_234 { const struct device_node *node; } ; 160 struct __anonstruct_device_name_235 { const char *name; } ; 160 struct __anonstruct_i2c_236 { int adapter_id; unsigned short address; } ; 160 struct __anonstruct_custom_237 { bool (*match)(struct device *, struct v4l2_async_subdev *); void *priv; } ; 
160 union __anonunion_match_233 { struct __anonstruct_of_234 of; struct __anonstruct_device_name_235 device_name; struct __anonstruct_i2c_236 i2c; struct __anonstruct_custom_237 custom; } ; 160 struct v4l2_async_subdev { enum v4l2_async_match_type match_type; union __anonunion_match_233 match; struct list_head list; } ; 63 struct v4l2_async_notifier { unsigned int num_subdevs; struct v4l2_async_subdev **subdevs; struct v4l2_device *v4l2_dev; struct list_head waiting; struct list_head done; struct list_head list; int (*bound)(struct v4l2_async_notifier *, struct v4l2_subdev *, struct v4l2_async_subdev *); int (*complete)(struct v4l2_async_notifier *); void (*unbind)(struct v4l2_async_notifier *, struct v4l2_subdev *, struct v4l2_async_subdev *); } ; 98 struct video_device ; 99 struct v4l2_ctrl_handler ; 100 struct v4l2_prio_state { atomic_t prios[4U]; } ; 61 struct v4l2_file_operations { struct module *owner; ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*ioctl)(struct file *, unsigned int, unsigned long); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl32)(struct file *, unsigned int, unsigned long); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct file *); int (*release)(struct file *); } ; 79 struct v4l2_ioctl_ops ; 79 struct video_device { struct media_entity entity; const struct v4l2_file_operations *fops; struct device dev; struct cdev *cdev; struct v4l2_device *v4l2_dev; struct device *dev_parent; struct v4l2_ctrl_handler *ctrl_handler; struct vb2_queue *queue; struct v4l2_prio_state *prio; char name[32U]; int vfl_type; int vfl_dir; int minor; u16 num; unsigned long flags; int index; spinlock_t fh_lock; struct list_head 
fh_list; int debug; v4l2_std_id tvnorms; void (*release)(struct video_device *); const struct v4l2_ioctl_ops *ioctl_ops; unsigned long valid_ioctls[3U]; unsigned long disable_locking[3U]; struct mutex *lock; } ; 101 struct v4l2_subdev_ops ; 162 struct v4l2_priv_tun_config { int tuner; void *priv; } ; 206 struct v4l2_m2m_ctx ; 206 struct v4l2_fh { struct list_head list; struct video_device *vdev; struct v4l2_ctrl_handler *ctrl_handler; enum v4l2_priority prio; wait_queue_head_t wait; struct list_head subscribed; struct list_head available; unsigned int navailable; u32 sequence; struct v4l2_m2m_ctx *m2m_ctx; } ; 106 enum v4l2_mbus_type { V4L2_MBUS_PARALLEL = 0, V4L2_MBUS_BT656 = 1, V4L2_MBUS_CSI2 = 2 } ; 112 struct v4l2_mbus_config { enum v4l2_mbus_type type; unsigned int flags; } ; 109 struct v4l2_subdev_fh ; 110 struct tuner_setup ; 111 struct v4l2_mbus_frame_desc ; 112 struct v4l2_decode_vbi_line { u32 is_second_field; u8 *p; u32 line; u32 type; } ; 61 struct v4l2_subdev_io_pin_config { u32 flags; u8 pin; u8 function; u8 value; u8 strength; } ; 117 struct v4l2_subdev_core_ops { int (*log_status)(struct v4l2_subdev *); int (*s_io_pin_config)(struct v4l2_subdev *, size_t , struct v4l2_subdev_io_pin_config *); int (*init)(struct v4l2_subdev *, u32 ); int (*load_fw)(struct v4l2_subdev *); int (*reset)(struct v4l2_subdev *, u32 ); int (*s_gpio)(struct v4l2_subdev *, u32 ); int (*queryctrl)(struct v4l2_subdev *, struct v4l2_queryctrl *); int (*g_ctrl)(struct v4l2_subdev *, struct v4l2_control *); int (*s_ctrl)(struct v4l2_subdev *, struct v4l2_control *); int (*g_ext_ctrls)(struct v4l2_subdev *, struct v4l2_ext_controls *); int (*s_ext_ctrls)(struct v4l2_subdev *, struct v4l2_ext_controls *); int (*try_ext_ctrls)(struct v4l2_subdev *, struct v4l2_ext_controls *); int (*querymenu)(struct v4l2_subdev *, struct v4l2_querymenu *); long int (*ioctl)(struct v4l2_subdev *, unsigned int, void *); long int (*compat_ioctl32)(struct v4l2_subdev *, unsigned int, unsigned long); int 
(*g_register)(struct v4l2_subdev *, struct v4l2_dbg_register *); int (*s_register)(struct v4l2_subdev *, const struct v4l2_dbg_register *); int (*s_power)(struct v4l2_subdev *, int); int (*interrupt_service_routine)(struct v4l2_subdev *, u32 , bool *); int (*subscribe_event)(struct v4l2_subdev *, struct v4l2_fh *, struct v4l2_event_subscription *); int (*unsubscribe_event)(struct v4l2_subdev *, struct v4l2_fh *, struct v4l2_event_subscription *); } ; 178 struct v4l2_subdev_tuner_ops { int (*s_radio)(struct v4l2_subdev *); int (*s_frequency)(struct v4l2_subdev *, const struct v4l2_frequency *); int (*g_frequency)(struct v4l2_subdev *, struct v4l2_frequency *); int (*enum_freq_bands)(struct v4l2_subdev *, struct v4l2_frequency_band *); int (*g_tuner)(struct v4l2_subdev *, struct v4l2_tuner *); int (*s_tuner)(struct v4l2_subdev *, const struct v4l2_tuner *); int (*g_modulator)(struct v4l2_subdev *, struct v4l2_modulator *); int (*s_modulator)(struct v4l2_subdev *, const struct v4l2_modulator *); int (*s_type_addr)(struct v4l2_subdev *, struct tuner_setup *); int (*s_config)(struct v4l2_subdev *, const struct v4l2_priv_tun_config *); } ; 205 struct v4l2_subdev_audio_ops { int (*s_clock_freq)(struct v4l2_subdev *, u32 ); int (*s_i2s_clock_freq)(struct v4l2_subdev *, u32 ); int (*s_routing)(struct v4l2_subdev *, u32 , u32 , u32 ); int (*s_stream)(struct v4l2_subdev *, int); } ; 232 struct v4l2_mbus_frame_desc_entry { u16 flags; u32 pixelcode; u32 length; } ; 253 struct v4l2_mbus_frame_desc { struct v4l2_mbus_frame_desc_entry entry[4U]; unsigned short num_entries; } ; 265 struct v4l2_subdev_video_ops { int (*s_routing)(struct v4l2_subdev *, u32 , u32 , u32 ); int (*s_crystal_freq)(struct v4l2_subdev *, u32 , u32 ); int (*g_std)(struct v4l2_subdev *, v4l2_std_id *); int (*s_std)(struct v4l2_subdev *, v4l2_std_id ); int (*s_std_output)(struct v4l2_subdev *, v4l2_std_id ); int (*g_std_output)(struct v4l2_subdev *, v4l2_std_id *); int (*querystd)(struct v4l2_subdev *, 
v4l2_std_id *); int (*g_tvnorms)(struct v4l2_subdev *, v4l2_std_id *); int (*g_tvnorms_output)(struct v4l2_subdev *, v4l2_std_id *); int (*g_input_status)(struct v4l2_subdev *, u32 *); int (*s_stream)(struct v4l2_subdev *, int); int (*cropcap)(struct v4l2_subdev *, struct v4l2_cropcap *); int (*g_crop)(struct v4l2_subdev *, struct v4l2_crop *); int (*s_crop)(struct v4l2_subdev *, const struct v4l2_crop *); int (*g_parm)(struct v4l2_subdev *, struct v4l2_streamparm *); int (*s_parm)(struct v4l2_subdev *, struct v4l2_streamparm *); int (*g_frame_interval)(struct v4l2_subdev *, struct v4l2_subdev_frame_interval *); int (*s_frame_interval)(struct v4l2_subdev *, struct v4l2_subdev_frame_interval *); int (*enum_framesizes)(struct v4l2_subdev *, struct v4l2_frmsizeenum *); int (*enum_frameintervals)(struct v4l2_subdev *, struct v4l2_frmivalenum *); int (*s_dv_timings)(struct v4l2_subdev *, struct v4l2_dv_timings *); int (*g_dv_timings)(struct v4l2_subdev *, struct v4l2_dv_timings *); int (*query_dv_timings)(struct v4l2_subdev *, struct v4l2_dv_timings *); int (*enum_mbus_fmt)(struct v4l2_subdev *, unsigned int, enum v4l2_mbus_pixelcode *); int (*enum_mbus_fsizes)(struct v4l2_subdev *, struct v4l2_frmsizeenum *); int (*g_mbus_fmt)(struct v4l2_subdev *, struct v4l2_mbus_framefmt *); int (*try_mbus_fmt)(struct v4l2_subdev *, struct v4l2_mbus_framefmt *); int (*s_mbus_fmt)(struct v4l2_subdev *, struct v4l2_mbus_framefmt *); int (*g_mbus_config)(struct v4l2_subdev *, struct v4l2_mbus_config *); int (*s_mbus_config)(struct v4l2_subdev *, const struct v4l2_mbus_config *); int (*s_rx_buffer)(struct v4l2_subdev *, void *, unsigned int *); } ; 359 struct v4l2_subdev_vbi_ops { int (*decode_vbi_line)(struct v4l2_subdev *, struct v4l2_decode_vbi_line *); int (*s_vbi_data)(struct v4l2_subdev *, const struct v4l2_sliced_vbi_data *); int (*g_vbi_data)(struct v4l2_subdev *, struct v4l2_sliced_vbi_data *); int (*g_sliced_vbi_cap)(struct v4l2_subdev *, struct v4l2_sliced_vbi_cap *); int 
(*s_raw_fmt)(struct v4l2_subdev *, struct v4l2_vbi_format *); int (*g_sliced_fmt)(struct v4l2_subdev *, struct v4l2_sliced_vbi_format *); int (*s_sliced_fmt)(struct v4l2_subdev *, struct v4l2_sliced_vbi_format *); } ; 399 struct v4l2_subdev_sensor_ops { int (*g_skip_top_lines)(struct v4l2_subdev *, u32 *); int (*g_skip_frames)(struct v4l2_subdev *, u32 *); } ; 414 enum v4l2_subdev_ir_mode { V4L2_SUBDEV_IR_MODE_PULSE_WIDTH = 0 } ; 418 struct v4l2_subdev_ir_parameters { unsigned int bytes_per_data_element; enum v4l2_subdev_ir_mode mode; bool enable; bool interrupt_enable; bool shutdown; bool modulation; u32 max_pulse_width; unsigned int carrier_freq; unsigned int duty_cycle; bool invert_level; bool invert_carrier_sense; u32 noise_filter_min_width; unsigned int carrier_range_lower; unsigned int carrier_range_upper; u32 resolution; } ; 466 struct v4l2_subdev_ir_ops { int (*rx_read)(struct v4l2_subdev *, u8 *, size_t , ssize_t *); int (*rx_g_parameters)(struct v4l2_subdev *, struct v4l2_subdev_ir_parameters *); int (*rx_s_parameters)(struct v4l2_subdev *, struct v4l2_subdev_ir_parameters *); int (*tx_write)(struct v4l2_subdev *, u8 *, size_t , ssize_t *); int (*tx_g_parameters)(struct v4l2_subdev *, struct v4l2_subdev_ir_parameters *); int (*tx_s_parameters)(struct v4l2_subdev *, struct v4l2_subdev_ir_parameters *); } ; 485 struct v4l2_subdev_pad_ops { int (*enum_mbus_code)(struct v4l2_subdev *, struct v4l2_subdev_fh *, struct v4l2_subdev_mbus_code_enum *); int (*enum_frame_size)(struct v4l2_subdev *, struct v4l2_subdev_fh *, struct v4l2_subdev_frame_size_enum *); int (*enum_frame_interval)(struct v4l2_subdev *, struct v4l2_subdev_fh *, struct v4l2_subdev_frame_interval_enum *); int (*get_fmt)(struct v4l2_subdev *, struct v4l2_subdev_fh *, struct v4l2_subdev_format *); int (*set_fmt)(struct v4l2_subdev *, struct v4l2_subdev_fh *, struct v4l2_subdev_format *); int (*set_crop)(struct v4l2_subdev *, struct v4l2_subdev_fh *, struct v4l2_subdev_crop *); int 
(*get_crop)(struct v4l2_subdev *, struct v4l2_subdev_fh *, struct v4l2_subdev_crop *); int (*get_selection)(struct v4l2_subdev *, struct v4l2_subdev_fh *, struct v4l2_subdev_selection *); int (*set_selection)(struct v4l2_subdev *, struct v4l2_subdev_fh *, struct v4l2_subdev_selection *); int (*get_edid)(struct v4l2_subdev *, struct v4l2_edid *); int (*set_edid)(struct v4l2_subdev *, struct v4l2_edid *); int (*dv_timings_cap)(struct v4l2_subdev *, struct v4l2_dv_timings_cap *); int (*enum_dv_timings)(struct v4l2_subdev *, struct v4l2_enum_dv_timings *); int (*link_validate)(struct v4l2_subdev *, struct media_link *, struct v4l2_subdev_format *, struct v4l2_subdev_format *); int (*get_frame_desc)(struct v4l2_subdev *, unsigned int, struct v4l2_mbus_frame_desc *); int (*set_frame_desc)(struct v4l2_subdev *, unsigned int, struct v4l2_mbus_frame_desc *); } ; 529 struct v4l2_subdev_ops { const struct v4l2_subdev_core_ops *core; const struct v4l2_subdev_tuner_ops *tuner; const struct v4l2_subdev_audio_ops *audio; const struct v4l2_subdev_video_ops *video; const struct v4l2_subdev_vbi_ops *vbi; const struct v4l2_subdev_ir_ops *ir; const struct v4l2_subdev_sensor_ops *sensor; const struct v4l2_subdev_pad_ops *pad; } ; 541 struct v4l2_subdev_internal_ops { int (*registered)(struct v4l2_subdev *); void (*unregistered)(struct v4l2_subdev *); int (*open)(struct v4l2_subdev *, struct v4l2_subdev_fh *); int (*close)(struct v4l2_subdev *, struct v4l2_subdev_fh *); } ; 562 struct regulator_bulk_data ; 563 struct v4l2_subdev_platform_data { struct regulator_bulk_data *regulators; int num_regulators; void *host_priv; } ; 584 struct v4l2_subdev { struct media_entity entity; struct list_head list; struct module *owner; bool owner_v4l2_dev; u32 flags; struct v4l2_device *v4l2_dev; const struct v4l2_subdev_ops *ops; const struct v4l2_subdev_internal_ops *internal_ops; struct v4l2_ctrl_handler *ctrl_handler; char name[32U]; u32 grp_id; void *dev_priv; void *host_priv; struct video_device 
*devnode; struct device *dev; struct list_head async_list; struct v4l2_async_subdev *asd; struct v4l2_async_notifier *notifier; struct v4l2_subdev_platform_data *pdata; } ; 622 struct __anonstruct_pad_238 { struct v4l2_mbus_framefmt try_fmt; struct v4l2_rect try_crop; struct v4l2_rect try_compose; } ; 622 struct v4l2_subdev_fh { struct v4l2_fh vfh; struct __anonstruct_pad_238 *pad; } ; 691 struct v4l2_device { struct device *dev; struct media_device *mdev; struct list_head subdevs; spinlock_t lock; char name[36U]; void (*notify)(struct v4l2_subdev *, unsigned int, void *); struct v4l2_ctrl_handler *ctrl_handler; struct v4l2_prio_state prio; struct mutex ioctl_lock; struct kref ref; void (*release)(struct v4l2_device *); } ; 54 struct v4l2_ctrl_helper ; 55 struct v4l2_ctrl ; 57 union v4l2_ctrl_ptr { s32 *p_s32; s64 *p_s64; u8 *p_u8; u16 *p_u16; u32 *p_u32; char *p_char; void *p; } ; 57 struct v4l2_ctrl_ops { int (*g_volatile_ctrl)(struct v4l2_ctrl *); int (*try_ctrl)(struct v4l2_ctrl *); int (*s_ctrl)(struct v4l2_ctrl *); } ; 75 struct v4l2_ctrl_type_ops { bool (*equal)(const struct v4l2_ctrl *, u32 , union v4l2_ctrl_ptr , union v4l2_ctrl_ptr ); void (*init)(const struct v4l2_ctrl *, u32 , union v4l2_ctrl_ptr ); void (*log)(const struct v4l2_ctrl *); int (*validate)(const struct v4l2_ctrl *, u32 , union v4l2_ctrl_ptr ); } ; 94 union __anonunion_ldv_34579_239 { u64 step; u64 menu_skip_mask; } ; 94 union __anonunion_ldv_34583_240 { const const char **qmenu; const s64 *qmenu_int; } ; 94 struct __anonstruct_cur_241 { s32 val; } ; 94 struct v4l2_ctrl { struct list_head node; struct list_head ev_subs; struct v4l2_ctrl_handler *handler; struct v4l2_ctrl **cluster; unsigned int ncontrols; unsigned char done; unsigned char is_new; unsigned char has_changed; unsigned char is_private; unsigned char is_auto; unsigned char is_int; unsigned char is_string; unsigned char is_ptr; unsigned char is_array; unsigned char has_volatiles; unsigned char call_notify; unsigned char 
manual_mode_value; const struct v4l2_ctrl_ops *ops; const struct v4l2_ctrl_type_ops *type_ops; u32 id; const char *name; enum v4l2_ctrl_type type; s64 minimum; s64 maximum; s64 default_value; u32 elems; u32 elem_size; u32 dims[4U]; u32 nr_of_dims; union __anonunion_ldv_34579_239 ldv_34579; union __anonunion_ldv_34583_240 ldv_34583; unsigned long flags; void *priv; s32 val; struct __anonstruct_cur_241 cur; union v4l2_ctrl_ptr p_new; union v4l2_ctrl_ptr p_cur; } ; 212 struct v4l2_ctrl_ref { struct list_head node; struct v4l2_ctrl_ref *next; struct v4l2_ctrl *ctrl; struct v4l2_ctrl_helper *helper; } ; 229 struct v4l2_ctrl_handler { struct mutex _lock; struct mutex *lock; struct list_head ctrls; struct list_head ctrl_refs; struct v4l2_ctrl_ref *cached; struct v4l2_ctrl_ref **buckets; void (*notify)(struct v4l2_ctrl *, void *); void *notify_priv; u16 nr_of_buckets; int error; } ; 76 struct fence ; 77 struct fence_ops ; 78 struct fence_cb ; 79 struct fence { struct kref refcount; const struct fence_ops *ops; struct callback_head rcu; struct list_head cb_list; spinlock_t *lock; unsigned int context; unsigned int seqno; unsigned long flags; ktime_t timestamp; int status; } ; 91 struct fence_cb { struct list_head node; void (*func)(struct fence *, struct fence_cb *); } ; 104 struct fence_ops { const char * (*get_driver_name)(struct fence *); const char * (*get_timeline_name)(struct fence *); bool (*enable_signaling)(struct fence *); bool (*signaled)(struct fence *); long int (*wait)(struct fence *, bool , long); void (*release)(struct fence *); int (*fill_driver_data)(struct fence *, void *, int); void (*fence_value_str)(struct fence *, char *, int); void (*timeline_value_str)(struct fence *, char *, int); } ; 337 struct dma_buf ; 338 struct dma_buf_attachment ; 339 struct dma_buf_ops { int (*attach)(struct dma_buf *, struct device *, struct dma_buf_attachment *); void (*detach)(struct dma_buf *, struct dma_buf_attachment *); struct sg_table * (*map_dma_buf)(struct 
dma_buf_attachment *, enum dma_data_direction ); void (*unmap_dma_buf)(struct dma_buf_attachment *, struct sg_table *, enum dma_data_direction ); void (*release)(struct dma_buf *); int (*begin_cpu_access)(struct dma_buf *, size_t , size_t , enum dma_data_direction ); void (*end_cpu_access)(struct dma_buf *, size_t , size_t , enum dma_data_direction ); void * (*kmap_atomic)(struct dma_buf *, unsigned long); void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *); void * (*kmap)(struct dma_buf *, unsigned long); void (*kunmap)(struct dma_buf *, unsigned long, void *); int (*mmap)(struct dma_buf *, struct vm_area_struct *); void * (*vmap)(struct dma_buf *); void (*vunmap)(struct dma_buf *, void *); } ; 110 struct dma_buf_poll_cb_t { struct fence_cb cb; wait_queue_head_t *poll; unsigned long active; } ; 145 struct reservation_object ; 145 struct dma_buf { size_t size; struct file *file; struct list_head attachments; const struct dma_buf_ops *ops; struct mutex lock; unsigned int vmapping_counter; void *vmap_ptr; const char *exp_name; struct list_head list_node; void *priv; struct reservation_object *resv; wait_queue_head_t poll; struct dma_buf_poll_cb_t cb_excl; struct dma_buf_poll_cb_t cb_shared; } ; 146 struct dma_buf_attachment { struct dma_buf *dmabuf; struct device *dev; struct list_head node; void *priv; } ; 213 struct vb2_alloc_ctx ; 214 struct vb2_fileio_data ; 215 struct vb2_threadio_data ; 216 struct vb2_mem_ops { void * (*alloc)(void *, unsigned long, gfp_t ); void (*put)(void *); struct dma_buf * (*get_dmabuf)(void *, unsigned long); void * (*get_userptr)(void *, unsigned long, unsigned long, int); void (*put_userptr)(void *); void (*prepare)(void *); void (*finish)(void *); void * (*attach_dmabuf)(void *, struct dma_buf *, unsigned long, int); void (*detach_dmabuf)(void *); int (*map_dmabuf)(void *); void (*unmap_dmabuf)(void *); void * (*vaddr)(void *); void * (*cookie)(void *); unsigned int (*num_users)(void *); int (*mmap)(void *, struct 
vm_area_struct *); } ; 109 struct vb2_plane { void *mem_priv; struct dma_buf *dbuf; unsigned int dbuf_mapped; } ; 128 enum vb2_buffer_state { VB2_BUF_STATE_DEQUEUED = 0, VB2_BUF_STATE_PREPARING = 1, VB2_BUF_STATE_PREPARED = 2, VB2_BUF_STATE_QUEUED = 3, VB2_BUF_STATE_ACTIVE = 4, VB2_BUF_STATE_DONE = 5, VB2_BUF_STATE_ERROR = 6 } ; 138 struct vb2_buffer { struct v4l2_buffer v4l2_buf; struct v4l2_plane v4l2_planes[8U]; struct vb2_queue *vb2_queue; unsigned int num_planes; enum vb2_buffer_state state; struct list_head queued_entry; struct list_head done_entry; struct vb2_plane planes[8U]; u32 cnt_mem_alloc; u32 cnt_mem_put; u32 cnt_mem_get_dmabuf; u32 cnt_mem_get_userptr; u32 cnt_mem_put_userptr; u32 cnt_mem_prepare; u32 cnt_mem_finish; u32 cnt_mem_attach_dmabuf; u32 cnt_mem_detach_dmabuf; u32 cnt_mem_map_dmabuf; u32 cnt_mem_unmap_dmabuf; u32 cnt_mem_vaddr; u32 cnt_mem_cookie; u32 cnt_mem_num_users; u32 cnt_mem_mmap; u32 cnt_buf_init; u32 cnt_buf_prepare; u32 cnt_buf_finish; u32 cnt_buf_cleanup; u32 cnt_buf_queue; u32 cnt_buf_done; } ; 238 struct vb2_ops { int (*queue_setup)(struct vb2_queue *, const struct v4l2_format *, unsigned int *, unsigned int *, unsigned int *, void **); void (*wait_prepare)(struct vb2_queue *); void (*wait_finish)(struct vb2_queue *); int (*buf_init)(struct vb2_buffer *); int (*buf_prepare)(struct vb2_buffer *); void (*buf_finish)(struct vb2_buffer *); void (*buf_cleanup)(struct vb2_buffer *); int (*start_streaming)(struct vb2_queue *, unsigned int); void (*stop_streaming)(struct vb2_queue *); void (*buf_queue)(struct vb2_buffer *); } ; 335 struct vb2_queue { enum v4l2_buf_type type; unsigned int io_modes; unsigned int io_flags; struct mutex *lock; struct v4l2_fh *owner; const struct vb2_ops *ops; const struct vb2_mem_ops *mem_ops; void *drv_priv; unsigned int buf_struct_size; u32 timestamp_flags; gfp_t gfp_flags; u32 min_buffers_needed; enum v4l2_memory memory; struct vb2_buffer *bufs[32U]; unsigned int num_buffers; struct list_head 
queued_list; unsigned int queued_count; atomic_t owned_by_drv_count; struct list_head done_list; spinlock_t done_lock; wait_queue_head_t done_wq; void *alloc_ctx[8U]; unsigned int plane_sizes[8U]; unsigned char streaming; unsigned char start_streaming_called; unsigned char error; struct vb2_fileio_data *fileio; struct vb2_threadio_data *threadio; u32 cnt_queue_setup; u32 cnt_wait_prepare; u32 cnt_wait_finish; u32 cnt_start_streaming; u32 cnt_stop_streaming; } ; 634 enum mcam_state { S_NOTREADY = 0, S_IDLE = 1, S_FLAKED = 2, S_STREAMING = 3, S_BUFWAIT = 4 } ; 642 enum mcam_buffer_mode { B_vmalloc = 0, B_DMA_contig = 1, B_DMA_sg = 2 } ; 648 enum mcam_chip_id { MCAM_CAFE = 0, MCAM_ARMADA610 = 1 } ; 81 struct mcam_frame_state { unsigned int frames; unsigned int singles; unsigned int delivered; } ; 90 struct clk ; 90 struct mcam_vb_buffer ; 90 struct mcam_camera { struct i2c_adapter *i2c_adapter; unsigned char *regs; unsigned int regs_size; spinlock_t dev_lock; struct device *dev; enum mcam_chip_id chip_id; short clock_speed; short use_smbus; enum mcam_buffer_mode buffer_mode; int mclk_min; int mclk_src; int mclk_div; int ccic_id; enum v4l2_mbus_type bus_type; int *dphy; bool mipi_enabled; int lane; struct clk *clk[3U]; int (*plat_power_up)(struct mcam_camera *); void (*plat_power_down)(struct mcam_camera *); void (*calc_dphy)(struct mcam_camera *); void (*ctlr_reset)(struct mcam_camera *); struct v4l2_device v4l2_dev; struct v4l2_ctrl_handler ctrl_handler; enum mcam_state state; unsigned long flags; int users; struct mcam_frame_state frame_state; struct video_device vdev; struct v4l2_subdev *sensor; unsigned short sensor_addr; struct vb2_queue vb_queue; struct list_head buffers; unsigned int nbufs; int next_buf; unsigned int dma_buf_size; void *dma_bufs[3U]; dma_addr_t dma_handles[3U]; struct tasklet_struct s_tasklet; unsigned int sequence; unsigned int buf_seq[3U]; struct mcam_vb_buffer *vb_bufs[3U]; struct vb2_alloc_ctx *vb_alloc_ctx; void (*dma_setup)(struct 
mcam_camera *); void (*frame_complete)(struct mcam_camera *, int); struct v4l2_pix_format pix_format; enum v4l2_mbus_pixelcode mbus_code; struct mutex s_mutex; } ; 105 struct cafe_camera { int registered; struct mcam_camera mcam; struct pci_dev *pdev; wait_queue_head_t smbus_wait; } ; 1007 typedef int ldv_func_ret_type; 395 struct paravirt_callee_save { void *func; } ; 196 struct pv_irq_ops { struct paravirt_callee_save save_fl; struct paravirt_callee_save restore_fl; struct paravirt_callee_save irq_disable; struct paravirt_callee_save irq_enable; void (*safe_halt)(); void (*halt)(); void (*adjust_exception_frame)(); } ; 222 struct _ddebug { const char *modname; const char *function; const char *filename; const char *format; unsigned int lineno; unsigned char flags; } ; 42 struct i2c_board_info ; 248 struct i2c_board_info { char type[20U]; unsigned short flags; unsigned short addr; void *platform_data; struct dev_archdata *archdata; struct device_node *of_node; struct acpi_dev_node acpi_node; int irq; } ; 225 struct v4l2_capability { __u8 driver[16U]; __u8 card[32U]; __u8 bus_info[32U]; __u32 version; __u32 capabilities; __u32 device_caps; __u32 reserved[3U]; } ; 297 struct v4l2_fmtdesc { __u32 index; __u32 type; __u32 flags; __u8 description[32U]; __u32 pixelformat; __u32 reserved[4U]; } ; 562 struct v4l2_jpegcompression { int quality; int APPn; int APP_len; char APP_data[60U]; int COM_len; char COM_data[60U]; __u32 jpeg_markers; } ; 591 struct v4l2_requestbuffers { __u32 count; __u32 type; __u32 memory; __u32 reserved[2U]; } ; 701 struct v4l2_exportbuffer { __u32 type; __u32 index; __u32 plane; __u32 flags; __s32 fd; __u32 reserved[11U]; } ; 760 struct __anonstruct_fmt_205 { __u32 width; __u32 height; __u32 pixelformat; __u32 field; __u32 bytesperline; __u32 sizeimage; __u32 colorspace; __u32 priv; } ; 760 struct v4l2_framebuffer { __u32 capability; __u32 flags; void *base; struct __anonstruct_fmt_205 fmt; } ; 853 struct v4l2_selection { __u32 type; __u32 target; 
__u32 flags; struct v4l2_rect r; __u32 reserved[9U]; } ; 1200 struct v4l2_input { __u32 index; __u8 name[32U]; __u32 type; __u32 audioset; __u32 tuner; v4l2_std_id std; __u32 status; __u32 capabilities; __u32 reserved[3U]; } ; 1216 struct v4l2_output { __u32 index; __u8 name[32U]; __u32 type; __u32 audioset; __u32 modulator; v4l2_std_id std; __u32 capabilities; __u32 reserved[3U]; } ; 1339 struct v4l2_query_ext_ctrl { __u32 id; __u32 type; char name[32U]; __s64 minimum; __s64 maximum; __u64 step; __s64 default_value; __u32 flags; __u32 elem_size; __u32 elems; __u32 nr_of_dims; __u32 dims[4U]; __u32 reserved[32U]; } ; 1469 struct v4l2_hw_freq_seek { __u32 tuner; __u32 type; __u32 seek_upward; __u32 wrap_around; __u32 spacing; __u32 rangelow; __u32 rangehigh; __u32 reserved[5U]; } ; 1490 struct v4l2_audio { __u32 index; __u8 name[32U]; __u32 capability; __u32 mode; __u32 reserved[2U]; } ; 1512 struct v4l2_audioout { __u32 index; __u8 name[32U]; __u32 capability; __u32 mode; __u32 reserved[2U]; } ; 1527 struct v4l2_enc_idx_entry { __u64 offset; __u64 pts; __u32 length; __u32 flags; __u32 reserved[2U]; } ; 1546 struct v4l2_enc_idx { __u32 entries; __u32 entries_cap; __u32 reserved[4U]; struct v4l2_enc_idx_entry entry[64U]; } ; 1554 struct __anonstruct_raw_211 { __u32 data[8U]; } ; 1554 union __anonunion_ldv_31208_210 { struct __anonstruct_raw_211 raw; } ; 1554 struct v4l2_encoder_cmd { __u32 cmd; __u32 flags; union __anonunion_ldv_31208_210 ldv_31208; } ; 1573 struct __anonstruct_stop_213 { __u64 pts; } ; 1573 struct __anonstruct_start_214 { __s32 speed; __u32 format; } ; 1573 struct __anonstruct_raw_215 { __u32 data[16U]; } ; 1573 union __anonunion_ldv_31223_212 { struct __anonstruct_stop_213 stop; struct __anonstruct_start_214 start; struct __anonstruct_raw_215 raw; } ; 1573 struct v4l2_decoder_cmd { __u32 cmd; __u32 flags; union __anonunion_ldv_31223_212 ldv_31223; } ; 1958 struct v4l2_dbg_chip_info { struct v4l2_dbg_match match; char name[32U]; __u32 flags; __u32 
reserved[32U]; } ; 1969 struct v4l2_create_buffers { __u32 index; __u32 count; __u32 memory; struct v4l2_format format; __u32 reserved[8U]; } ; 40 typedef struct poll_table_struct poll_table; 130 struct v4l2_ioctl_ops { int (*vidioc_querycap)(struct file *, void *, struct v4l2_capability *); int (*vidioc_g_priority)(struct file *, void *, enum v4l2_priority *); int (*vidioc_s_priority)(struct file *, void *, enum v4l2_priority ); int (*vidioc_enum_fmt_vid_cap)(struct file *, void *, struct v4l2_fmtdesc *); int (*vidioc_enum_fmt_vid_overlay)(struct file *, void *, struct v4l2_fmtdesc *); int (*vidioc_enum_fmt_vid_out)(struct file *, void *, struct v4l2_fmtdesc *); int (*vidioc_enum_fmt_vid_cap_mplane)(struct file *, void *, struct v4l2_fmtdesc *); int (*vidioc_enum_fmt_vid_out_mplane)(struct file *, void *, struct v4l2_fmtdesc *); int (*vidioc_enum_fmt_sdr_cap)(struct file *, void *, struct v4l2_fmtdesc *); int (*vidioc_g_fmt_vid_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_vid_overlay)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_vid_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_vid_out_overlay)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_vbi_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_vbi_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_sliced_vbi_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_sliced_vbi_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_vid_cap_mplane)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_vid_out_mplane)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_sdr_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_vid_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_vid_overlay)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_vid_out)(struct file *, void *, struct v4l2_format *); 
int (*vidioc_s_fmt_vid_out_overlay)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_vbi_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_vbi_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_sliced_vbi_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_sliced_vbi_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_vid_cap_mplane)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_vid_out_mplane)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_sdr_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_vid_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_vid_overlay)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_vid_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_vid_out_overlay)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_vbi_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_vbi_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_sliced_vbi_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_sliced_vbi_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_vid_cap_mplane)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_vid_out_mplane)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_sdr_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_reqbufs)(struct file *, void *, struct v4l2_requestbuffers *); int (*vidioc_querybuf)(struct file *, void *, struct v4l2_buffer *); int (*vidioc_qbuf)(struct file *, void *, struct v4l2_buffer *); int (*vidioc_expbuf)(struct file *, void *, struct v4l2_exportbuffer *); int (*vidioc_dqbuf)(struct file *, void *, struct v4l2_buffer *); int (*vidioc_create_bufs)(struct file *, void *, struct v4l2_create_buffers *); int (*vidioc_prepare_buf)(struct file *, void *, struct 
v4l2_buffer *); int (*vidioc_overlay)(struct file *, void *, unsigned int); int (*vidioc_g_fbuf)(struct file *, void *, struct v4l2_framebuffer *); int (*vidioc_s_fbuf)(struct file *, void *, const struct v4l2_framebuffer *); int (*vidioc_streamon)(struct file *, void *, enum v4l2_buf_type ); int (*vidioc_streamoff)(struct file *, void *, enum v4l2_buf_type ); int (*vidioc_g_std)(struct file *, void *, v4l2_std_id *); int (*vidioc_s_std)(struct file *, void *, v4l2_std_id ); int (*vidioc_querystd)(struct file *, void *, v4l2_std_id *); int (*vidioc_enum_input)(struct file *, void *, struct v4l2_input *); int (*vidioc_g_input)(struct file *, void *, unsigned int *); int (*vidioc_s_input)(struct file *, void *, unsigned int); int (*vidioc_enum_output)(struct file *, void *, struct v4l2_output *); int (*vidioc_g_output)(struct file *, void *, unsigned int *); int (*vidioc_s_output)(struct file *, void *, unsigned int); int (*vidioc_queryctrl)(struct file *, void *, struct v4l2_queryctrl *); int (*vidioc_query_ext_ctrl)(struct file *, void *, struct v4l2_query_ext_ctrl *); int (*vidioc_g_ctrl)(struct file *, void *, struct v4l2_control *); int (*vidioc_s_ctrl)(struct file *, void *, struct v4l2_control *); int (*vidioc_g_ext_ctrls)(struct file *, void *, struct v4l2_ext_controls *); int (*vidioc_s_ext_ctrls)(struct file *, void *, struct v4l2_ext_controls *); int (*vidioc_try_ext_ctrls)(struct file *, void *, struct v4l2_ext_controls *); int (*vidioc_querymenu)(struct file *, void *, struct v4l2_querymenu *); int (*vidioc_enumaudio)(struct file *, void *, struct v4l2_audio *); int (*vidioc_g_audio)(struct file *, void *, struct v4l2_audio *); int (*vidioc_s_audio)(struct file *, void *, const struct v4l2_audio *); int (*vidioc_enumaudout)(struct file *, void *, struct v4l2_audioout *); int (*vidioc_g_audout)(struct file *, void *, struct v4l2_audioout *); int (*vidioc_s_audout)(struct file *, void *, const struct v4l2_audioout *); int (*vidioc_g_modulator)(struct file 
*, void *, struct v4l2_modulator *); int (*vidioc_s_modulator)(struct file *, void *, const struct v4l2_modulator *); int (*vidioc_cropcap)(struct file *, void *, struct v4l2_cropcap *); int (*vidioc_g_crop)(struct file *, void *, struct v4l2_crop *); int (*vidioc_s_crop)(struct file *, void *, const struct v4l2_crop *); int (*vidioc_g_selection)(struct file *, void *, struct v4l2_selection *); int (*vidioc_s_selection)(struct file *, void *, struct v4l2_selection *); int (*vidioc_g_jpegcomp)(struct file *, void *, struct v4l2_jpegcompression *); int (*vidioc_s_jpegcomp)(struct file *, void *, const struct v4l2_jpegcompression *); int (*vidioc_g_enc_index)(struct file *, void *, struct v4l2_enc_idx *); int (*vidioc_encoder_cmd)(struct file *, void *, struct v4l2_encoder_cmd *); int (*vidioc_try_encoder_cmd)(struct file *, void *, struct v4l2_encoder_cmd *); int (*vidioc_decoder_cmd)(struct file *, void *, struct v4l2_decoder_cmd *); int (*vidioc_try_decoder_cmd)(struct file *, void *, struct v4l2_decoder_cmd *); int (*vidioc_g_parm)(struct file *, void *, struct v4l2_streamparm *); int (*vidioc_s_parm)(struct file *, void *, struct v4l2_streamparm *); int (*vidioc_g_tuner)(struct file *, void *, struct v4l2_tuner *); int (*vidioc_s_tuner)(struct file *, void *, const struct v4l2_tuner *); int (*vidioc_g_frequency)(struct file *, void *, struct v4l2_frequency *); int (*vidioc_s_frequency)(struct file *, void *, const struct v4l2_frequency *); int (*vidioc_enum_freq_bands)(struct file *, void *, struct v4l2_frequency_band *); int (*vidioc_g_sliced_vbi_cap)(struct file *, void *, struct v4l2_sliced_vbi_cap *); int (*vidioc_log_status)(struct file *, void *); int (*vidioc_s_hw_freq_seek)(struct file *, void *, const struct v4l2_hw_freq_seek *); int (*vidioc_g_register)(struct file *, void *, struct v4l2_dbg_register *); int (*vidioc_s_register)(struct file *, void *, const struct v4l2_dbg_register *); int (*vidioc_g_chip_info)(struct file *, void *, struct 
v4l2_dbg_chip_info *); int (*vidioc_enum_framesizes)(struct file *, void *, struct v4l2_frmsizeenum *); int (*vidioc_enum_frameintervals)(struct file *, void *, struct v4l2_frmivalenum *); int (*vidioc_s_dv_timings)(struct file *, void *, struct v4l2_dv_timings *); int (*vidioc_g_dv_timings)(struct file *, void *, struct v4l2_dv_timings *); int (*vidioc_query_dv_timings)(struct file *, void *, struct v4l2_dv_timings *); int (*vidioc_enum_dv_timings)(struct file *, void *, struct v4l2_enum_dv_timings *); int (*vidioc_dv_timings_cap)(struct file *, void *, struct v4l2_dv_timings_cap *); int (*vidioc_g_edid)(struct file *, void *, struct v4l2_edid *); int (*vidioc_s_edid)(struct file *, void *, struct v4l2_edid *); int (*vidioc_subscribe_event)(struct v4l2_fh *, const struct v4l2_event_subscription *); int (*vidioc_unsubscribe_event)(struct v4l2_fh *, const struct v4l2_event_subscription *); long int (*vidioc_default)(struct file *, void *, bool , unsigned int, void *); } ; 812 struct ov7670_config { int min_width; int min_height; int clock_speed; bool use_smbus; bool pll_bypass; bool pclk_hb_disable; } ; 144 struct mcam_format_struct { __u8 *desc; __u32 pixelformat; int bpp; bool planar; enum v4l2_mbus_pixelcode mbus_code; } ; 252 struct mcam_dma_desc { u32 dma_addr; u32 segment_len; } ; 265 struct yuv_pointer_t { dma_addr_t y; dma_addr_t u; dma_addr_t v; } ; 271 struct mcam_vb_buffer { struct vb2_buffer vb_buf; struct list_head queue; struct mcam_dma_desc *dma_desc; dma_addr_t dma_desc_pa; int dma_desc_nent; struct yuv_pointer_t yuv_p; } ; 2780 typedef int ldv_func_ret_type___0; 29 struct attribute___0 { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ; 60 struct attribute_group___0 { const char *name; umode_t (*is_visible)(struct kobject___0 *, struct attribute___0 *, int); struct attribute___0 **attrs; struct bin_attribute___0 **bin_attrs; } ; 121 struct bin_attribute___0 { struct attribute___0 attr; 
size_t size; void *private; ssize_t (*read)(struct file *, struct kobject___0 *, struct bin_attribute___0 *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject___0 *, struct bin_attribute___0 *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject___0 *, struct bin_attribute___0 *attr, struct vm_area_struct *vma); } ; 175 struct sysfs_ops___0 { ssize_t (*show)(struct kobject___0 *, struct attribute___0 *, char *); ssize_t (*store)(struct kobject___0 *, struct attribute___0 *, const char *, size_t ); } ; 63 struct kobject___0 { const char *name; struct list_head entry; struct kobject___0 *parent; struct kset *kset; struct kobj_type___0 *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned int state_initialized; unsigned int state_in_sysfs; unsigned int state_add_uevent_sent; unsigned int state_remove_uevent_sent; unsigned int uevent_suppress; } ; 115 struct kobj_type___0 { void (*release)(struct kobject___0 *kobj); const struct sysfs_ops___0 *sysfs_ops; struct attribute___0 **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject___0 *kobj); const void * (*namespace)(struct kobject___0 *kobj); } ; 295 struct dev_pm_ops___0 { int (*prepare)(struct device___0 *dev); void (*complete)(struct device___0 *dev); int (*suspend)(struct device___0 *dev); int (*resume)(struct device___0 *dev); int (*freeze)(struct device___0 *dev); int (*thaw)(struct device___0 *dev); int (*poweroff)(struct device___0 *dev); int (*restore)(struct device___0 *dev); int (*suspend_late)(struct device___0 *dev); int (*resume_early)(struct device___0 *dev); int (*freeze_late)(struct device___0 *dev); int (*thaw_early)(struct device___0 *dev); int (*poweroff_late)(struct device___0 *dev); int (*restore_early)(struct device___0 *dev); int (*suspend_noirq)(struct device___0 *dev); int (*resume_noirq)(struct device___0 *dev); int (*freeze_noirq)(struct device___0 *dev); int (*thaw_noirq)(struct 
device___0 *dev); int (*poweroff_noirq)(struct device___0 *dev); int (*restore_noirq)(struct device___0 *dev); int (*runtime_suspend)(struct device___0 *dev); int (*runtime_resume)(struct device___0 *dev); int (*runtime_idle)(struct device___0 *dev); } ; 558 struct dev_pm_info___0 { pm_message_t power_state; unsigned int can_wakeup; unsigned int async_suspend; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool ignore_children; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source___0 *wakeup; bool wakeup_path; bool syscore; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; atomic_t usage_count; atomic_t child_count; unsigned int disable_depth; unsigned int idle_notification; unsigned int request_pending; unsigned int deferred_resume; unsigned int run_wake; unsigned int runtime_auto; unsigned int no_callbacks; unsigned int irq_safe; unsigned int use_autosuspend; unsigned int timer_autosuspends; unsigned int memalloc_noio; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device___0 *, s32 ); struct dev_pm_qos *qos; } ; 620 struct dev_pm_domain___0 { struct dev_pm_ops___0 ops; } ; 104 struct bus_type___0 { const char *name; const char *dev_name; struct device___0 *dev_root; struct device_attribute___0 *dev_attrs; const struct attribute_group___0 **bus_groups; const struct attribute_group___0 **dev_groups; const struct attribute_group___0 **drv_groups; int (*match)(struct device___0 *dev, struct device_driver___0 *drv); int (*uevent)(struct device___0 *dev, struct kobj_uevent_env *env); int (*probe)(struct device___0 *dev); int (*remove)(struct 
device___0 *dev); void (*shutdown)(struct device___0 *dev); int (*online)(struct device___0 *dev); int (*offline)(struct device___0 *dev); int (*suspend)(struct device___0 *dev, pm_message_t state); int (*resume)(struct device___0 *dev); const struct dev_pm_ops___0 *pm; const struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ; 228 struct device_driver___0 { const char *name; struct bus_type___0 *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device___0 *dev); int (*remove)(struct device___0 *dev); void (*shutdown)(struct device___0 *dev); int (*suspend)(struct device___0 *dev, pm_message_t state); int (*resume)(struct device___0 *dev); const struct attribute_group___0 **groups; const struct dev_pm_ops___0 *pm; struct driver_private *p; } ; 351 struct class___0 { const char *name; struct module *owner; struct class_attribute___0 *class_attrs; const struct attribute_group___0 **dev_groups; struct kobject___0 *dev_kobj; int (*dev_uevent)(struct device___0 *dev, struct kobj_uevent_env *env); char * (*devnode)(struct device___0 *dev, umode_t *mode); void (*class_release)(struct class___0 *class); void (*dev_release)(struct device___0 *dev); int (*suspend)(struct device___0 *dev, pm_message_t state); int (*resume)(struct device___0 *dev); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device___0 *dev); const struct dev_pm_ops___0 *pm; struct subsys_private *p; } ; 417 struct class_attribute___0 { struct attribute___0 attr; ssize_t (*show)(struct class___0 *class, struct class_attribute___0 *attr, char *buf); ssize_t (*store)(struct class___0 *class, struct class_attribute___0 *attr, const char *buf, size_t count); } ; 500 struct device_type___0 { const char *name; const struct attribute_group___0 **groups; int (*uevent)(struct device___0 *dev, struct 
kobj_uevent_env *env); char * (*devnode)(struct device___0 *dev, umode_t *mode, kuid_t *uid, kgid_t *gid); void (*release)(struct device___0 *dev); const struct dev_pm_ops___0 *pm; } ; 512 struct device_attribute___0 { struct attribute___0 attr; ssize_t (*show)(struct device___0 *dev, struct device_attribute___0 *attr, char *buf); ssize_t (*store)(struct device___0 *dev, struct device_attribute___0 *attr, const char *buf, size_t count); } ; 729 struct device___0 { struct device___0 *parent; struct device_private *p; struct kobject___0 kobj; const char *init_name; const struct device_type___0 *type; struct mutex mutex; struct bus_type___0 *bus; struct device_driver___0 *driver; void *platform_data; void *driver_data; struct dev_pm_info___0 power; struct dev_pm_domain___0 *pm_domain; struct dev_pin_info *pins; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct acpi_dev_node acpi_node; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class___0 *class; const struct attribute_group___0 **groups; void (*release)(struct device___0 *dev); struct iommu_group *iommu_group; bool offline_disabled; bool offline; } ; 46 struct wakeup_source___0 { const char *name; struct list_head entry; spinlock_t lock; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ; 1 long int __builtin_expect(long exp, long c); 33 extern struct module __this_module; 142 int printk(const char *, ...); 14 void ldv_error(); 24 void 
ldv_stop(); 29 int ldv_undef_int(); 30 void * ldv_undef_ptr(); 33 void * __VERIFIER_alloc(size_t size); 34 void * ldv_zalloc(size_t size); 35 void * ldv_successful_alloc(size_t size); 58 int ldv_undef_int_nonpositive(); 79 void __builtin_trap(); 18 void ldv_free(); 28 void ldv_assume_and_increase(void *res); 24 void INIT_LIST_HEAD(struct list_head *list); 62 char * strcpy(char *, const char *); 93 void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *); 22 void _raw_spin_lock(raw_spinlock_t *); 32 unsigned long int _raw_spin_lock_irqsave(raw_spinlock_t *); 39 void _raw_spin_unlock(raw_spinlock_t *); 43 void _raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long); 290 raw_spinlock_t * spinlock_check(spinlock_t *lock); 301 void spin_lock(spinlock_t *lock); 341 void spin_unlock(spinlock_t *lock); 356 void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags); 69 void __init_waitqueue_head(wait_queue_head_t *, const char *, struct lock_class_key *); 146 void __wake_up(wait_queue_head_t *, unsigned int, int, void *); 825 long int prepare_to_wait_event(wait_queue_head_t *, wait_queue_t *, int); 826 void finish_wait(wait_queue_head_t *, wait_queue_t *); 31 unsigned int ioread32(void *); 37 void iowrite32(u32 , void *); 72 void pci_iounmap(struct pci_dev *, void *); 17 void * pci_iomap(struct pci_dev *, int, unsigned long); 144 void kfree(const void *); 147 void ldv_kfree_10(const void *ldv_func_arg1); 151 void ldv_kfree_11(const void *ldv_func_arg1); 155 void ldv_kfree_12(const void *ldv_func_arg1); 159 void ldv_kfree_13(const void *ldv_func_arg1); 637 void * kzalloc(size_t size, gfp_t flags); 33 struct v4l2_streamparm *mcam_v4l_ioctl_ops_group2 = 0; 34 int LDV_IN_INTERRUPT = 1; 35 int __VERIFIER_nondet_int(); 36 struct i2c_adapter *cafe_smbus_algo_group0 = 0; 37 struct file *mcam_v4l_fops_group0 = 0; 38 struct v4l2_format *mcam_v4l_ioctl_ops_group1 = 0; 39 int ldv_state_variable_6 = 0; 40 int ldv_state_variable_0 = 0; 41 struct 
vb2_queue *mcam_vb2_ops_group0 = 0; 42 struct vb2_queue *mcam_vb2_sg_ops_group1 = 0; 43 int ldv_state_variable_5 = 0; 44 int ldv_state_variable_3 = 0; 45 struct vb2_buffer *mcam_vb2_sg_ops_group0 = 0; 46 struct pci_dev *cafe_pci_driver_group1 = 0; 47 int ldv_state_variable_2 = 0; 48 int ref_cnt = 0; 49 int ldv_state_variable_1 = 0; 50 int ldv_state_variable_7 = 0; 51 int ldv_state_variable_4 = 0; 52 struct file *mcam_v4l_ioctl_ops_group3 = 0; 53 struct v4l2_buffer *mcam_v4l_ioctl_ops_group0 = 0; 55 void ldv_initialyze_v4l2_file_operations_2(); 56 void ldv_initialyze_i2c_algorithm_7(); 57 void ldv_initialyze_vb2_ops_4(); 58 void ldv_initialyze_vb2_ops_5(); 59 void ldv_initialyze_v4l2_ioctl_ops_3(); 839 void * dev_get_drvdata(const struct device *dev); 844 void dev_set_drvdata(struct device *dev, void *data); 1044 int dev_err(const struct device *, const char *, ...); 1046 int dev_warn(const struct device *, const char *, ...); 924 int pci_enable_device(struct pci_dev *); 941 void pci_disable_device(struct pci_dev *); 944 void pci_set_master(struct pci_dev *); 999 int pci_save_state(struct pci_dev *); 1000 void pci_restore_state(struct pci_dev *); 1109 int __pci_register_driver(struct pci_driver *, struct module *, const char *); 1118 void pci_unregister_driver(struct pci_driver *); 377 long int schedule_timeout(long); 449 void * i2c_get_adapdata(const struct i2c_adapter *dev); 454 void i2c_set_adapdata(struct i2c_adapter *dev, void *data); 505 int i2c_add_adapter(struct i2c_adapter *); 506 void i2c_del_adapter(struct i2c_adapter *); 123 int request_threaded_irq(unsigned int, irqreturn_t (*)(int, void *), irqreturn_t (*)(int, void *), unsigned long, const char *, void *); 128 int ldv_request_threaded_irq_9(unsigned int ldv_func_arg1, irqreturn_t (*ldv_func_arg2)(int, void *), irqreturn_t (*ldv_func_arg3)(int, void *), unsigned long ldv_func_arg4, const char *ldv_func_arg5, void *ldv_func_arg6); 132 int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void 
*), unsigned long flags, const char *name, void *dev); 146 void free_irq(unsigned int, void *); 10 void __const_udelay(unsigned long); 46 void msleep(unsigned int); 200 void mcam_reg_write(struct mcam_camera *cam, unsigned int reg, unsigned int val); 206 unsigned int mcam_reg_read(struct mcam_camera *cam, unsigned int reg); 213 void mcam_reg_write_mask(struct mcam_camera *cam, unsigned int reg, unsigned int val, unsigned int mask); 228 void mcam_reg_set_bit(struct mcam_camera *cam, unsigned int reg, unsigned int val); 237 int mccic_register(struct mcam_camera *cam); 238 int mccic_irq(struct mcam_camera *cam, unsigned int irqs); 239 void mccic_shutdown(struct mcam_camera *cam); 241 void mccic_suspend(struct mcam_camera *cam); 242 int mccic_resume(struct mcam_camera *cam); 203 struct cafe_camera * to_cam(struct v4l2_device *dev); 210 int cafe_smbus_write_done(struct mcam_camera *mcam); 227 int cafe_smbus_write_data(struct cafe_camera *cam, u16 addr, u8 command, u8 value); 287 int cafe_smbus_read_done(struct mcam_camera *mcam); 306 int cafe_smbus_read_data(struct cafe_camera *cam, u16 addr, u8 command, u8 *value); 349 int cafe_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags, char rw, u8 command, int size, union i2c_smbus_data *data); 373 void cafe_smbus_enable_irq(struct cafe_camera *cam); 382 u32 cafe_smbus_func(struct i2c_adapter *adapter); 388 struct i2c_algorithm cafe_smbus_algo = { 0, &cafe_smbus_xfer, &cafe_smbus_func }; 393 int cafe_smbus_setup(struct cafe_camera *cam); 414 void cafe_smbus_shutdown(struct cafe_camera *cam); 425 void cafe_ctlr_init(struct mcam_camera *mcam); 461 int cafe_ctlr_power_up(struct mcam_camera *mcam); 480 void cafe_ctlr_power_down(struct mcam_camera *mcam); 492 irqreturn_t cafe_irq(int irq, void *data); 516 int cafe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); 608 void cafe_shutdown(struct cafe_camera *cam); 617 void cafe_pci_remove(struct pci_dev *pdev); 635 int cafe_pci_suspend(struct 
pci_dev *pdev, pm_message_t state); 650 int cafe_pci_resume(struct pci_dev *pdev); 669 struct pci_device_id cafe_ids[2U] = { { 4523U, 16642U, 4294967295U, 4294967295U, 0U, 0U, 0UL }, { 0U, 0U, 0U, 0U, 0U, 0U, 0UL } }; 675 const struct pci_device_id __mod_pci__cafe_ids_device_table = { }; 677 struct pci_driver cafe_pci_driver = { { 0, 0 }, "cafe1000-ccic", (const struct pci_device_id *)(&cafe_ids), &cafe_pci_probe, &cafe_pci_remove, &cafe_pci_suspend, 0, 0, &cafe_pci_resume, 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { { { { { { 0U } }, 0U, 0U, 0, { 0, { 0, 0 }, 0, 0, 0UL } } } }, { 0, 0 } } }; 691 int cafe_init(); 709 void cafe_exit(); 717 int ldv_retval_0 = 0; 718 void ldv_initialize(); 719 void ldv_check_final_state(); 720 int ldv_retval_2 = 0; 729 void ldv_main_exported_4(); 730 void ldv_main_exported_1(); 731 void ldv_main_exported_3(); 732 void ldv_main_exported_2(); 733 void ldv_main_exported_5(); 740 void entry_point(); 1 void * __builtin_memcpy(void *, const void *, unsigned long); 358 extern struct pv_irq_ops pv_irq_ops; 72 void set_bit(long nr, volatile unsigned long *addr); 110 void clear_bit(long nr, volatile unsigned long *addr); 204 int test_and_set_bit(long nr, volatile unsigned long *addr); 308 int constant_test_bit(long nr, const volatile unsigned long *addr); 314 int variable_test_bit(long nr, const volatile unsigned long *addr); 53 int __dynamic_dev_dbg(struct _ddebug *, const struct device *, const char *, ...); 47 void __list_add(struct list_head *, struct list_head *, struct list_head *); 60 void list_add(struct list_head *new, struct list_head *head); 111 void __list_del_entry(struct list_head *); 142 void list_del_init(struct list_head *entry); 186 int list_empty(const struct list_head *head); 71 void warn_slowpath_null(const char *, const int); 55 void * memset(void *, int, size_t ); 26 size_t strlcpy(char *, const char *, size_t ); 802 unsigned long int arch_local_save_flags(); 155 int arch_irqs_disabled_flags(unsigned long 
flags); 120 void __mutex_init(struct mutex *, const char *, struct lock_class_key *); 139 void mutex_lock_nested(struct mutex *, unsigned int); 175 void mutex_unlock(struct mutex *); 147 void ldv_kfree_28(const void *ldv_func_arg1); 912 void * lowmem_page_address(const struct page *page); 128 int ldv_request_threaded_irq_27(unsigned int ldv_func_arg1, irqreturn_t (*ldv_func_arg2)(int, void *), irqreturn_t (*ldv_func_arg3)(int, void *), unsigned long ldv_func_arg4, const char *ldv_func_arg5, void *ldv_func_arg6); 514 void __tasklet_schedule(struct tasklet_struct *); 516 void tasklet_schedule(struct tasklet_struct *t); 572 void tasklet_init(struct tasklet_struct *, void (*)(unsigned long), unsigned long); 95 struct page * sg_page(struct scatterlist *sg); 218 void * sg_virt(struct scatterlist *sg); 224 struct scatterlist * sg_next(struct scatterlist *); 69 int valid_dma_direction(int dma_direction); 76 int is_device_dma_capable(struct device *dev); 131 void kmemcheck_mark_initialized(void *address, unsigned int n); 47 void debug_dma_map_sg(struct device *, struct scatterlist *, int, int, int); 50 void debug_dma_unmap_sg(struct device *, struct scatterlist *, int, int); 53 void debug_dma_alloc_coherent(struct device *, size_t , dma_addr_t , void *); 56 void debug_dma_free_coherent(struct device *, size_t , void *, dma_addr_t ); 27 extern struct device x86_dma_fallback_dev; 30 extern struct dma_map_ops *dma_ops; 32 struct dma_map_ops * get_dma_ops(struct device *dev); 42 int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, struct dma_attrs *attrs); 59 void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, struct dma_attrs *attrs); 103 unsigned long int dma_alloc_coherent_mask(struct device *dev, gfp_t gfp); 115 gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp); 131 void * dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t 
gfp, struct dma_attrs *attrs); 160 void dma_free_attrs(struct device *dev, size_t size, void *vaddr, dma_addr_t bus, struct dma_attrs *attrs); 149 int __video_register_device(struct video_device *, int, int, int, struct module *); 156 int video_register_device(struct video_device *vdev, int type, int nr); 163 int ldv_video_register_device_29(struct video_device *vdev, int type, int nr); 176 void video_unregister_device(struct video_device *); 179 void ldv_video_unregister_device_30(struct video_device *ldv_func_arg1); 192 void video_device_release_empty(struct video_device *); 215 void * video_get_drvdata(struct video_device *vdev); 220 void video_set_drvdata(struct video_device *vdev, void *data); 225 struct video_device * video_devdata(struct file *); 229 void * video_drvdata(struct file *file); 123 struct v4l2_subdev * v4l2_i2c_new_subdev_board(struct v4l2_device *, struct i2c_adapter *, struct i2c_board_info *, const unsigned short *); 90 void v4l2_fill_pix_format(struct v4l2_pix_format *pix_fmt, const struct v4l2_mbus_framefmt *mbus_fmt); 99 void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt, const struct v4l2_pix_format *pix_fmt, enum v4l2_mbus_pixelcode code); 79 int v4l2_device_register(struct device *, struct v4l2_device *); 106 void v4l2_device_unregister(struct v4l2_device *); 330 long int video_ioctl2(struct file *, unsigned int, unsigned long); 343 int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *, unsigned int, struct lock_class_key *, const char *); 370 void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *); 437 void * vb2_plane_vaddr(struct vb2_buffer *, unsigned int); 438 void * vb2_plane_cookie(struct vb2_buffer *, unsigned int); 440 void vb2_buffer_done(struct vb2_buffer *, enum vb2_buffer_state ); 444 int vb2_querybuf(struct vb2_queue *, struct v4l2_buffer *); 445 int vb2_reqbufs(struct vb2_queue *, struct v4l2_requestbuffers *); 450 int vb2_queue_init(struct vb2_queue *); 452 void vb2_queue_release(struct vb2_queue *); 
455 int vb2_qbuf(struct vb2_queue *, struct v4l2_buffer *); 457 int vb2_dqbuf(struct vb2_queue *, struct v4l2_buffer *, bool ); 459 int vb2_streamon(struct vb2_queue *, enum v4l2_buf_type ); 460 int vb2_streamoff(struct vb2_queue *, enum v4l2_buf_type ); 462 int vb2_mmap(struct vb2_queue *, struct vm_area_struct *); 470 unsigned int vb2_poll(struct vb2_queue *, struct file *, poll_table *); 471 size_t vb2_read(struct vb2_queue *, char *, size_t , loff_t *, int); 546 void * vb2_get_drv_priv(struct vb2_queue *q); 557 void vb2_set_plane_payload(struct vb2_buffer *vb, unsigned int plane_no, unsigned long size); 18 extern const struct vb2_mem_ops vb2_vmalloc_memops; 20 dma_addr_t vb2_dma_contig_plane_dma_addr(struct vb2_buffer *vb, unsigned int plane_no); 27 void * vb2_dma_contig_init_ctx(struct device *); 28 void vb2_dma_contig_cleanup_ctx(void *); 30 extern const struct vb2_mem_ops vb2_dma_contig_memops; 18 struct sg_table * vb2_dma_sg_plane_desc(struct vb2_buffer *vb, unsigned int plane_no); 24 extern const struct vb2_mem_ops vb2_dma_sg_memops; 64 int mcam_buffer_mode_supported(enum mcam_buffer_mode mode); 222 void mcam_reg_clear_bit(struct mcam_camera *cam, unsigned int reg, unsigned int val); 106 bool alloc_bufs_at_read = 0; 115 int n_dma_bufs = 3; 121 int dma_buf_size = 614400; 132 bool flip = 0; 138 int buffer_mode = -1; 168 struct mcam_format_struct mcam_formats[8U] = { { (__u8 *)"YUYV 4:2:2", 1448695129U, 2, 0, 8200 }, { (__u8 *)"UYVY 4:2:2", 1498831189U, 2, 0, 8200 }, { (__u8 *)"YUV 4:2:2 PLANAR", 1345466932U, 2, 1, 8200 }, { (__u8 *)"YUV 4:2:0 PLANAR", 842093913U, 2, 1, 8200 }, { (__u8 *)"YVU 4:2:0 PLANAR", 842094169U, 2, 1, 8200 }, { (__u8 *)"RGB 444", 875836498U, 2, 0, 4098 }, { (__u8 *)"RGB 565", 1346520914U, 2, 0, 4104 }, { (__u8 *)"Raw RGB Bayer", 825770306U, 1, 0, 12289 } }; 228 struct mcam_format_struct * mcam_find_format(u32 pixelformat); 242 const struct v4l2_pix_format mcam_def_pix_format = { 640U, 480U, 1448695129U, 1U, 1280U, 614400U, 0U, 0U, 0U 
}; 251 const enum v4l2_mbus_pixelcode mcam_def_mbus_code = 8200; 286 struct mcam_vb_buffer * vb_to_mvb(struct vb2_buffer *vb); 294 void mcam_buffer_done(struct mcam_camera *cam, int frame, struct vb2_buffer *vbuf); 319 void mcam_reset_buffers(struct mcam_camera *cam); 330 int mcam_needs_config(struct mcam_camera *cam); 335 void mcam_set_config_needed(struct mcam_camera *cam, int needed); 348 void mcam_ctlr_start(struct mcam_camera *cam); 355 void mcam_ctlr_stop(struct mcam_camera *cam); 360 void mcam_enable_mipi(struct mcam_camera *mcam); 389 void mcam_disable_mipi(struct mcam_camera *mcam); 409 int mcam_alloc_dma_bufs(struct mcam_camera *cam, int loadtime); 450 void mcam_free_dma_bufs(struct mcam_camera *cam); 466 void mcam_ctlr_dma_vmalloc(struct mcam_camera *cam); 488 void mcam_frame_tasklet(unsigned long data); 530 int mcam_check_dma_buffers(struct mcam_camera *cam); 539 void mcam_vmalloc_done(struct mcam_camera *cam, int frame); 572 bool mcam_fmt_is_planar(__u32 pfmt); 588 void mcam_set_contig_buffer(struct mcam_camera *cam, int frame); 648 void mcam_ctlr_dma_contig(struct mcam_camera *cam); 659 void mcam_dma_contig_done(struct mcam_camera *cam, int frame); 682 void mcam_sg_next_buffer(struct mcam_camera *cam); 705 void mcam_ctlr_dma_sg(struct mcam_camera *cam); 735 void mcam_dma_sg_done(struct mcam_camera *cam, int frame); 774 void mcam_sg_restart(struct mcam_camera *cam); 798 void mcam_ctlr_image(struct mcam_camera *cam); 891 int mcam_ctlr_configure(struct mcam_camera *cam); 904 void mcam_ctlr_irq_enable(struct mcam_camera *cam); 914 void mcam_ctlr_irq_disable(struct mcam_camera *cam); 921 void mcam_ctlr_init(struct mcam_camera *cam); 948 void mcam_ctlr_stop_dma(struct mcam_camera *cam); 981 int mcam_ctlr_power_up(struct mcam_camera *cam); 998 void mcam_ctlr_power_down(struct mcam_camera *cam); 1018 int __mcam_cam_reset(struct mcam_camera *cam); 1027 int mcam_cam_init(struct mcam_camera *cam); 1047 int mcam_cam_set_flip(struct mcam_camera *cam); 1058 int 
mcam_cam_configure(struct mcam_camera *cam); 1077 int mcam_read_setup(struct mcam_camera *cam); 1127 int mcam_vb_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt, unsigned int *nbufs, unsigned int *num_planes, unsigned int *sizes, void **alloc_ctxs); 1145 void mcam_vb_buf_queue(struct vb2_buffer *vb); 1168 void mcam_vb_wait_prepare(struct vb2_queue *vq); 1175 void mcam_vb_wait_finish(struct vb2_queue *vq); 1185 int mcam_vb_start_streaming(struct vb2_queue *vq, unsigned int count); 1217 void mcam_vb_stop_streaming(struct vb2_queue *vq); 1246 const struct vb2_ops mcam_vb2_ops = { &mcam_vb_queue_setup, &mcam_vb_wait_prepare, &mcam_vb_wait_finish, 0, 0, 0, 0, &mcam_vb_start_streaming, &mcam_vb_stop_streaming, &mcam_vb_buf_queue }; 1261 int mcam_vb_sg_buf_init(struct vb2_buffer *vb); 1277 int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb); 1298 void mcam_vb_sg_buf_finish(struct vb2_buffer *vb); 1308 void mcam_vb_sg_buf_cleanup(struct vb2_buffer *vb); 1319 const struct vb2_ops mcam_vb2_sg_ops = { &mcam_vb_queue_setup, &mcam_vb_wait_prepare, &mcam_vb_wait_finish, &mcam_vb_sg_buf_init, &mcam_vb_sg_buf_prepare, &mcam_vb_sg_buf_finish, &mcam_vb_sg_buf_cleanup, &mcam_vb_start_streaming, &mcam_vb_stop_streaming, &mcam_vb_buf_queue }; 1334 int mcam_setup_vb2(struct mcam_camera *cam); 1380 void mcam_cleanup_vb2(struct mcam_camera *cam); 1395 int mcam_vidioc_streamon(struct file *filp, void *priv, enum v4l2_buf_type type); 1408 int mcam_vidioc_streamoff(struct file *filp, void *priv, enum v4l2_buf_type type); 1421 int mcam_vidioc_reqbufs(struct file *filp, void *priv, struct v4l2_requestbuffers *req); 1434 int mcam_vidioc_querybuf(struct file *filp, void *priv, struct v4l2_buffer *buf); 1446 int mcam_vidioc_qbuf(struct file *filp, void *priv, struct v4l2_buffer *buf); 1458 int mcam_vidioc_dqbuf(struct file *filp, void *priv, struct v4l2_buffer *buf); 1470 int mcam_vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap); 1482 int 
mcam_vidioc_enum_fmt_vid_cap(struct file *filp, void *priv, struct v4l2_fmtdesc *fmt); 1493 int mcam_vidioc_try_fmt_vid_cap(struct file *filp, void *priv, struct v4l2_format *fmt); 1522 int mcam_vidioc_s_fmt_vid_cap(struct file *filp, void *priv, struct v4l2_format *fmt); 1571 int mcam_vidioc_g_fmt_vid_cap(struct file *filp, void *priv, struct v4l2_format *f); 1583 int mcam_vidioc_enum_input(struct file *filp, void *priv, struct v4l2_input *input); 1595 int mcam_vidioc_g_input(struct file *filp, void *priv, unsigned int *i); 1601 int mcam_vidioc_s_input(struct file *filp, void *priv, unsigned int i); 1609 int mcam_vidioc_s_std(struct file *filp, void *priv, v4l2_std_id a); 1614 int mcam_vidioc_g_std(struct file *filp, void *priv, v4l2_std_id *a); 1624 int mcam_vidioc_g_parm(struct file *filp, void *priv, struct v4l2_streamparm *parms); 1637 int mcam_vidioc_s_parm(struct file *filp, void *priv, struct v4l2_streamparm *parms); 1650 int mcam_vidioc_enum_framesizes(struct file *filp, void *priv, struct v4l2_frmsizeenum *sizes); 1662 int mcam_vidioc_enum_frameintervals(struct file *filp, void *priv, struct v4l2_frmivalenum *interval); 1675 int mcam_vidioc_g_register(struct file *file, void *priv, struct v4l2_dbg_register *reg); 1687 int mcam_vidioc_s_register(struct file *file, void *priv, const struct v4l2_dbg_register *reg); 1699 const struct v4l2_ioctl_ops mcam_v4l_ioctl_ops = { &mcam_vidioc_querycap, 0, 0, &mcam_vidioc_enum_fmt_vid_cap, 0, 0, 0, 0, 0, &mcam_vidioc_g_fmt_vid_cap, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, &mcam_vidioc_s_fmt_vid_cap, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, &mcam_vidioc_try_fmt_vid_cap, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, &mcam_vidioc_reqbufs, &mcam_vidioc_querybuf, &mcam_vidioc_qbuf, 0, &mcam_vidioc_dqbuf, 0, 0, 0, 0, 0, &mcam_vidioc_streamon, &mcam_vidioc_streamoff, &mcam_vidioc_g_std, &mcam_vidioc_s_std, 0, &mcam_vidioc_enum_input, &mcam_vidioc_g_input, &mcam_vidioc_s_input, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, &mcam_vidioc_g_parm, &mcam_vidioc_s_parm, 0, 0, 0, 0, 0, 0, 0, 0, &mcam_vidioc_g_register, &mcam_vidioc_s_register, 0, &mcam_vidioc_enum_framesizes, &mcam_vidioc_enum_frameintervals, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; 1730 int mcam_v4l_open(struct file *filp); 1758 int mcam_v4l_release(struct file *filp); 1780 ssize_t mcam_v4l_read(struct file *filp, char *buffer, size_t len, loff_t *pos); 1795 unsigned int mcam_v4l_poll(struct file *filp, struct poll_table_struct *pt); 1808 int mcam_v4l_mmap(struct file *filp, struct vm_area_struct *vma); 1821 const struct v4l2_file_operations mcam_v4l_fops = { &__this_module, &mcam_v4l_read, 0, &mcam_v4l_poll, 0, &video_ioctl2, 0, 0, &mcam_v4l_mmap, &mcam_v4l_open, &mcam_v4l_release }; 1836 struct video_device mcam_v4l_template = { { { 0, 0 }, 0, 0U, 0, 0U, 0U, 0UL, 0U, 0U, 0U, 0U, 0U, 0, 0, 0, 0, 0, 0, { .alsa = { 0U, 0U, 0U } } }, &mcam_v4l_fops, { 0, 0, { 0, { 0, 0 }, 0, 0, 0, 0, { { 0 } }, { { { 0L }, { 0, 0 }, 0, { 0, { 0, 0 }, 0, 0, 0UL } }, { { 0, 0 }, 0UL, 0, 0, 0UL, 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, { 0, 0 }, 0, 0, 0UL } }, 0, 0 }, 0U, 0U, 0U, 0U, 0U }, 0, 0, { { 0 }, { { { { { 0U } }, 0U, 0U, 0, { 0, { 0, 0 }, 0, 0, 0UL } } } }, { 0, 0 }, 0, 0, 0, { 0, { 0, 0 }, 0, 0, 0UL } }, 0, 0, 0, 0, { { 0 }, 0U, 0U, 0, 0, 0, 0, 0, 0, 0, { { { { { 0U } }, 0U, 0U, 0, { 0, { 0, 0 }, 0, 0, 0UL } } } }, { 0, 0 }, { 0U, { { { { { { 0U } }, 0U, 0U, 0, { 0, { 0, 0 }, 0, 0, 0UL } } } }, { 0, 0 } } }, 0, 0, 0, { { 0, 0 }, 0UL, 0, 0, 0UL, 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, { 0, 0 }, 0, 0, 0UL } }, 0UL, { { 0L }, { 0, 0 }, 0, { 0, { 0, 0 }, 0, 0, 0UL } }, { { { { { { 0U } }, 0U, 0U, 0, { 0, { 0, 0 }, 0, 0, 0UL } } } }, { 0, 0 } }, { 0 }, { 0 }, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0, 0, 0, 0, 0UL, 0UL, 0UL, 0UL, 0, 0, 0 }, 0, 0, 0, 0, 0ULL, 0UL, 0, { 0, 0 }, 0, 0, { 0, 0 }, 0, { 0 }, 0U, 0U, { { { { { 0U } }, 0U, 0U, 0, { 0, { 0, 0 }, 0, 0, 0UL } } } }, { 0, 0 }, { 0, { 
0, 0 }, { { 0 } } }, 0, 0, 0, 0, 0, 0 }, 0, 0, 0, 0, 0, 0, { 'm', 'c', 'a', 'm', '\x0' }, 0, 0, 0, 0U, 0UL, 0, { { { { { 0U } }, 0U, 0U, 0, { 0, { 0, 0 }, 0, 0, 0UL } } } }, { 0, 0 }, 0, 4096ULL, &video_device_release_empty, &mcam_v4l_ioctl_ops, { 0UL, 0UL, 0UL }, { 0UL, 0UL, 0UL }, 0 }; 1849 void mcam_frame_complete(struct mcam_camera *cam, int frame); 1923 struct ov7670_config sensor_cfg = { 320, 240, 0, 0, 0, 0 }; 2094 int ldv_probe_1(); 2095 int ldv_retval_1 = 0; 14 void ldv_error___1(); 24 void ldv_stop___1(); 7 bool ldv_is_err(const void *ptr); 14 void * ldv_err_ptr(long error); 21 long int ldv_ptr_err(const void *ptr); 28 bool ldv_is_err_or_null(const void *ptr); 8 int ldv_alloc_count = 0; 11 void (*gadget_release_pointer)(struct device___0 *_dev) = 0; 19 void * ldv_alloc(size_t size); 26 void * ldv_zero_alloc(size_t size); 43 void * ldv_nonzero_alloc(size_t size); 56 void * ldv_alloc_without_counter(size_t size); 64 void * ldv_zalloc_without_counter(size_t size); 80 void ldv_condition_free(); 89 void ldv_save_gadget_release(void (*func)(struct device___0 *_dev)); 94 int ldv_dev_set_drvdata(struct device___0 *dev, void *data); 100 void * ldv_dev_get_drvdata(const struct device___0 *dev); return ; } { 742 const struct pci_device_id *ldvarg1; 743 pm_message_t ldvarg0; 744 char ldvarg39; 745 union i2c_smbus_data *ldvarg41; 746 u8 ldvarg43; 747 u16 ldvarg42; 748 unsigned short ldvarg40; 749 int ldvarg38; 750 int tmp; 751 int tmp___0; 752 int tmp___1; 753 int tmp___2; 754 void *tmp___3; 741 ldv_initialize() { /* Function call is skipped due to function is undefined */} 752 ldv_state_variable_6 = 0; 753 ldv_state_variable_3 = 0; 754 ldv_state_variable_7 = 0; 755 ldv_state_variable_2 = 0; 756 ldv_state_variable_1 = 0; 757 ldv_state_variable_4 = 0; 758 ref_cnt = 0; 759 ldv_state_variable_0 = 1; 760 ldv_state_variable_5 = 0; 761 ldv_35838:; 762 tmp = __VERIFIER_nondet_int() { /* Function call is skipped due to function is undefined */} 762 switch (tmp) 895 tmp___2 = 
__VERIFIER_nondet_int() { /* Function call is skipped due to function is undefined */} 895 switch (tmp___2) { 693 int ret; 695 printk("\rMarvell M88ALP01 \'CAFE\' Camera Controller version %d\n", 2) { /* Function call is skipped due to function is undefined */} 697 ret = __pci_register_driver(&cafe_pci_driver, &__this_module, "cafe_ccic") { /* Function call is skipped due to function is undefined */} 702 ret = 0; 703 out:; } 917 ldv_state_variable_0 = 2; 918 ldv_state_variable_5 = 1; { 2109 void *tmp; 2108 tmp = ldv_undef_ptr() { /* Function call is skipped due to function is undefined */} 2108 mcam_vb2_ops_group0 = (struct vb2_queue *)tmp; } 920 ldv_state_variable_2 = 1; { 2100 void *tmp; 2099 tmp = ldv_undef_ptr() { /* Function call is skipped due to function is undefined */} 2099 mcam_v4l_fops_group0 = (struct file *)tmp; } 922 ldv_state_variable_7 = 1; { 726 void *tmp; 725 tmp = ldv_undef_ptr() { /* Function call is skipped due to function is undefined */} 725 cafe_smbus_algo_group0 = (struct i2c_adapter *)tmp; } 924 ldv_state_variable_3 = 1; { 2113 void *tmp; 2114 void *tmp___0; 2115 void *tmp___1; 2116 void *tmp___2; 2112 tmp = ldv_undef_ptr() { /* Function call is skipped due to function is undefined */} 2112 mcam_v4l_ioctl_ops_group0 = (struct v4l2_buffer *)tmp; 2113 tmp___0 = ldv_undef_ptr() { /* Function call is skipped due to function is undefined */} 2113 mcam_v4l_ioctl_ops_group1 = (struct v4l2_format *)tmp___0; 2114 tmp___1 = ldv_undef_ptr() { /* Function call is skipped due to function is undefined */} 2114 mcam_v4l_ioctl_ops_group3 = (struct file *)tmp___1; 2115 tmp___2 = ldv_undef_ptr() { /* Function call is skipped due to function is undefined */} 2115 mcam_v4l_ioctl_ops_group2 = (struct v4l2_streamparm *)tmp___2; } 926 ldv_state_variable_4 = 1; { 2104 void *tmp; 2105 void *tmp___0; 2103 tmp = ldv_undef_ptr() { /* Function call is skipped due to function is undefined */} 2103 mcam_vb2_sg_ops_group0 = (struct vb2_buffer *)tmp; 2104 tmp___0 = 
ldv_undef_ptr() { /* Function call is skipped due to function is undefined */} 2104 mcam_vb2_sg_ops_group1 = (struct vb2_queue *)tmp___0; } 928 ldv_state_variable_6 = 1; { 37 void *ret; 38 void *tmp; 37 tmp = __VERIFIER_alloc(size) { /* Function call is skipped due to function is undefined */} 37 ret = tmp; } 929 cafe_pci_driver_group1 = (struct pci_dev *)tmp___3; 933 goto ldv_35833; 938 goto ldv_35820; 949 goto ldv_35838; 762 tmp = __VERIFIER_nondet_int() { /* Function call is skipped due to function is undefined */} 762 switch (tmp) 767 tmp___0 = __VERIFIER_nondet_int() { /* Function call is skipped due to function is undefined */} 767 switch (tmp___0) { 518 int ret; 519 struct cafe_camera *cam; 520 struct mcam_camera *mcam; 521 void *tmp; 522 struct lock_class_key __key; 523 struct lock_class_key __key___0; 524 void *tmp___0; 526 ret = -12; { 999 void *res; 1000 void *tmp; 1000 tmp = ldv_zalloc(size) { /* Function call is skipped due to function is undefined */} 1000 res = tmp; } 527 cam = (struct cafe_camera *)tmp; 530 cam->pdev = pdev; 531 mcam = &(cam->mcam); 532 mcam->chip_id = 0; 533 __raw_spin_lock_init(&(mcam->dev_lock.ldv_6411.rlock), "&(&mcam->dev_lock)->rlock", &__key) { /* Function call is skipped due to function is undefined */} 534 __init_waitqueue_head(&(cam->smbus_wait), "&cam->smbus_wait", &__key___0) { /* Function call is skipped due to function is undefined */} 535 mcam->plat_power_up = &cafe_ctlr_power_up; 536 mcam->plat_power_down = &cafe_ctlr_power_down; 537 mcam->dev = &(pdev->dev); 542 mcam->clock_speed = 45; 543 mcam->use_smbus = 1; 549 mcam->buffer_mode = 0; 553 ret = pci_enable_device(pdev) { /* Function call is skipped due to function is undefined */} 556 pci_set_master(pdev) { /* Function call is skipped due to function is undefined */} 558 ret = -5; 559 tmp___0 = pci_iomap(pdev, 0, 0UL) { /* Function call is skipped due to function is undefined */} 559 mcam->regs = (unsigned char *)tmp___0; 560 unsigned long __CPAchecker_TMP_0 = 
(unsigned long)(mcam->regs); 564 unsigned int __CPAchecker_TMP_1; 564 __CPAchecker_TMP_1 = (((unsigned int)(((pdev->resource)[0]).end)) - ((unsigned int)(((pdev->resource)[0]).start))) + 1U; 564 mcam->regs_size = __CPAchecker_TMP_1; { 135 int tmp; { } 1010 ldv_func_ret_type ldv_func_res; 1011 int tmp; 1012 int tmp___0; 1009 tmp = request_threaded_irq(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4, ldv_func_arg5, ldv_func_arg6) { /* Function call is skipped due to function is undefined */} 1009 ldv_func_res = tmp; { } 60 int ret; 61 int tmp; 60 tmp = ldv_undef_int() { /* Function call is skipped due to function is undefined */} 60 ret = tmp; } { 427 unsigned long flags; 428 raw_spinlock_t *tmp; 429 raw_spinlock_t *tmp___0; 429 flags = _raw_spin_lock_irqsave(tmp) { /* Function call is skipped due to function is undefined */} { 203 void *__CPAchecker_TMP_0 = (void *)(cam->regs); 203 iowrite32(val, __CPAchecker_TMP_0 + ((unsigned long)reg)) { /* Function call is skipped due to function is undefined */} 204 return ;; } { 203 void *__CPAchecker_TMP_0 = (void *)(cam->regs); 203 iowrite32(val, __CPAchecker_TMP_0 + ((unsigned long)reg)) { /* Function call is skipped due to function is undefined */} 204 return ;; } { 203 void *__CPAchecker_TMP_0 = (void *)(cam->regs); 203 iowrite32(val, __CPAchecker_TMP_0 + ((unsigned long)reg)) { /* Function call is skipped due to function is undefined */} 204 return ;; } { 203 void *__CPAchecker_TMP_0 = (void *)(cam->regs); 203 iowrite32(val, __CPAchecker_TMP_0 + ((unsigned long)reg)) { /* Function call is skipped due to function is undefined */} 204 return ;; } { 203 void *__CPAchecker_TMP_0 = (void *)(cam->regs); 203 iowrite32(val, __CPAchecker_TMP_0 + ((unsigned long)reg)) { /* Function call is skipped due to function is undefined */} 204 return ;; } { 358 _raw_spin_unlock_irqrestore(&(lock->ldv_6411.rlock), flags) { /* Function call is skipped due to function is undefined */} } 448 msleep(5U) { /* Function call is skipped 
due to function is undefined */} 449 flags = _raw_spin_lock_irqsave(tmp___0) { /* Function call is skipped due to function is undefined */} { 203 void *__CPAchecker_TMP_0 = (void *)(cam->regs); 203 iowrite32(val, __CPAchecker_TMP_0 + ((unsigned long)reg)) { /* Function call is skipped due to function is undefined */} 204 return ;; } { { 216 unsigned int v; 217 unsigned int tmp; { 208 unsigned int tmp; 209 void *__CPAchecker_TMP_0 = (void *)(cam->regs); 209 tmp = ioread32(__CPAchecker_TMP_0 + ((unsigned long)reg)) { /* Function call is skipped due to function is undefined */} 209 return tmp;; } 216 v = tmp; 218 v = ((~mask) & v) | (val & mask); { 203 void *__CPAchecker_TMP_0 = (void *)(cam->regs); 203 iowrite32(val, __CPAchecker_TMP_0 + ((unsigned long)reg)) { /* Function call is skipped due to function is undefined */} 204 return ;; } 220 return ;; } 232 return ;; } { 203 void *__CPAchecker_TMP_0 = (void *)(cam->regs); 203 iowrite32(val, __CPAchecker_TMP_0 + ((unsigned long)reg)) { /* Function call is skipped due to function is undefined */} 204 return ;; } { } 358 _raw_spin_unlock_irqrestore(&(lock->ldv_6411.rlock), flags) { /* Function call is skipped due to function is undefined */} } { { 203 void *__CPAchecker_TMP_0 = (void *)(cam->regs); 203 iowrite32(val, __CPAchecker_TMP_0 + ((unsigned long)reg)) { /* Function call is skipped due to function is undefined */} 204 return ;; } { 203 void *__CPAchecker_TMP_0 = (void *)(cam->regs); 203 iowrite32(val, __CPAchecker_TMP_0 + ((unsigned long)reg)) { /* Function call is skipped due to function is undefined */} 204 return ;; } { 203 void *__CPAchecker_TMP_0 = (void *)(cam->regs); 203 iowrite32(val, __CPAchecker_TMP_0 + ((unsigned long)reg)) { /* Function call is skipped due to function is undefined */} 204 return ;; } { } 203 void *__CPAchecker_TMP_0 = (void *)(cam->regs); 203 iowrite32(val, __CPAchecker_TMP_0 + ((unsigned long)reg)) { /* Function call is skipped due to function is undefined */} 204 return ;; } { 395 
struct i2c_adapter *adap; 396 int ret; 397 void *tmp; { 999 void *res; 1000 void *tmp; 1000 tmp = ldv_zalloc(size) { /* Function call is skipped due to function is undefined */} 1000 res = tmp; } 398 adap = (struct i2c_adapter *)tmp; 401 cam->mcam.i2c_adapter = adap; { 375 unsigned long flags; 376 raw_spinlock_t *tmp; 377 flags = _raw_spin_lock_irqsave(tmp) { /* Function call is skipped due to function is undefined */} { { 216 unsigned int v; 217 unsigned int tmp; { 208 unsigned int tmp; 209 void *__CPAchecker_TMP_0 = (void *)(cam->regs); 209 tmp = ioread32(__CPAchecker_TMP_0 + ((unsigned long)reg)) { /* Function call is skipped due to function is undefined */} 209 return tmp;; } 216 v = tmp; 218 v = ((~mask) & v) | (val & mask); { 203 void *__CPAchecker_TMP_0 = (void *)(cam->regs); 203 iowrite32(val, __CPAchecker_TMP_0 + ((unsigned long)reg)) { /* Function call is skipped due to function is undefined */} 204 return ;; } 220 return ;; } 232 return ;; } { } 358 _raw_spin_unlock_irqrestore(&(lock->ldv_6411.rlock), flags) { /* Function call is skipped due to function is undefined */} } 403 adap->owner = &__this_module; 404 adap->algo = (const struct i2c_algorithm *)(&cafe_smbus_algo); 405 strcpy((char *)(&(adap->name)), "cafe_ccic") { /* Function call is skipped due to function is undefined */} 406 adap->dev.parent = &(cam->pdev->dev); { } 408 ret = i2c_add_adapter(adap) { /* Function call is skipped due to function is undefined */} 410 printk("\vUnable to register cafe i2c adapter\n") { /* Function call is skipped due to function is undefined */} } 582 goto out_pdown; { { 203 void *__CPAchecker_TMP_0 = (void *)(cam->regs); 203 iowrite32(val, __CPAchecker_TMP_0 + ((unsigned long)reg)) { /* Function call is skipped due to function is undefined */} 204 return ;; } { 203 void *__CPAchecker_TMP_0 = (void *)(cam->regs); 203 iowrite32(val, __CPAchecker_TMP_0 + ((unsigned long)reg)) { /* Function call is skipped due to function is undefined */} 204 return ;; } { } 203 void 
*__CPAchecker_TMP_0 = (void *)(cam->regs); 203 iowrite32(val, __CPAchecker_TMP_0 + ((unsigned long)reg)) { /* Function call is skipped due to function is undefined */} 204 return ;; } 593 free_irq(pdev->irq, (void *)cam) { /* Function call is skipped due to function is undefined */} 594 out_iounmap:; 595 void *__CPAchecker_TMP_2 = (void *)(mcam->regs); 595 pci_iounmap(pdev, __CPAchecker_TMP_2) { /* Function call is skipped due to function is undefined */} 596 out_disable:; 597 pci_disable_device(pdev) { /* Function call is skipped due to function is undefined */} 598 out_free:; 600 out:; } 779 goto ldv_35815; 828 goto ldv_35820; 949 goto ldv_35838; 762 tmp = __VERIFIER_nondet_int() { /* Function call is skipped due to function is undefined */} 762 switch (tmp) 895 tmp___2 = __VERIFIER_nondet_int() { /* Function call is skipped due to function is undefined */} 895 switch (tmp___2) { 711 pci_unregister_driver(&cafe_pci_driver) { /* Function call is skipped due to function is undefined */} } 901 ldv_state_variable_0 = 3; 902 goto ldv_final; 950 ldv_final:; } | Source code
1
2 #include <linux/kernel.h>
3 bool ldv_is_err(const void *ptr);
4 bool ldv_is_err_or_null(const void *ptr);
5 void* ldv_err_ptr(long error);
6 long ldv_ptr_err(const void *ptr);
7
8 /* TODO: this include should be avoided here while bindings below should
9 * call just model functions from LDV_COMMON_MODEL rather then, say,
10 * ldv_undef_int_negative(). */
11 #include <verifier/rcv.h>
12 // Provide model function prototypes before their usage.
13 //for dev_get_drvdata, dev_set_drvdata
14 //#include <linux/device.h>
15
16 void* ldv_alloc(size_t size);
17 void* ldv_zero_alloc(size_t size);
18 void ldv_free(void);
19 void ldv_condition_free(void);
20 void ldv_save_gadget_release(void (*func)(struct device *_dev));
21 void *ldv_dev_get_drvdata(const struct device *dev);
22 int ldv_dev_set_drvdata(struct device *dev, void *data);
23 void* ldv_alloc_without_counter(size_t size);
24 void* ldv_zalloc_without_counter(size_t size);
25 void* ldv_nonzero_alloc(size_t size);
26 void* __VERIFIER_alloc(size_t size);
27 void* ldv_zalloc(size_t size);
28 void ldv_assume_and_increase(void* res) ;
29
30 #include <linux/slab.h>
31 #include <verifier/rcv.h>
32 #include <linux/gfp.h>
33 struct v4l2_streamparm *mcam_v4l_ioctl_ops_group2;
34 int LDV_IN_INTERRUPT = 1;
35 int __VERIFIER_nondet_int(void);
36 struct i2c_adapter *cafe_smbus_algo_group0;
37 struct file *mcam_v4l_fops_group0;
38 struct v4l2_format *mcam_v4l_ioctl_ops_group1;
39 int ldv_state_variable_6;
40 int ldv_state_variable_0;
41 struct vb2_queue *mcam_vb2_ops_group0;
42 struct vb2_queue *mcam_vb2_sg_ops_group1;
43 int ldv_state_variable_5;
44 int ldv_state_variable_3;
45 struct vb2_buffer *mcam_vb2_sg_ops_group0;
46 struct pci_dev *cafe_pci_driver_group1;
47 int ldv_state_variable_2;
48 int ref_cnt;
49 int ldv_state_variable_1;
50 int ldv_state_variable_7;
51 int ldv_state_variable_4;
52 struct file *mcam_v4l_ioctl_ops_group3;
53 struct v4l2_buffer *mcam_v4l_ioctl_ops_group0;
54 int __VERIFIER_nondet_int(void);
55 void ldv_initialyze_v4l2_file_operations_2(void);
56 void ldv_initialyze_i2c_algorithm_7(void);
57 void ldv_initialyze_vb2_ops_4(void);
58 void ldv_initialyze_vb2_ops_5(void);
59 void ldv_initialyze_v4l2_ioctl_ops_3(void);
60 /*
61 * A driver for the CMOS camera controller in the Marvell 88ALP01 "cafe"
62 * multifunction chip. Currently works with the Omnivision OV7670
63 * sensor.
64 *
65 * The data sheet for this device can be found at:
66 * http://www.marvell.com/products/pc_connectivity/88alp01/
67 *
68 * Copyright 2006-11 One Laptop Per Child Association, Inc.
69 * Copyright 2006-11 Jonathan Corbet <corbet@lwn.net>
70 *
71 * Written by Jonathan Corbet, corbet@lwn.net.
72 *
73 * v4l2_device/v4l2_subdev conversion by:
74 * Copyright (C) 2009 Hans Verkuil <hverkuil@xs4all.nl>
75 *
76 * This file may be distributed under the terms of the GNU General
77 * Public License, version 2.
78 */
79 #include <linux/kernel.h>
80 #include <linux/module.h>
81 #include <linux/init.h>
82 #include <linux/pci.h>
83 #include <linux/i2c.h>
84 #include <linux/interrupt.h>
85 #include <linux/spinlock.h>
86 #include <linux/slab.h>
87 #include <linux/videodev2.h>
88 #include <media/v4l2-device.h>
89 #include <linux/device.h>
90 #include <linux/wait.h>
91 #include <linux/delay.h>
92 #include <linux/io.h>
93
94 #include "mcam-core.h"
95
96 #define CAFE_VERSION 0x000002
97
98
99 /*
100 * Parameters.
101 */
102 MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
103 MODULE_DESCRIPTION("Marvell 88ALP01 CMOS Camera Controller driver");
104 MODULE_LICENSE("GPL");
105 MODULE_SUPPORTED_DEVICE("Video");
106
107
108
109
/*
 * Per-device state for one Cafe camera controller.  The generic
 * mcam_camera core state is embedded here; to_cam() recovers this
 * wrapper from the embedded v4l2_device via container_of().
 */
struct cafe_camera {
	int registered;			/* Fully initialized? */
	struct mcam_camera mcam;	/* Generic controller core state */
	struct pci_dev *pdev;		/* Our PCI function */
	wait_queue_head_t smbus_wait;	/* Waiting on i2c events */
};
116
117 /*
118 * Most of the camera controller registers are defined in mcam-core.h,
119 * but the Cafe platform has some additional registers of its own;
120 * they are described here.
121 */
122
123 /*
124 * "General purpose register" has a couple of GPIOs used for sensor
125 * power and reset on OLPC XO 1.0 systems.
126 */
127 #define REG_GPR 0xb4
128 #define GPR_C1EN 0x00000020 /* Pad 1 (power down) enable */
129 #define GPR_C0EN 0x00000010 /* Pad 0 (reset) enable */
130 #define GPR_C1 0x00000002 /* Control 1 value */
131 /*
132 * Control 0 is wired to reset on OLPC machines. For ov7x sensors,
133 * it is active low.
134 */
135 #define GPR_C0 0x00000001 /* Control 0 value */
136
137 /*
138 * These registers control the SMBUS module for communicating
139 * with the sensor.
140 */
141 #define REG_TWSIC0 0xb8 /* TWSI (smbus) control 0 */
142 #define TWSIC0_EN 0x00000001 /* TWSI enable */
143 #define TWSIC0_MODE 0x00000002 /* 1 = 16-bit, 0 = 8-bit */
144 #define TWSIC0_SID 0x000003fc /* Slave ID */
145 /*
146 * Subtle trickery: the slave ID field starts with bit 2. But the
147 * Linux i2c stack wants to treat the bottommost bit as a separate
148 * read/write bit, which is why slave ID's are usually presented
149 * >>1. For consistency with that behavior, we shift over three
150 * bits instead of two.
151 */
152 #define TWSIC0_SID_SHIFT 3
153 #define TWSIC0_CLKDIV 0x0007fc00 /* Clock divider */
154 #define TWSIC0_MASKACK 0x00400000 /* Mask ack from sensor */
155 #define TWSIC0_OVMAGIC 0x00800000 /* Make it work on OV sensors */
156
157 #define REG_TWSIC1 0xbc /* TWSI control 1 */
158 #define TWSIC1_DATA 0x0000ffff /* Data to/from camchip */
159 #define TWSIC1_ADDR 0x00ff0000 /* Address (register) */
160 #define TWSIC1_ADDR_SHIFT 16
161 #define TWSIC1_READ 0x01000000 /* Set for read op */
162 #define TWSIC1_WSTAT 0x02000000 /* Write status */
163 #define TWSIC1_RVALID 0x04000000 /* Read data valid */
164 #define TWSIC1_ERROR 0x08000000 /* Something screwed up */
165
166 /*
167 * Here's the weird global control registers
168 */
169 #define REG_GL_CSR 0x3004 /* Control/status register */
170 #define GCSR_SRS 0x00000001 /* SW Reset set */
171 #define GCSR_SRC 0x00000002 /* SW Reset clear */
172 #define GCSR_MRS 0x00000004 /* Master reset set */
173 #define GCSR_MRC 0x00000008 /* HW Reset clear */
174 #define GCSR_CCIC_EN 0x00004000 /* CCIC Clock enable */
175 #define REG_GL_IMASK 0x300c /* Interrupt mask register */
176 #define GIMSK_CCIC_EN 0x00000004 /* CCIC Interrupt enable */
177
178 #define REG_GL_FCR 0x3038 /* GPIO functional control register */
179 #define GFCR_GPIO_ON 0x08 /* Camera GPIO enabled */
180 #define REG_GL_GPIOR 0x315c /* GPIO register */
181 #define GGPIO_OUT 0x80000 /* GPIO output */
182 #define GGPIO_VAL 0x00008 /* Output pin value */
183
184 #define REG_LEN (REG_GL_IMASK + 4)
185
186
187 /*
188 * Debugging and related.
189 */
/*
 * Convenience logging wrappers.  Note: no trailing semicolon in the
 * expansion — callers supply their own, and a semicolon baked into the
 * macro would break "if (x) cam_err(...); else ..." and produce empty
 * statements after every call.
 */
#define cam_err(cam, fmt, arg...) \
	dev_err(&(cam)->pdev->dev, fmt, ##arg)
#define cam_warn(cam, fmt, arg...) \
	dev_warn(&(cam)->pdev->dev, fmt, ##arg)
194
195 /* -------------------------------------------------------------------- */
196 /*
197 * The I2C/SMBUS interface to the camera itself starts here. The
198 * controller handles SMBUS itself, presenting a relatively simple register
199 * interface; all we have to do is to tell it where to route the data.
200 */
201 #define CAFE_SMBUS_TIMEOUT (HZ) /* generous */
202
203 static inline struct cafe_camera *to_cam(struct v4l2_device *dev)
204 {
205 struct mcam_camera *m = container_of(dev, struct mcam_camera, v4l2_dev);
206 return container_of(m, struct cafe_camera, mcam);
207 }
208
209
210 static int cafe_smbus_write_done(struct mcam_camera *mcam)
211 {
212 unsigned long flags;
213 int c1;
214
215 /*
216 * We must delay after the interrupt, or the controller gets confused
217 * and never does give us good status. Fortunately, we don't do this
218 * often.
219 */
220 udelay(20);
221 spin_lock_irqsave(&mcam->dev_lock, flags);
222 c1 = mcam_reg_read(mcam, REG_TWSIC1);
223 spin_unlock_irqrestore(&mcam->dev_lock, flags);
224 return (c1 & (TWSIC1_WSTAT|TWSIC1_ERROR)) != TWSIC1_WSTAT;
225 }
226
227 static int cafe_smbus_write_data(struct cafe_camera *cam,
228 u16 addr, u8 command, u8 value)
229 {
230 unsigned int rval;
231 unsigned long flags;
232 struct mcam_camera *mcam = &cam->mcam;
233
234 spin_lock_irqsave(&mcam->dev_lock, flags);
235 rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
236 rval |= TWSIC0_OVMAGIC; /* Make OV sensors work */
237 /*
238 * Marvell sez set clkdiv to all 1's for now.
239 */
240 rval |= TWSIC0_CLKDIV;
241 mcam_reg_write(mcam, REG_TWSIC0, rval);
242 (void) mcam_reg_read(mcam, REG_TWSIC1); /* force write */
243 rval = value | ((command << TWSIC1_ADDR_SHIFT) & TWSIC1_ADDR);
244 mcam_reg_write(mcam, REG_TWSIC1, rval);
245 spin_unlock_irqrestore(&mcam->dev_lock, flags);
246
247 /* Unfortunately, reading TWSIC1 too soon after sending a command
248 * causes the device to die.
249 * Use a busy-wait because we often send a large quantity of small
250 * commands at-once; using msleep() would cause a lot of context
251 * switches which take longer than 2ms, resulting in a noticeable
252 * boot-time and capture-start delays.
253 */
254 mdelay(2);
255
256 /*
257 * Another sad fact is that sometimes, commands silently complete but
258 * cafe_smbus_write_done() never becomes aware of this.
259 * This happens at random and appears to possible occur with any
260 * command.
261 * We don't understand why this is. We work around this issue
262 * with the timeout in the wait below, assuming that all commands
263 * complete within the timeout.
264 */
265 wait_event_timeout(cam->smbus_wait, cafe_smbus_write_done(mcam),
266 CAFE_SMBUS_TIMEOUT);
267
268 spin_lock_irqsave(&mcam->dev_lock, flags);
269 rval = mcam_reg_read(mcam, REG_TWSIC1);
270 spin_unlock_irqrestore(&mcam->dev_lock, flags);
271
272 if (rval & TWSIC1_WSTAT) {
273 cam_err(cam, "SMBUS write (%02x/%02x/%02x) timed out\n", addr,
274 command, value);
275 return -EIO;
276 }
277 if (rval & TWSIC1_ERROR) {
278 cam_err(cam, "SMBUS write (%02x/%02x/%02x) error\n", addr,
279 command, value);
280 return -EIO;
281 }
282 return 0;
283 }
284
285
286
287 static int cafe_smbus_read_done(struct mcam_camera *mcam)
288 {
289 unsigned long flags;
290 int c1;
291
292 /*
293 * We must delay after the interrupt, or the controller gets confused
294 * and never does give us good status. Fortunately, we don't do this
295 * often.
296 */
297 udelay(20);
298 spin_lock_irqsave(&mcam->dev_lock, flags);
299 c1 = mcam_reg_read(mcam, REG_TWSIC1);
300 spin_unlock_irqrestore(&mcam->dev_lock, flags);
301 return c1 & (TWSIC1_RVALID|TWSIC1_ERROR);
302 }
303
304
305
306 static int cafe_smbus_read_data(struct cafe_camera *cam,
307 u16 addr, u8 command, u8 *value)
308 {
309 unsigned int rval;
310 unsigned long flags;
311 struct mcam_camera *mcam = &cam->mcam;
312
313 spin_lock_irqsave(&mcam->dev_lock, flags);
314 rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
315 rval |= TWSIC0_OVMAGIC; /* Make OV sensors work */
316 /*
317 * Marvel sez set clkdiv to all 1's for now.
318 */
319 rval |= TWSIC0_CLKDIV;
320 mcam_reg_write(mcam, REG_TWSIC0, rval);
321 (void) mcam_reg_read(mcam, REG_TWSIC1); /* force write */
322 rval = TWSIC1_READ | ((command << TWSIC1_ADDR_SHIFT) & TWSIC1_ADDR);
323 mcam_reg_write(mcam, REG_TWSIC1, rval);
324 spin_unlock_irqrestore(&mcam->dev_lock, flags);
325
326 wait_event_timeout(cam->smbus_wait,
327 cafe_smbus_read_done(mcam), CAFE_SMBUS_TIMEOUT);
328 spin_lock_irqsave(&mcam->dev_lock, flags);
329 rval = mcam_reg_read(mcam, REG_TWSIC1);
330 spin_unlock_irqrestore(&mcam->dev_lock, flags);
331
332 if (rval & TWSIC1_ERROR) {
333 cam_err(cam, "SMBUS read (%02x/%02x) error\n", addr, command);
334 return -EIO;
335 }
336 if (!(rval & TWSIC1_RVALID)) {
337 cam_err(cam, "SMBUS read (%02x/%02x) timed out\n", addr,
338 command);
339 return -EIO;
340 }
341 *value = rval & 0xff;
342 return 0;
343 }
344
345 /*
346 * Perform a transfer over SMBUS. This thing is called under
347 * the i2c bus lock, so we shouldn't race with ourselves...
348 */
349 static int cafe_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
350 unsigned short flags, char rw, u8 command,
351 int size, union i2c_smbus_data *data)
352 {
353 struct cafe_camera *cam = i2c_get_adapdata(adapter);
354 int ret = -EINVAL;
355
356 /*
357 * This interface would appear to only do byte data ops. OK
358 * it can do word too, but the cam chip has no use for that.
359 */
360 if (size != I2C_SMBUS_BYTE_DATA) {
361 cam_err(cam, "funky xfer size %d\n", size);
362 return -EINVAL;
363 }
364
365 if (rw == I2C_SMBUS_WRITE)
366 ret = cafe_smbus_write_data(cam, addr, command, data->byte);
367 else if (rw == I2C_SMBUS_READ)
368 ret = cafe_smbus_read_data(cam, addr, command, &data->byte);
369 return ret;
370 }
371
372
/*
 * Unmask the TWSI (SMBUS) interrupt sources so that completion events
 * can wake the waiters in cafe_smbus_{read,write}_data().
 */
static void cafe_smbus_enable_irq(struct cafe_camera *cam)
{
	unsigned long flags;

	/* The register set/clear helpers require dev_lock to be held. */
	spin_lock_irqsave(&cam->mcam.dev_lock, flags);
	mcam_reg_set_bit(&cam->mcam, REG_IRQMASK, TWSIIRQS);
	spin_unlock_irqrestore(&cam->mcam.dev_lock, flags);
}
381
382 static u32 cafe_smbus_func(struct i2c_adapter *adapter)
383 {
384 return I2C_FUNC_SMBUS_READ_BYTE_DATA |
385 I2C_FUNC_SMBUS_WRITE_BYTE_DATA;
386 }
387
/* i2c algorithm hooks: SMBUS transfers handled by the controller itself. */
static struct i2c_algorithm cafe_smbus_algo = {
	.smbus_xfer = cafe_smbus_xfer,
	.functionality = cafe_smbus_func
};
392
393 static int cafe_smbus_setup(struct cafe_camera *cam)
394 {
395 struct i2c_adapter *adap;
396 int ret;
397
398 adap = kzalloc(sizeof(*adap), GFP_KERNEL);
399 if (adap == NULL)
400 return -ENOMEM;
401 cam->mcam.i2c_adapter = adap;
402 cafe_smbus_enable_irq(cam);
403 adap->owner = THIS_MODULE;
404 adap->algo = &cafe_smbus_algo;
405 strcpy(adap->name, "cafe_ccic");
406 adap->dev.parent = &cam->pdev->dev;
407 i2c_set_adapdata(adap, cam);
408 ret = i2c_add_adapter(adap);
409 if (ret)
410 printk(KERN_ERR "Unable to register cafe i2c adapter\n");
411 return ret;
412 }
413
414 static void cafe_smbus_shutdown(struct cafe_camera *cam)
415 {
416 i2c_del_adapter(cam->mcam.i2c_adapter);
417 kfree(cam->mcam.i2c_adapter);
418 }
419
420
421 /*
422 * Controller-level stuff
423 */
424
/*
 * Bring the camera controller out of reset and enable its clock and
 * interrupt path.  The register write sequence below is order-critical
 * hardware wake-up magic; do not reorder.
 */
static void cafe_ctlr_init(struct mcam_camera *mcam)
{
	unsigned long flags;

	spin_lock_irqsave(&mcam->dev_lock, flags);
	/*
	 * Added magic to bring up the hardware on the B-Test board.
	 * (0x3038 is REG_GL_FCR with GFCR_GPIO_ON; 0x315c is REG_GL_GPIOR
	 * with GGPIO_OUT|GGPIO_VAL.)
	 */
	mcam_reg_write(mcam, 0x3038, 0x8);
	mcam_reg_write(mcam, 0x315c, 0x80008);
	/*
	 * Go through the dance needed to wake the device up.
	 * Note that these registers are global and shared
	 * with the NAND and SD devices.  Interaction between the
	 * three still needs to be examined.
	 */
	mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRS|GCSR_MRS); /* Needed? */
	mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRC|GCSR_MRC);
	mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRC|GCSR_MRS);
	/*
	 * Here we must wait a bit for the controller to come around.
	 * Drop the lock across the sleep; msleep() may not be called
	 * with a spinlock held.
	 */
	spin_unlock_irqrestore(&mcam->dev_lock, flags);
	msleep(5);
	spin_lock_irqsave(&mcam->dev_lock, flags);

	/* Enable the CCIC clock, then route its interrupt through. */
	mcam_reg_write(mcam, REG_GL_CSR, GCSR_CCIC_EN|GCSR_SRC|GCSR_MRC);
	mcam_reg_set_bit(mcam, REG_GL_IMASK, GIMSK_CCIC_EN);
	/*
	 * Mask all interrupts.
	 */
	mcam_reg_write(mcam, REG_IRQMASK, 0);
	spin_unlock_irqrestore(&mcam->dev_lock, flags);
}
459
460
/*
 * Power the sensor up via the global GPIO and the camera GPR register
 * (OLPC XO-1 wiring assumed).  Always returns 0; the int return
 * matches the mcam core's plat_power_up hook signature.
 */
static int cafe_ctlr_power_up(struct mcam_camera *mcam)
{
	/*
	 * Part one of the sensor dance: turn the global
	 * GPIO signal on.
	 */
	mcam_reg_write(mcam, REG_GL_FCR, GFCR_GPIO_ON);
	mcam_reg_write(mcam, REG_GL_GPIOR, GGPIO_OUT|GGPIO_VAL);
	/*
	 * Put the sensor into operational mode (assumes OLPC-style
	 * wiring).  Control 0 is reset - set to 1 to operate.
	 * Control 1 is power down, set to 0 to operate.
	 */
	mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN); /* pwr up, reset */
	mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C0);

	return 0;
}
479
/*
 * Power the sensor back down: assert the C1 (power-down) control line,
 * then drop the GPIO output value set by cafe_ctlr_power_up().
 */
static void cafe_ctlr_power_down(struct mcam_camera *mcam)
{
	mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C1);
	mcam_reg_write(mcam, REG_GL_FCR, GFCR_GPIO_ON);
	mcam_reg_write(mcam, REG_GL_GPIOR, GGPIO_OUT);
}
486
487
488
/*
 * The platform interrupt handler.  Frame interrupts are passed to the
 * mcam core (only once the device is registered); TWSI (SMBus)
 * interrupts are acknowledged here and wake any waiting transfer.
 * Runs under the device lock; returns IRQ_HANDLED if anything was ours.
 */
static irqreturn_t cafe_irq(int irq, void *data)
{
	struct cafe_camera *cam = data;
	struct mcam_camera *mcam = &cam->mcam;
	unsigned int irqs, handled;

	spin_lock(&mcam->dev_lock);
	irqs = mcam_reg_read(mcam, REG_IRQSTAT);
	handled = cam->registered && mccic_irq(mcam, irqs);
	if (irqs & TWSIIRQS) {
		/* Ack the TWSI bits and release the SMBus waiter */
		mcam_reg_write(mcam, REG_IRQSTAT, TWSIIRQS);
		wake_up(&cam->smbus_wait);
		handled = 1;
	}
	spin_unlock(&mcam->dev_lock);
	return IRQ_RETVAL(handled);
}
509
510
511 /* -------------------------------------------------------------------------- */
512 /*
513 * PCI interface stuff.
514 */
515
/*
 * PCI probe: allocate the camera structure, enable and map the device,
 * hook up the shared interrupt, bring the controller up, create the
 * SMBus adapter and register with the mcam core.  Failure paths unwind
 * in reverse order through the goto chain at the bottom.
 */
static int cafe_pci_probe(struct pci_dev *pdev,
		const struct pci_device_id *id)
{
	int ret;
	struct cafe_camera *cam;
	struct mcam_camera *mcam;

	/*
	 * Start putting together one of our big camera structures.
	 */
	ret = -ENOMEM;
	cam = kzalloc(sizeof(struct cafe_camera), GFP_KERNEL);
	if (cam == NULL)
		goto out;
	cam->pdev = pdev;
	mcam = &cam->mcam;
	mcam->chip_id = MCAM_CAFE;
	spin_lock_init(&mcam->dev_lock);
	init_waitqueue_head(&cam->smbus_wait);
	mcam->plat_power_up = cafe_ctlr_power_up;
	mcam->plat_power_down = cafe_ctlr_power_down;
	mcam->dev = &pdev->dev;
	/*
	 * Set the clock speed for the XO 1; I don't believe this
	 * driver has ever run anywhere else.
	 */
	mcam->clock_speed = 45;
	mcam->use_smbus = 1;
	/*
	 * Vmalloc mode for buffers is traditional with this driver.
	 * We *might* be able to run DMA_contig, especially on a system
	 * with CMA in it.
	 */
	mcam->buffer_mode = B_vmalloc;
	/*
	 * Get set up on the PCI bus.
	 */
	ret = pci_enable_device(pdev);
	if (ret)
		goto out_free;
	pci_set_master(pdev);

	ret = -EIO;
	mcam->regs = pci_iomap(pdev, 0, 0);	/* BAR 0, whole region */
	if (!mcam->regs) {
		printk(KERN_ERR "Unable to ioremap cafe-ccic regs\n");
		goto out_disable;
	}
	mcam->regs_size = pci_resource_len(pdev, 0);
	ret = request_irq(pdev->irq, cafe_irq, IRQF_SHARED, "cafe-ccic", cam);
	if (ret)
		goto out_iounmap;

	/*
	 * Initialize the controller and leave it powered up.  It will
	 * stay that way until the sensor driver shows up.
	 */
	cafe_ctlr_init(mcam);
	cafe_ctlr_power_up(mcam);
	/*
	 * Set up I2C/SMBUS communications.  We have to drop the mutex here
	 * because the sensor could attach in this call chain, leading to
	 * unsightly deadlocks.
	 */
	ret = cafe_smbus_setup(cam);
	if (ret)
		goto out_pdown;

	ret = mccic_register(mcam);
	if (ret == 0) {
		cam->registered = 1;	/* cafe_irq may now call the core */
		return 0;
	}

	cafe_smbus_shutdown(cam);
out_pdown:
	cafe_ctlr_power_down(mcam);
	free_irq(pdev->irq, cam);
out_iounmap:
	pci_iounmap(pdev, mcam->regs);
out_disable:
	pci_disable_device(pdev);
out_free:
	kfree(cam);
out:
	return ret;
}
603
604
/*
 * Shut down an initialized device: unregister from the mcam core,
 * remove the i2c adapter, then release the IRQ and the MMIO mapping.
 * The order matters - the interrupt handler must be gone before the
 * register mapping disappears.
 */
static void cafe_shutdown(struct cafe_camera *cam)
{
	mccic_shutdown(&cam->mcam);
	cafe_smbus_shutdown(cam);
	free_irq(cam->pdev->irq, cam);
	pci_iounmap(cam->pdev, cam->mcam.regs);
}
615
616
617 static void cafe_pci_remove(struct pci_dev *pdev)
618 {
619 struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
620 struct cafe_camera *cam = to_cam(v4l2_dev);
621
622 if (cam == NULL) {
623 printk(KERN_WARNING "pci_remove on unknown pdev %p\n", pdev);
624 return;
625 }
626 cafe_shutdown(cam);
627 kfree(cam);
628 }
629
630
631 #ifdef CONFIG_PM
632 /*
633 * Basic power management.
634 */
/*
 * System suspend: save PCI config state, quiesce the mcam core,
 * then disable the device.  Returns 0 or the pci_save_state() error.
 */
static int cafe_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
	struct cafe_camera *cam = to_cam(v4l2_dev);
	int ret;

	ret = pci_save_state(pdev);
	if (ret)
		return ret;
	mccic_suspend(&cam->mcam);
	pci_disable_device(pdev);
	return 0;
}
648
649
/*
 * System resume: restore PCI state, re-enable the device, re-run the
 * controller init sequence and let the mcam core resume streaming.
 */
static int cafe_pci_resume(struct pci_dev *pdev)
{
	struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
	struct cafe_camera *cam = to_cam(v4l2_dev);
	int ret = 0;

	pci_restore_state(pdev);
	ret = pci_enable_device(pdev);

	if (ret) {
		cam_warn(cam, "Unable to re-enable device on resume!\n");
		return ret;
	}
	cafe_ctlr_init(&cam->mcam);
	return mccic_resume(&cam->mcam);
}
666
667 #endif /* CONFIG_PM */
668
/* PCI IDs we bind to: the CCIC function of the Marvell 88ALP01 "CAFE". */
static struct pci_device_id cafe_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL,
		PCI_DEVICE_ID_MARVELL_88ALP01_CCIC) },
	{ 0, }	/* terminator */
};

MODULE_DEVICE_TABLE(pci, cafe_ids);
676
/* PCI driver glue; the PM hooks exist only when CONFIG_PM is set. */
static struct pci_driver cafe_pci_driver = {
	.name = "cafe1000-ccic",
	.id_table = cafe_ids,
	.probe = cafe_pci_probe,
	.remove = cafe_pci_remove,
#ifdef CONFIG_PM
	.suspend = cafe_pci_suspend,
	.resume = cafe_pci_resume,
#endif
};
687
688
689
690
691 static int __init cafe_init(void)
692 {
693 int ret;
694
695 printk(KERN_NOTICE "Marvell M88ALP01 'CAFE' Camera Controller version %d\n",
696 CAFE_VERSION);
697 ret = pci_register_driver(&cafe_pci_driver);
698 if (ret) {
699 printk(KERN_ERR "Unable to register cafe_ccic driver\n");
700 goto out;
701 }
702 ret = 0;
703
704 out:
705 return ret;
706 }
707
708
/* Module teardown: unregister the PCI driver (reverses cafe_init). */
static void __exit cafe_exit(void)
{
	pci_unregister_driver(&cafe_pci_driver);
}

module_init(cafe_init);
module_exit(cafe_exit);
716
/* LDV verifier state variables and model-function declarations. */
int ldv_retval_0;	/* holds the probe() result in the generated model */
void ldv_initialize(void);
void ldv_check_final_state(void);
int ldv_retval_2;	/* holds the module_init() result */

int __VERIFIER_nondet_int(void);

/* Generated: give the i2c algorithm callbacks an arbitrary adapter pointer. */
void ldv_initialyze_i2c_algorithm_7(void){
	cafe_smbus_algo_group0 = ldv_undef_ptr();
}
727
728 /* DEG-ENVIRONMENT-BEGIN */
729 extern void ldv_main_exported_4(void);
730 extern void ldv_main_exported_1(void);
731 extern void ldv_main_exported_3(void);
732 extern void ldv_main_exported_2(void);
733 extern void ldv_main_exported_5(void);
734 extern void ldv_main_exported_6(void);
735 extern void ldv_main_exported_0(void);
736 extern void ldv_main_exported_7(void);
737
//********************* LDV MAIN *********************
//main
/*
 * Generated environment model: nondeterministically exercises the
 * driver's callback structures (PCI ops, i2c algorithm, v4l2/vb2 ops,
 * module init/exit) according to per-structure state machines, then
 * verifies the final LDV state.  Code below is byte-for-byte the
 * generator's output; only comments have been added.
 */
void entry_point(void){
	ldv_initialize();
	//args for callbacks
	struct pci_device_id const *ldvarg1;
	pm_message_t ldvarg0;
	char ldvarg39;
	union i2c_smbus_data *ldvarg41;
	u8 ldvarg43;
	u16 ldvarg42;
	short unsigned int ldvarg40;
	int ldvarg38;
	//initialization of machine states
	ldv_state_variable_6=0;
	ldv_state_variable_3=0;
	ldv_state_variable_7=0;
	ldv_state_variable_2=0;
	ldv_state_variable_1=0;
	ldv_state_variable_4=0;
	ref_cnt=0;
	ldv_state_variable_0=1;	/* module not yet initialized */
	ldv_state_variable_5=0;
	while(1){
	switch(__VERIFIER_nondet_int()){
	case 0:{
		/*DEG-struct: handlers from structure cafe_pci_driver*/
		/*DEG-CHECK: checking registration of cafe_pci_driver structure*/
		if(ldv_state_variable_6 != 0){
			switch(__VERIFIER_nondet_int()){
			case 0:{
				/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
				if(ldv_state_variable_6 == 1){
					/*DEG-CALL: handler probe from cafe_pci_driver*/
					ldv_retval_0=(& cafe_pci_probe)(cafe_pci_driver_group1,ldvarg1);
					if(ldv_retval_0==0){
						ldv_state_variable_6 = 2;
						ref_cnt++;
					}
				}
			}
			break;
			case 1:{
				/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
				if(ldv_state_variable_6 == 1){
					/*DEG-CALL: handler suspend from cafe_pci_driver*/
					(& cafe_pci_suspend)(cafe_pci_driver_group1,ldvarg0);
					/*DEG-postcall: default*/
					ldv_state_variable_6 = 1;
				}
				/*DEG-state: state 2 (look at corresponding state-chart diagram for details)*/
				if(ldv_state_variable_6 == 2){
					/*DEG-CALL: handler suspend from cafe_pci_driver*/
					(& cafe_pci_suspend)(cafe_pci_driver_group1,ldvarg0);
					/*DEG-postcall: default*/
					ldv_state_variable_6 = 2;
				}
			}
			break;
			case 2:{
				/*DEG-state: state 2 (look at corresponding state-chart diagram for details)*/
				if(ldv_state_variable_6 == 2){
					/*DEG-CALL: handler remove from cafe_pci_driver*/
					(& cafe_pci_remove)(cafe_pci_driver_group1);
					ldv_state_variable_6 = 1;
					ref_cnt--;
				}
			}
			break;
			case 3:{
				/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
				if(ldv_state_variable_6 == 1){
					/*DEG-CALL: handler resume from cafe_pci_driver*/
					(& cafe_pci_resume)(cafe_pci_driver_group1);
					/*DEG-postcall: default*/
					ldv_state_variable_6 = 1;
				}
				/*DEG-state: state 2 (look at corresponding state-chart diagram for details)*/
				if(ldv_state_variable_6 == 2){
					/*DEG-CALL: handler resume from cafe_pci_driver*/
					(& cafe_pci_resume)(cafe_pci_driver_group1);
					/*DEG-postcall: default*/
					ldv_state_variable_6 = 2;
				}
			}
			break;
			default: ldv_assume(0);
			}
		}
	}
	break;
	case 1:{
		/*DEG-struct: handlers from structure mcam_v4l_ioctl_ops*/
		/*DEG-CHECK: checking registration of mcam_v4l_ioctl_ops structure*/
		if(ldv_state_variable_3 != 0){
			ldv_main_exported_3();
		}
	}
	break;
	case 2:{
		/*DEG-struct: handlers from structure cafe_smbus_algo*/
		/*DEG-CHECK: checking registration of cafe_smbus_algo structure*/
		if(ldv_state_variable_7 != 0){
			switch(__VERIFIER_nondet_int()){
			case 0:{
				/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
				if(ldv_state_variable_7 == 1){
					/*DEG-CALL: handler smbus_xfer from cafe_smbus_algo*/
					(& cafe_smbus_xfer)(cafe_smbus_algo_group0,ldvarg42,ldvarg40,ldvarg39,ldvarg43,ldvarg38,ldvarg41);
					/*DEG-postcall: default*/
					ldv_state_variable_7 = 1;
				}
			}
			break;
			case 1:{
				/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
				if(ldv_state_variable_7 == 1){
					/*DEG-CALL: handler functionality from cafe_smbus_algo*/
					(& cafe_smbus_func)(cafe_smbus_algo_group0);
					/*DEG-postcall: default*/
					ldv_state_variable_7 = 1;
				}
			}
			break;
			default: ldv_assume(0);
			}
		}
	}
	break;
	case 3:{
		/*DEG-struct: handlers from structure mcam_v4l_fops*/
		/*DEG-CHECK: checking registration of mcam_v4l_fops structure*/
		if(ldv_state_variable_2 != 0){
			ldv_main_exported_2();
		}
	}
	break;
	case 4:{
		/*DEG-struct: handlers from structure mcam_v4l_template*/
		/*DEG-CHECK: checking registration of mcam_v4l_template structure*/
		if(ldv_state_variable_1 != 0){
			ldv_main_exported_1();
		}
	}
	break;
	case 5:{
		/*DEG-struct: handlers from structure mcam_vb2_sg_ops*/
		/*DEG-CHECK: checking registration of mcam_vb2_sg_ops structure*/
		if(ldv_state_variable_4 != 0){
			ldv_main_exported_4();
		}
	}
	break;
	case 6:{
		/*DEG-struct: handlers from structure module*/
		/*DEG-CHECK: checking registration of module structure*/
		if(ldv_state_variable_0 != 0){
			switch(__VERIFIER_nondet_int()){
			case 0:{
				/*DEG-state: state 2 (look at corresponding state-chart diagram for details)*/
				if(ldv_state_variable_0 == 2 && ref_cnt==0){
					/*DEG-CALL: handler module_exit from module*/
					cafe_exit();
					ldv_state_variable_0 = 3;
					goto ldv_final;
				}
			}
			break;
			case 1:{
				/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
				if(ldv_state_variable_0 == 1){
					/*DEG-CALL: handler module_init from module*/
					ldv_retval_2=cafe_init();
					if(ldv_retval_2!=0){
						ldv_state_variable_0 = 3;
						goto ldv_final;
					}
					/*DEG-postcall: if success*/
					if(ldv_retval_2==0){
						ldv_state_variable_0 = 2;
						ldv_state_variable_5 = 1;
						ldv_initialyze_vb2_ops_5();
						ldv_state_variable_2 = 1;
						ldv_initialyze_v4l2_file_operations_2();
						ldv_state_variable_7 = 1;
						ldv_initialyze_i2c_algorithm_7();
						ldv_state_variable_3 = 1;
						ldv_initialyze_v4l2_ioctl_ops_3();
						ldv_state_variable_4 = 1;
						ldv_initialyze_vb2_ops_4();
						ldv_state_variable_6 = 1;
						cafe_pci_driver_group1 = ldv_successful_alloc(sizeof(struct pci_dev));
					}
				}
			}
			break;
			default: ldv_assume(0);
			}
		}
	}
	break;
	case 7:{
		/*DEG-struct: handlers from structure mcam_vb2_ops*/
		/*DEG-CHECK: checking registration of mcam_vb2_ops structure*/
		if(ldv_state_variable_5 != 0){
			ldv_main_exported_5();
		}
	}
	break;
	default: ldv_assume(0);
	}
	}
	ldv_final:
	ldv_check_final_state();
	return;
}
954 /* DEG-ENVIRONMENT-END */ 1
2 #include <linux/kernel.h>
3 bool ldv_is_err(const void *ptr);
4 bool ldv_is_err_or_null(const void *ptr);
5 void* ldv_err_ptr(long error);
6 long ldv_ptr_err(const void *ptr);
7
8 /* TODO: this include should be avoided here while bindings below should
9 * call just model functions from LDV_COMMON_MODEL rather then, say,
10 * ldv_undef_int_negative(). */
11 #include <verifier/rcv.h>
12 // Provide model function prototypes before their usage.
13 //for dev_get_drvdata, dev_set_drvdata
14 //#include <linux/device.h>
15
16 void* ldv_alloc(size_t size);
17 void* ldv_zero_alloc(size_t size);
18 void ldv_free(void);
19 void ldv_condition_free(void);
20 void ldv_save_gadget_release(void (*func)(struct device *_dev));
21 void *ldv_dev_get_drvdata(const struct device *dev);
22 int ldv_dev_set_drvdata(struct device *dev, void *data);
23 void* ldv_alloc_without_counter(size_t size);
24 void* ldv_zalloc_without_counter(size_t size);
25 void* ldv_nonzero_alloc(size_t size);
26 void* __VERIFIER_alloc(size_t size);
27 void* ldv_zalloc(size_t size);
28 void ldv_assume_and_increase(void* res) ;
29
30 #include <linux/slab.h>
31 #include <verifier/rcv.h>
32 #include <linux/gfp.h>
33 extern struct v4l2_streamparm *mcam_v4l_ioctl_ops_group2;
34 extern int LDV_IN_INTERRUPT = 1;
35 extern struct i2c_adapter *cafe_smbus_algo_group0;
36 extern struct file *mcam_v4l_fops_group0;
37 extern struct v4l2_format *mcam_v4l_ioctl_ops_group1;
38 extern int ldv_state_variable_6;
39 extern int ldv_state_variable_0;
40 extern struct vb2_queue *mcam_vb2_ops_group0;
41 extern struct vb2_queue *mcam_vb2_sg_ops_group1;
42 extern int ldv_state_variable_5;
43 extern int ldv_state_variable_3;
44 extern struct vb2_buffer *mcam_vb2_sg_ops_group0;
45 extern struct pci_dev *cafe_pci_driver_group1;
46 extern int ldv_state_variable_2;
47 extern int ref_cnt;
48 extern int ldv_state_variable_1;
49 extern int ldv_state_variable_7;
50 extern int ldv_state_variable_4;
51 extern struct file *mcam_v4l_ioctl_ops_group3;
52 extern struct v4l2_buffer *mcam_v4l_ioctl_ops_group0;
53 extern int __VERIFIER_nondet_int(void);
54 extern void ldv_initialyze_v4l2_file_operations_2(void);
55 extern void ldv_initialyze_i2c_algorithm_7(void);
56 extern void ldv_initialyze_vb2_ops_4(void);
57 extern void ldv_initialyze_vb2_ops_5(void);
58 extern void ldv_initialyze_v4l2_ioctl_ops_3(void);
59 /*
60 * The Marvell camera core. This device appears in a number of settings,
61 * so it needs platform-specific support outside of the core.
62 *
63 * Copyright 2011 Jonathan Corbet corbet@lwn.net
64 */
65 #include <linux/kernel.h>
66 #include <linux/module.h>
67 #include <linux/fs.h>
68 #include <linux/mm.h>
69 #include <linux/i2c.h>
70 #include <linux/interrupt.h>
71 #include <linux/spinlock.h>
72 #include <linux/slab.h>
73 #include <linux/device.h>
74 #include <linux/wait.h>
75 #include <linux/list.h>
76 #include <linux/dma-mapping.h>
77 #include <linux/delay.h>
78 #include <linux/vmalloc.h>
79 #include <linux/io.h>
80 #include <linux/clk.h>
81 #include <linux/videodev2.h>
82 #include <media/v4l2-device.h>
83 #include <media/v4l2-ioctl.h>
84 #include <media/v4l2-ctrls.h>
85 #include <media/ov7670.h>
86 #include <media/videobuf2-vmalloc.h>
87 #include <media/videobuf2-dma-contig.h>
88 #include <media/videobuf2-dma-sg.h>
89
90 #include "mcam-core.h"
91
92 #ifdef MCAM_MODE_VMALLOC
93 /*
94 * Internal DMA buffer management. Since the controller cannot do S/G I/O,
95 * we must have physically contiguous buffers to bring frames into.
96 * These parameters control how many buffers we use, whether we
97 * allocate them at load time (better chance of success, but nails down
98 * memory) or when somebody tries to use the camera (riskier), and,
99 * for load-time allocation, how big they should be.
100 *
101 * The controller can cycle through three buffers. We could use
102 * more by flipping pointers around, but it probably makes little
103 * sense.
104 */
105
106 static bool alloc_bufs_at_read;
107 module_param(alloc_bufs_at_read, bool, 0444);
108 MODULE_PARM_DESC(alloc_bufs_at_read,
109 "Non-zero value causes DMA buffers to be allocated when the "
110 "video capture device is read, rather than at module load "
111 "time. This saves memory, but decreases the chances of "
112 "successfully getting those buffers. This parameter is "
113 "only used in the vmalloc buffer mode");
114
115 static int n_dma_bufs = 3;
116 module_param(n_dma_bufs, uint, 0644);
117 MODULE_PARM_DESC(n_dma_bufs,
118 "The number of DMA buffers to allocate. Can be either two "
119 "(saves memory, makes timing tighter) or three.");
120
121 static int dma_buf_size = VGA_WIDTH * VGA_HEIGHT * 2; /* Worst case */
122 module_param(dma_buf_size, uint, 0444);
123 MODULE_PARM_DESC(dma_buf_size,
124 "The size of the allocated DMA buffers. If actual operating "
125 "parameters require larger buffers, an attempt to reallocate "
126 "will be made.");
127 #else /* MCAM_MODE_VMALLOC */
128 static const bool alloc_bufs_at_read = 0;
129 static const int n_dma_bufs = 3; /* Used by S/G_PARM */
130 #endif /* MCAM_MODE_VMALLOC */
131
/* If set, the sensor is told to flip the captured image vertically. */
static bool flip;
module_param(flip, bool, 0444);
MODULE_PARM_DESC(flip,
		"If set, the sensor will be instructed to flip the image "
		"vertically.");

/* -1 = respect the platform driver's choice; 0 = vmalloc, 1 = DMA contig. */
static int buffer_mode = -1;
module_param(buffer_mode, int, 0444);
MODULE_PARM_DESC(buffer_mode,
		"Set the buffer mode to be used; default is to go with what "
		"the platform driver asks for. Set to 0 for vmalloc, 1 for "
		"DMA contiguous.");
144
145 /*
146 * Status flags. Always manipulated with bit operations.
147 */
148 #define CF_BUF0_VALID 0 /* Buffers valid - first three */
149 #define CF_BUF1_VALID 1
150 #define CF_BUF2_VALID 2
151 #define CF_DMA_ACTIVE 3 /* A frame is incoming */
152 #define CF_CONFIG_NEEDED 4 /* Must configure hardware */
153 #define CF_SINGLE_BUFFER 5 /* Running with a single buffer */
154 #define CF_SG_RESTART 6 /* SG restart needed */
155 #define CF_FRAME_SOF0 7 /* Frame 0 started */
156 #define CF_FRAME_SOF1 8
157 #define CF_FRAME_SOF2 9
158
159 #define sensor_call(cam, o, f, args...) \
160 v4l2_subdev_call(cam->sensor, o, f, ##args)
161
/*
 * Pixel formats the core can produce, with the media-bus code to ask
 * the sensor for.  NOTE(review): most entries (including UYVY) reuse
 * the YUYV8_2X8 bus code - presumably any byte reordering happens in
 * the controller's format setup; confirm against the S_FMT path.
 */
static struct mcam_format_struct {
	__u8 *desc;				/* human-readable name */
	__u32 pixelformat;			/* V4L2 fourcc */
	int bpp;   /* Bytes per pixel */
	bool planar;				/* separate Y/U/V planes? */
	enum v4l2_mbus_pixelcode mbus_code;	/* sensor-side bus format */
} mcam_formats[] = {
	{
		.desc		= "YUYV 4:2:2",
		.pixelformat	= V4L2_PIX_FMT_YUYV,
		.mbus_code	= V4L2_MBUS_FMT_YUYV8_2X8,
		.bpp		= 2,
		.planar		= false,
	},
	{
		.desc		= "UYVY 4:2:2",
		.pixelformat	= V4L2_PIX_FMT_UYVY,
		.mbus_code	= V4L2_MBUS_FMT_YUYV8_2X8,
		.bpp		= 2,
		.planar		= false,
	},
	{
		.desc		= "YUV 4:2:2 PLANAR",
		.pixelformat	= V4L2_PIX_FMT_YUV422P,
		.mbus_code	= V4L2_MBUS_FMT_YUYV8_2X8,
		.bpp		= 2,
		.planar		= true,
	},
	{
		.desc		= "YUV 4:2:0 PLANAR",
		.pixelformat	= V4L2_PIX_FMT_YUV420,
		.mbus_code	= V4L2_MBUS_FMT_YUYV8_2X8,
		.bpp		= 2,
		.planar		= true,
	},
	{
		.desc		= "YVU 4:2:0 PLANAR",
		.pixelformat	= V4L2_PIX_FMT_YVU420,
		.mbus_code	= V4L2_MBUS_FMT_YUYV8_2X8,
		.bpp		= 2,
		.planar		= true,
	},
	{
		.desc		= "RGB 444",
		.pixelformat	= V4L2_PIX_FMT_RGB444,
		.mbus_code	= V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE,
		.bpp		= 2,
		.planar		= false,
	},
	{
		.desc		= "RGB 565",
		.pixelformat	= V4L2_PIX_FMT_RGB565,
		.mbus_code	= V4L2_MBUS_FMT_RGB565_2X8_LE,
		.bpp		= 2,
		.planar		= false,
	},
	{
		.desc		= "Raw RGB Bayer",
		.pixelformat	= V4L2_PIX_FMT_SBGGR8,
		.mbus_code	= V4L2_MBUS_FMT_SBGGR8_1X8,
		.bpp		= 1,
		.planar		= false,
	},
};
#define N_MCAM_FMTS ARRAY_SIZE(mcam_formats)
227
228 static struct mcam_format_struct *mcam_find_format(u32 pixelformat)
229 {
230 unsigned i;
231
232 for (i = 0; i < N_MCAM_FMTS; i++)
233 if (mcam_formats[i].pixelformat == pixelformat)
234 return mcam_formats + i;
235 /* Not found? Then return the first format. */
236 return mcam_formats;
237 }
238
/*
 * The default format we use until somebody says otherwise: VGA YUYV.
 */
static const struct v4l2_pix_format mcam_def_pix_format = {
	.width		= VGA_WIDTH,
	.height		= VGA_HEIGHT,
	.pixelformat	= V4L2_PIX_FMT_YUYV,
	.field		= V4L2_FIELD_NONE,
	.bytesperline	= VGA_WIDTH*2,
	.sizeimage	= VGA_WIDTH*VGA_HEIGHT*2,
};

/* Matching default media-bus code for the sensor side. */
static const enum v4l2_mbus_pixelcode mcam_def_mbus_code =
	V4L2_MBUS_FMT_YUYV8_2X8;
253
254
/*
 * The two-word DMA descriptor format used by the Armada 610 and like.  There
 * Is a three-word format as well (set C1_DESC_3WORD) where the third
 * word is a pointer to the next descriptor, but we don't use it.  Two-word
 * descriptors have to be contiguous in memory.
 */
struct mcam_dma_desc {
	u32 dma_addr;		/* bus address of the segment */
	u32 segment_len;	/* segment length in bytes */
};

/* Per-plane DMA addresses used for planar YUV formats. */
struct yuv_pointer_t {
	dma_addr_t y;
	dma_addr_t u;
	dma_addr_t v;
};

/*
 * Our buffer type for working with videobuf2.  Note that the vb2
 * developers have decreed that struct vb2_buffer must be at the
 * beginning of this structure.
 */
struct mcam_vb_buffer {
	struct vb2_buffer vb_buf;	/* must remain first (vb2 rule) */
	struct list_head queue;		/* link on cam->buffers */
	struct mcam_dma_desc *dma_desc;	/* Descriptor virtual address */
	dma_addr_t dma_desc_pa;		/* Descriptor physical address */
	int dma_desc_nent;		/* Number of mapped descriptors */
	struct yuv_pointer_t yuv_p;	/* plane addresses (contig mode) */
};
285
/* Convert a generic vb2_buffer pointer into our wrapping mcam_vb_buffer. */
static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_buffer *vb)
{
	return container_of(vb, struct mcam_vb_buffer, vb_buf);
}
290
/*
 * Hand a completed buffer back to user space: fill in the byte count
 * and per-frame sequence number, then complete it through vb2.
 */
static void mcam_buffer_done(struct mcam_camera *cam, int frame,
		struct vb2_buffer *vbuf)
{
	vbuf->v4l2_buf.bytesused = cam->pix_format.sizeimage;
	vbuf->v4l2_buf.sequence = cam->buf_seq[frame];
	vb2_set_plane_payload(vbuf, 0, cam->pix_format.sizeimage);
	vb2_buffer_done(vbuf, VB2_BUF_STATE_DONE);
}
302
303
304
/*
 * Debugging and related.  Note the deliberate absence of trailing
 * semicolons in the macro bodies: the semicolon belongs at the call
 * site, otherwise "if (x) cam_err(...); else ..." fails to compile.
 */
#define cam_err(cam, fmt, arg...) \
	dev_err((cam)->dev, fmt, ##arg)
#define cam_warn(cam, fmt, arg...) \
	dev_warn((cam)->dev, fmt, ##arg)
#define cam_dbg(cam, fmt, arg...) \
	dev_dbg((cam)->dev, fmt, ##arg)
314
315
316 /*
317 * Flag manipulation helpers
318 */
319 static void mcam_reset_buffers(struct mcam_camera *cam)
320 {
321 int i;
322
323 cam->next_buf = -1;
324 for (i = 0; i < cam->nbufs; i++) {
325 clear_bit(i, &cam->flags);
326 clear_bit(CF_FRAME_SOF0 + i, &cam->flags);
327 }
328 }
329
/* Does the hardware need (re)configuration before the next start? */
static inline int mcam_needs_config(struct mcam_camera *cam)
{
	return test_bit(CF_CONFIG_NEEDED, &cam->flags);
}
334
/* Set or clear the "hardware must be reconfigured" flag. */
static void mcam_set_config_needed(struct mcam_camera *cam, int needed)
{
	if (needed)
		set_bit(CF_CONFIG_NEEDED, &cam->flags);
	else
		clear_bit(CF_CONFIG_NEEDED, &cam->flags);
}
342
343 /* ------------------------------------------------------------------- */
344 /*
345 * Make the controller start grabbing images. Everything must
346 * be set up before doing this.
347 */
/* Start capture by setting the controller enable bit. */
static void mcam_ctlr_start(struct mcam_camera *cam)
{
	/* set_bit performs a read, so no other barrier should be
	   needed here */
	mcam_reg_set_bit(cam, REG_CTRL0, C0_ENABLE);
}
354
/* Stop capture by clearing the controller enable bit. */
static void mcam_ctlr_stop(struct mcam_camera *cam)
{
	mcam_reg_clear_bit(cam, REG_CTRL0, C0_ENABLE);
}
359
/*
 * Program the MIPI D-PHY timing registers and, on first call only,
 * enable the CSI-2 receiver with the configured lane count and clock.
 * mipi_enabled guards against reprogramming on every stream start.
 */
static void mcam_enable_mipi(struct mcam_camera *mcam)
{
	/* Using MIPI mode and enable MIPI */
	cam_dbg(mcam, "camera: DPHY3=0x%x, DPHY5=0x%x, DPHY6=0x%x\n",
			mcam->dphy[0], mcam->dphy[1], mcam->dphy[2]);
	mcam_reg_write(mcam, REG_CSI2_DPHY3, mcam->dphy[0]);
	mcam_reg_write(mcam, REG_CSI2_DPHY5, mcam->dphy[1]);
	mcam_reg_write(mcam, REG_CSI2_DPHY6, mcam->dphy[2]);

	if (!mcam->mipi_enabled) {
		if (mcam->lane > 4 || mcam->lane <= 0) {
			cam_warn(mcam, "lane number error\n");
			mcam->lane = 1;	/* set the default value */
		}
		/*
		 * 0x41 actives 1 lane
		 * 0x43 actives 2 lanes
		 * 0x45 actives 3 lanes (never happen)
		 * 0x47 actives 4 lanes
		 */
		mcam_reg_write(mcam, REG_CSI2_CTRL0,
			CSI2_C0_MIPI_EN | CSI2_C0_ACT_LANE(mcam->lane));
		mcam_reg_write(mcam, REG_CLKCTRL,
			(mcam->mclk_src << 29) | mcam->mclk_div);

		mcam->mipi_enabled = true;
	}
}
388
/* Return to the parallel interface: zero all CSI-2/D-PHY registers. */
static void mcam_disable_mipi(struct mcam_camera *mcam)
{
	/* Using Parallel mode or disable MIPI */
	mcam_reg_write(mcam, REG_CSI2_CTRL0, 0x0);
	mcam_reg_write(mcam, REG_CSI2_DPHY3, 0x0);
	mcam_reg_write(mcam, REG_CSI2_DPHY5, 0x0);
	mcam_reg_write(mcam, REG_CSI2_DPHY6, 0x0);
	mcam->mipi_enabled = false;
}
398
399 /* ------------------------------------------------------------------- */
400
401 #ifdef MCAM_MODE_VMALLOC
402 /*
403 * Code specific to the vmalloc buffer mode.
404 */
405
/*
 * Allocate in-kernel DMA buffers for vmalloc mode.  "loadtime" selects
 * the module-parameter size (at module load) over the current image
 * size.  Returns 0 on success, -ENOMEM when fewer than two buffers
 * could be allocated (one buffer is useless to the controller).
 */
static int mcam_alloc_dma_bufs(struct mcam_camera *cam, int loadtime)
{
	int i;

	mcam_set_config_needed(cam, 1);
	if (loadtime)
		cam->dma_buf_size = dma_buf_size;
	else
		cam->dma_buf_size = cam->pix_format.sizeimage;
	if (n_dma_bufs > 3)
		n_dma_bufs = 3;	/* controller cycles through three at most */

	cam->nbufs = 0;
	for (i = 0; i < n_dma_bufs; i++) {
		cam->dma_bufs[i] = dma_alloc_coherent(cam->dev,
				cam->dma_buf_size, cam->dma_handles + i,
				GFP_KERNEL);
		if (cam->dma_bufs[i] == NULL) {
			cam_warn(cam, "Failed to allocate DMA buffer\n");
			break;
		}
		(cam->nbufs)++;
	}

	switch (cam->nbufs) {
	case 1:
		/* A single buffer cannot work; free it and fail below */
		dma_free_coherent(cam->dev, cam->dma_buf_size,
				cam->dma_bufs[0], cam->dma_handles[0]);
		cam->nbufs = 0;
		/* fall through */
	case 0:
		cam_err(cam, "Insufficient DMA buffers, cannot operate\n");
		return -ENOMEM;

	case 2:
		if (n_dma_bufs > 2)
			cam_warn(cam, "Will limp along with only 2 buffers\n");
		break;
	}
	return 0;
}
449
450 static void mcam_free_dma_bufs(struct mcam_camera *cam)
451 {
452 int i;
453
454 for (i = 0; i < cam->nbufs; i++) {
455 dma_free_coherent(cam->dev, cam->dma_buf_size,
456 cam->dma_bufs[i], cam->dma_handles[i]);
457 cam->dma_bufs[i] = NULL;
458 }
459 cam->nbufs = 0;
460 }
461
462
/*
 * Set up DMA buffers when operating in vmalloc mode
 */
static void mcam_ctlr_dma_vmalloc(struct mcam_camera *cam)
{
	/*
	 * Store the first two Y buffers (we aren't supporting
	 * planar formats for now, so no UV bufs).  Then either
	 * set the third if it exists, or tell the controller
	 * to just use two.
	 */
	mcam_reg_write(cam, REG_Y0BAR, cam->dma_handles[0]);
	mcam_reg_write(cam, REG_Y1BAR, cam->dma_handles[1]);
	if (cam->nbufs > 2) {
		mcam_reg_write(cam, REG_Y2BAR, cam->dma_handles[2]);
		mcam_reg_clear_bit(cam, REG_CTRL1, C1_TWOBUFS);
	} else
		mcam_reg_set_bit(cam, REG_CTRL1, C1_TWOBUFS);
	if (cam->chip_id == MCAM_CAFE)
		mcam_reg_write(cam, REG_UBAR, 0); /* 32 bits only */
}
484
/*
 * Copy data out to user space in the vmalloc case.  Runs as a tasklet:
 * walk the valid internal DMA buffers in order, copy each into the
 * next queued vb2 buffer and complete it.  The device lock is dropped
 * around the (potentially large) memcpy.
 */
static void mcam_frame_tasklet(unsigned long data)
{
	struct mcam_camera *cam = (struct mcam_camera *) data;
	int i;
	unsigned long flags;
	struct mcam_vb_buffer *buf;

	spin_lock_irqsave(&cam->dev_lock, flags);
	for (i = 0; i < cam->nbufs; i++) {
		int bufno = cam->next_buf;

		if (cam->state != S_STREAMING || bufno < 0)
			break;	/* I/O got stopped */
		if (++(cam->next_buf) >= cam->nbufs)
			cam->next_buf = 0;
		if (!test_bit(bufno, &cam->flags))
			continue;	/* nothing valid in this slot */
		if (list_empty(&cam->buffers)) {
			cam->frame_state.singles++;
			break;	/* Leave it valid, hope for better later */
		}
		cam->frame_state.delivered++;
		clear_bit(bufno, &cam->flags);
		buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer,
				queue);
		list_del_init(&buf->queue);
		/*
		 * Drop the lock during the big copy.  This *should* be safe...
		 */
		spin_unlock_irqrestore(&cam->dev_lock, flags);
		memcpy(vb2_plane_vaddr(&buf->vb_buf, 0), cam->dma_bufs[bufno],
				cam->pix_format.sizeimage);
		mcam_buffer_done(cam, bufno, &buf->vb_buf);
		spin_lock_irqsave(&cam->dev_lock, flags);
	}
	spin_unlock_irqrestore(&cam->dev_lock, flags);
}
525
526
527 /*
528 * Make sure our allocated buffers are up to the task.
529 */
530 static int mcam_check_dma_buffers(struct mcam_camera *cam)
531 {
532 if (cam->nbufs > 0 && cam->dma_buf_size < cam->pix_format.sizeimage)
533 mcam_free_dma_bufs(cam);
534 if (cam->nbufs == 0)
535 return mcam_alloc_dma_bufs(cam, 0);
536 return 0;
537 }
538
/* Frame-complete hook in vmalloc mode: defer the copy-out to the tasklet. */
static void mcam_vmalloc_done(struct mcam_camera *cam, int frame)
{
	tasklet_schedule(&cam->s_tasklet);
}
543
544 #else /* MCAM_MODE_VMALLOC */
545
/* No-op stubs used when vmalloc buffer mode is compiled out. */
static inline int mcam_alloc_dma_bufs(struct mcam_camera *cam, int loadtime)
{
	return 0;	/* nothing to allocate */
}

static inline void mcam_free_dma_bufs(struct mcam_camera *cam)
{
	return;
}

static inline int mcam_check_dma_buffers(struct mcam_camera *cam)
{
	return 0;	/* always "fine" */
}
560
561
562
563 #endif /* MCAM_MODE_VMALLOC */
564
565
566 #ifdef MCAM_MODE_DMA_CONTIG
567 /* ---------------------------------------------------------------------- */
568 /*
569 * DMA-contiguous code.
570 */
571
572 static bool mcam_fmt_is_planar(__u32 pfmt)
573 {
574 struct mcam_format_struct *f;
575
576 f = mcam_find_format(pfmt);
577 return f->planar;
578 }
579
/*
 * Set up a contiguous buffer for the given frame.  Here also is where
 * the underrun strategy is set: if there is no buffer available, reuse
 * the buffer from the other BAR and set the CF_SINGLE_BUFFER flag to
 * keep the interrupt handler from giving that buffer back to user
 * space.  In this way, we always have a buffer to DMA to and don't
 * have to try to play games stopping and restarting the controller.
 */
static void mcam_set_contig_buffer(struct mcam_camera *cam, int frame)
{
	struct mcam_vb_buffer *buf;
	struct v4l2_pix_format *fmt = &cam->pix_format;
	dma_addr_t dma_handle;
	u32 pixel_count = fmt->width * fmt->height;
	struct vb2_buffer *vb;

	/*
	 * If there are no available buffers, go into single mode
	 */
	if (list_empty(&cam->buffers)) {
		buf = cam->vb_bufs[frame ^ 0x1];	/* reuse other BAR's buffer */
		set_bit(CF_SINGLE_BUFFER, &cam->flags);
		cam->frame_state.singles++;
	} else {
		/*
		 * OK, we have a buffer we can use.
		 */
		buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer,
				queue);
		list_del_init(&buf->queue);
		clear_bit(CF_SINGLE_BUFFER, &cam->flags);
	}

	cam->vb_bufs[frame] = buf;
	vb = &buf->vb_buf;

	dma_handle = vb2_dma_contig_plane_dma_addr(vb, 0);
	buf->yuv_p.y = dma_handle;

	/* Derive U/V plane addresses for the planar formats */
	switch (cam->pix_format.pixelformat) {
	case V4L2_PIX_FMT_YUV422P:
		buf->yuv_p.u = buf->yuv_p.y + pixel_count;
		buf->yuv_p.v = buf->yuv_p.u + pixel_count / 2;
		break;
	case V4L2_PIX_FMT_YUV420:
		buf->yuv_p.u = buf->yuv_p.y + pixel_count;
		buf->yuv_p.v = buf->yuv_p.u + pixel_count / 4;
		break;
	case V4L2_PIX_FMT_YVU420:
		buf->yuv_p.v = buf->yuv_p.y + pixel_count;
		buf->yuv_p.u = buf->yuv_p.v + pixel_count / 4;
		break;
	default:
		break;
	}

	mcam_reg_write(cam, frame == 0 ? REG_Y0BAR : REG_Y1BAR, buf->yuv_p.y);
	if (mcam_fmt_is_planar(fmt->pixelformat)) {
		mcam_reg_write(cam, frame == 0 ?
			REG_U0BAR : REG_U1BAR, buf->yuv_p.u);
		mcam_reg_write(cam, frame == 0 ?
			REG_V0BAR : REG_V1BAR, buf->yuv_p.v);
	}
}
644
645 /*
646 * Initial B_DMA_contig setup.
647 */
/*
 * Initial DMA-contiguous setup: run the controller in two-buffer
 * (ping-pong) mode and load both frame slots.
 */
648 static void mcam_ctlr_dma_contig(struct mcam_camera *cam)
649 {
650 mcam_reg_set_bit(cam, REG_CTRL1, C1_TWOBUFS);
651 cam->nbufs = 2;
652 mcam_set_contig_buffer(cam, 0);
653 mcam_set_contig_buffer(cam, 1);
654 }
655
656 /*
657 * Frame completion handling.
658 */
/*
 * Contiguous-mode frame completion: hand the finished buffer to user
 * space (unless we were reusing it in single-buffer mode) and rearm
 * the frame slot with the next buffer.
 */
659 static void mcam_dma_contig_done(struct mcam_camera *cam, int frame)
660 {
661 struct mcam_vb_buffer *buf = cam->vb_bufs[frame];
662
/* In single-buffer mode this buffer is shared with the other slot,
   so it must not be delivered. */
663 if (!test_bit(CF_SINGLE_BUFFER, &cam->flags)) {
664 cam->frame_state.delivered++;
665 mcam_buffer_done(cam, frame, &buf->vb_buf);
666 }
667 mcam_set_contig_buffer(cam, frame);
668 }
669
670 #endif /* MCAM_MODE_DMA_CONTIG */
671
672 #ifdef MCAM_MODE_DMA_SG
673 /* ---------------------------------------------------------------------- */
674 /*
675 * Scatter/gather-specific code.
676 */
677
678 /*
679 * Set up the next buffer for S/G I/O; caller should be sure that
680 * the controller is stopped and a buffer is available.
681 */
/*
 * Load the next queued buffer's DMA descriptor chain into the
 * controller.  Caller must ensure the controller is stopped and that
 * the buffer list is non-empty.
 */
682 static void mcam_sg_next_buffer(struct mcam_camera *cam)
683 {
684 struct mcam_vb_buffer *buf;
685
686 buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer, queue);
687 list_del_init(&buf->queue);
688 /*
689 * Very Bad Not Good Things happen if you don't clear
690 * C1_DESC_ENA before making any descriptor changes.
691 */
692 mcam_reg_clear_bit(cam, REG_CTRL1, C1_DESC_ENA);
693 mcam_reg_write(cam, REG_DMA_DESC_Y, buf->dma_desc_pa);
694 mcam_reg_write(cam, REG_DESC_LEN_Y,
695 buf->dma_desc_nent*sizeof(struct mcam_dma_desc));
/* S/G mode only uses the Y descriptor chain; U/V chains are unused. */
696 mcam_reg_write(cam, REG_DESC_LEN_U, 0);
697 mcam_reg_write(cam, REG_DESC_LEN_V, 0);
698 mcam_reg_set_bit(cam, REG_CTRL1, C1_DESC_ENA);
/* In S/G mode only slot 0 is used to track the active buffer. */
699 cam->vb_bufs[0] = buf;
700 }
701
702 /*
703 * Initial B_DMA_sg setup
704 */
/*
 * Initial scatter/gather setup: select 2-word descriptors and load the
 * first queued buffer.  If no buffer is queued (e.g. at resume), defer
 * via CF_SG_RESTART until one arrives.
 */
705 static void mcam_ctlr_dma_sg(struct mcam_camera *cam)
706 {
707 /*
708 * The list-empty condition can hit us at resume time
709 * if the buffer list was empty when the system was suspended.
710 */
711 if (list_empty(&cam->buffers)) {
712 set_bit(CF_SG_RESTART, &cam->flags);
713 return;
714 }
715
716 mcam_reg_clear_bit(cam, REG_CTRL1, C1_DESC_3WORD);
717 mcam_sg_next_buffer(cam);
/* NOTE(review): nbufs appears to act as a frame-interrupt count in
   S/G mode rather than a real buffer count — confirm against the
   interrupt handler. */
718 cam->nbufs = 3;
719 }
720
721
722 /*
723 * Frame completion with S/G is trickier. We can't muck with
724 * a descriptor chain on the fly, since the controller buffers it
725 * internally. So we have to actually stop and restart; Marvell
726 * says this is the way to do it.
727 *
728 * Of course, stopping is easier said than done; experience shows
729 * that the controller can start a frame *after* C0_ENABLE has been
730 * cleared. So when running in S/G mode, the controller is "stopped"
731 * on receipt of the start-of-frame interrupt. That means we can
732 * safely change the DMA descriptor array here and restart things
733 * (assuming there's another buffer waiting to go).
734 */
/*
 * S/G-mode frame completion: swap in the next descriptor chain (or
 * flag a restart if none is available) and deliver the finished
 * buffer to user space.
 */
735 static void mcam_dma_sg_done(struct mcam_camera *cam, int frame)
736 {
737 struct mcam_vb_buffer *buf = cam->vb_bufs[0];
738
739 /*
740 * If we're no longer supposed to be streaming, don't do anything.
741 */
742 if (cam->state != S_STREAMING)
743 return;
744 /*
745 * If we have another buffer available, put it in and
746 * restart the engine.
747 */
748 if (!list_empty(&cam->buffers)) {
749 mcam_sg_next_buffer(cam);
750 mcam_ctlr_start(cam);
751 /*
752 * Otherwise set CF_SG_RESTART and the controller will
753 * be restarted once another buffer shows up.
754 */
755 } else {
756 set_bit(CF_SG_RESTART, &cam->flags);
757 cam->frame_state.singles++;
758 cam->vb_bufs[0] = NULL;
759 }
760 /*
761 * Now we can give the completed frame back to user space.
762 */
763 cam->frame_state.delivered++;
764 mcam_buffer_done(cam, frame, &buf->vb_buf);
765 }
766
767
768 /*
769 * Scatter/gather mode requires stopping the controller between
770 * frames so we can put in a new DMA descriptor array. If no new
771 * buffer exists at frame completion, the controller is left stopped;
772 * this function is charged with getting things going again.
773 */
/*
 * Restart a stopped S/G-mode controller once a buffer has become
 * available: reload the descriptor chain, start the engine, and clear
 * the pending-restart flag.
 */
774 static void mcam_sg_restart(struct mcam_camera *cam)
775 {
776 mcam_ctlr_dma_sg(cam);
777 mcam_ctlr_start(cam);
778 clear_bit(CF_SG_RESTART, &cam->flags);
779 }
780
781 #else /* MCAM_MODE_DMA_SG */
782
/* No-op stand-in when scatter/gather mode is compiled out. */
783 static inline void mcam_sg_restart(struct mcam_camera *cam)
784 {
785 return;
786 }
787
788 #endif /* MCAM_MODE_DMA_SG */
789
790 /* ---------------------------------------------------------------------- */
791 /*
792 * Buffer-mode-independent controller code.
793 */
794
795 /*
796 * Image format setup
797 */
/*
 * Program the controller's image geometry (size, line pitches) and
 * pixel-format registers from the cached v4l2_pix_format.  Called with
 * dev_lock held from mcam_ctlr_configure().
 */
798 static void mcam_ctlr_image(struct mcam_camera *cam)
799 {
800 struct v4l2_pix_format *fmt = &cam->pix_format;
801 u32 widthy = 0, widthuv = 0, imgsz_h, imgsz_w;
802
803 cam_dbg(cam, "camera: bytesperline = %d; height = %d\n",
804 fmt->bytesperline, fmt->sizeimage / fmt->bytesperline);
805 imgsz_h = (fmt->height << IMGSZ_V_SHIFT) & IMGSZ_V_MASK;
/* Horizontal size is programmed in bytes; two bytes/pixel here.
   NOTE(review): assumes 2 bpp for the width field even for planar
   formats — confirm against the datasheet. */
806 imgsz_w = (fmt->width * 2) & IMGSZ_H_MASK;
807
/* Per-format Y and U/V line pitches. */
808 switch (fmt->pixelformat) {
809 case V4L2_PIX_FMT_YUYV:
810 case V4L2_PIX_FMT_UYVY:
811 widthy = fmt->width * 2;
812 widthuv = 0;
813 break;
814 case V4L2_PIX_FMT_JPEG:
/* JPEG output has no fixed geometry; derive "height" from the
   buffer size and line length. */
815 imgsz_h = (fmt->sizeimage / fmt->bytesperline) << IMGSZ_V_SHIFT;
816 widthy = fmt->bytesperline;
817 widthuv = 0;
818 break;
819 case V4L2_PIX_FMT_YUV422P:
820 case V4L2_PIX_FMT_YUV420:
821 case V4L2_PIX_FMT_YVU420:
822 widthy = fmt->width;
823 widthuv = fmt->width / 2;
824 break;
825 default:
826 widthy = fmt->bytesperline;
827 widthuv = 0;
828 }
829
830 mcam_reg_write_mask(cam, REG_IMGPITCH, widthuv << 16 | widthy,
831 IMGP_YP_MASK | IMGP_UVP_MASK);
832 mcam_reg_write(cam, REG_IMGSIZE, imgsz_h | imgsz_w);
833 mcam_reg_write(cam, REG_IMGOFFSET, 0x0);
834
835 /*
836 * Tell the controller about the image format we are using.
837 */
838 switch (fmt->pixelformat) {
839 case V4L2_PIX_FMT_YUV422P:
840 mcam_reg_write_mask(cam, REG_CTRL0,
841 C0_DF_YUV | C0_YUV_PLANAR | C0_YUVE_YVYU, C0_DF_MASK);
842 break;
843 case V4L2_PIX_FMT_YUV420:
844 case V4L2_PIX_FMT_YVU420:
845 mcam_reg_write_mask(cam, REG_CTRL0,
846 C0_DF_YUV | C0_YUV_420PL | C0_YUVE_YVYU, C0_DF_MASK);
847 break;
848 case V4L2_PIX_FMT_YUYV:
849 mcam_reg_write_mask(cam, REG_CTRL0,
850 C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_UYVY, C0_DF_MASK);
851 break;
852 case V4L2_PIX_FMT_UYVY:
853 mcam_reg_write_mask(cam, REG_CTRL0,
854 C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_YUYV, C0_DF_MASK);
855 break;
856 case V4L2_PIX_FMT_JPEG:
/* JPEG bytes pass through using the packed-YUV data path. */
857 mcam_reg_write_mask(cam, REG_CTRL0,
858 C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_YUYV, C0_DF_MASK);
859 break;
860 case V4L2_PIX_FMT_RGB444:
861 mcam_reg_write_mask(cam, REG_CTRL0,
862 C0_DF_RGB | C0_RGBF_444 | C0_RGB4_XRGB, C0_DF_MASK);
863 /* Alpha value? */
864 break;
865 case V4L2_PIX_FMT_RGB565:
866 mcam_reg_write_mask(cam, REG_CTRL0,
867 C0_DF_RGB | C0_RGBF_565 | C0_RGB5_BGGR, C0_DF_MASK);
868 break;
869 default:
870 cam_err(cam, "camera: unknown format: %#x\n", fmt->pixelformat);
871 break;
872 }
873
874 /*
875 * Make sure it knows we want to use hsync/vsync.
876 */
877 mcam_reg_write_mask(cam, REG_CTRL0, C0_SIF_HVSYNC, C0_SIFM_MASK);
878 /*
879 * This field controls the generation of EOF(DVP only)
880 */
881 if (cam->bus_type != V4L2_MBUS_CSI2)
882 mcam_reg_set_bit(cam, REG_CTRL0,
883 C0_EOF_VSYNC | C0_VEDGE_CTRL);
884 }
885
886
887 /*
888 * Configure the controller for operation; caller holds the
889 * device mutex.
890 */
/*
 * Configure the controller for operation under dev_lock: run the
 * buffer-mode-specific DMA setup, program the image format, and clear
 * the "needs config" flag.  Always returns 0.
 */
891 static int mcam_ctlr_configure(struct mcam_camera *cam)
892 {
893 unsigned long flags;
894
895 spin_lock_irqsave(&cam->dev_lock, flags);
/* Any pending S/G restart is obsolete once we reconfigure. */
896 clear_bit(CF_SG_RESTART, &cam->flags);
897 cam->dma_setup(cam);
898 mcam_ctlr_image(cam);
899 mcam_set_config_needed(cam, 0);
900 spin_unlock_irqrestore(&cam->dev_lock, flags);
901 return 0;
902 }
903
/* Enable frame interrupts, first acking any stale pending ones. */
904 static void mcam_ctlr_irq_enable(struct mcam_camera *cam)
905 {
906 /*
907 * Clear any pending interrupts, since we do not
908 * expect to have I/O active prior to enabling.
909 */
910 mcam_reg_write(cam, REG_IRQSTAT, FRAMEIRQS);
911 mcam_reg_set_bit(cam, REG_IRQMASK, FRAMEIRQS);
912 }
913
/* Mask off all frame interrupts. */
914 static void mcam_ctlr_irq_disable(struct mcam_camera *cam)
915 {
916 mcam_reg_clear_bit(cam, REG_IRQMASK, FRAMEIRQS);
917 }
918
919
920
/*
 * One-time controller bring-up: power it on, make sure capture is
 * disabled, and set the sensor clock divider.  Runs under dev_lock.
 */
921 static void mcam_ctlr_init(struct mcam_camera *cam)
922 {
923 unsigned long flags;
924
925 spin_lock_irqsave(&cam->dev_lock, flags);
926 /*
927 * Make sure it's not powered down.
928 */
929 mcam_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
930 /*
931 * Turn off the enable bit. It sure should be off anyway,
932 * but it's good to be sure.
933 */
934 mcam_reg_clear_bit(cam, REG_CTRL0, C0_ENABLE);
935 /*
936 * Clock the sensor appropriately. Controller clock should
937 * be 48MHz, sensor "typical" value is half that.
938 */
939 mcam_reg_write_mask(cam, REG_CLKCTRL, 2, CLK_DIV_MASK);
940 spin_unlock_irqrestore(&cam->dev_lock, flags);
941 }
942
943
944 /*
945 * Stop the controller, and don't return until we're really sure that no
946 * further DMA is going on.
947 */
/*
 * Stop the controller and wait (via a long sleep) until DMA has really
 * quiesced, then disable frame interrupts.  Sleeps, so must be called
 * from process context without dev_lock held.
 */
948 static void mcam_ctlr_stop_dma(struct mcam_camera *cam)
949 {
950 unsigned long flags;
951
952 /*
953 * Theory: stop the camera controller (whether it is operating
954 * or not). Delay briefly just in case we race with the SOF
955 * interrupt, then wait until no DMA is active.
956 */
957 spin_lock_irqsave(&cam->dev_lock, flags);
958 clear_bit(CF_SG_RESTART, &cam->flags);
959 mcam_ctlr_stop(cam);
960 cam->state = S_IDLE;
961 spin_unlock_irqrestore(&cam->dev_lock, flags);
962 /*
963 * This is a brutally long sleep, but experience shows that
964 * it can take the controller a while to get the message that
965 * it needs to stop grabbing frames. In particular, we can
966 * sometimes (on mmp) get a frame at the end WITHOUT the
967 * start-of-frame indication.
968 */
969 msleep(150);
/* If DMA is still flagged active after the sleep, all we can do is
   complain; there is no further recovery path here. */
970 if (test_bit(CF_DMA_ACTIVE, &cam->flags))
971 cam_err(cam, "Timeout waiting for DMA to end\n");
972 /* This would be bad news - what now? */
973 spin_lock_irqsave(&cam->dev_lock, flags);
974 mcam_ctlr_irq_disable(cam);
975 spin_unlock_irqrestore(&cam->dev_lock, flags);
976 }
977
978 /*
979 * Power up and down.
980 */
/*
 * Power the controller (and, via the platform hook, the sensor) up.
 * Returns 0 or the platform callback's error.  Sleeps briefly after
 * power-on, so process context only.
 */
981 static int mcam_ctlr_power_up(struct mcam_camera *cam)
982 {
983 unsigned long flags;
984 int ret;
985
986 spin_lock_irqsave(&cam->dev_lock, flags);
987 ret = cam->plat_power_up(cam);
988 if (ret) {
989 spin_unlock_irqrestore(&cam->dev_lock, flags);
990 return ret;
991 }
992 mcam_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
993 spin_unlock_irqrestore(&cam->dev_lock, flags);
994 msleep(5); /* Just to be sure */
995 return 0;
996 }
997
/* Power the controller down; register twiddling must precede the
   platform power-down callback (see comment below). */
998 static void mcam_ctlr_power_down(struct mcam_camera *cam)
999 {
1000 unsigned long flags;
1001
1002 spin_lock_irqsave(&cam->dev_lock, flags);
1003 /*
1004 * School of hard knocks department: be sure we do any register
1005 * twiddling on the controller *before* calling the platform
1006 * power down routine.
1007 */
1008 mcam_reg_set_bit(cam, REG_CTRL1, C1_PWRDWN);
1009 cam->plat_power_down(cam);
1010 spin_unlock_irqrestore(&cam->dev_lock, flags);
1011 }
1012
1013 /* -------------------------------------------------------------------- */
1014 /*
1015 * Communications with the sensor.
1016 */
1017
/* Ask the sensor subdev to reset itself; caller holds s_mutex. */
1018 static int __mcam_cam_reset(struct mcam_camera *cam)
1019 {
1020 return sensor_call(cam, core, reset, 0);
1021 }
1022
1023 /*
1024 * We have found the sensor on the i2c. Let's try to have a
1025 * conversation.
1026 */
/*
 * First contact with the sensor after probe: reset it, mark the device
 * idle, and power the controller back down until it is actually used.
 * Returns the sensor reset result.
 */
1027 static int mcam_cam_init(struct mcam_camera *cam)
1028 {
1029 int ret;
1030
1031 mutex_lock(&cam->s_mutex);
1032 if (cam->state != S_NOTREADY)
1033 cam_warn(cam, "Cam init with device in funky state %d",
1034 cam->state);
1035 ret = __mcam_cam_reset(cam);
1036 /* Get/set parameters? */
1037 cam->state = S_IDLE;
1038 mcam_ctlr_power_down(cam);
1039 mutex_unlock(&cam->s_mutex);
1040 return ret;
1041 }
1042
1043 /*
1044 * Configure the sensor to match the parameters we have. Caller should
1045 * hold s_mutex
1046 */
1047 static int mcam_cam_set_flip(struct mcam_camera *cam)
1048 {
1049 struct v4l2_control ctrl;
1050
1051 memset(&ctrl, 0, sizeof(ctrl));
1052 ctrl.id = V4L2_CID_VFLIP;
1053 ctrl.value = flip;
1054 return sensor_call(cam, core, s_ctrl, &ctrl);
1055 }
1056
1057
1058 static int mcam_cam_configure(struct mcam_camera *cam)
1059 {
1060 struct v4l2_mbus_framefmt mbus_fmt;
1061 int ret;
1062
1063 v4l2_fill_mbus_format(&mbus_fmt, &cam->pix_format, cam->mbus_code);
1064 ret = sensor_call(cam, core, init, 0);
1065 if (ret == 0)
1066 ret = sensor_call(cam, video, s_mbus_fmt, &mbus_fmt);
1067 /*
1068 * OV7670 does weird things if flip is set *before* format...
1069 */
1070 ret += mcam_cam_set_flip(cam);
1071 return ret;
1072 }
1073
1074 /*
1075 * Get everything ready, and start grabbing frames.
1076 */
/*
 * Final preparation before streaming: make sure buffers and
 * configuration exist, reset per-frame state, select MIPI vs. DVP,
 * enable interrupts, and start the controller.  Returns 0 or a
 * negative errno.
 */
1077 static int mcam_read_setup(struct mcam_camera *cam)
1078 {
1079 int ret;
1080 unsigned long flags;
1081
1082 /*
1083 * Configuration. If we still don't have DMA buffers,
1084 * make one last, desperate attempt.
1085 */
1086 if (cam->buffer_mode == B_vmalloc && cam->nbufs == 0 &&
1087 mcam_alloc_dma_bufs(cam, 0))
1088 return -ENOMEM;
1089
1090 if (mcam_needs_config(cam)) {
/* NOTE(review): mcam_cam_configure()'s return value is ignored here;
   a sensor configuration failure goes unreported — confirm intent. */
1091 mcam_cam_configure(cam);
1092 ret = mcam_ctlr_configure(cam);
1093 if (ret)
1094 return ret;
1095 }
1096
1097 /*
1098 * Turn it loose.
1099 */
1100 spin_lock_irqsave(&cam->dev_lock, flags);
1101 clear_bit(CF_DMA_ACTIVE, &cam->flags);
1102 mcam_reset_buffers(cam);
1103 /*
1104 * Update CSI2_DPHY value
1105 */
1106 if (cam->calc_dphy)
1107 cam->calc_dphy(cam);
1108 cam_dbg(cam, "camera: DPHY sets: dphy3=0x%x, dphy5=0x%x, dphy6=0x%x\n",
1109 cam->dphy[0], cam->dphy[1], cam->dphy[2]);
1110 if (cam->bus_type == V4L2_MBUS_CSI2)
1111 mcam_enable_mipi(cam);
1112 else
1113 mcam_disable_mipi(cam);
1114 mcam_ctlr_irq_enable(cam);
1115 cam->state = S_STREAMING;
/* In S/G mode with no queued buffer, dma_setup set CF_SG_RESTART;
   the engine will be started when a buffer is queued. */
1116 if (!test_bit(CF_SG_RESTART, &cam->flags))
1117 mcam_ctlr_start(cam);
1118 spin_unlock_irqrestore(&cam->dev_lock, flags);
1119 return 0;
1120 }
1121
1122 /* ----------------------------------------------------------------------- */
1123 /*
1124 * Videobuf2 interface code.
1125 */
1126
/*
 * vb2 queue_setup callback: report one plane of sizeimage bytes and
 * enforce the per-mode minimum buffer count.
 */
1127 static int mcam_vb_queue_setup(struct vb2_queue *vq,
1128 const struct v4l2_format *fmt, unsigned int *nbufs,
1129 unsigned int *num_planes, unsigned int sizes[],
1130 void *alloc_ctxs[])
1131 {
1132 struct mcam_camera *cam = vb2_get_drv_priv(vq);
/* Contig mode ping-pongs two hardware slots, so it needs a third
   buffer to avoid constant single-buffer operation. */
1133 int minbufs = (cam->buffer_mode == B_DMA_contig) ? 3 : 2;
1134
1135 sizes[0] = cam->pix_format.sizeimage;
1136 *num_planes = 1; /* Someday we have to support planar formats... */
1137 if (*nbufs < minbufs)
1138 *nbufs = minbufs;
1139 if (cam->buffer_mode == B_DMA_contig)
1140 alloc_ctxs[0] = cam->vb_alloc_ctx;
1141 return 0;
1142 }
1143
1144
/*
 * vb2 buf_queue callback: add the buffer to our list, kick a pending
 * S/G restart, and — if we were parked in S_BUFWAIT — start streaming
 * for real now that a destination exists.
 */
1145 static void mcam_vb_buf_queue(struct vb2_buffer *vb)
1146 {
1147 struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
1148 struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
1149 unsigned long flags;
1150 int start;
1151
1152 spin_lock_irqsave(&cam->dev_lock, flags);
/* NOTE(review): "start" is computed before list_add(), so it tests the
   list state prior to this buffer's insertion — confirm that is the
   intended condition. */
1153 start = (cam->state == S_BUFWAIT) && !list_empty(&cam->buffers);
1154 list_add(&mvb->queue, &cam->buffers);
1155 if (cam->state == S_STREAMING && test_bit(CF_SG_RESTART, &cam->flags))
1156 mcam_sg_restart(cam);
1157 spin_unlock_irqrestore(&cam->dev_lock, flags);
/* mcam_read_setup() sleeps; call it outside the spinlock. */
1158 if (start)
1159 mcam_read_setup(cam);
1160 }
1161
1162
1163 /*
1164 * vb2 uses these to release the mutex when waiting in dqbuf. I'm
1165 * not actually sure we need to do this (I'm not sure that vb2_dqbuf() needs
1166 * to be called with the mutex held), but better safe than sorry.
1167 */
/* Drop s_mutex while vb2 sleeps in dqbuf... */
1168 static void mcam_vb_wait_prepare(struct vb2_queue *vq)
1169 {
1170 struct mcam_camera *cam = vb2_get_drv_priv(vq);
1171
1172 mutex_unlock(&cam->s_mutex);
1173 }
1174
/* ...and retake it when the wait completes. */
1175 static void mcam_vb_wait_finish(struct vb2_queue *vq)
1176 {
1177 struct mcam_camera *cam = vb2_get_drv_priv(vq);
1178
1179 mutex_lock(&cam->s_mutex);
1180 }
1181
1182 /*
1183 * These need to be called with the mutex held from vb2
1184 */
/*
 * vb2 start_streaming callback; called with s_mutex held.  Either
 * starts capture immediately or parks in S_BUFWAIT until buffers are
 * queued.  Returns 0 or a negative errno.
 */
1185 static int mcam_vb_start_streaming(struct vb2_queue *vq, unsigned int count)
1186 {
1187 struct mcam_camera *cam = vb2_get_drv_priv(vq);
1188 unsigned int frame;
1189
1190 if (cam->state != S_IDLE) {
/* NOTE(review): this discards any queued buffers without returning
   them to vb2 — confirm vb2 reclaims them on the error path. */
1191 INIT_LIST_HEAD(&cam->buffers);
1192 return -EINVAL;
1193 }
1194 cam->sequence = 0;
1195 /*
1196 * Videobuf2 sneakily hoards all the buffers and won't
1197 * give them to us until *after* streaming starts. But
1198 * we can't actually start streaming until we have a
1199 * destination. So go into a wait state and hope they
1200 * give us buffers soon.
1201 */
1202 if (cam->buffer_mode != B_vmalloc && list_empty(&cam->buffers)) {
1203 cam->state = S_BUFWAIT;
1204 return 0;
1205 }
1206
1207 /*
1208 * Clear any leftover start-of-frame flags from a previous
1209 * run before streaming really begins.
1210 */
1211 for (frame = 0; frame < cam->nbufs; frame++)
1212 clear_bit(CF_FRAME_SOF0 + frame, &cam->flags);
1213
1214 return mcam_read_setup(cam);
1215 }
1216
/*
 * vb2 stop_streaming callback; called with s_mutex held.  Stops DMA,
 * optionally resets the CCIC PHY, and forgets our buffer list (vb2
 * reclaims the buffers themselves).
 */
1217 static void mcam_vb_stop_streaming(struct vb2_queue *vq)
1218 {
1219 struct mcam_camera *cam = vb2_get_drv_priv(vq);
1220 unsigned long flags;
1221
1222 if (cam->state == S_BUFWAIT) {
1223 /* They never gave us buffers */
1224 cam->state = S_IDLE;
1225 return;
1226 }
1227 if (cam->state != S_STREAMING)
1228 return;
1229 mcam_ctlr_stop_dma(cam);
1230 /*
1231 * Reset the CCIC PHY after stopping streaming,
1232 * otherwise, the CCIC may be unstable.
1233 */
1234 if (cam->ctlr_reset)
1235 cam->ctlr_reset(cam);
1236 /*
1237 * VB2 reclaims the buffers, so we need to forget
1238 * about them.
1239 */
1240 spin_lock_irqsave(&cam->dev_lock, flags);
1241 INIT_LIST_HEAD(&cam->buffers);
1242 spin_unlock_irqrestore(&cam->dev_lock, flags);
1243 }
1244
1245
/* vb2 operations shared by the contig and vmalloc buffer modes. */
1246 static const struct vb2_ops mcam_vb2_ops = {
1247 .queue_setup = mcam_vb_queue_setup,
1248 .buf_queue = mcam_vb_buf_queue,
1249 .start_streaming = mcam_vb_start_streaming,
1250 .stop_streaming = mcam_vb_stop_streaming,
1251 .wait_prepare = mcam_vb_wait_prepare,
1252 .wait_finish = mcam_vb_wait_finish,
1253 };
1254
1255
1256 #ifdef MCAM_MODE_DMA_SG
1257 /*
1258 * Scatter/gather mode uses all of the above functions plus a
1259 * few extras to deal with DMA mapping.
1260 */
/*
 * vb2 buf_init (S/G mode): allocate the coherent DMA-descriptor array
 * for this buffer; one descriptor per page plus slack.  Returns 0 or
 * -ENOMEM.
 */
1261 static int mcam_vb_sg_buf_init(struct vb2_buffer *vb)
1262 {
1263 struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
1264 struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
/* Worst-case descriptor count; must match the size used in cleanup. */
1265 int ndesc = cam->pix_format.sizeimage/PAGE_SIZE + 1;
1266
1267 mvb->dma_desc = dma_alloc_coherent(cam->dev,
1268 ndesc * sizeof(struct mcam_dma_desc),
1269 &mvb->dma_desc_pa, GFP_KERNEL);
1270 if (mvb->dma_desc == NULL) {
1271 cam_err(cam, "Unable to get DMA descriptor array\n");
1272 return -ENOMEM;
1273 }
1274 return 0;
1275 }
1276
/*
 * vb2 buf_prepare (S/G mode): map the buffer's scatterlist for DMA and
 * translate each mapped segment into a controller DMA descriptor.
 * Returns 0 or -EIO if the mapping fails.
 */
1277 static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
1278 {
1279 struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
1280 struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
1281 struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
1282 struct mcam_dma_desc *desc = mvb->dma_desc;
1283 struct scatterlist *sg;
1284 int i;
1285
1286 mvb->dma_desc_nent = dma_map_sg(cam->dev, sg_table->sgl,
1287 sg_table->nents, DMA_FROM_DEVICE);
1288 if (mvb->dma_desc_nent <= 0)
1289 return -EIO; /* Not sure what's right here */
1290 for_each_sg(sg_table->sgl, sg, mvb->dma_desc_nent, i) {
1291 desc->dma_addr = sg_dma_address(sg);
1292 desc->segment_len = sg_dma_len(sg);
1293 desc++;
1294 }
1295 return 0;
1296 }
1297
/* vb2 buf_finish (S/G mode): undo the DMA mapping made in prepare. */
1298 static void mcam_vb_sg_buf_finish(struct vb2_buffer *vb)
1299 {
1300 struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
1301 struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
1302
1303 if (sg_table)
1304 dma_unmap_sg(cam->dev, sg_table->sgl,
1305 sg_table->nents, DMA_FROM_DEVICE);
1306 }
1307
/* vb2 buf_cleanup (S/G mode): free the descriptor array allocated in
   buf_init; the size expression must mirror the one used there. */
1308 static void mcam_vb_sg_buf_cleanup(struct vb2_buffer *vb)
1309 {
1310 struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
1311 struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
1312 int ndesc = cam->pix_format.sizeimage/PAGE_SIZE + 1;
1313
1314 dma_free_coherent(cam->dev, ndesc * sizeof(struct mcam_dma_desc),
1315 mvb->dma_desc, mvb->dma_desc_pa);
1316 }
1317
1318
/* vb2 operations for scatter/gather mode: the shared set plus the
   descriptor-array init/prepare/finish/cleanup hooks above. */
1319 static const struct vb2_ops mcam_vb2_sg_ops = {
1320 .queue_setup = mcam_vb_queue_setup,
1321 .buf_init = mcam_vb_sg_buf_init,
1322 .buf_prepare = mcam_vb_sg_buf_prepare,
1323 .buf_queue = mcam_vb_buf_queue,
1324 .buf_finish = mcam_vb_sg_buf_finish,
1325 .buf_cleanup = mcam_vb_sg_buf_cleanup,
1326 .start_streaming = mcam_vb_start_streaming,
1327 .stop_streaming = mcam_vb_stop_streaming,
1328 .wait_prepare = mcam_vb_wait_prepare,
1329 .wait_finish = mcam_vb_wait_finish,
1330 };
1331
1332 #endif /* MCAM_MODE_DMA_SG */
1333
1334 static int mcam_setup_vb2(struct mcam_camera *cam)
1335 {
1336 struct vb2_queue *vq = &cam->vb_queue;
1337
1338 memset(vq, 0, sizeof(*vq));
1339 vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1340 vq->drv_priv = cam;
1341 INIT_LIST_HEAD(&cam->buffers);
1342 switch (cam->buffer_mode) {
1343 case B_DMA_contig:
1344 #ifdef MCAM_MODE_DMA_CONTIG
1345 vq->ops = &mcam_vb2_ops;
1346 vq->mem_ops = &vb2_dma_contig_memops;
1347 vq->buf_struct_size = sizeof(struct mcam_vb_buffer);
1348 cam->vb_alloc_ctx = vb2_dma_contig_init_ctx(cam->dev);
1349 vq->io_modes = VB2_MMAP | VB2_USERPTR;
1350 cam->dma_setup = mcam_ctlr_dma_contig;
1351 cam->frame_complete = mcam_dma_contig_done;
1352 #endif
1353 break;
1354 case B_DMA_sg:
1355 #ifdef MCAM_MODE_DMA_SG
1356 vq->ops = &mcam_vb2_sg_ops;
1357 vq->mem_ops = &vb2_dma_sg_memops;
1358 vq->buf_struct_size = sizeof(struct mcam_vb_buffer);
1359 vq->io_modes = VB2_MMAP | VB2_USERPTR;
1360 cam->dma_setup = mcam_ctlr_dma_sg;
1361 cam->frame_complete = mcam_dma_sg_done;
1362 #endif
1363 break;
1364 case B_vmalloc:
1365 #ifdef MCAM_MODE_VMALLOC
1366 tasklet_init(&cam->s_tasklet, mcam_frame_tasklet,
1367 (unsigned long) cam);
1368 vq->ops = &mcam_vb2_ops;
1369 vq->mem_ops = &vb2_vmalloc_memops;
1370 vq->buf_struct_size = sizeof(struct mcam_vb_buffer);
1371 vq->io_modes = VB2_MMAP;
1372 cam->dma_setup = mcam_ctlr_dma_vmalloc;
1373 cam->frame_complete = mcam_vmalloc_done;
1374 #endif
1375 break;
1376 }
1377 return vb2_queue_init(vq);
1378 }
1379
/* Tear down the vb2 queue and, in contig mode, its allocator context. */
1380 static void mcam_cleanup_vb2(struct mcam_camera *cam)
1381 {
1382 vb2_queue_release(&cam->vb_queue);
1383 #ifdef MCAM_MODE_DMA_CONTIG
1384 if (cam->buffer_mode == B_DMA_contig)
1385 vb2_dma_contig_cleanup_ctx(cam->vb_alloc_ctx);
1386 #endif
1387 }
1388
1389
1390 /* ---------------------------------------------------------------------- */
1391 /*
1392 * The long list of V4L2 ioctl() operations.
1393 */
1394
/*
 * Thin ioctl wrappers around the corresponding vb2 calls; each simply
 * serializes on s_mutex and forwards the vb2 return code.
 */
1395 static int mcam_vidioc_streamon(struct file *filp, void *priv,
1396 enum v4l2_buf_type type)
1397 {
1398 struct mcam_camera *cam = filp->private_data;
1399 int ret;
1400
1401 mutex_lock(&cam->s_mutex);
1402 ret = vb2_streamon(&cam->vb_queue, type);
1403 mutex_unlock(&cam->s_mutex);
1404 return ret;
1405 }
1406
1407
1408 static int mcam_vidioc_streamoff(struct file *filp, void *priv,
1409 enum v4l2_buf_type type)
1410 {
1411 struct mcam_camera *cam = filp->private_data;
1412 int ret;
1413
1414 mutex_lock(&cam->s_mutex);
1415 ret = vb2_streamoff(&cam->vb_queue, type);
1416 mutex_unlock(&cam->s_mutex);
1417 return ret;
1418 }
1419
1420
1421 static int mcam_vidioc_reqbufs(struct file *filp, void *priv,
1422 struct v4l2_requestbuffers *req)
1423 {
1424 struct mcam_camera *cam = filp->private_data;
1425 int ret;
1426
1427 mutex_lock(&cam->s_mutex);
1428 ret = vb2_reqbufs(&cam->vb_queue, req);
1429 mutex_unlock(&cam->s_mutex);
1430 return ret;
1431 }
1432
1433
1434 static int mcam_vidioc_querybuf(struct file *filp, void *priv,
1435 struct v4l2_buffer *buf)
1436 {
1437 struct mcam_camera *cam = filp->private_data;
1438 int ret;
1439
1440 mutex_lock(&cam->s_mutex);
1441 ret = vb2_querybuf(&cam->vb_queue, buf);
1442 mutex_unlock(&cam->s_mutex);
1443 return ret;
1444 }
1445
1446 static int mcam_vidioc_qbuf(struct file *filp, void *priv,
1447 struct v4l2_buffer *buf)
1448 {
1449 struct mcam_camera *cam = filp->private_data;
1450 int ret;
1451
1452 mutex_lock(&cam->s_mutex);
1453 ret = vb2_qbuf(&cam->vb_queue, buf);
1454 mutex_unlock(&cam->s_mutex);
1455 return ret;
1456 }
1457
/* dqbuf may block; vb2 drops/retakes s_mutex via the wait_prepare and
   wait_finish callbacks while sleeping. */
1458 static int mcam_vidioc_dqbuf(struct file *filp, void *priv,
1459 struct v4l2_buffer *buf)
1460 {
1461 struct mcam_camera *cam = filp->private_data;
1462 int ret;
1463
1464 mutex_lock(&cam->s_mutex);
1465 ret = vb2_dqbuf(&cam->vb_queue, buf, filp->f_flags & O_NONBLOCK);
1466 mutex_unlock(&cam->s_mutex);
1467 return ret;
1468 }
1469
1470 static int mcam_vidioc_querycap(struct file *file, void *priv,
1471 struct v4l2_capability *cap)
1472 {
1473 strcpy(cap->driver, "marvell_ccic");
1474 strcpy(cap->card, "marvell_ccic");
1475 cap->version = 1;
1476 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
1477 V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
1478 return 0;
1479 }
1480
1481
/*
 * VIDIOC_ENUM_FMT: report the index-th entry of the static format
 * table, or -EINVAL past the end.
 */
1482 static int mcam_vidioc_enum_fmt_vid_cap(struct file *filp,
1483 void *priv, struct v4l2_fmtdesc *fmt)
1484 {
1485 if (fmt->index >= N_MCAM_FMTS)
1486 return -EINVAL;
1487 strlcpy(fmt->description, mcam_formats[fmt->index].desc,
1488 sizeof(fmt->description));
1489 fmt->pixelformat = mcam_formats[fmt->index].pixelformat;
1490 return 0;
1491 }
1492
/*
 * VIDIOC_TRY_FMT: snap the request to a supported format, let the
 * sensor negotiate the media-bus format, then fill in the derived
 * line pitch and image size.
 */
1493 static int mcam_vidioc_try_fmt_vid_cap(struct file *filp, void *priv,
1494 struct v4l2_format *fmt)
1495 {
1496 struct mcam_camera *cam = priv;
1497 struct mcam_format_struct *f;
1498 struct v4l2_pix_format *pix = &fmt->fmt.pix;
1499 struct v4l2_mbus_framefmt mbus_fmt;
1500 int ret;
1501
/* mcam_find_format() snaps unknown formats to a supported default. */
1502 f = mcam_find_format(pix->pixelformat);
1503 pix->pixelformat = f->pixelformat;
1504 v4l2_fill_mbus_format(&mbus_fmt, pix, f->mbus_code);
1505 mutex_lock(&cam->s_mutex);
1506 ret = sensor_call(cam, video, try_mbus_fmt, &mbus_fmt);
1507 mutex_unlock(&cam->s_mutex);
1508 v4l2_fill_pix_format(pix, &mbus_fmt);
1509 switch (f->pixelformat) {
1510 case V4L2_PIX_FMT_YUV420:
1511 case V4L2_PIX_FMT_YVU420:
/* 12 bits/pixel for 4:2:0 planar formats. */
1512 pix->bytesperline = pix->width * 3 / 2;
1513 break;
1514 default:
1515 pix->bytesperline = pix->width * f->bpp;
1516 break;
1517 }
1518 pix->sizeimage = pix->height * pix->bytesperline;
1519 return ret;
1520 }
1521
/*
 * VIDIOC_S_FMT: validate via try_fmt, then commit the new format,
 * refresh vmalloc DMA buffers if needed, and mark the controller as
 * needing reconfiguration.  Rejected while streaming or while vb2
 * buffers exist.
 */
1522 static int mcam_vidioc_s_fmt_vid_cap(struct file *filp, void *priv,
1523 struct v4l2_format *fmt)
1524 {
1525 struct mcam_camera *cam = priv;
1526 struct mcam_format_struct *f;
1527 int ret;
1528
1529 /*
1530 * Can't do anything if the device is not idle
1531 * Also can't if there are streaming buffers in place.
1532 */
1533 if (cam->state != S_IDLE || cam->vb_queue.num_buffers > 0)
1534 return -EBUSY;
1535
1536 f = mcam_find_format(fmt->fmt.pix.pixelformat);
1537
1538 /*
1539 * See if the formatting works in principle.
1540 */
/* try_fmt also snaps fmt->fmt.pix to the supported format "f". */
1541 ret = mcam_vidioc_try_fmt_vid_cap(filp, priv, fmt);
1542 if (ret)
1543 return ret;
1544 /*
1545 * Now we start to change things for real, so let's do it
1546 * under lock.
1547 */
1548 mutex_lock(&cam->s_mutex);
1549 cam->pix_format = fmt->fmt.pix;
1550 cam->mbus_code = f->mbus_code;
1551
1552 /*
1553 * Make sure we have appropriate DMA buffers.
1554 */
1555 if (cam->buffer_mode == B_vmalloc) {
1556 ret = mcam_check_dma_buffers(cam);
1557 if (ret)
1558 goto out;
1559 }
1560 mcam_set_config_needed(cam, 1);
1561 out:
1562 mutex_unlock(&cam->s_mutex);
1563 return ret;
1564 }
1565
1566 /*
1567 * Return our stored notion of how the camera is/should be configured.
1568 * The V4l2 spec wants us to be smarter, and actually get this from
1569 * the camera (and not mess with it at open time). Someday.
1570 */
/* VIDIOC_G_FMT: return the cached pixel format. */
1571 static int mcam_vidioc_g_fmt_vid_cap(struct file *filp, void *priv,
1572 struct v4l2_format *f)
1573 {
1574 struct mcam_camera *cam = priv;
1575
1576 f->fmt.pix = cam->pix_format;
1577 return 0;
1578 }
1579
1580 /*
1581 * We only have one input - the sensor - so minimize the nonsense here.
1582 */
/* VIDIOC_ENUMINPUT: a single camera input at index 0. */
1583 static int mcam_vidioc_enum_input(struct file *filp, void *priv,
1584 struct v4l2_input *input)
1585 {
1586 if (input->index != 0)
1587 return -EINVAL;
1588
1589 input->type = V4L2_INPUT_TYPE_CAMERA;
1590 input->std = V4L2_STD_ALL; /* Not sure what should go here */
1591 strcpy(input->name, "Camera");
1592 return 0;
1593 }
1594
/* VIDIOC_G_INPUT: the only input is index 0. */
1595 static int mcam_vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
1596 {
1597 *i = 0;
1598 return 0;
1599 }
1600
/* VIDIOC_S_INPUT: only input 0 (the sensor) exists. */
static int mcam_vidioc_s_input(struct file *filp, void *priv, unsigned int i)
{
	return (i == 0) ? 0 : -EINVAL;
}
1607
1608 /* from vivi.c */
/* VIDIOC_S_STD: accepted but meaningless for a camera sensor. */
1609 static int mcam_vidioc_s_std(struct file *filp, void *priv, v4l2_std_id a)
1610 {
1611 return 0;
1612 }
1613
/* VIDIOC_G_STD: report a fixed standard (pattern borrowed from vivi). */
1614 static int mcam_vidioc_g_std(struct file *filp, void *priv, v4l2_std_id *a)
1615 {
1616 *a = V4L2_STD_NTSC_M;
1617 return 0;
1618 }
1619
1620 /*
1621 * G/S_PARM. Most of this is done by the sensor, but we are
1622 * the level which controls the number of read buffers.
1623 */
/*
 * VIDIOC_G_PARM / VIDIOC_S_PARM: forward to the sensor, then override
 * readbuffers with the driver's own DMA buffer count.
 */
1624 static int mcam_vidioc_g_parm(struct file *filp, void *priv,
1625 struct v4l2_streamparm *parms)
1626 {
1627 struct mcam_camera *cam = priv;
1628 int ret;
1629
1630 mutex_lock(&cam->s_mutex);
1631 ret = sensor_call(cam, video, g_parm, parms);
1632 mutex_unlock(&cam->s_mutex);
1633 parms->parm.capture.readbuffers = n_dma_bufs;
1634 return ret;
1635 }
1636
1637 static int mcam_vidioc_s_parm(struct file *filp, void *priv,
1638 struct v4l2_streamparm *parms)
1639 {
1640 struct mcam_camera *cam = priv;
1641 int ret;
1642
1643 mutex_lock(&cam->s_mutex);
1644 ret = sensor_call(cam, video, s_parm, parms);
1645 mutex_unlock(&cam->s_mutex);
1646 parms->parm.capture.readbuffers = n_dma_bufs;
1647 return ret;
1648 }
1649
/* Frame size / interval enumeration: delegated to the sensor subdev. */
1650 static int mcam_vidioc_enum_framesizes(struct file *filp, void *priv,
1651 struct v4l2_frmsizeenum *sizes)
1652 {
1653 struct mcam_camera *cam = priv;
1654 int ret;
1655
1656 mutex_lock(&cam->s_mutex);
1657 ret = sensor_call(cam, video, enum_framesizes, sizes);
1658 mutex_unlock(&cam->s_mutex);
1659 return ret;
1660 }
1661
1662 static int mcam_vidioc_enum_frameintervals(struct file *filp, void *priv,
1663 struct v4l2_frmivalenum *interval)
1664 {
1665 struct mcam_camera *cam = priv;
1666 int ret;
1667
1668 mutex_lock(&cam->s_mutex);
1669 ret = sensor_call(cam, video, enum_frameintervals, interval);
1670 mutex_unlock(&cam->s_mutex);
1671 return ret;
1672 }
1673
1674 #ifdef CONFIG_VIDEO_ADV_DEBUG
/*
 * Debug register access (CONFIG_VIDEO_ADV_DEBUG only): bounds-checked
 * raw read/write of controller registers.
 * NOTE(review): the "regs_size - 4" bound wraps if regs_size < 4;
 * presumably the register window is always at least one word — confirm.
 */
1675 static int mcam_vidioc_g_register(struct file *file, void *priv,
1676 struct v4l2_dbg_register *reg)
1677 {
1678 struct mcam_camera *cam = priv;
1679
1680 if (reg->reg > cam->regs_size - 4)
1681 return -EINVAL;
1682 reg->val = mcam_reg_read(cam, reg->reg);
1683 reg->size = 4;
1684 return 0;
1685 }
1686
1687 static int mcam_vidioc_s_register(struct file *file, void *priv,
1688 const struct v4l2_dbg_register *reg)
1689 {
1690 struct mcam_camera *cam = priv;
1691
1692 if (reg->reg > cam->regs_size - 4)
1693 return -EINVAL;
1694 mcam_reg_write(cam, reg->reg, reg->val);
1695 return 0;
1696 }
1697 #endif
1698
/* The V4L2 ioctl dispatch table for this device node. */
1699 static const struct v4l2_ioctl_ops mcam_v4l_ioctl_ops = {
1700 .vidioc_querycap = mcam_vidioc_querycap,
1701 .vidioc_enum_fmt_vid_cap = mcam_vidioc_enum_fmt_vid_cap,
1702 .vidioc_try_fmt_vid_cap = mcam_vidioc_try_fmt_vid_cap,
1703 .vidioc_s_fmt_vid_cap = mcam_vidioc_s_fmt_vid_cap,
1704 .vidioc_g_fmt_vid_cap = mcam_vidioc_g_fmt_vid_cap,
1705 .vidioc_enum_input = mcam_vidioc_enum_input,
1706 .vidioc_g_input = mcam_vidioc_g_input,
1707 .vidioc_s_input = mcam_vidioc_s_input,
1708 .vidioc_s_std = mcam_vidioc_s_std,
1709 .vidioc_g_std = mcam_vidioc_g_std,
1710 .vidioc_reqbufs = mcam_vidioc_reqbufs,
1711 .vidioc_querybuf = mcam_vidioc_querybuf,
1712 .vidioc_qbuf = mcam_vidioc_qbuf,
1713 .vidioc_dqbuf = mcam_vidioc_dqbuf,
1714 .vidioc_streamon = mcam_vidioc_streamon,
1715 .vidioc_streamoff = mcam_vidioc_streamoff,
1716 .vidioc_g_parm = mcam_vidioc_g_parm,
1717 .vidioc_s_parm = mcam_vidioc_s_parm,
1718 .vidioc_enum_framesizes = mcam_vidioc_enum_framesizes,
1719 .vidioc_enum_frameintervals = mcam_vidioc_enum_frameintervals,
1720 #ifdef CONFIG_VIDEO_ADV_DEBUG
1721 .vidioc_g_register = mcam_vidioc_g_register,
1722 .vidioc_s_register = mcam_vidioc_s_register,
1723 #endif
1724 };
1725
1726 /* ---------------------------------------------------------------------- */
1727 /*
1728 * Our various file operations.
1729 */
1730 static int mcam_v4l_open(struct file *filp)
1731 {
1732 struct mcam_camera *cam = video_drvdata(filp);
1733 int ret = 0;
1734
1735 filp->private_data = cam;
1736
1737 cam->frame_state.frames = 0;
1738 cam->frame_state.singles = 0;
1739 cam->frame_state.delivered = 0;
1740 mutex_lock(&cam->s_mutex);
1741 if (cam->users == 0) {
1742 ret = mcam_setup_vb2(cam);
1743 if (ret)
1744 goto out;
1745 ret = mcam_ctlr_power_up(cam);
1746 if (ret)
1747 goto out;
1748 __mcam_cam_reset(cam);
1749 mcam_set_config_needed(cam, 1);
1750 }
1751 (cam->users)++;
1752 out:
1753 mutex_unlock(&cam->s_mutex);
1754 return ret;
1755 }
1756
1757
1758 static int mcam_v4l_release(struct file *filp)
1759 {
1760 struct mcam_camera *cam = filp->private_data;
1761
1762 cam_dbg(cam, "Release, %d frames, %d singles, %d delivered\n",
1763 cam->frame_state.frames, cam->frame_state.singles,
1764 cam->frame_state.delivered);
1765 mutex_lock(&cam->s_mutex);
1766 (cam->users)--;
1767 if (cam->users == 0) {
1768 mcam_ctlr_stop_dma(cam);
1769 mcam_cleanup_vb2(cam);
1770 mcam_disable_mipi(cam);
1771 mcam_ctlr_power_down(cam);
1772 if (cam->buffer_mode == B_vmalloc && alloc_bufs_at_read)
1773 mcam_free_dma_bufs(cam);
1774 }
1775
1776 mutex_unlock(&cam->s_mutex);
1777 return 0;
1778 }
1779
1780 static ssize_t mcam_v4l_read(struct file *filp,
1781 char __user *buffer, size_t len, loff_t *pos)
1782 {
1783 struct mcam_camera *cam = filp->private_data;
1784 int ret;
1785
1786 mutex_lock(&cam->s_mutex);
1787 ret = vb2_read(&cam->vb_queue, buffer, len, pos,
1788 filp->f_flags & O_NONBLOCK);
1789 mutex_unlock(&cam->s_mutex);
1790 return ret;
1791 }
1792
1793
1794
1795 static unsigned int mcam_v4l_poll(struct file *filp,
1796 struct poll_table_struct *pt)
1797 {
1798 struct mcam_camera *cam = filp->private_data;
1799 int ret;
1800
1801 mutex_lock(&cam->s_mutex);
1802 ret = vb2_poll(&cam->vb_queue, filp, pt);
1803 mutex_unlock(&cam->s_mutex);
1804 return ret;
1805 }
1806
1807
1808 static int mcam_v4l_mmap(struct file *filp, struct vm_area_struct *vma)
1809 {
1810 struct mcam_camera *cam = filp->private_data;
1811 int ret;
1812
1813 mutex_lock(&cam->s_mutex);
1814 ret = vb2_mmap(&cam->vb_queue, vma);
1815 mutex_unlock(&cam->s_mutex);
1816 return ret;
1817 }
1818
1819
1820
/*
 * File operations for the video device node.  All ioctls go through
 * the core video_ioctl2() dispatcher into mcam_v4l_ioctl_ops.
 */
static const struct v4l2_file_operations mcam_v4l_fops = {
	.owner = THIS_MODULE,
	.open = mcam_v4l_open,
	.release = mcam_v4l_release,
	.read = mcam_v4l_read,
	.poll = mcam_v4l_poll,
	.mmap = mcam_v4l_mmap,
	.unlocked_ioctl = video_ioctl2,
};
1830
1831
1832 /*
1833 * This template device holds all of those v4l2 methods; we
1834 * clone it for specific real devices.
1835 */
/*
 * This template device holds all of those v4l2 methods; we
 * clone it for specific real devices (see mccic_register(), which
 * copies it into cam->vdev before registering).
 */
static struct video_device mcam_v4l_template = {
	.name = "mcam",
	.tvnorms = V4L2_STD_NTSC_M,

	.fops = &mcam_v4l_fops,
	.ioctl_ops = &mcam_v4l_ioctl_ops,
	.release = video_device_release_empty,
};
1844
1845 /* ---------------------------------------------------------------------- */
1846 /*
1847 * Interrupt handler stuff
1848 */
/*
 * Account for a completed frame and pass it to the buffer-mode-specific
 * completion hook.  Runs from mccic_irq(), i.e. in interrupt context
 * with the platform irq handler's lock held.
 */
static void mcam_frame_complete(struct mcam_camera *cam, int frame)
{
	/*
	 * Basic frame housekeeping.
	 */
	set_bit(frame, &cam->flags);		/* mark this frame as ready */
	clear_bit(CF_DMA_ACTIVE, &cam->flags);
	cam->next_buf = frame;
	cam->buf_seq[frame] = ++(cam->sequence);	/* monotonic sequence number */
	cam->frame_state.frames++;
	/*
	 * "This should never happen" -- a completion while not streaming
	 * is silently dropped after the bookkeeping above.
	 */
	if (cam->state != S_STREAMING)
		return;
	/*
	 * Process the frame and set up the next one.
	 */
	cam->frame_complete(cam, frame);
}
1869
1870
1871 /*
1872 * The interrupt handler; this needs to be called from the
1873 * platform irq handler with the lock held.
1874 */
1875 int mccic_irq(struct mcam_camera *cam, unsigned int irqs)
1876 {
1877 unsigned int frame, handled = 0;
1878
1879 mcam_reg_write(cam, REG_IRQSTAT, FRAMEIRQS); /* Clear'em all */
1880 /*
1881 * Handle any frame completions. There really should
1882 * not be more than one of these, or we have fallen
1883 * far behind.
1884 *
1885 * When running in S/G mode, the frame number lacks any
1886 * real meaning - there's only one descriptor array - but
1887 * the controller still picks a different one to signal
1888 * each time.
1889 */
1890 for (frame = 0; frame < cam->nbufs; frame++)
1891 if (irqs & (IRQ_EOF0 << frame) &&
1892 test_bit(CF_FRAME_SOF0 + frame, &cam->flags)) {
1893 mcam_frame_complete(cam, frame);
1894 handled = 1;
1895 clear_bit(CF_FRAME_SOF0 + frame, &cam->flags);
1896 if (cam->buffer_mode == B_DMA_sg)
1897 break;
1898 }
1899 /*
1900 * If a frame starts, note that we have DMA active. This
1901 * code assumes that we won't get multiple frame interrupts
1902 * at once; may want to rethink that.
1903 */
1904 for (frame = 0; frame < cam->nbufs; frame++) {
1905 if (irqs & (IRQ_SOF0 << frame)) {
1906 set_bit(CF_FRAME_SOF0 + frame, &cam->flags);
1907 handled = IRQ_HANDLED;
1908 }
1909 }
1910
1911 if (handled == IRQ_HANDLED) {
1912 set_bit(CF_DMA_ACTIVE, &cam->flags);
1913 if (cam->buffer_mode == B_DMA_sg)
1914 mcam_ctlr_stop(cam);
1915 }
1916 return handled;
1917 }
1918
1919 /* ---------------------------------------------------------------------- */
1920 /*
1921 * Registration and such.
1922 */
/*
 * Static configuration handed to the OV7670 sensor driver via
 * i2c_board_info.platform_data; clock_speed and use_smbus are filled
 * in from the camera structure at registration time.
 */
static struct ov7670_config sensor_cfg = {
	/*
	 * Exclude QCIF mode, because it only captures a tiny portion
	 * of the sensor FOV
	 */
	.min_width = 320,
	.min_height = 240,
};
1931
1932
1933 int mccic_register(struct mcam_camera *cam)
1934 {
1935 struct i2c_board_info ov7670_info = {
1936 .type = "ov7670",
1937 .addr = 0x42 >> 1,
1938 .platform_data = &sensor_cfg,
1939 };
1940 int ret;
1941
1942 /*
1943 * Validate the requested buffer mode.
1944 */
1945 if (buffer_mode >= 0)
1946 cam->buffer_mode = buffer_mode;
1947 if (cam->buffer_mode == B_DMA_sg &&
1948 cam->chip_id == MCAM_CAFE) {
1949 printk(KERN_ERR "marvell-cam: Cafe can't do S/G I/O, "
1950 "attempting vmalloc mode instead\n");
1951 cam->buffer_mode = B_vmalloc;
1952 }
1953 if (!mcam_buffer_mode_supported(cam->buffer_mode)) {
1954 printk(KERN_ERR "marvell-cam: buffer mode %d unsupported\n",
1955 cam->buffer_mode);
1956 return -EINVAL;
1957 }
1958 /*
1959 * Register with V4L
1960 */
1961 ret = v4l2_device_register(cam->dev, &cam->v4l2_dev);
1962 if (ret)
1963 return ret;
1964
1965 mutex_init(&cam->s_mutex);
1966 cam->state = S_NOTREADY;
1967 mcam_set_config_needed(cam, 1);
1968 cam->pix_format = mcam_def_pix_format;
1969 cam->mbus_code = mcam_def_mbus_code;
1970 INIT_LIST_HEAD(&cam->buffers);
1971 mcam_ctlr_init(cam);
1972
1973 /*
1974 * Try to find the sensor.
1975 */
1976 sensor_cfg.clock_speed = cam->clock_speed;
1977 sensor_cfg.use_smbus = cam->use_smbus;
1978 cam->sensor_addr = ov7670_info.addr;
1979 cam->sensor = v4l2_i2c_new_subdev_board(&cam->v4l2_dev,
1980 cam->i2c_adapter, &ov7670_info, NULL);
1981 if (cam->sensor == NULL) {
1982 ret = -ENODEV;
1983 goto out_unregister;
1984 }
1985
1986 ret = mcam_cam_init(cam);
1987 if (ret)
1988 goto out_unregister;
1989 /*
1990 * Get the v4l2 setup done.
1991 */
1992 ret = v4l2_ctrl_handler_init(&cam->ctrl_handler, 10);
1993 if (ret)
1994 goto out_unregister;
1995 cam->v4l2_dev.ctrl_handler = &cam->ctrl_handler;
1996
1997 mutex_lock(&cam->s_mutex);
1998 cam->vdev = mcam_v4l_template;
1999 cam->vdev.debug = 0;
2000 cam->vdev.v4l2_dev = &cam->v4l2_dev;
2001 video_set_drvdata(&cam->vdev, cam);
2002 ret = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
2003 if (ret)
2004 goto out;
2005
2006 /*
2007 * If so requested, try to get our DMA buffers now.
2008 */
2009 if (cam->buffer_mode == B_vmalloc && !alloc_bufs_at_read) {
2010 if (mcam_alloc_dma_bufs(cam, 1))
2011 cam_warn(cam, "Unable to alloc DMA buffers at load"
2012 " will try again later.");
2013 }
2014
2015 out:
2016 v4l2_ctrl_handler_free(&cam->ctrl_handler);
2017 mutex_unlock(&cam->s_mutex);
2018 return ret;
2019 out_unregister:
2020 v4l2_device_unregister(&cam->v4l2_dev);
2021 return ret;
2022 }
2023
2024
/*
 * Tear down everything mccic_register() set up: the vb2 queue, any
 * vmalloc-mode DMA buffers, the video device node, the control handler
 * and the v4l2 device.
 */
void mccic_shutdown(struct mcam_camera *cam)
{
	/*
	 * If we have no users (and we really, really should have no
	 * users) the device will already be powered down. Trying to
	 * take it down again will wedge the machine, which is frowned
	 * upon.
	 */
	if (cam->users > 0) {
		cam_warn(cam, "Removing a device with users!\n");
		mcam_ctlr_power_down(cam);
	}
	vb2_queue_release(&cam->vb_queue);
	if (cam->buffer_mode == B_vmalloc)
		mcam_free_dma_bufs(cam);
	video_unregister_device(&cam->vdev);
	v4l2_ctrl_handler_free(&cam->ctrl_handler);
	v4l2_device_unregister(&cam->v4l2_dev);
}
2044
2045 /*
2046 * Power management
2047 */
2048 #ifdef CONFIG_PM
2049
/*
 * Suspend: if the device is open, stop DMA and power the controller
 * down, but preserve cam->state across the shutdown (presumably
 * mcam_ctlr_stop_dma() alters it) so mccic_resume() can tell whether
 * streaming was in progress.
 */
void mccic_suspend(struct mcam_camera *cam)
{
	mutex_lock(&cam->s_mutex);
	if (cam->users > 0) {
		enum mcam_state cstate = cam->state;	/* remember pre-suspend state */

		mcam_ctlr_stop_dma(cam);
		mcam_ctlr_power_down(cam);
		cam->state = cstate;			/* restore for resume */
	}
	mutex_unlock(&cam->s_mutex);
}
2062
/*
 * Resume: power the controller back up if the device is open (or make
 * sure it stays down otherwise), then restart streaming if it was
 * active at suspend time.  Returns 0 or a negative error code from
 * power-up / read setup.
 */
int mccic_resume(struct mcam_camera *cam)
{
	int ret = 0;

	mutex_lock(&cam->s_mutex);
	if (cam->users > 0) {
		ret = mcam_ctlr_power_up(cam);
		if (ret) {
			mutex_unlock(&cam->s_mutex);
			return ret;
		}
		__mcam_cam_reset(cam);
	} else {
		/* Nobody has the device open; keep it powered down. */
		mcam_ctlr_power_down(cam);
	}
	mutex_unlock(&cam->s_mutex);

	/* The controller lost its configuration across suspend. */
	set_bit(CF_CONFIG_NEEDED, &cam->flags);
	if (cam->state == S_STREAMING) {
		/*
		 * If there was a buffer in the DMA engine at suspend
		 * time, put it back on the queue or we'll forget about it.
		 */
		if (cam->buffer_mode == B_DMA_sg && cam->vb_bufs[0])
			list_add(&cam->vb_bufs[0]->queue, &cam->buffers);
		ret = mcam_read_setup(cam);
	}
	return ret;
}
2092 #endif /* CONFIG_PM */
2093
2094 int ldv_probe_1(void);
2095 int ldv_retval_1;
2096
2097
/* LDV harness (auto-generated; "initialyze" typo is part of the generated
 * name): give the mcam_v4l_fops handler argument an arbitrary value. */
void ldv_initialyze_v4l2_file_operations_2(void){
	mcam_v4l_fops_group0 = ldv_undef_ptr();
}
2101
/* LDV harness (auto-generated): give the mcam_vb2_sg_ops handler
 * arguments arbitrary values. */
void ldv_initialyze_vb2_ops_4(void){
	mcam_vb2_sg_ops_group0 = ldv_undef_ptr();
	mcam_vb2_sg_ops_group1 = ldv_undef_ptr();
}
2106
/* LDV harness (auto-generated): give the mcam_vb2_ops handler argument
 * an arbitrary value. */
void ldv_initialyze_vb2_ops_5(void){
	mcam_vb2_ops_group0 = ldv_undef_ptr();
}
2110
/* LDV harness (auto-generated): give the mcam_v4l_ioctl_ops handler
 * arguments arbitrary values. */
void ldv_initialyze_v4l2_ioctl_ops_3(void){
	mcam_v4l_ioctl_ops_group0 = ldv_undef_ptr();
	mcam_v4l_ioctl_ops_group1 = ldv_undef_ptr();
	mcam_v4l_ioctl_ops_group3 = ldv_undef_ptr();
	mcam_v4l_ioctl_ops_group2 = ldv_undef_ptr();
}
2117
/*
 * LDV harness (auto-generated driver-environment model): pick one
 * handler from mcam_vb2_sg_ops nondeterministically and invoke it with
 * undefined arguments, tracking the state machine in
 * ldv_state_variable_4.  Do not hand-edit; regenerated by the DEG tool.
 */
void ldv_main_exported_4(void){
	unsigned int ldvarg52;
	struct v4l2_format const *ldvarg56;
	unsigned int *ldvarg55;
	void **ldvarg53;
	unsigned int *ldvarg54;
	unsigned int *ldvarg57;
	/*DEG-struct: handlers from structure mcam_vb2_sg_ops*/
	switch(__VERIFIER_nondet_int()){
		case 0:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_4 == 1){
				/*DEG-CALL: handler buf_finish from mcam_vb2_sg_ops*/
				(& mcam_vb_sg_buf_finish)(mcam_vb2_sg_ops_group0);
				/*DEG-postcall: default*/
				ldv_state_variable_4 = 1;
			}
		}
		break;
		case 1:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_4 == 1){
				/*DEG-CALL: handler wait_prepare from mcam_vb2_sg_ops*/
				(& mcam_vb_wait_prepare)(mcam_vb2_sg_ops_group1);
				/*DEG-postcall: default*/
				ldv_state_variable_4 = 1;
			}
		}
		break;
		case 2:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_4 == 1){
				/*DEG-CALL: handler buf_init from mcam_vb2_sg_ops*/
				(& mcam_vb_sg_buf_init)(mcam_vb2_sg_ops_group0);
				/*DEG-postcall: default*/
				ldv_state_variable_4 = 1;
			}
		}
		break;
		case 3:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_4 == 1){
				/*DEG-CALL: handler buf_prepare from mcam_vb2_sg_ops*/
				(& mcam_vb_sg_buf_prepare)(mcam_vb2_sg_ops_group0);
				/*DEG-postcall: default*/
				ldv_state_variable_4 = 1;
			}
		}
		break;
		case 4:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_4 == 1){
				/*DEG-CALL: handler queue_setup from mcam_vb2_sg_ops*/
				(& mcam_vb_queue_setup)(mcam_vb2_sg_ops_group1,ldvarg56,ldvarg55,ldvarg54,ldvarg57,ldvarg53);
				/*DEG-postcall: default*/
				ldv_state_variable_4 = 1;
			}
		}
		break;
		case 5:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_4 == 1){
				/*DEG-CALL: handler wait_finish from mcam_vb2_sg_ops*/
				(& mcam_vb_wait_finish)(mcam_vb2_sg_ops_group1);
				/*DEG-postcall: default*/
				ldv_state_variable_4 = 1;
			}
		}
		break;
		case 6:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_4 == 1){
				/*DEG-CALL: handler buf_queue from mcam_vb2_sg_ops*/
				(& mcam_vb_buf_queue)(mcam_vb2_sg_ops_group0);
				/*DEG-postcall: default*/
				ldv_state_variable_4 = 1;
			}
		}
		break;
		case 7:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_4 == 1){
				/*DEG-CALL: handler stop_streaming from mcam_vb2_sg_ops*/
				(& mcam_vb_stop_streaming)(mcam_vb2_sg_ops_group1);
				/*DEG-postcall: default*/
				ldv_state_variable_4 = 1;
			}
		}
		break;
		case 8:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_4 == 1){
				/*DEG-CALL: handler start_streaming from mcam_vb2_sg_ops*/
				(& mcam_vb_start_streaming)(mcam_vb2_sg_ops_group1,ldvarg52);
				/*DEG-postcall: default*/
				ldv_state_variable_4 = 1;
			}
		}
		break;
		case 9:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_4 == 1){
				/*DEG-CALL: handler buf_cleanup from mcam_vb2_sg_ops*/
				(& mcam_vb_sg_buf_cleanup)(mcam_vb2_sg_ops_group0);
				/*DEG-postcall: default*/
				ldv_state_variable_4 = 1;
			}
		}
		break;
		default: ldv_assume(0);
	}

};
2231
2232
/*
 * LDV harness (auto-generated): model the lifetime of the
 * mcam_v4l_template video device -- probe takes a module reference,
 * release drops it -- via ldv_state_variable_1.  Do not hand-edit.
 */
void ldv_main_exported_1(void){
	struct video_device *ldvarg51;
	/*DEG-struct: handlers from structure mcam_v4l_template*/
	switch(__VERIFIER_nondet_int()){
		case 0:{
			/*DEG-state: state 2 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_1 == 2){
				/*DEG-CALL: handler release from mcam_v4l_template*/
				(& video_device_release_empty)(ldvarg51);
				/*DEG-postcall: module put*/
				ldv_state_variable_1 = 1;
				ref_cnt--;
			}
		}
		break;
		case 1:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_1 == 1){
				/*DEG-CALL: handler probe from mcam_v4l_template*/
				ldv_probe_1();
				/*DEG-postcall: get module*/
				ldv_state_variable_1 = 2;
				ref_cnt++;
			}
		}
		break;
		default: ldv_assume(0);
	}

};
2263
2264
/*
 * LDV harness (auto-generated): pick one ioctl handler from
 * mcam_v4l_ioctl_ops nondeterministically and invoke it with undefined
 * arguments, tracking ldv_state_variable_3.  Do not hand-edit;
 * regenerated by the DEG tool.
 */
void ldv_main_exported_3(void){
	void *ldvarg19;
	void *ldvarg22;
	void *ldvarg25;
	void *ldvarg17;
	void *ldvarg21;
	void *ldvarg30;
	void *ldvarg15;
	struct v4l2_fmtdesc *ldvarg27;
	void *ldvarg26;
	struct v4l2_input *ldvarg9;
	void *ldvarg10;
	struct v4l2_requestbuffers *ldvarg36;
	void *ldvarg13;
	void *ldvarg8;
	void *ldvarg3;
	void *ldvarg31;
	struct v4l2_frmsizeenum *ldvarg20;
	enum v4l2_buf_type ldvarg2;
	void *ldvarg28;
	void *ldvarg34;
	struct v4l2_dbg_register *ldvarg14;
	struct v4l2_dbg_register const *ldvarg4;
	struct v4l2_frmivalenum *ldvarg16;
	unsigned int ldvarg6;
	v4l2_std_id ldvarg33;
	void *ldvarg5;
	void *ldvarg35;
	void *ldvarg24;
	enum v4l2_buf_type ldvarg29;
	void *ldvarg37;
	v4l2_std_id *ldvarg12;
	unsigned int *ldvarg23;
	void *ldvarg7;
	void *ldvarg32;
	void *ldvarg11;
	struct v4l2_capability *ldvarg18;
	/*DEG-struct: handlers from structure mcam_v4l_ioctl_ops*/
	switch(__VERIFIER_nondet_int()){
		case 0:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_3 == 1){
				/*DEG-CALL: handler vidioc_reqbufs from mcam_v4l_ioctl_ops*/
				(& mcam_vidioc_reqbufs)(mcam_v4l_ioctl_ops_group3,ldvarg37,ldvarg36);
				/*DEG-postcall: default*/
				ldv_state_variable_3 = 1;
			}
		}
		break;
		case 1:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_3 == 1){
				/*DEG-CALL: handler vidioc_try_fmt_vid_cap from mcam_v4l_ioctl_ops*/
				(& mcam_vidioc_try_fmt_vid_cap)(mcam_v4l_ioctl_ops_group3,ldvarg35,mcam_v4l_ioctl_ops_group1);
				/*DEG-postcall: default*/
				ldv_state_variable_3 = 1;
			}
		}
		break;
		case 2:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_3 == 1){
				/*DEG-CALL: handler vidioc_s_std from mcam_v4l_ioctl_ops*/
				(& mcam_vidioc_s_std)(mcam_v4l_ioctl_ops_group3,ldvarg34,ldvarg33);
				/*DEG-postcall: default*/
				ldv_state_variable_3 = 1;
			}
		}
		break;
		case 3:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_3 == 1){
				/*DEG-CALL: handler vidioc_querybuf from mcam_v4l_ioctl_ops*/
				(& mcam_vidioc_querybuf)(mcam_v4l_ioctl_ops_group3,ldvarg32,mcam_v4l_ioctl_ops_group0);
				/*DEG-postcall: default*/
				ldv_state_variable_3 = 1;
			}
		}
		break;
		case 4:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_3 == 1){
				/*DEG-CALL: handler vidioc_dqbuf from mcam_v4l_ioctl_ops*/
				(& mcam_vidioc_dqbuf)(mcam_v4l_ioctl_ops_group3,ldvarg31,mcam_v4l_ioctl_ops_group0);
				/*DEG-postcall: default*/
				ldv_state_variable_3 = 1;
			}
		}
		break;
		case 5:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_3 == 1){
				/*DEG-CALL: handler vidioc_streamoff from mcam_v4l_ioctl_ops*/
				(& mcam_vidioc_streamoff)(mcam_v4l_ioctl_ops_group3,ldvarg30,ldvarg29);
				/*DEG-postcall: default*/
				ldv_state_variable_3 = 1;
			}
		}
		break;
		case 6:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_3 == 1){
				/*DEG-CALL: handler vidioc_enum_fmt_vid_cap from mcam_v4l_ioctl_ops*/
				(& mcam_vidioc_enum_fmt_vid_cap)(mcam_v4l_ioctl_ops_group3,ldvarg28,ldvarg27);
				/*DEG-postcall: default*/
				ldv_state_variable_3 = 1;
			}
		}
		break;
		case 7:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_3 == 1){
				/*DEG-CALL: handler vidioc_s_fmt_vid_cap from mcam_v4l_ioctl_ops*/
				(& mcam_vidioc_s_fmt_vid_cap)(mcam_v4l_ioctl_ops_group3,ldvarg26,mcam_v4l_ioctl_ops_group1);
				/*DEG-postcall: default*/
				ldv_state_variable_3 = 1;
			}
		}
		break;
		case 8:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_3 == 1){
				/*DEG-CALL: handler vidioc_g_fmt_vid_cap from mcam_v4l_ioctl_ops*/
				(& mcam_vidioc_g_fmt_vid_cap)(mcam_v4l_ioctl_ops_group3,ldvarg25,mcam_v4l_ioctl_ops_group1);
				/*DEG-postcall: default*/
				ldv_state_variable_3 = 1;
			}
		}
		break;
		case 9:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_3 == 1){
				/*DEG-CALL: handler vidioc_g_input from mcam_v4l_ioctl_ops*/
				(& mcam_vidioc_g_input)(mcam_v4l_ioctl_ops_group3,ldvarg24,ldvarg23);
				/*DEG-postcall: default*/
				ldv_state_variable_3 = 1;
			}
		}
		break;
		case 10:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_3 == 1){
				/*DEG-CALL: handler vidioc_qbuf from mcam_v4l_ioctl_ops*/
				(& mcam_vidioc_qbuf)(mcam_v4l_ioctl_ops_group3,ldvarg22,mcam_v4l_ioctl_ops_group0);
				/*DEG-postcall: default*/
				ldv_state_variable_3 = 1;
			}
		}
		break;
		case 11:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_3 == 1){
				/*DEG-CALL: handler vidioc_enum_framesizes from mcam_v4l_ioctl_ops*/
				(& mcam_vidioc_enum_framesizes)(mcam_v4l_ioctl_ops_group3,ldvarg21,ldvarg20);
				/*DEG-postcall: default*/
				ldv_state_variable_3 = 1;
			}
		}
		break;
		case 12:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_3 == 1){
				/*DEG-CALL: handler vidioc_querycap from mcam_v4l_ioctl_ops*/
				(& mcam_vidioc_querycap)(mcam_v4l_ioctl_ops_group3,ldvarg19,ldvarg18);
				/*DEG-postcall: default*/
				ldv_state_variable_3 = 1;
			}
		}
		break;
		case 13:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_3 == 1){
				/*DEG-CALL: handler vidioc_enum_frameintervals from mcam_v4l_ioctl_ops*/
				(& mcam_vidioc_enum_frameintervals)(mcam_v4l_ioctl_ops_group3,ldvarg17,ldvarg16);
				/*DEG-postcall: default*/
				ldv_state_variable_3 = 1;
			}
		}
		break;
		case 14:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_3 == 1){
				/*DEG-CALL: handler vidioc_g_register from mcam_v4l_ioctl_ops*/
				(& mcam_vidioc_g_register)(mcam_v4l_ioctl_ops_group3,ldvarg15,ldvarg14);
				/*DEG-postcall: default*/
				ldv_state_variable_3 = 1;
			}
		}
		break;
		case 15:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_3 == 1){
				/*DEG-CALL: handler vidioc_g_std from mcam_v4l_ioctl_ops*/
				(& mcam_vidioc_g_std)(mcam_v4l_ioctl_ops_group3,ldvarg13,ldvarg12);
				/*DEG-postcall: default*/
				ldv_state_variable_3 = 1;
			}
		}
		break;
		case 16:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_3 == 1){
				/*DEG-CALL: handler vidioc_g_parm from mcam_v4l_ioctl_ops*/
				(& mcam_vidioc_g_parm)(mcam_v4l_ioctl_ops_group3,ldvarg11,mcam_v4l_ioctl_ops_group2);
				/*DEG-postcall: default*/
				ldv_state_variable_3 = 1;
			}
		}
		break;
		case 17:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_3 == 1){
				/*DEG-CALL: handler vidioc_enum_input from mcam_v4l_ioctl_ops*/
				(& mcam_vidioc_enum_input)(mcam_v4l_ioctl_ops_group3,ldvarg10,ldvarg9);
				/*DEG-postcall: default*/
				ldv_state_variable_3 = 1;
			}
		}
		break;
		case 18:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_3 == 1){
				/*DEG-CALL: handler vidioc_s_parm from mcam_v4l_ioctl_ops*/
				(& mcam_vidioc_s_parm)(mcam_v4l_ioctl_ops_group3,ldvarg8,mcam_v4l_ioctl_ops_group2);
				/*DEG-postcall: default*/
				ldv_state_variable_3 = 1;
			}
		}
		break;
		case 19:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_3 == 1){
				/*DEG-CALL: handler vidioc_s_input from mcam_v4l_ioctl_ops*/
				(& mcam_vidioc_s_input)(mcam_v4l_ioctl_ops_group3,ldvarg7,ldvarg6);
				/*DEG-postcall: default*/
				ldv_state_variable_3 = 1;
			}
		}
		break;
		case 20:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_3 == 1){
				/*DEG-CALL: handler vidioc_s_register from mcam_v4l_ioctl_ops*/
				(& mcam_vidioc_s_register)(mcam_v4l_ioctl_ops_group3,ldvarg5,ldvarg4);
				/*DEG-postcall: default*/
				ldv_state_variable_3 = 1;
			}
		}
		break;
		case 21:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_3 == 1){
				/*DEG-CALL: handler vidioc_streamon from mcam_v4l_ioctl_ops*/
				(& mcam_vidioc_streamon)(mcam_v4l_ioctl_ops_group3,ldvarg3,ldvarg2);
				/*DEG-postcall: default*/
				ldv_state_variable_3 = 1;
			}
		}
		break;
		default: ldv_assume(0);
	}

};
2528
2529
/*
 * LDV harness (auto-generated): model the file-operations state machine
 * of mcam_v4l_fops -- open moves to state 2 (device open, ref held),
 * release back to state 1; read only in state 2; mmap/poll/ioctl in
 * either state.  Do not hand-edit; regenerated by the DEG tool.
 */
void ldv_main_exported_2(void){
	struct poll_table_struct *ldvarg46;
	loff_t *ldvarg47;
	struct vm_area_struct *ldvarg50;
	long unsigned int ldvarg44;
	char *ldvarg49;
	unsigned int ldvarg45;
	size_t ldvarg48;
	/*DEG-struct: handlers from structure mcam_v4l_fops*/
	switch(__VERIFIER_nondet_int()){
		case 0:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_2 == 1){
				/*DEG-CALL: handler open from mcam_v4l_fops*/
				ldv_retval_1=(& mcam_v4l_open)(mcam_v4l_fops_group0);
				/*DEG-postcall: since other handlers should be called*/
				if(ldv_retval_1==0){
					ldv_state_variable_2 = 2;
					ref_cnt++;
				}
			}
		}
		break;
		case 1:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_2 == 1){
				/*DEG-CALL: handler mmap from mcam_v4l_fops*/
				(& mcam_v4l_mmap)(mcam_v4l_fops_group0,ldvarg50);
				/*DEG-postcall: default*/
				ldv_state_variable_2 = 1;
			}
			/*DEG-state: state 2 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_2 == 2){
				/*DEG-CALL: handler mmap from mcam_v4l_fops*/
				(& mcam_v4l_mmap)(mcam_v4l_fops_group0,ldvarg50);
				/*DEG-postcall: default*/
				ldv_state_variable_2 = 2;
			}
		}
		break;
		case 2:{
			/*DEG-state: state 2 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_2 == 2){
				/*DEG-CALL: handler release from mcam_v4l_fops*/
				(& mcam_v4l_release)(mcam_v4l_fops_group0);
				/*DEG-postcall: module put*/
				ldv_state_variable_2 = 1;
				ref_cnt--;
			}
		}
		break;
		case 3:{
			/*DEG-state: state 2 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_2 == 2){
				/*DEG-CALL: handler read from mcam_v4l_fops*/
				(& mcam_v4l_read)(mcam_v4l_fops_group0,ldvarg49,ldvarg48,ldvarg47);
				/*DEG-postcall: default*/
				ldv_state_variable_2 = 2;
			}
		}
		break;
		case 4:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_2 == 1){
				/*DEG-CALL: handler poll from mcam_v4l_fops*/
				(& mcam_v4l_poll)(mcam_v4l_fops_group0,ldvarg46);
				/*DEG-postcall: default*/
				ldv_state_variable_2 = 1;
			}
			/*DEG-state: state 2 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_2 == 2){
				/*DEG-CALL: handler poll from mcam_v4l_fops*/
				(& mcam_v4l_poll)(mcam_v4l_fops_group0,ldvarg46);
				/*DEG-postcall: default*/
				ldv_state_variable_2 = 2;
			}
		}
		break;
		case 5:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_2 == 1){
				/*DEG-CALL: handler unlocked_ioctl from mcam_v4l_fops*/
				(& video_ioctl2)(mcam_v4l_fops_group0,ldvarg45,ldvarg44);
				/*DEG-postcall: default*/
				ldv_state_variable_2 = 1;
			}
			/*DEG-state: state 2 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_2 == 2){
				/*DEG-CALL: handler unlocked_ioctl from mcam_v4l_fops*/
				(& video_ioctl2)(mcam_v4l_fops_group0,ldvarg45,ldvarg44);
				/*DEG-postcall: default*/
				ldv_state_variable_2 = 2;
			}
		}
		break;
		default: ldv_assume(0);
	}

};
2629
2630
/*
 * LDV harness (auto-generated): pick one handler from mcam_vb2_ops
 * nondeterministically and invoke it with undefined arguments,
 * tracking ldv_state_variable_5.  Do not hand-edit; regenerated by
 * the DEG tool.
 */
void ldv_main_exported_5(void){
	unsigned int *ldvarg62;
	unsigned int *ldvarg59;
	struct vb2_buffer *ldvarg64;
	unsigned int *ldvarg60;
	struct v4l2_format const *ldvarg61;
	void **ldvarg58;
	unsigned int ldvarg63;
	/*DEG-struct: handlers from structure mcam_vb2_ops*/
	switch(__VERIFIER_nondet_int()){
		case 0:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_5 == 1){
				/*DEG-CALL: handler wait_finish from mcam_vb2_ops*/
				(& mcam_vb_wait_finish)(mcam_vb2_ops_group0);
				/*DEG-postcall: default*/
				ldv_state_variable_5 = 1;
			}
		}
		break;
		case 1:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_5 == 1){
				/*DEG-CALL: handler buf_queue from mcam_vb2_ops*/
				(& mcam_vb_buf_queue)(ldvarg64);
				/*DEG-postcall: default*/
				ldv_state_variable_5 = 1;
			}
		}
		break;
		case 2:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_5 == 1){
				/*DEG-CALL: handler wait_prepare from mcam_vb2_ops*/
				(& mcam_vb_wait_prepare)(mcam_vb2_ops_group0);
				/*DEG-postcall: default*/
				ldv_state_variable_5 = 1;
			}
		}
		break;
		case 3:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_5 == 1){
				/*DEG-CALL: handler stop_streaming from mcam_vb2_ops*/
				(& mcam_vb_stop_streaming)(mcam_vb2_ops_group0);
				/*DEG-postcall: default*/
				ldv_state_variable_5 = 1;
			}
		}
		break;
		case 4:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_5 == 1){
				/*DEG-CALL: handler start_streaming from mcam_vb2_ops*/
				(& mcam_vb_start_streaming)(mcam_vb2_ops_group0,ldvarg63);
				/*DEG-postcall: default*/
				ldv_state_variable_5 = 1;
			}
		}
		break;
		case 5:{
			/*DEG-state: state 1 (look at corresponding state-chart diagram for details)*/
			if(ldv_state_variable_5 == 1){
				/*DEG-CALL: handler queue_setup from mcam_vb2_ops*/
				(& mcam_vb_queue_setup)(mcam_vb2_ops_group0,ldvarg61,ldvarg60,ldvarg59,ldvarg62,ldvarg58);
				/*DEG-postcall: default*/
				ldv_state_variable_5 = 1;
			}
		}
		break;
		default: ldv_assume(0);
	}

};
2
3 #include <verifier/rcv.h>
4 #include <linux/device.h>
5 #include <kernel-model/ERR.inc>
6 struct device_private;
7 /* LDV_COMMENT_CHANGE_STATE At the beginning nothing is allocated. */
8 int ldv_alloc_count = 0;
9
10 /* LDV_COMMENT_CHANGE_STATE Saved release function pointer. */
11 void (*gadget_release_pointer)(struct device *_dev);
12
13 void* __VERIFIER_alloc(size_t size);
14
15 /* LDV_COMMENT_CHANGE_STATE At the beginning nothing is allocated. */
16 //void* ldv_saved_drv_data;
17
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_alloc') Allocate a "memory". */
/*
 * Model of a generic kernel allocator: obtains a nondeterministic pointer
 * from __VERIFIER_alloc() and lets ldv_assume_and_increase() constrain it
 * to the valid (non-PTR_ERR) range and count it in ldv_alloc_count when
 * the result is non-NULL.  Returns NULL or a "valid" pointer.
 */
void *ldv_alloc(size_t size)
{
	/*
	 * Forward declaration: ldv_assume_and_increase() is defined further
	 * down in this file with no prior prototype visible here; without
	 * this declaration the call below would rely on an implicit function
	 * declaration, which C99 removed.
	 */
	void ldv_assume_and_increase(void *res);

	void *res = __VERIFIER_alloc(size);

	ldv_assume_and_increase(res);
	return res;
}
25
/*
 * Model of a zero-initialising allocator (kzalloc-like): obtains the
 * pointer from ldv_zalloc() and lets ldv_assume_and_increase() constrain
 * it to the valid (non-PTR_ERR) range and count it in ldv_alloc_count
 * when non-NULL.  Returns NULL or a "valid" pointer.
 */
void *ldv_zero_alloc(size_t size)
{
	/*
	 * Forward declaration: ldv_assume_and_increase() is defined later in
	 * this file; avoids an implicit function declaration (invalid since
	 * C99).
	 */
	void ldv_assume_and_increase(void *res);

	void *res = ldv_zalloc(size);

	ldv_assume_and_increase(res);
	return res;
}
32
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_assume_and_increase') Allocate a "memory". */
/*
 * Constrain an allocation result and account for it.
 *
 * ldv_assume() cuts off all verification paths where res lies above
 * LDV_PTR_MAX (the model's PTR_ERR region), so callers only ever observe
 * NULL or a "valid" pointer.  Every non-NULL result is counted as one live
 * allocation in ldv_alloc_count, which ldv_check_final_state() later
 * requires to be zero.
 */
void ldv_assume_and_increase(void* res)
{
	/* NOTE(review): comparing a pointer with the integer constant
	 * LDV_PTR_MAX is intentional verifier-model C, not portable C. */
	ldv_assume(res <= LDV_PTR_MAX);
	if (res != 0) {
		/* LDV_COMMENT_CHANGE_STATE One more "memory" is allocated. */
		ldv_alloc_count++;
	}
}
42
/*
 * Model of an allocator that never returns NULL (memdup_user-like): the
 * result is assumed non-zero, so it is either a "valid" pointer
 * (<= LDV_PTR_MAX) or a PTR_ERR-style value above LDV_PTR_MAX.  Only
 * results in the valid range are counted in ldv_alloc_count.
 */
void* ldv_nonzero_alloc(size_t size)
{
	//functions, like memdup_user returns either valid pointer, or ptr_err
	void *res = __VERIFIER_alloc(size);
	ldv_assume(res != 0);
	if (res <= LDV_PTR_MAX) {
		/* LDV_COMMENT_CHANGE_STATE One more "memory" is allocated. */
		ldv_alloc_count++;
	}
	return res;
}
54
55 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_alloc_without_counter') Allocate a "memory". */
56 void* ldv_alloc_without_counter(size_t size)
57 {
58 void *res = __VERIFIER_alloc(size);
59 ldv_assume(res <= LDV_PTR_MAX);
60 return res;
61 }
62
63 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_zalloc_without_counter') Allocate a "memory". */
64 void* ldv_zalloc_without_counter(size_t size)
65 {
66 void *res = ldv_zalloc(size);
67 ldv_assume(res <= LDV_PTR_MAX);
68 return res;
69 }
70
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_free') Free a "memory". */
/*
 * Model of freeing one allocation.  Freeing while ldv_alloc_count is zero
 * (a double free, or a free of something never allocated through this
 * model) trips ldv_assert() and is reported as a property violation;
 * otherwise one live allocation is retired.
 */
void ldv_free(void)
{
	ldv_assert(ldv_alloc_count > 0);
	/* LDV_COMMENT_CHANGE_STATE Free a "memory". */
	ldv_alloc_count--;
	//ldv_saved_drv_data = 0;
}
79
80 void ldv_condition_free()
81 {
82 if (ldv_alloc_count > 0)
83 {
84 ldv_free();
85 }
86 }
87
88 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_save_gadget_release') Free a "memory". */
89 void ldv_save_gadget_release(void (*func)(struct device *_dev))
90 {
91 gadget_release_pointer = func;
92 }
93
/*
 * Model of dev_set_drvdata(): stores the driver-private pointer directly
 * in dev->p and always reports success.
 *
 * NOTE(review): in the real kernel dev->p is the driver core's
 * `struct device_private *`; this model repurposes the field to carry the
 * drvdata pointer so ldv_dev_get_drvdata() can return it.
 */
int ldv_dev_set_drvdata(struct device *dev, void *data)
{
	dev->p = data;
	return 0;
}
99
/*
 * Model of dev_get_drvdata(): returns whatever pointer was stashed in
 * dev->p by ldv_dev_set_drvdata().
 */
void *ldv_dev_get_drvdata(const struct device *dev)
{
	return dev->p;
}
104
105
106
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') All allocated memory should be freed at the end. */
/*
 * Final check of the memory model: if any allocation counted in
 * ldv_alloc_count was never matched by ldv_free(), the assertion fails
 * and the leak is reported as a property violation.
 */
void ldv_check_final_state(void)
{
	/* LDV_COMMENT_ASSERT Nothing should be allocated at the end. */
	ldv_assert(ldv_alloc_count == 0);
112 } 1 #ifndef _LDV_RCV_H_
2 #define _LDV_RCV_H_
3
4 //Only for defining size_t
5 #include <linux/types.h>
6
7 /* If expr evaluates to zero, ldv_assert() causes a program to reach the error
8 label like the standard assert(). */
9 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
10
11
/* The error label wrapper. It is used because of some static verifiers (like
   BLAST) don't accept multiple error labels through a program. */
/* NOTE(review): the self-looping LDV_ERROR label is the single program
   point the verifier recognises as "property violated"; keep it exactly
   in this form. */
static inline void ldv_error(void)
{
	LDV_ERROR: goto LDV_ERROR;
}
18
19 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
20 avoided by verifiers. */
21 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
22
/* Infinite loop, that causes verifiers to skip such paths. */
/* NOTE(review): unlike ldv_error(), reaching LDV_STOP is not a violation;
   the non-terminating loop simply makes the verifier discard the path. */
static inline void ldv_stop(void) {
	LDV_STOP: goto LDV_STOP;
}
27
28 /* Special nondeterministic functions. */
29 int ldv_undef_int(void);
30 void *ldv_undef_ptr(void);
31 unsigned long ldv_undef_ulong(void);
32
33 void* __VERIFIER_alloc(size_t size);
34 void* ldv_zalloc(size_t size);
/*
 * Nondeterministic allocation that is assumed to succeed: all paths where
 * __VERIFIER_alloc() yields NULL are cut off by ldv_assume(), so the
 * caller always receives a non-NULL pointer.
 */
static inline void* ldv_successful_alloc(size_t size)
{
	void *p = __VERIFIER_alloc(size);

	ldv_assume(p != 0);
	return p;
}
41
/*
 * Zero-initialising allocation that is assumed to succeed: paths where
 * ldv_zalloc() yields NULL are cut off, so the result is always non-NULL.
 */
static inline void* ldv_successful_zalloc(size_t size)
{
	void *p = ldv_zalloc(size);

	ldv_assume(p != 0);
	return p;
}
/* Return nondeterministic negative integer number. */
static inline int ldv_undef_int_negative(void)
{
	int value = ldv_undef_int();

	/* Discard every path where the nondeterministic value is >= 0. */
	ldv_assume(value < 0);

	return value;
}
/* Return nondeterministic nonpositive integer number. */
static inline int ldv_undef_int_nonpositive(void)
{
	int value = ldv_undef_int();

	/* Discard every path where the nondeterministic value is > 0. */
	ldv_assume(value <= 0);

	return value;
}
66
/* Add explicit model for __builin_expect GCC function. Without the model a
   return value will be treated as nondetermined by verifiers. */
/* NOTE(review): redefining a compiler builtin is only acceptable inside
   the verifier tool chain; an ordinary compiler may reject this. */
long __builtin_expect(long exp, long c)
{
	/* The branch hint carries no verification semantics: the value of
	   the expression itself is all that matters. */
	return exp;
}
73
/* This function causes the program to exit abnormally. GCC implements this
function by using a target-dependent mechanism (such as intentionally executing
an illegal instruction) or by calling abort. The mechanism used may vary from
release to release so you should not rely on any particular implementation.
http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
/* In the model an abnormal trap is treated as reaching the error location:
   ldv_assert(0) always fails. */
void __builtin_trap(void)
{
	ldv_assert(0);
}
83
84 /* The constant is for simulating an error of ldv_undef_ptr() function. */
85 #define LDV_PTR_MAX 201200
86
87 #endif /* _LDV_RCV_H_ */ 1 /*
2 * device.h - generic, centralized driver model
3 *
4 * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
5 * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
6 * Copyright (c) 2008-2009 Novell Inc.
7 *
8 * This file is released under the GPLv2
9 *
10 * See Documentation/driver-model/ for more information.
11 */
12
13 #ifndef _DEVICE_H_
14 #define _DEVICE_H_
15
16 #include <linux/ioport.h>
17 #include <linux/kobject.h>
18 #include <linux/klist.h>
19 #include <linux/list.h>
20 #include <linux/lockdep.h>
21 #include <linux/compiler.h>
22 #include <linux/types.h>
23 #include <linux/mutex.h>
24 #include <linux/pinctrl/devinfo.h>
25 #include <linux/pm.h>
26 #include <linux/atomic.h>
27 #include <linux/ratelimit.h>
28 #include <linux/uidgid.h>
29 #include <linux/gfp.h>
30 #include <asm/device.h>
31
32 struct device;
33 struct device_private;
34 struct device_driver;
35 struct driver_private;
36 struct module;
37 struct class;
38 struct subsys_private;
39 struct bus_type;
40 struct device_node;
41 struct iommu_ops;
42 struct iommu_group;
43
/* A sysfs attribute attached to a bus_type: show/store are the read and
 * write handlers of the attribute file (see BUS_ATTR* below). */
struct bus_attribute {
	struct attribute attr;
	ssize_t (*show)(struct bus_type *bus, char *buf);
	ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
};
49
50 #define BUS_ATTR(_name, _mode, _show, _store) \
51 struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store)
52 #define BUS_ATTR_RW(_name) \
53 struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
54 #define BUS_ATTR_RO(_name) \
55 struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
56
57 extern int __must_check bus_create_file(struct bus_type *,
58 struct bus_attribute *);
59 extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
60
61 /**
62 * struct bus_type - The bus type of the device
63 *
64 * @name: The name of the bus.
65 * @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id).
66 * @dev_root: Default device to use as the parent.
67 * @dev_attrs: Default attributes of the devices on the bus.
68 * @bus_groups: Default attributes of the bus.
69 * @dev_groups: Default attributes of the devices on the bus.
70 * @drv_groups: Default attributes of the device drivers on the bus.
71 * @match: Called, perhaps multiple times, whenever a new device or driver
72 * is added for this bus. It should return a nonzero value if the
73 * given device can be handled by the given driver.
74 * @uevent: Called when a device is added, removed, or a few other things
75 * that generate uevents to add the environment variables.
76 * @probe: Called when a new device or driver add to this bus, and callback
77 * the specific driver's probe to initial the matched device.
78 * @remove: Called when a device removed from this bus.
79 * @shutdown: Called at shut-down time to quiesce the device.
80 *
81 * @online: Called to put the device back online (after offlining it).
82 * @offline: Called to put the device offline for hot-removal. May fail.
83 *
84 * @suspend: Called when a device on this bus wants to go to sleep mode.
85 * @resume: Called to bring a device on this bus out of sleep mode.
86 * @pm: Power management operations of this bus, callback the specific
87 * device driver's pm-ops.
88 * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
89 * driver implementations to a bus and allow the driver to do
90 * bus-specific setup
91 * @p: The private data of the driver core, only the driver core can
92 * touch this.
93 * @lock_key: Lock class key for use by the lock validator
94 *
95 * A bus is a channel between the processor and one or more devices. For the
96 * purposes of the device model, all devices are connected via a bus, even if
97 * it is an internal, virtual, "platform" bus. Buses can plug into each other.
98 * A USB controller is usually a PCI device, for example. The device model
99 * represents the actual connections between buses and the devices they control.
100 * A bus is represented by the bus_type structure. It contains the name, the
101 * default attributes, the bus' methods, PM operations, and the driver core's
102 * private data.
103 */
104 struct bus_type {
105 const char *name;
106 const char *dev_name;
107 struct device *dev_root;
108 struct device_attribute *dev_attrs; /* use dev_groups instead */
109 const struct attribute_group **bus_groups;
110 const struct attribute_group **dev_groups;
111 const struct attribute_group **drv_groups;
112
113 int (*match)(struct device *dev, struct device_driver *drv);
114 int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
115 int (*probe)(struct device *dev);
116 int (*remove)(struct device *dev);
117 void (*shutdown)(struct device *dev);
118
119 int (*online)(struct device *dev);
120 int (*offline)(struct device *dev);
121
122 int (*suspend)(struct device *dev, pm_message_t state);
123 int (*resume)(struct device *dev);
124
125 const struct dev_pm_ops *pm;
126
127 const struct iommu_ops *iommu_ops;
128
129 struct subsys_private *p;
130 struct lock_class_key lock_key;
131 };
132
133 extern int __must_check bus_register(struct bus_type *bus);
134
135 extern void bus_unregister(struct bus_type *bus);
136
137 extern int __must_check bus_rescan_devices(struct bus_type *bus);
138
139 /* iterator helpers for buses */
/* Iterator state for walking the devices of a bus/subsystem, optionally
 * filtered by device type; used with subsys_dev_iter_init/_next/_exit(). */
struct subsys_dev_iter {
	struct klist_iter ki;
	const struct device_type *type;
};
144 void subsys_dev_iter_init(struct subsys_dev_iter *iter,
145 struct bus_type *subsys,
146 struct device *start,
147 const struct device_type *type);
148 struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
149 void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
150
151 int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
152 int (*fn)(struct device *dev, void *data));
153 struct device *bus_find_device(struct bus_type *bus, struct device *start,
154 void *data,
155 int (*match)(struct device *dev, void *data));
156 struct device *bus_find_device_by_name(struct bus_type *bus,
157 struct device *start,
158 const char *name);
159 struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id,
160 struct device *hint);
161 int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
162 void *data, int (*fn)(struct device_driver *, void *));
163 void bus_sort_breadthfirst(struct bus_type *bus,
164 int (*compare)(const struct device *a,
165 const struct device *b));
166 /*
167 * Bus notifiers: Get notified of addition/removal of devices
168 * and binding/unbinding of drivers to devices.
169 * In the long run, it should be a replacement for the platform
170 * notify hooks.
171 */
172 struct notifier_block;
173
174 extern int bus_register_notifier(struct bus_type *bus,
175 struct notifier_block *nb);
176 extern int bus_unregister_notifier(struct bus_type *bus,
177 struct notifier_block *nb);
178
179 /* All 4 notifers below get called with the target struct device *
180 * as an argument. Note that those functions are likely to be called
181 * with the device lock held in the core, so be careful.
182 */
183 #define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */
184 #define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device removed */
185 #define BUS_NOTIFY_BIND_DRIVER 0x00000003 /* driver about to be
186 bound */
187 #define BUS_NOTIFY_BOUND_DRIVER 0x00000004 /* driver bound to device */
188 #define BUS_NOTIFY_UNBIND_DRIVER 0x00000005 /* driver about to be
189 unbound */
190 #define BUS_NOTIFY_UNBOUND_DRIVER 0x00000006 /* driver is unbound
191 from the device */
192
193 extern struct kset *bus_get_kset(struct bus_type *bus);
194 extern struct klist *bus_get_device_klist(struct bus_type *bus);
195
196 /**
197 * struct device_driver - The basic device driver structure
198 * @name: Name of the device driver.
199 * @bus: The bus which the device of this driver belongs to.
200 * @owner: The module owner.
201 * @mod_name: Used for built-in modules.
202 * @suppress_bind_attrs: Disables bind/unbind via sysfs.
203 * @of_match_table: The open firmware table.
204 * @acpi_match_table: The ACPI match table.
205 * @probe: Called to query the existence of a specific device,
206 * whether this driver can work with it, and bind the driver
207 * to a specific device.
208 * @remove: Called when the device is removed from the system to
209 * unbind a device from this driver.
210 * @shutdown: Called at shut-down time to quiesce the device.
211 * @suspend: Called to put the device to sleep mode. Usually to a
212 * low power state.
213 * @resume: Called to bring a device from sleep mode.
214 * @groups: Default attributes that get created by the driver core
215 * automatically.
216 * @pm: Power management operations of the device which matched
217 * this driver.
218 * @p: Driver core's private data, no one other than the driver
219 * core can touch this.
220 *
221 * The device driver-model tracks all of the drivers known to the system.
222 * The main reason for this tracking is to enable the driver core to match
223 * up drivers with new devices. Once drivers are known objects within the
224 * system, however, a number of other things become possible. Device drivers
225 * can export information and configuration variables that are independent
226 * of any specific device.
227 */
228 struct device_driver {
229 const char *name;
230 struct bus_type *bus;
231
232 struct module *owner;
233 const char *mod_name; /* used for built-in modules */
234
235 bool suppress_bind_attrs; /* disables bind/unbind via sysfs */
236
237 const struct of_device_id *of_match_table;
238 const struct acpi_device_id *acpi_match_table;
239
240 int (*probe) (struct device *dev);
241 int (*remove) (struct device *dev);
242 void (*shutdown) (struct device *dev);
243 int (*suspend) (struct device *dev, pm_message_t state);
244 int (*resume) (struct device *dev);
245 const struct attribute_group **groups;
246
247 const struct dev_pm_ops *pm;
248
249 struct driver_private *p;
250 };
251
252
253 extern int __must_check driver_register(struct device_driver *drv);
254 extern void driver_unregister(struct device_driver *drv);
255
256 extern struct device_driver *driver_find(const char *name,
257 struct bus_type *bus);
258 extern int driver_probe_done(void);
259 extern void wait_for_device_probe(void);
260
261
262 /* sysfs interface for exporting driver attributes */
263
264 struct driver_attribute {
265 struct attribute attr;
266 ssize_t (*show)(struct device_driver *driver, char *buf);
267 ssize_t (*store)(struct device_driver *driver, const char *buf,
268 size_t count);
269 };
270
271 #define DRIVER_ATTR(_name, _mode, _show, _store) \
272 struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store)
273 #define DRIVER_ATTR_RW(_name) \
274 struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
275 #define DRIVER_ATTR_RO(_name) \
276 struct driver_attribute driver_attr_##_name = __ATTR_RO(_name)
277 #define DRIVER_ATTR_WO(_name) \
278 struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)
279
280 extern int __must_check driver_create_file(struct device_driver *driver,
281 const struct driver_attribute *attr);
282 extern void driver_remove_file(struct device_driver *driver,
283 const struct driver_attribute *attr);
284
285 extern int __must_check driver_for_each_device(struct device_driver *drv,
286 struct device *start,
287 void *data,
288 int (*fn)(struct device *dev,
289 void *));
290 struct device *driver_find_device(struct device_driver *drv,
291 struct device *start, void *data,
292 int (*match)(struct device *dev, void *data));
293
294 /**
295 * struct subsys_interface - interfaces to device functions
296 * @name: name of the device function
297 * @subsys: subsytem of the devices to attach to
298 * @node: the list of functions registered at the subsystem
299 * @add_dev: device hookup to device function handler
300 * @remove_dev: device hookup to device function handler
301 *
302 * Simple interfaces attached to a subsystem. Multiple interfaces can
303 * attach to a subsystem and its devices. Unlike drivers, they do not
304 * exclusively claim or control devices. Interfaces usually represent
305 * a specific functionality of a subsystem/class of devices.
306 */
307 struct subsys_interface {
308 const char *name;
309 struct bus_type *subsys;
310 struct list_head node;
311 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
312 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
313 };
314
315 int subsys_interface_register(struct subsys_interface *sif);
316 void subsys_interface_unregister(struct subsys_interface *sif);
317
318 int subsys_system_register(struct bus_type *subsys,
319 const struct attribute_group **groups);
320 int subsys_virtual_register(struct bus_type *subsys,
321 const struct attribute_group **groups);
322
323 /**
324 * struct class - device classes
325 * @name: Name of the class.
326 * @owner: The module owner.
327 * @class_attrs: Default attributes of this class.
328 * @dev_groups: Default attributes of the devices that belong to the class.
329 * @dev_kobj: The kobject that represents this class and links it into the hierarchy.
330 * @dev_uevent: Called when a device is added, removed from this class, or a
331 * few other things that generate uevents to add the environment
332 * variables.
333 * @devnode: Callback to provide the devtmpfs.
334 * @class_release: Called to release this class.
335 * @dev_release: Called to release the device.
336 * @suspend: Used to put the device to sleep mode, usually to a low power
337 * state.
338 * @resume: Used to bring the device from the sleep mode.
339 * @ns_type: Callbacks so sysfs can detemine namespaces.
340 * @namespace: Namespace of the device belongs to this class.
341 * @pm: The default device power management operations of this class.
342 * @p: The private data of the driver core, no one other than the
343 * driver core can touch this.
344 *
345 * A class is a higher-level view of a device that abstracts out low-level
346 * implementation details. Drivers may see a SCSI disk or an ATA disk, but,
347 * at the class level, they are all simply disks. Classes allow user space
348 * to work with devices based on what they do, rather than how they are
349 * connected or how they work.
350 */
351 struct class {
352 const char *name;
353 struct module *owner;
354
355 struct class_attribute *class_attrs;
356 const struct attribute_group **dev_groups;
357 struct kobject *dev_kobj;
358
359 int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
360 char *(*devnode)(struct device *dev, umode_t *mode);
361
362 void (*class_release)(struct class *class);
363 void (*dev_release)(struct device *dev);
364
365 int (*suspend)(struct device *dev, pm_message_t state);
366 int (*resume)(struct device *dev);
367
368 const struct kobj_ns_type_operations *ns_type;
369 const void *(*namespace)(struct device *dev);
370
371 const struct dev_pm_ops *pm;
372
373 struct subsys_private *p;
374 };
375
/* Iterator state for walking the devices of a class, optionally filtered
 * by device type; used with class_dev_iter_init/_next/_exit(). */
struct class_dev_iter {
	struct klist_iter ki;
	const struct device_type *type;
};
380
381 extern struct kobject *sysfs_dev_block_kobj;
382 extern struct kobject *sysfs_dev_char_kobj;
383 extern int __must_check __class_register(struct class *class,
384 struct lock_class_key *key);
385 extern void class_unregister(struct class *class);
386
387 /* This is a #define to keep the compiler from merging different
388 * instances of the __key variable */
389 #define class_register(class) \
390 ({ \
391 static struct lock_class_key __key; \
392 __class_register(class, &__key); \
393 })
394
395 struct class_compat;
396 struct class_compat *class_compat_register(const char *name);
397 void class_compat_unregister(struct class_compat *cls);
398 int class_compat_create_link(struct class_compat *cls, struct device *dev,
399 struct device *device_link);
400 void class_compat_remove_link(struct class_compat *cls, struct device *dev,
401 struct device *device_link);
402
403 extern void class_dev_iter_init(struct class_dev_iter *iter,
404 struct class *class,
405 struct device *start,
406 const struct device_type *type);
407 extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
408 extern void class_dev_iter_exit(struct class_dev_iter *iter);
409
410 extern int class_for_each_device(struct class *class, struct device *start,
411 void *data,
412 int (*fn)(struct device *dev, void *data));
413 extern struct device *class_find_device(struct class *class,
414 struct device *start, const void *data,
415 int (*match)(struct device *, const void *));
416
/* A sysfs attribute attached to a class: show/store are the read and
 * write handlers of the attribute file (see CLASS_ATTR* below). */
struct class_attribute {
	struct attribute attr;
	ssize_t (*show)(struct class *class, struct class_attribute *attr,
			char *buf);
	ssize_t (*store)(struct class *class, struct class_attribute *attr,
			 const char *buf, size_t count);
};
424
425 #define CLASS_ATTR(_name, _mode, _show, _store) \
426 struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store)
427 #define CLASS_ATTR_RW(_name) \
428 struct class_attribute class_attr_##_name = __ATTR_RW(_name)
429 #define CLASS_ATTR_RO(_name) \
430 struct class_attribute class_attr_##_name = __ATTR_RO(_name)
431
432 extern int __must_check class_create_file_ns(struct class *class,
433 const struct class_attribute *attr,
434 const void *ns);
435 extern void class_remove_file_ns(struct class *class,
436 const struct class_attribute *attr,
437 const void *ns);
438
/* Create a sysfs file for @attr under @class in the default (NULL)
 * namespace; thin wrapper around class_create_file_ns(). */
static inline int __must_check class_create_file(struct class *class,
						 const struct class_attribute *attr)
{
	return class_create_file_ns(class, attr, NULL);
}
444
/* Remove the sysfs file for @attr from @class (default NULL namespace). */
static inline void class_remove_file(struct class *class,
				     const struct class_attribute *attr)
{
	/* NOTE(review): `return` of a void expression is a GNU-ism kept
	 * verbatim from the upstream kernel header. */
	return class_remove_file_ns(class, attr, NULL);
}
450
451 /* Simple class attribute that is just a static string */
452 struct class_attribute_string {
453 struct class_attribute attr;
454 char *str;
455 };
456
457 /* Currently read-only only */
458 #define _CLASS_ATTR_STRING(_name, _mode, _str) \
459 { __ATTR(_name, _mode, show_class_attr_string, NULL), _str }
460 #define CLASS_ATTR_STRING(_name, _mode, _str) \
461 struct class_attribute_string class_attr_##_name = \
462 _CLASS_ATTR_STRING(_name, _mode, _str)
463
464 extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr,
465 char *buf);
466
/* Hook into a class: add_dev/remove_dev are called for devices of @class
 * while the interface is registered (class_interface_register/unregister).
 * @node links the interface into the class's interface list. */
struct class_interface {
	struct list_head node;
	struct class *class;

	int (*add_dev)		(struct device *, struct class_interface *);
	void (*remove_dev)	(struct device *, struct class_interface *);
};
474
475 extern int __must_check class_interface_register(struct class_interface *);
476 extern void class_interface_unregister(struct class_interface *);
477
478 extern struct class * __must_check __class_create(struct module *owner,
479 const char *name,
480 struct lock_class_key *key);
481 extern void class_destroy(struct class *cls);
482
483 /* This is a #define to keep the compiler from merging different
484 * instances of the __key variable */
485 #define class_create(owner, name) \
486 ({ \
487 static struct lock_class_key __key; \
488 __class_create(owner, name, &__key); \
489 })
490
491 /*
492 * The type of device, "struct device" is embedded in. A class
493 * or bus can contain devices of different types
494 * like "partitions" and "disks", "mouse" and "event".
495 * This identifies the device type and carries type-specific
496 * information, equivalent to the kobj_type of a kobject.
497 * If "name" is specified, the uevent will contain it in
498 * the DEVTYPE variable.
499 */
500 struct device_type {
501 const char *name;
502 const struct attribute_group **groups;
503 int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
504 char *(*devnode)(struct device *dev, umode_t *mode,
505 kuid_t *uid, kgid_t *gid);
506 void (*release)(struct device *dev);
507
508 const struct dev_pm_ops *pm;
509 };
510
511 /* interface for exporting device attributes */
512 struct device_attribute {
513 struct attribute attr;
514 ssize_t (*show)(struct device *dev, struct device_attribute *attr,
515 char *buf);
516 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
517 const char *buf, size_t count);
518 };
519
/* Device attribute carrying a pointer to the variable it exposes; used by
 * the DEVICE_ULONG_ATTR/DEVICE_INT_ATTR/DEVICE_BOOL_ATTR helpers below. */
struct dev_ext_attribute {
	struct device_attribute attr;
	void *var;
};
524
525 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
526 char *buf);
527 ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr,
528 const char *buf, size_t count);
529 ssize_t device_show_int(struct device *dev, struct device_attribute *attr,
530 char *buf);
531 ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
532 const char *buf, size_t count);
533 ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
534 char *buf);
535 ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
536 const char *buf, size_t count);
537
538 #define DEVICE_ATTR(_name, _mode, _show, _store) \
539 struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
540 #define DEVICE_ATTR_RW(_name) \
541 struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
542 #define DEVICE_ATTR_RO(_name) \
543 struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
544 #define DEVICE_ATTR_WO(_name) \
545 struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
546 #define DEVICE_ULONG_ATTR(_name, _mode, _var) \
547 struct dev_ext_attribute dev_attr_##_name = \
548 { __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
549 #define DEVICE_INT_ATTR(_name, _mode, _var) \
550 struct dev_ext_attribute dev_attr_##_name = \
551 { __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }
552 #define DEVICE_BOOL_ATTR(_name, _mode, _var) \
553 struct dev_ext_attribute dev_attr_##_name = \
554 { __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }
555 #define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
556 struct device_attribute dev_attr_##_name = \
557 __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
558
559 extern int device_create_file(struct device *device,
560 const struct device_attribute *entry);
561 extern void device_remove_file(struct device *dev,
562 const struct device_attribute *attr);
563 extern bool device_remove_file_self(struct device *dev,
564 const struct device_attribute *attr);
565 extern int __must_check device_create_bin_file(struct device *dev,
566 const struct bin_attribute *attr);
567 extern void device_remove_bin_file(struct device *dev,
568 const struct bin_attribute *attr);
569
570 /* device resource management */
571 typedef void (*dr_release_t)(struct device *dev, void *res);
572 typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
573
574 #ifdef CONFIG_DEBUG_DEVRES
575 extern void *__devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
576 const char *name);
577 #define devres_alloc(release, size, gfp) \
578 __devres_alloc(release, size, gfp, #release)
579 #else
580 extern void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp);
581 #endif
582 extern void devres_for_each_res(struct device *dev, dr_release_t release,
583 dr_match_t match, void *match_data,
584 void (*fn)(struct device *, void *, void *),
585 void *data);
586 extern void devres_free(void *res);
587 extern void devres_add(struct device *dev, void *res);
588 extern void *devres_find(struct device *dev, dr_release_t release,
589 dr_match_t match, void *match_data);
590 extern void *devres_get(struct device *dev, void *new_res,
591 dr_match_t match, void *match_data);
592 extern void *devres_remove(struct device *dev, dr_release_t release,
593 dr_match_t match, void *match_data);
594 extern int devres_destroy(struct device *dev, dr_release_t release,
595 dr_match_t match, void *match_data);
596 extern int devres_release(struct device *dev, dr_release_t release,
597 dr_match_t match, void *match_data);
598
599 /* devres group */
600 extern void * __must_check devres_open_group(struct device *dev, void *id,
601 gfp_t gfp);
602 extern void devres_close_group(struct device *dev, void *id);
603 extern void devres_remove_group(struct device *dev, void *id);
604 extern int devres_release_group(struct device *dev, void *id);
605
606 /* managed devm_k.alloc/kfree for device drivers */
607 extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
608 extern char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
609 va_list ap);
610 extern char *devm_kasprintf(struct device *dev, gfp_t gfp,
611 const char *fmt, ...);
/* Resource-managed, zero-initialised allocation (devm analogue of
 * kzalloc): forwards to devm_kmalloc() with __GFP_ZERO added. */
static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
{
	return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
}
/* Resource-managed allocation of an @n-element array of @size-byte
 * elements; returns NULL instead of allocating a wrapped-around size when
 * n * size would overflow. */
static inline void *devm_kmalloc_array(struct device *dev,
				       size_t n, size_t size, gfp_t flags)
{
	/* Overflow check: n * size must fit in size_t. */
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return devm_kmalloc(dev, n * size, flags);
}
/* Resource-managed kcalloc: overflow-checked array allocation, zeroed via
 * __GFP_ZERO; delegates to devm_kmalloc_array(). */
static inline void *devm_kcalloc(struct device *dev,
				 size_t n, size_t size, gfp_t flags)
{
	return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
}
628 extern void devm_kfree(struct device *dev, void *p);
629 extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp);
630 extern void *devm_kmemdup(struct device *dev, const void *src, size_t len,
631 gfp_t gfp);
632
633 extern unsigned long devm_get_free_pages(struct device *dev,
634 gfp_t gfp_mask, unsigned int order);
635 extern void devm_free_pages(struct device *dev, unsigned long addr);
636
637 void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
638
639 /* allows to add/remove a custom action to devres stack */
640 int devm_add_action(struct device *dev, void (*action)(void *), void *data);
641 void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
642
643 struct device_dma_parameters {
644 /*
645 * a low level driver may set these to teach IOMMU code about
646 * sg limitations.
647 */
648 unsigned int max_segment_size;
649 unsigned long segment_boundary_mask;
650 };
651
652 struct acpi_device;
653
654 struct acpi_dev_node {
655 #ifdef CONFIG_ACPI
656 struct acpi_device *companion;
657 #endif
658 };
659
660 /**
661 * struct device - The basic device structure
662 * @parent: The device's "parent" device, the device to which it is attached.
663 * In most cases, a parent device is some sort of bus or host
664 * controller. If parent is NULL, the device, is a top-level device,
665 * which is not usually what you want.
666 * @p: Holds the private data of the driver core portions of the device.
667 * See the comment of the struct device_private for detail.
668 * @kobj: A top-level, abstract class from which other classes are derived.
669 * @init_name: Initial name of the device.
670 * @type: The type of device.
671 * This identifies the device type and carries type-specific
672 * information.
673 * @mutex: Mutex to synchronize calls to its driver.
674 * @bus: Type of bus device is on.
675 * @driver: Which driver has allocated this device.
676 * @platform_data: Platform data specific to the device.
677 * Example: For devices on custom boards, as typical of embedded
678 * and SOC based hardware, Linux often uses platform_data to point
679 * to board-specific structures describing devices and how they
680 * are wired. That can include what ports are available, chip
681 * variants, which GPIO pins act in what additional roles, and so
682 * on. This shrinks the "Board Support Packages" (BSPs) and
683 * minimizes board-specific #ifdefs in drivers.
684 * @driver_data: Private pointer for driver specific info.
685 * @power: For device power management.
686 * See Documentation/power/devices.txt for details.
687 * @pm_domain: Provide callbacks that are executed during system suspend,
688 * hibernation, system resume and during runtime PM transitions
689 * along with subsystem-level and driver-level callbacks.
690 * @pins: For device pin management.
691 * See Documentation/pinctrl.txt for details.
692 * @numa_node: NUMA node this device is close to.
693 * @dma_mask: Dma mask (if dma'ble device).
694 * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
695 * hardware supports 64-bit addresses for consistent allocations
696 * such descriptors.
697 * @dma_pfn_offset: offset of DMA memory range relatively of RAM
698 * @dma_parms: A low level driver may set these to teach IOMMU code about
699 * segment limitations.
700 * @dma_pools: Dma pools (if dma'ble device).
701 * @dma_mem: Internal for coherent mem override.
702 * @cma_area: Contiguous memory area for dma allocations
703 * @archdata: For arch-specific additions.
704 * @of_node: Associated device tree node.
705 * @acpi_node: Associated ACPI device node.
706 * @devt: For creating the sysfs "dev".
707 * @id: device instance
708 * @devres_lock: Spinlock to protect the resource of the device.
709 * @devres_head: The resources list of the device.
710 * @knode_class: The node used to add the device to the class list.
711 * @class: The class of the device.
712 * @groups: Optional attribute groups.
713 * @release: Callback to free the device after all references have
714 * gone away. This should be set by the allocator of the
715 * device (i.e. the bus driver that discovered the device).
716 * @iommu_group: IOMMU group the device belongs to.
717 *
718 * @offline_disabled: If set, the device is permanently online.
719 * @offline: Set after successful invocation of bus type's .offline().
720 *
721 * At the lowest level, every device in a Linux system is represented by an
722 * instance of struct device. The device structure contains the information
723 * that the device model core needs to model the system. Most subsystems,
724 * however, track additional information about the devices they host. As a
725 * result, it is rare for devices to be represented by bare device structures;
726 * instead, that structure, like kobject structures, is usually embedded within
727 * a higher-level representation of the device.
728 */
/* Per-field documentation lives in the kernel-doc block above this struct. */
729 struct device {
730 struct device *parent;
731
732 struct device_private *p;
733
734 struct kobject kobj;
735 const char *init_name; /* initial name of the device */
736 const struct device_type *type;
737
738 struct mutex mutex; /* mutex to synchronize calls to
739 * its driver.
740 */
741
742 struct bus_type *bus; /* type of bus device is on */
743 struct device_driver *driver; /* which driver has allocated this
744 device */
745 void *platform_data; /* Platform specific data, device
746 core doesn't touch it */
747 void *driver_data; /* Driver data, set and get with
748 dev_set/get_drvdata */
749 struct dev_pm_info power;
750 struct dev_pm_domain *pm_domain;
751
752 #ifdef CONFIG_PINCTRL
753 struct dev_pin_info *pins;
754 #endif
755
756 #ifdef CONFIG_NUMA
757 int numa_node; /* NUMA node this device is close to */
758 #endif
759 u64 *dma_mask; /* dma mask (if dma'able device) */
760 u64 coherent_dma_mask;/* Like dma_mask, but for
761 alloc_coherent mappings as
762 not all hardware supports
763 64 bit addresses for consistent
764 allocations such descriptors. */
765 unsigned long dma_pfn_offset;
766
767 struct device_dma_parameters *dma_parms;
768
769 struct list_head dma_pools; /* dma pools (if dma'ble) */
770
771 struct dma_coherent_mem *dma_mem; /* internal for coherent mem
772 override */
773 #ifdef CONFIG_DMA_CMA
774 struct cma *cma_area; /* contiguous memory area for dma
775 allocations */
776 #endif
777 /* arch specific additions */
778 struct dev_archdata archdata;
779
780 struct device_node *of_node; /* associated device tree node */
781 struct acpi_dev_node acpi_node; /* associated ACPI device node */
782
783 dev_t devt; /* dev_t, creates the sysfs "dev" */
784 u32 id; /* device instance */
785
/* devres: list of managed resources, guarded by devres_lock */
786 spinlock_t devres_lock;
787 struct list_head devres_head;
788
789 struct klist_node knode_class;
790 struct class *class;
791 const struct attribute_group **groups; /* optional groups */
792
793 void (*release)(struct device *dev);
794 struct iommu_group *iommu_group;
795
/* hotplug offline state, see device_offline()/device_online() */
796 bool offline_disabled:1;
797 bool offline:1;
798 };
799
800 static inline struct device *kobj_to_dev(struct kobject *kobj)
801 {
802 return container_of(kobj, struct device, kobj);
803 }
804
805 /* Get the wakeup routines, which depend on struct device */
806 #include <linux/pm_wakeup.h>
807
808 static inline const char *dev_name(const struct device *dev)
809 {
810 /* Use the init name until the kobject becomes available */
811 if (dev->init_name)
812 return dev->init_name;
813
814 return kobject_name(&dev->kobj);
815 }
816
817 extern __printf(2, 3)
818 int dev_set_name(struct device *dev, const char *name, ...);
819
820 #ifdef CONFIG_NUMA
/* dev_to_node - NUMA node this device is close to (stored in the device) */
821 static inline int dev_to_node(struct device *dev)
822 {
823 return dev->numa_node;
824 }
/* set_dev_node - record the NUMA node the device is attached to */
825 static inline void set_dev_node(struct device *dev, int node)
826 {
827 dev->numa_node = node;
828 }
829 #else
/* !CONFIG_NUMA: no topology - report -1 and discard node updates */
830 static inline int dev_to_node(struct device *dev)
831 {
832 return -1;
833 }
834 static inline void set_dev_node(struct device *dev, int node)
835 {
836 }
837 #endif
838
839 static inline void *dev_get_drvdata(const struct device *dev)
840 {
841 return dev->driver_data;
842 }
843
/* dev_set_drvdata - store a driver-private pointer on @dev
 * (retrieved later with dev_get_drvdata()). */
844 static inline void dev_set_drvdata(struct device *dev, void *data)
845 {
846 dev->driver_data = data;
847 }
848
849 static inline struct pm_subsys_data *dev_to_psd(struct device *dev)
850 {
851 return dev ? dev->power.subsys_data : NULL;
852 }
853
854 static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
855 {
856 return dev->kobj.uevent_suppress;
857 }
858
/*
 * dev_set_uevent_suppress - enable/disable uevent emission for @dev.
 * NOTE(review): uevent_suppress is presumably a 1-bit field in the kobject
 * (confirm in linux/kobject.h); the plain assignment keeps that truncation
 * behavior, so callers should pass 0 or 1.
 */
859 static inline void dev_set_uevent_suppress(struct device *dev, int val)
860 {
861 dev->kobj.uevent_suppress = val;
862 }
863
864 static inline int device_is_registered(struct device *dev)
865 {
866 return dev->kobj.state_in_sysfs;
867 }
868
869 static inline void device_enable_async_suspend(struct device *dev)
870 {
871 if (!dev->power.is_prepared)
872 dev->power.async_suspend = true;
873 }
874
875 static inline void device_disable_async_suspend(struct device *dev)
876 {
877 if (!dev->power.is_prepared)
878 dev->power.async_suspend = false;
879 }
880
881 static inline bool device_async_suspend_enabled(struct device *dev)
882 {
883 return !!dev->power.async_suspend;
884 }
885
/* pm_suspend_ignore_children - tell the PM core whether @dev's power state
 * may be managed independently of its children's state.
 * NOTE(review): ignore_children looks like a 1-bit flag in dev_pm_info -
 * confirm in linux/pm.h. */
886 static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
887 {
888 dev->power.ignore_children = enable;
889 }
890
/*
 * dev_pm_syscore_device - flag @dev as a "syscore" device for system sleep.
 * Only recorded when CONFIG_PM_SLEEP is enabled; otherwise a no-op.
 */
891 static inline void dev_pm_syscore_device(struct device *dev, bool val)
892 {
893 #ifdef CONFIG_PM_SLEEP
894 dev->power.syscore = val;
895 #endif
896 }
897
/* device_lock - take the per-device mutex serializing driver callbacks. */
898 static inline void device_lock(struct device *dev)
899 {
900 mutex_lock(&dev->mutex);
901 }
902
/* device_trylock - try to take the device mutex; nonzero on success. */
903 static inline int device_trylock(struct device *dev)
904 {
905 return mutex_trylock(&dev->mutex);
906 }
907
/* device_unlock - release the mutex taken by device_lock(). */
908 static inline void device_unlock(struct device *dev)
909 {
910 mutex_unlock(&dev->mutex);
911 }
912
913 void driver_init(void);
914
915 /*
916 * High level routines for use by the bus drivers
917 */
918 extern int __must_check device_register(struct device *dev);
919 extern void device_unregister(struct device *dev);
920 extern void device_initialize(struct device *dev);
921 extern int __must_check device_add(struct device *dev);
922 extern void device_del(struct device *dev);
923 extern int device_for_each_child(struct device *dev, void *data,
924 int (*fn)(struct device *dev, void *data));
925 extern struct device *device_find_child(struct device *dev, void *data,
926 int (*match)(struct device *dev, void *data));
927 extern int device_rename(struct device *dev, const char *new_name);
928 extern int device_move(struct device *dev, struct device *new_parent,
929 enum dpm_order dpm_order);
930 extern const char *device_get_devnode(struct device *dev,
931 umode_t *mode, kuid_t *uid, kgid_t *gid,
932 const char **tmp);
933
934 static inline bool device_supports_offline(struct device *dev)
935 {
936 return dev->bus && dev->bus->offline && dev->bus->online;
937 }
938
939 extern void lock_device_hotplug(void);
940 extern void unlock_device_hotplug(void);
941 extern int lock_device_hotplug_sysfs(void);
942 extern int device_offline(struct device *dev);
943 extern int device_online(struct device *dev);
944 /*
945 * Root device objects for grouping under /sys/devices
946 */
947 extern struct device *__root_device_register(const char *name,
948 struct module *owner);
949
950 /* This is a macro to avoid include problems with THIS_MODULE */
951 #define root_device_register(name) \
952 __root_device_register(name, THIS_MODULE)
953
954 extern void root_device_unregister(struct device *root);
955
956 static inline void *dev_get_platdata(const struct device *dev)
957 {
958 return dev->platform_data;
959 }
960
961 /*
962 * Manual binding of a device to driver. See drivers/base/bus.c
963 * for information on use.
964 */
965 extern int __must_check device_bind_driver(struct device *dev);
966 extern void device_release_driver(struct device *dev);
967 extern int __must_check device_attach(struct device *dev);
968 extern int __must_check driver_attach(struct device_driver *drv);
969 extern int __must_check device_reprobe(struct device *dev);
970
971 /*
972 * Easy functions for dynamically creating devices on the fly
973 */
974 extern struct device *device_create_vargs(struct class *cls,
975 struct device *parent,
976 dev_t devt,
977 void *drvdata,
978 const char *fmt,
979 va_list vargs);
980 extern __printf(5, 6)
981 struct device *device_create(struct class *cls, struct device *parent,
982 dev_t devt, void *drvdata,
983 const char *fmt, ...);
984 extern __printf(6, 7)
985 struct device *device_create_with_groups(struct class *cls,
986 struct device *parent, dev_t devt, void *drvdata,
987 const struct attribute_group **groups,
988 const char *fmt, ...);
989 extern void device_destroy(struct class *cls, dev_t devt);
990
991 /*
992 * Platform "fixup" functions - allow the platform to have their say
993 * about devices and actions that the general device layer doesn't
994 * know about.
995 */
996 /* Notify platform of device discovery */
997 extern int (*platform_notify)(struct device *dev);
998
999 extern int (*platform_notify_remove)(struct device *dev);
1000
1001
1002 /*
1003 * get_device - atomically increment the reference count for the device.
1004 *
1005 */
1006 extern struct device *get_device(struct device *dev);
1007 extern void put_device(struct device *dev);
1008
1009 #ifdef CONFIG_DEVTMPFS
1010 extern int devtmpfs_create_node(struct device *dev);
1011 extern int devtmpfs_delete_node(struct device *dev);
1012 extern int devtmpfs_mount(const char *mntdir);
1013 #else
/* !CONFIG_DEVTMPFS: all devtmpfs operations succeed as no-ops */
1014 static inline int devtmpfs_create_node(struct device *dev) { return 0; }
1015 static inline int devtmpfs_delete_node(struct device *dev) { return 0; }
1016 static inline int devtmpfs_mount(const char *mountpoint) { return 0; }
1017 #endif
1018
1019 /* drivers/base/power/shutdown.c */
1020 extern void device_shutdown(void);
1021
1022 /* debugging and troubleshooting/diagnostic helpers. */
1023 extern const char *dev_driver_string(const struct device *dev);
1024
1025
1026 #ifdef CONFIG_PRINTK
1027
1028 extern __printf(3, 0)
1029 int dev_vprintk_emit(int level, const struct device *dev,
1030 const char *fmt, va_list args);
1031 extern __printf(3, 4)
1032 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
1033
1034 extern __printf(3, 4)
1035 int dev_printk(const char *level, const struct device *dev,
1036 const char *fmt, ...);
1037 extern __printf(2, 3)
1038 int dev_emerg(const struct device *dev, const char *fmt, ...);
1039 extern __printf(2, 3)
1040 int dev_alert(const struct device *dev, const char *fmt, ...);
1041 extern __printf(2, 3)
1042 int dev_crit(const struct device *dev, const char *fmt, ...);
1043 extern __printf(2, 3)
1044 int dev_err(const struct device *dev, const char *fmt, ...);
1045 extern __printf(2, 3)
1046 int dev_warn(const struct device *dev, const char *fmt, ...);
1047 extern __printf(2, 3)
1048 int dev_notice(const struct device *dev, const char *fmt, ...);
1049 extern __printf(2, 3)
1050 int _dev_info(const struct device *dev, const char *fmt, ...);
1051
1052 #else
1053
/*
 * !CONFIG_PRINTK: every dev_* logging helper compiles to a no-op returning 0.
 * The __printf() annotations are kept so format-string type checking still
 * happens even when printing is configured out.
 */
1054 static inline __printf(3, 0)
1055 int dev_vprintk_emit(int level, const struct device *dev,
1056 const char *fmt, va_list args)
1057 { return 0; }
1058 static inline __printf(3, 4)
1059 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
1060 { return 0; }
1061
1062 static inline int __dev_printk(const char *level, const struct device *dev,
1063 struct va_format *vaf)
1064 { return 0; }
1065 static inline __printf(3, 4)
1066 int dev_printk(const char *level, const struct device *dev,
1067 const char *fmt, ...)
1068 { return 0; }
1069
1070 static inline __printf(2, 3)
1071 int dev_emerg(const struct device *dev, const char *fmt, ...)
1072 { return 0; }
1073 static inline __printf(2, 3)
1074 int dev_crit(const struct device *dev, const char *fmt, ...)
1075 { return 0; }
1076 static inline __printf(2, 3)
1077 int dev_alert(const struct device *dev, const char *fmt, ...)
1078 { return 0; }
1079 static inline __printf(2, 3)
1080 int dev_err(const struct device *dev, const char *fmt, ...)
1081 { return 0; }
1082 static inline __printf(2, 3)
1083 int dev_warn(const struct device *dev, const char *fmt, ...)
1084 { return 0; }
1085 static inline __printf(2, 3)
1086 int dev_notice(const struct device *dev, const char *fmt, ...)
1087 { return 0; }
1088 static inline __printf(2, 3)
1089 int _dev_info(const struct device *dev, const char *fmt, ...)
1090 { return 0; }
1091
1092 #endif
1093
1094 /*
1095 * Stupid hackaround for existing uses of non-printk uses dev_info
1096 *
1097 * Note that the definition of dev_info below is actually _dev_info
1098 * and a macro is used to avoid redefining dev_info
1099 */
1100
1101 #define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg)
1102
1103 #if defined(CONFIG_DYNAMIC_DEBUG)
1104 #define dev_dbg(dev, format, ...) \
1105 do { \
1106 dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
1107 } while (0)
1108 #elif defined(DEBUG)
1109 #define dev_dbg(dev, format, arg...) \
1110 dev_printk(KERN_DEBUG, dev, format, ##arg)
1111 #else
1112 #define dev_dbg(dev, format, arg...) \
1113 ({ \
1114 if (0) \
1115 dev_printk(KERN_DEBUG, dev, format, ##arg); \
1116 0; \
1117 })
1118 #endif
1119
1120 #define dev_level_ratelimited(dev_level, dev, fmt, ...) \
1121 do { \
1122 static DEFINE_RATELIMIT_STATE(_rs, \
1123 DEFAULT_RATELIMIT_INTERVAL, \
1124 DEFAULT_RATELIMIT_BURST); \
1125 if (__ratelimit(&_rs)) \
1126 dev_level(dev, fmt, ##__VA_ARGS__); \
1127 } while (0)
1128
1129 #define dev_emerg_ratelimited(dev, fmt, ...) \
1130 dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__)
1131 #define dev_alert_ratelimited(dev, fmt, ...) \
1132 dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__)
1133 #define dev_crit_ratelimited(dev, fmt, ...) \
1134 dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__)
1135 #define dev_err_ratelimited(dev, fmt, ...) \
1136 dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__)
1137 #define dev_warn_ratelimited(dev, fmt, ...) \
1138 dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__)
1139 #define dev_notice_ratelimited(dev, fmt, ...) \
1140 dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
1141 #define dev_info_ratelimited(dev, fmt, ...) \
1142 dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
1143 #if defined(CONFIG_DYNAMIC_DEBUG)
1144 /* descriptor check is first to prevent flooding with "callbacks suppressed" */
1145 #define dev_dbg_ratelimited(dev, fmt, ...) \
1146 do { \
1147 static DEFINE_RATELIMIT_STATE(_rs, \
1148 DEFAULT_RATELIMIT_INTERVAL, \
1149 DEFAULT_RATELIMIT_BURST); \
1150 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
1151 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
1152 __ratelimit(&_rs)) \
1153 __dynamic_dev_dbg(&descriptor, dev, fmt, \
1154 ##__VA_ARGS__); \
1155 } while (0)
1156 #elif defined(DEBUG)
1157 #define dev_dbg_ratelimited(dev, fmt, ...) \
1158 do { \
1159 static DEFINE_RATELIMIT_STATE(_rs, \
1160 DEFAULT_RATELIMIT_INTERVAL, \
1161 DEFAULT_RATELIMIT_BURST); \
1162 if (__ratelimit(&_rs)) \
1163 dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
1164 } while (0)
1165 #else
1166 #define dev_dbg_ratelimited(dev, fmt, ...) \
1167 no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
1168 #endif
1169
1170 #ifdef VERBOSE_DEBUG
1171 #define dev_vdbg dev_dbg
1172 #else
1173 #define dev_vdbg(dev, format, arg...) \
1174 ({ \
1175 if (0) \
1176 dev_printk(KERN_DEBUG, dev, format, ##arg); \
1177 0; \
1178 })
1179 #endif
1180
1181 /*
1182 * dev_WARN*() acts like dev_printk(), but with the key difference of
1183 * using WARN/WARN_ONCE to include file/line information and a backtrace.
1184 */
/*
 * dev_WARN() acts like dev_printk(), but uses WARN() so the report also
 * includes file/line information and a backtrace.
 *
 * Fix: the expansion must NOT end in a semicolon. The trailing ';' in the
 * original made "dev_WARN(...);" expand to two statements, which breaks
 * constructs such as "if (x) dev_WARN(...); else ..." (dangling-else
 * parse error) — the same reason multi-statement macros use do/while(0).
 */
#define dev_WARN(dev, format, arg...) \
	WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg)
1187
1188 #define dev_WARN_ONCE(dev, condition, format, arg...) \
1189 WARN_ONCE(condition, "%s %s: " format, \
1190 dev_driver_string(dev), dev_name(dev), ## arg)
1191
1192 /* Create alias, so I can be autoloaded. */
1193 #define MODULE_ALIAS_CHARDEV(major,minor) \
1194 MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
1195 #define MODULE_ALIAS_CHARDEV_MAJOR(major) \
1196 MODULE_ALIAS("char-major-" __stringify(major) "-*")
1197
1198 #ifdef CONFIG_SYSFS_DEPRECATED
1199 extern long sysfs_deprecated;
1200 #else
1201 #define sysfs_deprecated 0
1202 #endif
1203
1204 /**
1205 * module_driver() - Helper macro for drivers that don't do anything
1206 * special in module init/exit. This eliminates a lot of boilerplate.
1207 * Each module may only use this macro once, and calling it replaces
1208 * module_init() and module_exit().
1209 *
1210 * @__driver: driver name
1211 * @__register: register function for this driver type
1212 * @__unregister: unregister function for this driver type
1213 * @...: Additional arguments to be passed to __register and __unregister.
1214 *
1215 * Use this macro to construct bus specific macros for registering
1216 * drivers, and do not use it on its own.
1217 */
1218 #define module_driver(__driver, __register, __unregister, ...) \
1219 static int __init __driver##_init(void) \
1220 { \
1221 return __register(&(__driver) , ##__VA_ARGS__); \
1222 } \
1223 module_init(__driver##_init); \
1224 static void __exit __driver##_exit(void) \
1225 { \
1226 __unregister(&(__driver) , ##__VA_ARGS__); \
1227 } \
1228 module_exit(__driver##_exit);
1229
1230 #endif /* _DEVICE_H_ */ 1 /* ------------------------------------------------------------------------- */
2 /* */
3 /* i2c.h - definitions for the i2c-bus interface */
4 /* */
5 /* ------------------------------------------------------------------------- */
6 /* Copyright (C) 1995-2000 Simon G. Vogl
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
21 MA 02110-1301 USA. */
22 /* ------------------------------------------------------------------------- */
23
24 /* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and
25 Frodo Looijaard <frodol@dds.nl> */
26 #ifndef _LINUX_I2C_H
27 #define _LINUX_I2C_H
28
29 #include <linux/mod_devicetable.h>
30 #include <linux/device.h> /* for struct device */
31 #include <linux/sched.h> /* for completion */
32 #include <linux/mutex.h>
33 #include <linux/of.h> /* for struct device_node */
34 #include <linux/swab.h> /* for swab16 */
35 #include <uapi/linux/i2c.h>
36
37 extern struct bus_type i2c_bus_type;
38 extern struct device_type i2c_adapter_type;
39
40 /* --- General options ------------------------------------------------ */
41
42 struct i2c_msg;
43 struct i2c_algorithm;
44 struct i2c_adapter;
45 struct i2c_client;
46 struct i2c_driver;
47 union i2c_smbus_data;
48 struct i2c_board_info;
49
50 struct module;
51
52 #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
53 /*
54 * The master routines are the ones normally used to transmit data to devices
55 * on a bus (or read from them). Apart from two basic transfer functions to
56 * transmit one message at a time, a more complex version can be used to
57 * transmit an arbitrary number of messages without interruption.
58 * @count must be less than 64k since msg.len is u16.
59 */
60 extern int i2c_master_send(const struct i2c_client *client, const char *buf,
61 int count);
62 extern int i2c_master_recv(const struct i2c_client *client, char *buf,
63 int count);
64
65 /* Transfer num messages.
66 */
67 extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
68 int num);
69 /* Unlocked flavor */
70 extern int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
71 int num);
72
73 /* This is the very generalized SMBus access routine. You probably do not
74 want to use this, though; one of the functions below may be much easier,
75 and probably just as fast.
76 Note that we use i2c_adapter here, because you do not need a specific
77 smbus adapter to call this function. */
78 extern s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
79 unsigned short flags, char read_write, u8 command,
80 int size, union i2c_smbus_data *data);
81
82 /* Now follow the 'nice' access routines. These also document the calling
83 conventions of i2c_smbus_xfer. */
84
85 extern s32 i2c_smbus_read_byte(const struct i2c_client *client);
86 extern s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value);
87 extern s32 i2c_smbus_read_byte_data(const struct i2c_client *client,
88 u8 command);
89 extern s32 i2c_smbus_write_byte_data(const struct i2c_client *client,
90 u8 command, u8 value);
91 extern s32 i2c_smbus_read_word_data(const struct i2c_client *client,
92 u8 command);
93 extern s32 i2c_smbus_write_word_data(const struct i2c_client *client,
94 u8 command, u16 value);
95
96 static inline s32
97 i2c_smbus_read_word_swapped(const struct i2c_client *client, u8 command)
98 {
99 s32 value = i2c_smbus_read_word_data(client, command);
100
101 return (value < 0) ? value : swab16(value);
102 }
103
104 static inline s32
105 i2c_smbus_write_word_swapped(const struct i2c_client *client,
106 u8 command, u16 value)
107 {
108 return i2c_smbus_write_word_data(client, command, swab16(value));
109 }
110
111 /* Returns the number of read bytes */
112 extern s32 i2c_smbus_read_block_data(const struct i2c_client *client,
113 u8 command, u8 *values);
114 extern s32 i2c_smbus_write_block_data(const struct i2c_client *client,
115 u8 command, u8 length, const u8 *values);
116 /* Returns the number of read bytes */
117 extern s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client,
118 u8 command, u8 length, u8 *values);
119 extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
120 u8 command, u8 length,
121 const u8 *values);
122 #endif /* I2C */
123
124 /**
125 * struct i2c_driver - represent an I2C device driver
126 * @class: What kind of i2c device we instantiate (for detect)
127 * @attach_adapter: Callback for bus addition (deprecated)
128 * @probe: Callback for device binding
129 * @remove: Callback for device unbinding
130 * @shutdown: Callback for device shutdown
131 * @suspend: Callback for device suspend
132 * @resume: Callback for device resume
133 * @alert: Alert callback, for example for the SMBus alert protocol
134 * @command: Callback for bus-wide signaling (optional)
135 * @driver: Device driver model driver
136 * @id_table: List of I2C devices supported by this driver
137 * @detect: Callback for device detection
138 * @address_list: The I2C addresses to probe (for detect)
139 * @clients: List of detected clients we created (for i2c-core use only)
140 *
141 * The driver.owner field should be set to the module owner of this driver.
142 * The driver.name field should be set to the name of this driver.
143 *
144 * For automatic device detection, both @detect and @address_list must
145 * be defined. @class should also be set, otherwise only devices forced
146 * with module parameters will be created. The detect function must
147 * fill at least the name field of the i2c_board_info structure it is
148 * handed upon successful detection, and possibly also the flags field.
149 *
150 * If @detect is missing, the driver will still work fine for enumerated
151 * devices. Detected devices simply won't be supported. This is expected
152 * for the many I2C/SMBus devices which can't be detected reliably, and
153 * the ones which can always be enumerated in practice.
154 *
155 * The i2c_client structure which is handed to the @detect callback is
156 * not a real i2c_client. It is initialized just enough so that you can
157 * call i2c_smbus_read_byte_data and friends on it. Don't do anything
158 * else with it. In particular, calling dev_dbg and friends on it is
159 * not allowed.
160 */
/* Per-field documentation lives in the kernel-doc block above this struct. */
161 struct i2c_driver {
162 unsigned int class;
163
164 /* Notifies the driver that a new bus has appeared. You should avoid
165 * using this, it will be removed in a near future.
166 */
167 int (*attach_adapter)(struct i2c_adapter *) __deprecated;
168
169 /* Standard driver model interfaces */
170 int (*probe)(struct i2c_client *, const struct i2c_device_id *);
171 int (*remove)(struct i2c_client *);
172
173 /* driver model interfaces that don't relate to enumeration */
174 void (*shutdown)(struct i2c_client *);
175 int (*suspend)(struct i2c_client *, pm_message_t mesg);
176 int (*resume)(struct i2c_client *);
177
178 /* Alert callback, for example for the SMBus alert protocol.
179 * The format and meaning of the data value depends on the protocol.
180 * For the SMBus alert protocol, there is a single bit of data passed
181 * as the alert response's low bit ("event flag").
182 */
183 void (*alert)(struct i2c_client *, unsigned int data);
184
185 /* a ioctl like command that can be used to perform specific functions
186 * with the device.
187 */
188 int (*command)(struct i2c_client *client, unsigned int cmd, void *arg);
189
190 struct device_driver driver;
191 const struct i2c_device_id *id_table;
192
193 /* Device detection callback for automatic device creation */
194 int (*detect)(struct i2c_client *, struct i2c_board_info *);
195 const unsigned short *address_list;
/* list of clients created via detect; managed by i2c-core only */
196 struct list_head clients;
197 };
198 #define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
199
200 /**
201 * struct i2c_client - represent an I2C slave device
202 * @flags: I2C_CLIENT_TEN indicates the device uses a ten bit chip address;
203 * I2C_CLIENT_PEC indicates it uses SMBus Packet Error Checking
204 * @addr: Address used on the I2C bus connected to the parent adapter.
205 * @name: Indicates the type of the device, usually a chip name that's
206 * generic enough to hide second-sourcing and compatible revisions.
207 * @adapter: manages the bus segment hosting this I2C device
208 * @dev: Driver model device node for the slave.
209 * @irq: indicates the IRQ generated by this device (if any)
210 * @detected: member of an i2c_driver.clients list or i2c-core's
211 * userspace_devices list
212 *
213 * An i2c_client identifies a single device (i.e. chip) connected to an
214 * i2c bus. The behaviour exposed to Linux is defined by the driver
215 * managing the device.
216 */
/* A single I2C slave device (chip); see the kernel-doc block above. */
217 struct i2c_client {
218 unsigned short flags; /* div., see below */
219 unsigned short addr; /* chip address - NOTE: 7bit */
220 /* addresses are stored in the */
221 /* _LOWER_ 7 bits */
222 char name[I2C_NAME_SIZE];
223 struct i2c_adapter *adapter; /* the adapter we sit on */
224 struct device dev; /* the device structure */
225 int irq; /* irq issued by device */
/* member of i2c_driver.clients or i2c-core's userspace_devices list */
226 struct list_head detected;
227 };
228 #define to_i2c_client(d) container_of(d, struct i2c_client, dev)
229
230 extern struct i2c_client *i2c_verify_client(struct device *dev);
231 extern struct i2c_adapter *i2c_verify_adapter(struct device *dev);
232
233 static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj)
234 {
235 struct device * const dev = container_of(kobj, struct device, kobj);
236 return to_i2c_client(dev);
237 }
238
239 static inline void *i2c_get_clientdata(const struct i2c_client *dev)
240 {
241 return dev_get_drvdata(&dev->dev);
242 }
243
244 static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
245 {
246 dev_set_drvdata(&dev->dev, data);
247 }
248
249 /**
250 * struct i2c_board_info - template for device creation
251 * @type: chip type, to initialize i2c_client.name
252 * @flags: to initialize i2c_client.flags
253 * @addr: stored in i2c_client.addr
254 * @platform_data: stored in i2c_client.dev.platform_data
255 * @archdata: copied into i2c_client.dev.archdata
256 * @of_node: pointer to OpenFirmware device node
257 * @acpi_node: ACPI device node
258 * @irq: stored in i2c_client.irq
259 *
260 * I2C doesn't actually support hardware probing, although controllers and
261 * devices may be able to use I2C_SMBUS_QUICK to tell whether or not there's
262 * a device at a given address. Drivers commonly need more information than
263 * that, such as chip type, configuration, associated IRQ, and so on.
264 *
265 * i2c_board_info is used to build tables of information listing I2C devices
266 * that are present. This information is used to grow the driver model tree.
267 * For mainboards this is done statically using i2c_register_board_info();
268 * bus numbers identify adapters that aren't yet available. For add-on boards,
269 * i2c_new_device() does this dynamically with the adapter already known.
270 */
/* Template for i2c_client creation; field-by-field semantics are in the
 * kernel-doc block above (each field seeds the matching i2c_client field). */
271 struct i2c_board_info {
272 char type[I2C_NAME_SIZE];
273 unsigned short flags;
274 unsigned short addr;
275 void *platform_data;
276 struct dev_archdata *archdata;
277 struct device_node *of_node;
278 struct acpi_dev_node acpi_node;
279 int irq;
280 };
281
282 /**
283 * I2C_BOARD_INFO - macro used to list an i2c device and its address
284 * @dev_type: identifies the device type
285 * @dev_addr: the device's address on the bus.
286 *
287 * This macro initializes essential fields of a struct i2c_board_info,
288 * declaring what has been provided on a particular board. Optional
289 * fields (such as associated irq, or device-specific platform_data)
290 * are provided using conventional syntax.
291 */
292 #define I2C_BOARD_INFO(dev_type, dev_addr) \
293 .type = dev_type, .addr = (dev_addr)
294
295
296 #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
297 /* Add-on boards should register/unregister their devices; e.g. a board
298 * with integrated I2C, a config eeprom, sensors, and a codec that's
299 * used in conjunction with the primary hardware.
300 */
301 extern struct i2c_client *
302 i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
303
304 /* If you don't know the exact address of an I2C device, use this variant
305 * instead, which can probe for device presence in a list of possible
306 * addresses. The "probe" callback function is optional. If it is provided,
307 * it must return 1 on successful probe, 0 otherwise. If it is not provided,
308 * a default probing method is used.
309 */
310 extern struct i2c_client *
311 i2c_new_probed_device(struct i2c_adapter *adap,
312 struct i2c_board_info *info,
313 unsigned short const *addr_list,
314 int (*probe)(struct i2c_adapter *, unsigned short addr));
315
316 /* Common custom probe functions */
317 extern int i2c_probe_func_quick_read(struct i2c_adapter *, unsigned short addr);
318
319 /* For devices that use several addresses, use i2c_new_dummy() to make
320 * client handles for the extra addresses.
321 */
322 extern struct i2c_client *
323 i2c_new_dummy(struct i2c_adapter *adap, u16 address);
324
325 extern void i2c_unregister_device(struct i2c_client *);
326 #endif /* I2C */
327
328 /* Mainboard arch_initcall() code should register all its I2C devices.
329 * This is done at arch_initcall time, before declaring any i2c adapters.
330 * Modules for add-on boards must use other calls.
331 */
332 #ifdef CONFIG_I2C_BOARDINFO
333 extern int
334 i2c_register_board_info(int busnum, struct i2c_board_info const *info,
335 unsigned n);
336 #else
337 static inline int
338 i2c_register_board_info(int busnum, struct i2c_board_info const *info,
339 unsigned n)
340 {
341 return 0;
342 }
343 #endif /* I2C_BOARDINFO */
344
/**
 * struct i2c_algorithm - represent I2C transfer method
 * @master_xfer: Issue a set of i2c transactions to the given I2C adapter
 *   defined by the msgs array, with num messages available to transfer via
 *   the adapter specified by adap.
 * @smbus_xfer: Issue smbus transactions to the given I2C adapter. If this
 *   is not present, then the bus layer will try to convert the SMBus calls
 *   into I2C transfers instead.
 * @functionality: Return the flags that this algorithm/adapter pair supports
 *   from the I2C_FUNC_* flags.
 *
 * The following structs are for those who like to implement new bus drivers:
 * i2c_algorithm is the interface to a class of hardware solutions which can
 * be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584
 * to name two of the most common.
 *
 * The return codes from the @master_xfer field should indicate the type of
 * error code that occurred during the transfer, as documented in the kernel
 * Documentation file Documentation/i2c/fault-codes.
 */
struct i2c_algorithm {
	/* If an adapter algorithm can't do I2C-level access, set master_xfer
	   to NULL. If an adapter algorithm can do SMBus access, set
	   smbus_xfer. If set to NULL, the SMBus protocol is simulated
	   using common I2C messages */
	/* master_xfer should return the number of messages successfully
	   processed, or a negative value on error */
	int (*master_xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs,
			   int num);
	int (*smbus_xfer) (struct i2c_adapter *adap, u16 addr,
			   unsigned short flags, char read_write,
			   u8 command, int size, union i2c_smbus_data *data);

	/* To determine what the adapter supports */
	u32 (*functionality) (struct i2c_adapter *);
};
381
/**
 * struct i2c_bus_recovery_info - I2C bus recovery information
 * @recover_bus: Recover routine. Either pass driver's recover_bus() routine, or
 *	i2c_generic_scl_recovery() or i2c_generic_gpio_recovery().
 * @get_scl: This gets current value of SCL line. Mandatory for generic SCL
 *	recovery. Used internally for generic GPIO recovery.
 * @set_scl: This sets/clears SCL line. Mandatory for generic SCL recovery. Used
 *	internally for generic GPIO recovery.
 * @get_sda: This gets current value of SDA line. Optional for generic SCL
 *	recovery. Used internally, if sda_gpio is a valid GPIO, for generic GPIO
 *	recovery.
 * @prepare_recovery: This will be called before starting recovery. Platform may
 *	configure padmux here for SDA/SCL line or something else they want.
 * @unprepare_recovery: This will be called after completing recovery. Platform
 *	may configure padmux here for SDA/SCL line or something else they want.
 * @scl_gpio: gpio number of the SCL line. Only required for GPIO recovery.
 * @sda_gpio: gpio number of the SDA line. Only required for GPIO recovery.
 */
struct i2c_bus_recovery_info {
	/* entry point; invoked via i2c_recover_bus() */
	int (*recover_bus)(struct i2c_adapter *);

	int (*get_scl)(struct i2c_adapter *);
	void (*set_scl)(struct i2c_adapter *, int val);
	int (*get_sda)(struct i2c_adapter *);

	void (*prepare_recovery)(struct i2c_bus_recovery_info *bri);
	void (*unprepare_recovery)(struct i2c_bus_recovery_info *bri);

	/* gpio recovery */
	int scl_gpio;
	int sda_gpio;
};
414
415 int i2c_recover_bus(struct i2c_adapter *adap);
416
417 /* Generic recovery routines */
418 int i2c_generic_gpio_recovery(struct i2c_adapter *adap);
419 int i2c_generic_scl_recovery(struct i2c_adapter *adap);
420
/*
 * i2c_adapter is the structure used to identify a physical i2c bus along
 * with the access algorithms necessary to access it.
 */
struct i2c_adapter {
	struct module *owner;			/* module providing this adapter */
	unsigned int class;		  /* classes to allow probing for */
	const struct i2c_algorithm *algo; /* the algorithm to access the bus */
	void *algo_data;		  /* private data for the algorithm */

	/* data fields that are valid for all devices */
	struct rt_mutex bus_lock;	/* taken via i2c_lock_adapter() */

	int timeout;			/* in jiffies */
	int retries;
	struct device dev;		/* the adapter device */

	int nr;				/* adapter number, see i2c_adapter_id() */
	char name[48];
	struct completion dev_released;	/* NOTE(review): presumably completed when
					   @dev is released — confirm in i2c-core */

	struct mutex userspace_clients_lock;	/* presumably guards the list below */
	struct list_head userspace_clients;

	struct i2c_bus_recovery_info *bus_recovery_info;
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
447 #define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
448
449 static inline void *i2c_get_adapdata(const struct i2c_adapter *dev)
450 {
451 return dev_get_drvdata(&dev->dev);
452 }
453
454 static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data)
455 {
456 dev_set_drvdata(&dev->dev, data);
457 }
458
/*
 * If this adapter's parent device is itself an i2c_adapter (i.e. the adapter
 * hangs below an I2C mux), return that parent adapter; otherwise return NULL.
 * Without CONFIG_I2C_MUX the whole check is compiled out and the function
 * always returns NULL.
 *
 * Note the unusual layout: when the #if branch is compiled in, its dangling
 * "else" binds to the final return statement placed after the #endif.
 */
static inline struct i2c_adapter *
i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter)
{
#if IS_ENABLED(CONFIG_I2C_MUX)
	struct device *parent = adapter->dev.parent;

	if (parent != NULL && parent->type == &i2c_adapter_type)
		return to_i2c_adapter(parent);
	else
#endif
	return NULL;
}
471
472 int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *));
473
474 /* Adapter locking functions, exported for shared pin cases */
475 void i2c_lock_adapter(struct i2c_adapter *);
476 void i2c_unlock_adapter(struct i2c_adapter *);
477
478 /*flags for the client struct: */
479 #define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */
480 #define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
481 /* Must equal I2C_M_TEN below */
482 #define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
483 #define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
484 /* Must match I2C_M_STOP|IGNORE_NAK */
485
486 /* i2c adapter classes (bitmask) */
487 #define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */
488 #define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
489 #define I2C_CLASS_SPD (1<<7) /* Memory modules */
490 #define I2C_CLASS_DEPRECATED (1<<8) /* Warn users that adapter will stop using classes */
491
492 /* Internal numbers to terminate lists */
493 #define I2C_CLIENT_END 0xfffeU
494
495 /* Construct an I2C_CLIENT_END-terminated array of i2c addresses */
496 #define I2C_ADDRS(addr, addrs...) \
497 ((const unsigned short []){ addr, ## addrs, I2C_CLIENT_END })
498
499
500 /* ----- functions exported by i2c.o */
501
502 /* administration...
503 */
504 #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
505 extern int i2c_add_adapter(struct i2c_adapter *);
506 extern void i2c_del_adapter(struct i2c_adapter *);
507 extern int i2c_add_numbered_adapter(struct i2c_adapter *);
508
509 extern int i2c_register_driver(struct module *, struct i2c_driver *);
510 extern void i2c_del_driver(struct i2c_driver *);
511
512 /* use a define to avoid include chaining to get THIS_MODULE */
513 #define i2c_add_driver(driver) \
514 i2c_register_driver(THIS_MODULE, driver)
515
516 extern struct i2c_client *i2c_use_client(struct i2c_client *client);
517 extern void i2c_release_client(struct i2c_client *client);
518
519 /* call the i2c_client->command() of all attached clients with
520 * the given arguments */
521 extern void i2c_clients_command(struct i2c_adapter *adap,
522 unsigned int cmd, void *arg);
523
524 extern struct i2c_adapter *i2c_get_adapter(int nr);
525 extern void i2c_put_adapter(struct i2c_adapter *adap);
526
527
528 /* Return the functionality mask */
529 static inline u32 i2c_get_functionality(struct i2c_adapter *adap)
530 {
531 return adap->algo->functionality(adap);
532 }
533
534 /* Return 1 if adapter supports everything we need, 0 if not. */
535 static inline int i2c_check_functionality(struct i2c_adapter *adap, u32 func)
536 {
537 return (func & i2c_get_functionality(adap)) == func;
538 }
539
/* Return the adapter number for a specific adapter */
static inline int i2c_adapter_id(struct i2c_adapter *adap)
{
	/* @nr is assigned at registration time - see i2c_add_numbered_adapter() */
	return adap->nr;
}
545
/**
 * module_i2c_driver() - Helper macro for registering an I2C driver
 * @__i2c_driver: i2c_driver struct
 *
 * Helper macro for I2C drivers which do not do anything special in module
 * init/exit. This eliminates a lot of boilerplate. Each module may only
 * use this macro once, and calling it replaces module_init() and module_exit()
 */
#define module_i2c_driver(__i2c_driver) \
	module_driver(__i2c_driver, i2c_add_driver, \
			i2c_del_driver)
557
558 #endif /* I2C */
559
560 #if IS_ENABLED(CONFIG_OF)
561 /* must call put_device() when done with returned i2c_client device */
562 extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
563
564 /* must call put_device() when done with returned i2c_adapter device */
565 extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node);
566
567 #else
568
/* Without CONFIG_OF there is no device tree, so node lookups always fail. */
static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
{
	return NULL;
}

static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
{
	return NULL;
}
578 #endif /* CONFIG_OF */
579
580 #ifdef CONFIG_I2C_ACPI
581 int acpi_i2c_install_space_handler(struct i2c_adapter *adapter);
582 void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter);
583 void acpi_i2c_register_devices(struct i2c_adapter *adap);
584 #else
/* Stubs for !CONFIG_I2C_ACPI: registration/removal are no-ops, install "succeeds". */
static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) { }
static inline void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
{ }
static inline int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
{ return 0; }
590 #endif
591
592 #endif /* _LINUX_I2C_H */ 1 /* interrupt.h */
2 #ifndef _LINUX_INTERRUPT_H
3 #define _LINUX_INTERRUPT_H
4
5 #include <linux/kernel.h>
6 #include <linux/linkage.h>
7 #include <linux/bitops.h>
8 #include <linux/preempt.h>
9 #include <linux/cpumask.h>
10 #include <linux/irqreturn.h>
11 #include <linux/irqnr.h>
12 #include <linux/hardirq.h>
13 #include <linux/irqflags.h>
14 #include <linux/hrtimer.h>
15 #include <linux/kref.h>
16 #include <linux/workqueue.h>
17
18 #include <linux/atomic.h>
19 #include <asm/ptrace.h>
20 #include <asm/irq.h>
21
22 /*
23 * These correspond to the IORESOURCE_IRQ_* defines in
24 * linux/ioport.h to select the interrupt line behaviour. When
25 * requesting an interrupt without specifying a IRQF_TRIGGER, the
26 * setting should be assumed to be "as already configured", which
27 * may be as per machine or firmware initialisation.
28 */
29 #define IRQF_TRIGGER_NONE 0x00000000
30 #define IRQF_TRIGGER_RISING 0x00000001
31 #define IRQF_TRIGGER_FALLING 0x00000002
32 #define IRQF_TRIGGER_HIGH 0x00000004
33 #define IRQF_TRIGGER_LOW 0x00000008
34 #define IRQF_TRIGGER_MASK (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
35 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
36 #define IRQF_TRIGGER_PROBE 0x00000010
37
38 /*
39 * These flags used only by the kernel as part of the
40 * irq handling routines.
41 *
42 * IRQF_DISABLED - keep irqs disabled when calling the action handler.
43 * DEPRECATED. This flag is a NOOP and scheduled to be removed
44 * IRQF_SHARED - allow sharing the irq among several devices
45 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
46 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
47 * IRQF_PERCPU - Interrupt is per cpu
48 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first on a shared interrupt line is considered, for
 *                performance reasons)
52 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
53 * Used by threaded interrupts which need to keep the
54 * irq line disabled until the threaded handler has been run.
55 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
56 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
57 * IRQF_NO_THREAD - Interrupt cannot be threaded
58 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
59 * resume time.
60 */
61 #define IRQF_DISABLED 0x00000020
62 #define IRQF_SHARED 0x00000080
63 #define IRQF_PROBE_SHARED 0x00000100
64 #define __IRQF_TIMER 0x00000200
65 #define IRQF_PERCPU 0x00000400
66 #define IRQF_NOBALANCING 0x00000800
67 #define IRQF_IRQPOLL 0x00001000
68 #define IRQF_ONESHOT 0x00002000
69 #define IRQF_NO_SUSPEND 0x00004000
70 #define IRQF_FORCE_RESUME 0x00008000
71 #define IRQF_NO_THREAD 0x00010000
72 #define IRQF_EARLY_RESUME 0x00020000
73
74 #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
75
/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 * (any negative value indicates an error)
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};
87
88 typedef irqreturn_t (*irq_handler_t)(int, void *);
89
/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
	/* node-aligned to avoid cross-node cache-line sharing on SMP */
} ____cacheline_internodealigned_in_smp;
119
120 extern irqreturn_t no_action(int cpl, void *dev_id);
121
122 extern int __must_check
123 request_threaded_irq(unsigned int irq, irq_handler_t handler,
124 irq_handler_t thread_fn,
125 unsigned long flags, const char *name, void *dev);
126
127 static inline int __must_check
128 request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
129 const char *name, void *dev)
130 {
131 return request_threaded_irq(irq, handler, NULL, flags, name, dev);
132 }
133
134 extern int __must_check
135 request_any_context_irq(unsigned int irq, irq_handler_t handler,
136 unsigned long flags, const char *name, void *dev_id);
137
138 extern int __must_check
139 request_percpu_irq(unsigned int irq, irq_handler_t handler,
140 const char *devname, void __percpu *percpu_dev_id);
141
142 extern void free_irq(unsigned int, void *);
143 extern void free_percpu_irq(unsigned int, void __percpu *);
144
145 struct device;
146
147 extern int __must_check
148 devm_request_threaded_irq(struct device *dev, unsigned int irq,
149 irq_handler_t handler, irq_handler_t thread_fn,
150 unsigned long irqflags, const char *devname,
151 void *dev_id);
152
153 static inline int __must_check
154 devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
155 unsigned long irqflags, const char *devname, void *dev_id)
156 {
157 return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
158 devname, dev_id);
159 }
160
161 extern int __must_check
162 devm_request_any_context_irq(struct device *dev, unsigned int irq,
163 irq_handler_t handler, unsigned long irqflags,
164 const char *devname, void *dev_id);
165
166 extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
167
168 /*
169 * On lockdep we dont want to enable hardirqs in hardirq
170 * context. Use local_irq_enable_in_hardirq() to annotate
171 * kernel code that has to do this nevertheless (pretty much
172 * the only valid case is for old/broken hardware that is
173 * insanely slow).
174 *
175 * NOTE: in theory this might break fragile code that relies
176 * on hardirq delivery - in practice we dont seem to have such
177 * places left. So the only effect should be slightly increased
178 * irqs-off latencies.
179 */
180 #ifdef CONFIG_LOCKDEP
181 # define local_irq_enable_in_hardirq() do { } while (0)
182 #else
183 # define local_irq_enable_in_hardirq() local_irq_enable()
184 #endif
185
186 extern void disable_irq_nosync(unsigned int irq);
187 extern void disable_irq(unsigned int irq);
188 extern void disable_percpu_irq(unsigned int irq);
189 extern void enable_irq(unsigned int irq);
190 extern void enable_percpu_irq(unsigned int irq, unsigned int type);
191 extern void irq_wake_thread(unsigned int irq, void *dev_id);
192
193 /* The following three functions are for the core kernel use only. */
194 extern void suspend_device_irqs(void);
195 extern void resume_device_irqs(void);
196 #ifdef CONFIG_PM_SLEEP
197 extern int check_wakeup_irqs(void);
198 #else
199 static inline int check_wakeup_irqs(void) { return 0; }
200 #endif
201
/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:	Interrupt to which notification applies
 * @kref:	Reference count, for internal use
 * @work:	Work item, for internal use
 * @notify:	Function to be called on change.  This will be
 *		called in process context.
 * @release:	Function to be called on release.  This will be
 *		called in process context.  Once registered, the
 *		structure must only be freed when this function is
 *		called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);	/* owns final cleanup of this struct */
};
221
222 #if defined(CONFIG_SMP)
223
224 extern cpumask_var_t irq_default_affinity;
225
226 /* Internal implementation. Use the helpers below */
227 extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
228 bool force);
229
230 /**
231 * irq_set_affinity - Set the irq affinity of a given irq
232 * @irq: Interrupt to set affinity
233 * @cpumask: cpumask
234 *
235 * Fails if cpumask does not contain an online CPU
236 */
237 static inline int
238 irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
239 {
240 return __irq_set_affinity(irq, cpumask, false);
241 }
242
243 /**
244 * irq_force_affinity - Force the irq affinity of a given irq
245 * @irq: Interrupt to set affinity
246 * @cpumask: cpumask
247 *
248 * Same as irq_set_affinity, but without checking the mask against
249 * online cpus.
250 *
251 * Solely for low level cpu hotplug code, where we need to make per
252 * cpu interrupts affine before the cpu becomes online.
253 */
254 static inline int
255 irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
256 {
257 return __irq_set_affinity(irq, cpumask, true);
258 }
259
260 extern int irq_can_set_affinity(unsigned int irq);
261 extern int irq_select_affinity(unsigned int irq);
262
263 extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
264
265 extern int
266 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
267
268 #else /* CONFIG_SMP */
269
/*
 * Uniprocessor stubs: with a single CPU there is no affinity to manage.
 * Explicit affinity requests fail with -EINVAL; the remaining operations
 * succeed as no-ops.
 */
static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

/* No alternative CPUs to choose from on UP. */
static inline int irq_select_affinity(unsigned int irq) { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}

static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	return 0;
}
298 #endif /* CONFIG_SMP */
299
300 /*
301 * Special lockdep variants of irq disabling/enabling.
302 * These should be used for locking constructs that
303 * know that a particular irq context which is disabled,
304 * and which is the only irq-context user of a lock,
305 * that it's safe to take the lock in the irq-disabled
306 * section without disabling hardirqs.
307 *
308 * On !CONFIG_LOCKDEP they are equivalent to the normal
309 * irq disable/enable methods.
310 */
/* Disable the irq line without waiting; under lockdep also disable hardirqs. */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

/* As above, but under lockdep save the hardirq state into *flags. */
static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

/* Synchronous variant (uses disable_irq()); under lockdep also disable hardirqs. */
static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

/* Under lockdep re-enable hardirqs first, then the irq line. */
static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

/* Restore the state saved by the _irqsave variant, then enable the irq line. */
static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}
350
351 /* IRQ wakeup (PM) control: */
352 extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
353
/* Allow @irq to wake the system from suspend. */
static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

/* Revoke the wakeup capability granted by enable_irq_wake(). */
static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}
363
364
365 #ifdef CONFIG_IRQ_FORCED_THREADING
366 extern bool force_irqthreads;
367 #else
368 #define force_irqthreads (0)
369 #endif
370
371 #ifndef __ARCH_SET_SOFTIRQ_PENDING
372 #define set_softirq_pending(x) (local_softirq_pending() = (x))
373 #define or_softirq_pending(x) (local_softirq_pending() |= (x))
374 #endif
375
376 /* Some architectures might implement lazy enabling/disabling of
377 * interrupts. In some cases, such as stop_machine, we might want
378 * to ensure that after a local_irq_disable(), interrupts have
379 * really been disabled in hardware. Such architectures need to
380 * implement the following hook.
381 */
382 #ifndef hard_irq_disable
383 #define hard_irq_disable() do { } while(0)
384 #endif
385
386 /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
387 frequency threaded job scheduling. For almost all the purposes
388 tasklets are more than enough. F.e. all serial device BHs et
389 al. should be converted to tasklets, not to softirqs.
390 */
391
/*
 * Softirq indexes.  NOTE(review): lower index presumably means earlier
 * servicing (HI_SOFTIRQ is 0) — confirm against kernel/softirq.c.
 */
enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,    /* Preferable RCU should always be the last softirq */

	NR_SOFTIRQS
};
407
408 #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
409
410 /* map softirq index to softirq name. update 'softirq_to_name' in
411 * kernel/softirq.c when adding a new softirq.
412 */
413 extern const char * const softirq_to_name[NR_SOFTIRQS];
414
415 /* softirq mask and active fields moved to irq_cpustat_t in
416 * asm/hardirq.h to get better cache usage. KAO
417 */
418
/* A softirq handler; registered for a given index via open_softirq(). */
struct softirq_action
{
	void	(*action)(struct softirq_action *);
};
423
424 asmlinkage void do_softirq(void);
425 asmlinkage void __do_softirq(void);
426
427 #ifdef __ARCH_HAS_DO_SOFTIRQ
428 void do_softirq_own_stack(void);
429 #else
/* Fallback when the arch provides no dedicated softirq stack: run inline. */
static inline void do_softirq_own_stack(void)
{
	__do_softirq();
}
434 #endif
435
436 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
437 extern void softirq_init(void);
438 extern void __raise_softirq_irqoff(unsigned int nr);
439
440 extern void raise_softirq_irqoff(unsigned int nr);
441 extern void raise_softirq(unsigned int nr);
442
443 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
444
/* Return the ksoftirqd thread of the CPU we are currently running on. */
static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}
449
450 /* Tasklets --- multithreaded analogue of BHs.
451
   Main feature differentiating them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   Main feature differentiating them from BHs: different tasklets
   may be run simultaneously on different CPUs.
457
458 Properties:
459 * If tasklet_schedule() is called, then tasklet is guaranteed
460 to be executed on some cpu at least once after this.
461 * If the tasklet is already scheduled, but its execution is still not
462 started, it will be executed only once.
463 * If this tasklet is already running on another CPU (or schedule is called
464 from tasklet itself), it is rescheduled for later.
465 * Tasklet is strictly serialized wrt itself, but not
466 wrt another tasklets. If client needs some intertask synchronization,
467 he makes it with spinlocks.
468 */
469
struct tasklet_struct
{
	struct tasklet_struct *next;	/* next tasklet in the pending list */
	unsigned long state;		/* TASKLET_STATE_* bits */
	atomic_t count;			/* 0 = enabled, >0 = disabled (nests) */
	void (*func)(unsigned long);	/* callback to run */
	unsigned long data;		/* argument passed to @func */
};
478
/*
 * Static tasklet definition helpers.  DECLARE_TASKLET creates an enabled
 * tasklet (count == 0); DECLARE_TASKLET_DISABLED creates one that must
 * first be enabled with tasklet_enable() (count == 1).
 *
 * Designated initializers keep these definitions correct even if the
 * field order of struct tasklet_struct changes.  The macro parameters are
 * named _func/_data so they cannot collide with the .func/.data designators
 * during macro expansion.
 */
#define DECLARE_TASKLET(name, _func, _data)				\
struct tasklet_struct name = { .next = NULL, .state = 0,		\
			       .count = ATOMIC_INIT(0),			\
			       .func = (_func), .data = (_data) }

#define DECLARE_TASKLET_DISABLED(name, _func, _data)			\
struct tasklet_struct name = { .next = NULL, .state = 0,		\
			       .count = ATOMIC_INIT(1),			\
			       .func = (_func), .data = (_data) }
484
485
486 enum
487 {
488 TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
489 TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
490 };
491
492 #ifdef CONFIG_SMP
/* Claim the RUN bit; fails (returns 0) if the tasklet runs on another CPU. */
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

/* Release the RUN bit; the barrier orders prior stores before the release. */
static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

/* Spin until whichever CPU is running this tasklet clears the RUN bit. */
static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
508 #else
509 #define tasklet_trylock(t) 1
510 #define tasklet_unlock_wait(t) do { } while (0)
511 #define tasklet_unlock(t) do { } while (0)
512 #endif
513
514 extern void __tasklet_schedule(struct tasklet_struct *t);
515
516 static inline void tasklet_schedule(struct tasklet_struct *t)
517 {
518 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
519 __tasklet_schedule(t);
520 }
521
522 extern void __tasklet_hi_schedule(struct tasklet_struct *t);
523
524 static inline void tasklet_hi_schedule(struct tasklet_struct *t)
525 {
526 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
527 __tasklet_hi_schedule(t);
528 }
529
530 extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
531
532 /*
533 * This version avoids touching any other tasklets. Needed for kmemcheck
534 * in order not to take any page faults while enqueueing this tasklet;
535 * consider VERY carefully whether you really need this or
536 * tasklet_hi_schedule()...
537 */
538 static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
539 {
540 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
541 __tasklet_hi_schedule_first(t);
542 }
543
544
/*
 * Disable the tasklet without waiting for a running instance to finish.
 * Disables nest: each call must be balanced by one tasklet_enable().
 */
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic();	/* order the increment before later accesses */
}
550
/*
 * Disable the tasklet and wait for any currently-running instance to
 * finish (the wait spins on the RUN bit; a no-op on UP builds).
 */
static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);		/* wait until RUN is clear */
	smp_mb();
}
557
/* Re-enable a tasklet: undo one tasklet_disable*() by decrementing count. */
static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	atomic_dec(&t->count);
}

/*
 * NOTE(review): identical body to tasklet_enable(); presumably kept as a
 * separate symbol for API symmetry with tasklet_hi_schedule() — confirm.
 */
static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	atomic_dec(&t->count);
}
569
570 extern void tasklet_kill(struct tasklet_struct *t);
571 extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
572 extern void tasklet_init(struct tasklet_struct *t,
573 void (*func)(unsigned long), unsigned long data);
574
struct tasklet_hrtimer {
	struct hrtimer timer;		/* the underlying high-resolution timer */
	struct tasklet_struct tasklet;	/* NOTE(review): presumably defers the
					   handler to tasklet context — confirm */
	enum hrtimer_restart (*function)(struct hrtimer *);	/* user callback */
};
580
581 extern void
582 tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
583 enum hrtimer_restart (*function)(struct hrtimer *),
584 clockid_t which_clock, enum hrtimer_mode mode);
585
/* Arm the underlying hrtimer; same arguments/semantics as hrtimer_start(). */
static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			  const enum hrtimer_mode mode)
{
	return hrtimer_start(&ttimer->timer, time, mode);
}
592
/*
 * Tear down a tasklet_hrtimer: cancel the timer first so it can no longer
 * fire, then kill any tasklet instance that was already scheduled.
 */
static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}
599
600 /*
601 * Autoprobing for irqs:
602 *
603 * probe_irq_on() and probe_irq_off() provide robust primitives
604 * for accurate IRQ probing during kernel initialization. They are
605 * reasonably simple to use, are not "fooled" by spurious interrupts,
606 * and, unlike other attempts at IRQ probing, they do not get hung on
607 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
608 *
609 * For reasonably foolproof probing, use them as follows:
610 *
611 * 1. clear and/or mask the device's internal interrupt.
612 * 2. sti();
613 * 3. irqs = probe_irq_on(); // "take over" all unassigned idle IRQs
614 * 4. enable the device and cause it to trigger an interrupt.
615 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
616 * 6. irq = probe_irq_off(irqs); // get IRQ number, 0=none, negative=multiple
617 * 7. service the device to clear its pending interrupt.
618 * 8. loop again if paranoia is required.
619 *
620 * probe_irq_on() returns a mask of allocated irq's.
621 *
622 * probe_irq_off() takes the mask as a parameter,
623 * and returns the irq number which occurred,
624 * or zero if none occurred, or a negative irq number
625 * if more than one irq occurred.
626 */
627
628 #if !defined(CONFIG_GENERIC_IRQ_PROBE)
629 static inline unsigned long probe_irq_on(void)
630 {
631 return 0;
632 }
/* Stub for !CONFIG_GENERIC_IRQ_PROBE: always reports that no IRQ
 * occurred (0), regardless of the mask passed in @val. */
633 static inline int probe_irq_off(unsigned long val)
634 {
635 return 0;
636 }
/* Stub for !CONFIG_GENERIC_IRQ_PROBE: the mask of triggered interrupts
 * is always empty. */
637 static inline unsigned int probe_irq_mask(unsigned long val)
638 {
639 return 0;
640 }
641 #else
642 extern unsigned long probe_irq_on(void); /* returns 0 on failure */
643 extern int probe_irq_off(unsigned long); /* returns 0 or negative on failure */
644 extern unsigned int probe_irq_mask(unsigned long); /* returns mask of ISA interrupts */
645 #endif
646
647 #ifdef CONFIG_PROC_FS
648 /* Initialize /proc/irq/ */
649 extern void init_irq_proc(void);
650 #else
/* No-op when CONFIG_PROC_FS is disabled: there is no /proc/irq/ to set up. */
651 static inline void init_irq_proc(void)
652 {
653 }
654 #endif
655
656 struct seq_file;
657 int show_interrupts(struct seq_file *p, void *v);
658 int arch_show_interrupts(struct seq_file *p, int prec);
659
660 extern int early_irq_init(void);
661 extern int arch_probe_nr_irqs(void);
662 extern int arch_early_irq_init(void);
663
664 #endif 1 #ifndef __LINUX_SPINLOCK_H
2 #define __LINUX_SPINLOCK_H
3
4 /*
5 * include/linux/spinlock.h - generic spinlock/rwlock declarations
6 *
7 * here's the role of the various spinlock/rwlock related include files:
8 *
9 * on SMP builds:
10 *
11 * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
12 * initializers
13 *
14 * linux/spinlock_types.h:
15 * defines the generic type and initializers
16 *
17 * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel
18 * implementations, mostly inline assembly code
19 *
20 * (also included on UP-debug builds:)
21 *
22 * linux/spinlock_api_smp.h:
23 * contains the prototypes for the _spin_*() APIs.
24 *
25 * linux/spinlock.h: builds the final spin_*() APIs.
26 *
27 * on UP builds:
28 *
29 * linux/spinlock_type_up.h:
30 * contains the generic, simplified UP spinlock type.
31 * (which is an empty structure on non-debug builds)
32 *
33 * linux/spinlock_types.h:
34 * defines the generic type and initializers
35 *
36 * linux/spinlock_up.h:
37 * contains the arch_spin_*()/etc. version of UP
38 * builds. (which are NOPs on non-debug, non-preempt
39 * builds)
40 *
41 * (included on UP-non-debug builds:)
42 *
43 * linux/spinlock_api_up.h:
44 * builds the _spin_*() APIs.
45 *
46 * linux/spinlock.h: builds the final spin_*() APIs.
47 */
48
49 #include <linux/typecheck.h>
50 #include <linux/preempt.h>
51 #include <linux/linkage.h>
52 #include <linux/compiler.h>
53 #include <linux/irqflags.h>
54 #include <linux/thread_info.h>
55 #include <linux/kernel.h>
56 #include <linux/stringify.h>
57 #include <linux/bottom_half.h>
58 #include <asm/barrier.h>
59
60
61 /*
62 * Must define these before including other files, inline functions need them
63 */
64 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
65
66 #define LOCK_SECTION_START(extra) \
67 ".subsection 1\n\t" \
68 extra \
69 ".ifndef " LOCK_SECTION_NAME "\n\t" \
70 LOCK_SECTION_NAME ":\n\t" \
71 ".endif\n"
72
73 #define LOCK_SECTION_END \
74 ".previous\n\t"
75
76 #define __lockfunc __attribute__((section(".spinlock.text")))
77
78 /*
79 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
80 */
81 #include <linux/spinlock_types.h>
82
83 /*
84 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
85 */
86 #ifdef CONFIG_SMP
87 # include <asm/spinlock.h>
88 #else
89 # include <linux/spinlock_up.h>
90 #endif
91
92 #ifdef CONFIG_DEBUG_SPINLOCK
93 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
94 struct lock_class_key *key);
95 # define raw_spin_lock_init(lock) \
96 do { \
97 static struct lock_class_key __key; \
98 \
99 __raw_spin_lock_init((lock), #lock, &__key); \
100 } while (0)
101
102 #else
103 # define raw_spin_lock_init(lock) \
104 do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
105 #endif
106
107 #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
108
109 #ifdef CONFIG_GENERIC_LOCKBREAK
110 #define raw_spin_is_contended(lock) ((lock)->break_lock)
111 #else
112
113 #ifdef arch_spin_is_contended
114 #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
115 #else
116 #define raw_spin_is_contended(lock) (((void)(lock), 0))
117 #endif /*arch_spin_is_contended*/
118 #endif
119
120 /*
121 * Despite its name it doesn't necessarily has to be a full barrier.
122 * It should only guarantee that a STORE before the critical section
123 * can not be reordered with a LOAD inside this section.
124 * spin_lock() is the one-way barrier, this LOAD can not escape out
125 * of the region. So the default implementation simply ensures that
126 * a STORE can not move into the critical section, smp_wmb() should
127 * serialize it with another STORE done by spin_lock().
128 */
129 #ifndef smp_mb__before_spinlock
130 #define smp_mb__before_spinlock() smp_wmb()
131 #endif
132
133 /*
134 * Place this after a lock-acquisition primitive to guarantee that
135 * an UNLOCK+LOCK pair act as a full barrier. This guarantee applies
136 * if the UNLOCK and LOCK are executed by the same CPU or if the
137 * UNLOCK and LOCK operate on the same lock variable.
138 */
139 #ifndef smp_mb__after_unlock_lock
140 #define smp_mb__after_unlock_lock() do { } while (0)
141 #endif
142
143 /**
144 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
145 * @lock: the spinlock in question.
146 */
147 #define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
148
149 #ifdef CONFIG_DEBUG_SPINLOCK
150 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
151 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
152 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
153 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
154 #else
/*
 * Non-debug raw lock acquire: __acquire() satisfies sparse's context
 * tracking (no runtime effect), then the arch-level spinlock is taken.
 */
155 static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
156 {
157 __acquire(lock);
158 arch_spin_lock(&lock->raw_lock);
159 }
160
/*
 * Variant of do_raw_spin_lock() that hands the saved interrupt flags
 * (*@flags) to the architecture, allowing arch code to briefly re-enable
 * interrupts while spinning if it supports that.
 */
161 static inline void
162 do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
163 {
164 __acquire(lock);
165 arch_spin_lock_flags(&lock->raw_lock, *flags);
166 }
167
/* Attempt to take the raw lock without spinning; returns nonzero on
 * success, 0 if the lock is already held. */
168 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
169 {
170 return arch_spin_trylock(&(lock)->raw_lock);
171 }
172
/*
 * Non-debug raw lock release: drop the arch-level lock, then tell
 * sparse the lock context is released (__release() has no runtime
 * effect).
 */
173 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
174 {
175 arch_spin_unlock(&lock->raw_lock);
176 __release(lock);
177 }
178 #endif
179
180 /*
181 * Define the various spin_lock methods. Note we define these
182 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
183 * various methods are defined as nops in the case they are not
184 * required.
185 */
186 #define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
187
188 #define raw_spin_lock(lock) _raw_spin_lock(lock)
189
190 #ifdef CONFIG_DEBUG_LOCK_ALLOC
191 # define raw_spin_lock_nested(lock, subclass) \
192 _raw_spin_lock_nested(lock, subclass)
193
194 # define raw_spin_lock_nest_lock(lock, nest_lock) \
195 do { \
196 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
197 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
198 } while (0)
199 #else
200 # define raw_spin_lock_nested(lock, subclass) _raw_spin_lock(lock)
201 # define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
202 #endif
203
204 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
205
206 #define raw_spin_lock_irqsave(lock, flags) \
207 do { \
208 typecheck(unsigned long, flags); \
209 flags = _raw_spin_lock_irqsave(lock); \
210 } while (0)
211
212 #ifdef CONFIG_DEBUG_LOCK_ALLOC
213 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
214 do { \
215 typecheck(unsigned long, flags); \
216 flags = _raw_spin_lock_irqsave_nested(lock, subclass); \
217 } while (0)
218 #else
219 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
220 do { \
221 typecheck(unsigned long, flags); \
222 flags = _raw_spin_lock_irqsave(lock); \
223 } while (0)
224 #endif
225
226 #else
227
228 #define raw_spin_lock_irqsave(lock, flags) \
229 do { \
230 typecheck(unsigned long, flags); \
231 _raw_spin_lock_irqsave(lock, flags); \
232 } while (0)
233
234 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
235 raw_spin_lock_irqsave(lock, flags)
236
237 #endif
238
239 #define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
240 #define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)
241 #define raw_spin_unlock(lock) _raw_spin_unlock(lock)
242 #define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
243
244 #define raw_spin_unlock_irqrestore(lock, flags) \
245 do { \
246 typecheck(unsigned long, flags); \
247 _raw_spin_unlock_irqrestore(lock, flags); \
248 } while (0)
249 #define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
250
251 #define raw_spin_trylock_bh(lock) \
252 __cond_lock(lock, _raw_spin_trylock_bh(lock))
253
254 #define raw_spin_trylock_irq(lock) \
255 ({ \
256 local_irq_disable(); \
257 raw_spin_trylock(lock) ? \
258 1 : ({ local_irq_enable(); 0; }); \
259 })
260
261 #define raw_spin_trylock_irqsave(lock, flags) \
262 ({ \
263 local_irq_save(flags); \
264 raw_spin_trylock(lock) ? \
265 1 : ({ local_irq_restore(flags); 0; }); \
266 })
267
268 /**
269 * raw_spin_can_lock - would raw_spin_trylock() succeed?
270 * @lock: the spinlock in question.
271 */
272 #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
273
274 /* Include rwlock functions */
275 #include <linux/rwlock.h>
276
277 /*
278 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
279 */
280 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
281 # include <linux/spinlock_api_smp.h>
282 #else
283 # include <linux/spinlock_api_up.h>
284 #endif
285
286 /*
287 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
288 */
289
/*
 * Type-check helper: accepts only a spinlock_t and yields its embedded
 * raw_spinlock_t, so the spin_* macros below reject raw locks at
 * compile time.
 */
290 static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
291 {
292 return &lock->rlock;
293 }
294
295 #define spin_lock_init(_lock) \
296 do { \
297 spinlock_check(_lock); \
298 raw_spin_lock_init(&(_lock)->rlock); \
299 } while (0)
300
/* Acquire @lock; thin wrapper mapping spinlock_t onto the raw API. */
301 static inline void spin_lock(spinlock_t *lock)
302 {
303 raw_spin_lock(&lock->rlock);
304 }
305
/* Acquire @lock and disable bottom halves (softirqs) on this CPU. */
306 static inline void spin_lock_bh(spinlock_t *lock)
307 {
308 raw_spin_lock_bh(&lock->rlock);
309 }
310
/* Try to acquire @lock without blocking; nonzero on success, 0 if busy. */
311 static inline int spin_trylock(spinlock_t *lock)
312 {
313 return raw_spin_trylock(&lock->rlock);
314 }
315
316 #define spin_lock_nested(lock, subclass) \
317 do { \
318 raw_spin_lock_nested(spinlock_check(lock), subclass); \
319 } while (0)
320
321 #define spin_lock_nest_lock(lock, nest_lock) \
322 do { \
323 raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
324 } while (0)
325
/* Disable local interrupts and acquire @lock (no flags are saved). */
326 static inline void spin_lock_irq(spinlock_t *lock)
327 {
328 raw_spin_lock_irq(&lock->rlock);
329 }
330
331 #define spin_lock_irqsave(lock, flags) \
332 do { \
333 raw_spin_lock_irqsave(spinlock_check(lock), flags); \
334 } while (0)
335
336 #define spin_lock_irqsave_nested(lock, flags, subclass) \
337 do { \
338 raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
339 } while (0)
340
/* Release @lock; counterpart of spin_lock(). */
341 static inline void spin_unlock(spinlock_t *lock)
342 {
343 raw_spin_unlock(&lock->rlock);
344 }
345
/* Release @lock and re-enable bottom halves; pairs with spin_lock_bh(). */
346 static inline void spin_unlock_bh(spinlock_t *lock)
347 {
348 raw_spin_unlock_bh(&lock->rlock);
349 }
350
/* Release @lock and unconditionally re-enable local interrupts;
 * pairs with spin_lock_irq(). */
351 static inline void spin_unlock_irq(spinlock_t *lock)
352 {
353 raw_spin_unlock_irq(&lock->rlock);
354 }
355
/* Release @lock and restore the interrupt state previously captured
 * in @flags by spin_lock_irqsave(). */
356 static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
357 {
358 raw_spin_unlock_irqrestore(&lock->rlock, flags);
359 }
360
/* Try to acquire @lock with bottom halves disabled; nonzero on success. */
361 static inline int spin_trylock_bh(spinlock_t *lock)
362 {
363 return raw_spin_trylock_bh(&lock->rlock);
364 }
365
/* Try to acquire @lock with local interrupts disabled; on failure the
 * raw helper re-enables interrupts and this returns 0. */
366 static inline int spin_trylock_irq(spinlock_t *lock)
367 {
368 return raw_spin_trylock_irq(&lock->rlock);
369 }
370
371 #define spin_trylock_irqsave(lock, flags) \
372 ({ \
373 raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
374 })
375
/* Spin until @lock is observed unlocked, without ever acquiring it. */
376 static inline void spin_unlock_wait(spinlock_t *lock)
377 {
378 raw_spin_unlock_wait(&lock->rlock);
379 }
380
/* Nonzero if @lock is currently held (by any CPU); a snapshot only,
 * the state may change immediately after the check. */
381 static inline int spin_is_locked(spinlock_t *lock)
382 {
383 return raw_spin_is_locked(&lock->rlock);
384 }
385
/* Nonzero if another CPU is waiting for @lock; resolves to 0 on
 * configurations without contention tracking. */
386 static inline int spin_is_contended(spinlock_t *lock)
387 {
388 return raw_spin_is_contended(&lock->rlock);
389 }
390
/* Nonzero if a spin_trylock() would presently succeed, i.e. the lock
 * is not held; purely advisory, subject to races. */
391 static inline int spin_can_lock(spinlock_t *lock)
392 {
393 return raw_spin_can_lock(&lock->rlock);
394 }
395
396 #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
397
398 /*
399 * Pull the atomic_t declaration:
400 * (asm-mips/atomic.h needs above definitions)
401 */
402 #include <linux/atomic.h>
403 /**
404 * atomic_dec_and_lock - lock on reaching reference count zero
405 * @atomic: the atomic counter
406 * @lock: the spinlock in question
407 *
408 * Decrements @atomic by 1. If the result is 0, returns true and locks
409 * @lock. Returns false for all other cases.
410 */
411 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
412 #define atomic_dec_and_lock(atomic, lock) \
413 __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
414
415 #endif /* __LINUX_SPINLOCK_H */ |
Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e. there may be no real error. Please analyze the given error trace and the related source code to determine whether there is an error in your driver.
The error trace column contains the path on which the given rule is violated. You can expand or collapse certain entity classes by clicking the corresponding checkboxes in the main menu or in the advanced Others menu. You can also expand or collapse each particular entity by clicking +/-. Hovering over some entities shows tips. The error trace is also linked to the related source code: line numbers may be shown as links on the left, and clicking them opens the corresponding lines in the source code.
The source code column contains the content of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers, and the rule model. Tabs show the currently opened file and the other available files; hovering over a tab shows the full file name, and clicking it displays that file's content.
Kernel | Module | Rule | Verifier | Verdict | Status | Timestamp | Bug report |
linux-3.17-rc1.tar.xz | drivers/media/platform/marvell-ccic/cafe_ccic.ko | 104_1a | CPAchecker | Bug | Fixed | 2015-04-04 03:17:33 | L0199 |
Comment
reported: 4 Apr 2015
[Home]