Error Trace

[Home]

Bug # 8

Show/hide error trace
Error trace
Function bodies
Blocks
  • Others...
    Function bodies without model function calls
    Initialization function calls
    Initialization function bodies
    Entry point
    Entry point body
    Function calls
    Skipped function calls
    Formal parameter names
    Declarations
    Assumes
    Assume conditions
    Returns
    Return values
    DEG initialization
    DEG function calls
    Model function calls
    Model function bodies
    Model asserts
    Model state changes
    Model function function calls
    Model function function bodies
    Model returns
    Model others
    Indentation
    Line numbers
    Expand signs
-__CPAchecker_initialize()
{
95 struct kernel_symbol { unsigned long value; const char *name; } ;
33 struct module ;
19 typedef signed char __s8;
20 typedef unsigned char __u8;
22 typedef short __s16;
23 typedef unsigned short __u16;
25 typedef int __s32;
26 typedef unsigned int __u32;
30 typedef unsigned long long __u64;
15 typedef signed char s8;
16 typedef unsigned char u8;
18 typedef short s16;
19 typedef unsigned short u16;
21 typedef int s32;
22 typedef unsigned int u32;
24 typedef long long s64;
25 typedef unsigned long long u64;
14 typedef long __kernel_long_t;
15 typedef unsigned long __kernel_ulong_t;
27 typedef int __kernel_pid_t;
40 typedef __kernel_long_t __kernel_suseconds_t;
48 typedef unsigned int __kernel_uid32_t;
49 typedef unsigned int __kernel_gid32_t;
71 typedef __kernel_ulong_t __kernel_size_t;
72 typedef __kernel_long_t __kernel_ssize_t;
87 typedef long long __kernel_loff_t;
88 typedef __kernel_long_t __kernel_time_t;
89 typedef __kernel_long_t __kernel_clock_t;
90 typedef int __kernel_timer_t;
91 typedef int __kernel_clockid_t;
33 typedef __u16 __be16;
35 typedef __u32 __be32;
40 typedef __u32 __wsum;
12 typedef __u32 __kernel_dev_t;
15 typedef __kernel_dev_t dev_t;
18 typedef unsigned short umode_t;
21 typedef __kernel_pid_t pid_t;
26 typedef __kernel_clockid_t clockid_t;
29 typedef _Bool bool;
31 typedef __kernel_uid32_t uid_t;
32 typedef __kernel_gid32_t gid_t;
45 typedef __kernel_loff_t loff_t;
54 typedef __kernel_size_t size_t;
59 typedef __kernel_ssize_t ssize_t;
69 typedef __kernel_time_t time_t;
102 typedef __s32 int32_t;
106 typedef __u8 uint8_t;
108 typedef __u32 uint32_t;
111 typedef __u64 uint64_t;
133 typedef unsigned long sector_t;
134 typedef unsigned long blkcnt_t;
147 typedef u64 dma_addr_t;
158 typedef unsigned int gfp_t;
159 typedef unsigned int fmode_t;
160 typedef unsigned int oom_flags_t;
163 typedef u64 phys_addr_t;
168 typedef phys_addr_t resource_size_t;
178 struct __anonstruct_atomic_t_6 { int counter; } ;
178 typedef struct __anonstruct_atomic_t_6 atomic_t;
183 struct __anonstruct_atomic64_t_7 { long counter; } ;
183 typedef struct __anonstruct_atomic64_t_7 atomic64_t;
184 struct list_head { struct list_head *next; struct list_head *prev; } ;
189 struct hlist_node ;
189 struct hlist_head { struct hlist_node *first; } ;
193 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ;
204 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ;
65 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ;
59 struct __anonstruct_ldv_1016_9 { unsigned int a; unsigned int b; } ;
59 struct __anonstruct_ldv_1031_10 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ;
59 union __anonunion_ldv_1032_8 { struct __anonstruct_ldv_1016_9 ldv_1016; struct __anonstruct_ldv_1031_10 ldv_1031; } ;
59 struct desc_struct { union __anonunion_ldv_1032_8 ldv_1032; } ;
12 typedef unsigned long pteval_t;
15 typedef unsigned long pgdval_t;
16 typedef unsigned long pgprotval_t;
18 struct __anonstruct_pte_t_11 { pteval_t pte; } ;
18 typedef struct __anonstruct_pte_t_11 pte_t;
20 struct pgprot { pgprotval_t pgprot; } ;
242 typedef struct pgprot pgprot_t;
244 struct __anonstruct_pgd_t_12 { pgdval_t pgd; } ;
244 typedef struct __anonstruct_pgd_t_12 pgd_t;
332 struct page ;
332 typedef struct page *pgtable_t;
340 struct file ;
353 struct seq_file ;
390 struct thread_struct ;
392 struct mm_struct ;
393 struct task_struct ;
394 struct cpumask ;
395 struct paravirt_callee_save { void *func; } ;
196 struct pv_irq_ops { struct paravirt_callee_save save_fl; struct paravirt_callee_save restore_fl; struct paravirt_callee_save irq_disable; struct paravirt_callee_save irq_enable; void (*safe_halt)(); void (*halt)(); void (*adjust_exception_frame)(); } ;
327 struct arch_spinlock ;
18 typedef u16 __ticket_t;
19 typedef u32 __ticketpair_t;
20 struct __raw_tickets { __ticket_t head; __ticket_t tail; } ;
32 union __anonunion_ldv_1452_15 { __ticketpair_t head_tail; struct __raw_tickets tickets; } ;
32 struct arch_spinlock { union __anonunion_ldv_1452_15 ldv_1452; } ;
33 typedef struct arch_spinlock arch_spinlock_t;
34 struct qrwlock { atomic_t cnts; arch_spinlock_t lock; } ;
14 typedef struct qrwlock arch_rwlock_t;
142 typedef void (*ctor_fn_t)();
48 struct device ;
54 struct net_device ;
400 struct file_operations ;
412 struct completion ;
416 struct pid ;
527 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ;
102 struct timespec ;
127 struct kernel_vm86_regs { struct pt_regs pt; unsigned short es; unsigned short __esh; unsigned short ds; unsigned short __dsh; unsigned short fs; unsigned short __fsh; unsigned short gs; unsigned short __gsh; } ;
79 union __anonunion_ldv_2961_20 { struct pt_regs *regs; struct kernel_vm86_regs *vm86; } ;
79 struct math_emu_info { long ___orig_eip; union __anonunion_ldv_2961_20 ldv_2961; } ;
306 struct cpumask { unsigned long bits[128U]; } ;
14 typedef struct cpumask cpumask_t;
671 typedef struct cpumask *cpumask_var_t;
162 struct seq_operations ;
294 struct i387_fsave_struct { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ;
312 struct __anonstruct_ldv_5248_25 { u64 rip; u64 rdp; } ;
312 struct __anonstruct_ldv_5254_26 { u32 fip; u32 fcs; u32 foo; u32 fos; } ;
312 union __anonunion_ldv_5255_24 { struct __anonstruct_ldv_5248_25 ldv_5248; struct __anonstruct_ldv_5254_26 ldv_5254; } ;
312 union __anonunion_ldv_5264_27 { u32 padding1[12U]; u32 sw_reserved[12U]; } ;
312 struct i387_fxsave_struct { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion_ldv_5255_24 ldv_5255; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion_ldv_5264_27 ldv_5264; } ;
346 struct i387_soft_struct { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ;
367 struct ymmh_struct { u32 ymmh_space[64U]; } ;
372 struct lwp_struct { u8 reserved[128U]; } ;
377 struct bndregs_struct { u64 bndregs[8U]; } ;
381 struct bndcsr_struct { u64 cfg_reg_u; u64 status_reg; } ;
386 struct xsave_hdr_struct { u64 xstate_bv; u64 reserved1[2U]; u64 reserved2[5U]; } ;
392 struct xsave_struct { struct i387_fxsave_struct i387; struct xsave_hdr_struct xsave_hdr; struct ymmh_struct ymmh; struct lwp_struct lwp; struct bndregs_struct bndregs; struct bndcsr_struct bndcsr; } ;
401 union thread_xstate { struct i387_fsave_struct fsave; struct i387_fxsave_struct fxsave; struct i387_soft_struct soft; struct xsave_struct xsave; } ;
409 struct fpu { unsigned int last_cpu; unsigned int has_fpu; union thread_xstate *state; } ;
465 struct kmem_cache ;
466 struct perf_event ;
467 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned long usersp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; unsigned long fs; unsigned long gs; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; struct fpu fpu; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; unsigned char fpu_counter; } ;
23 typedef atomic64_t atomic_long_t;
35 struct lockdep_map ;
55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ;
26 struct lockdep_subclass_key { char __one_byte; } ;
53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ;
59 struct lock_class { struct list_head hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ;
144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ;
205 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; } ;
530 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
32 typedef struct raw_spinlock raw_spinlock_t;
33 struct __anonstruct_ldv_6305_31 { u8 __padding[24U]; struct lockdep_map dep_map; } ;
33 union __anonunion_ldv_6306_30 { struct raw_spinlock rlock; struct __anonstruct_ldv_6305_31 ldv_6305; } ;
33 struct spinlock { union __anonunion_ldv_6306_30 ldv_6306; } ;
76 typedef struct spinlock spinlock_t;
23 struct __anonstruct_rwlock_t_32 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
23 typedef struct __anonstruct_rwlock_t_32 rwlock_t;
135 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ;
51 typedef struct seqcount seqcount_t;
259 struct __anonstruct_seqlock_t_33 { struct seqcount seqcount; spinlock_t lock; } ;
259 typedef struct __anonstruct_seqlock_t_33 seqlock_t;
433 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ;
13 struct timeval { __kernel_time_t tv_sec; __kernel_suseconds_t tv_usec; } ;
83 struct user_namespace ;
22 struct __anonstruct_kuid_t_34 { uid_t val; } ;
22 typedef struct __anonstruct_kuid_t_34 kuid_t;
27 struct __anonstruct_kgid_t_35 { gid_t val; } ;
27 typedef struct __anonstruct_kgid_t_35 kgid_t;
127 struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; } ;
34 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ;
39 typedef struct __wait_queue_head wait_queue_head_t;
98 struct __anonstruct_nodemask_t_36 { unsigned long bits[16U]; } ;
98 typedef struct __anonstruct_nodemask_t_36 nodemask_t;
814 struct optimistic_spin_queue ;
815 struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; const char *name; void *magic; struct lockdep_map dep_map; } ;
68 struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; } ;
178 struct rw_semaphore ;
179 struct rw_semaphore { long count; raw_spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; struct optimistic_spin_queue *osq; struct lockdep_map dep_map; } ;
174 struct completion { unsigned int done; wait_queue_head_t wait; } ;
105 struct llist_node ;
105 struct llist_head { struct llist_node *first; } ;
64 struct llist_node { struct llist_node *next; } ;
72 struct resource { resource_size_t start; resource_size_t end; const char *name; unsigned long flags; struct resource *parent; struct resource *sibling; struct resource *child; } ;
323 union ktime { s64 tv64; } ;
59 typedef union ktime ktime_t;
412 struct tvec_base ;
413 struct timer_list { struct list_head entry; unsigned long expires; struct tvec_base *base; void (*function)(unsigned long); unsigned long data; int slack; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ;
254 struct hrtimer ;
255 enum hrtimer_restart ;
266 struct workqueue_struct ;
267 struct work_struct ;
53 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ;
106 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ;
58 struct pm_message { int event; } ;
64 typedef struct pm_message pm_message_t;
65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ;
320 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ;
327 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ;
335 struct wakeup_source ;
546 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; } ;
553 struct dev_pm_qos ;
553 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool ignore_children; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ;
614 struct dev_pm_domain { struct dev_pm_ops ops; } ;
22 struct __anonstruct_mm_context_t_101 { void *ldt; int size; unsigned short ia32_compat; struct mutex lock; void *vdso; } ;
22 typedef struct __anonstruct_mm_context_t_101 mm_context_t;
18 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ;
40 struct rb_root { struct rb_node *rb_node; } ;
87 struct vm_area_struct ;
22 struct bio_vec ;
167 struct notifier_block ;
51 struct notifier_block { int (*notifier_call)(struct notifier_block *, unsigned long, void *); struct notifier_block *next; int priority; } ;
63 struct blocking_notifier_head { struct rw_semaphore rwsem; struct notifier_block *head; } ;
906 struct ctl_table ;
835 struct nsproxy ;
836 struct ctl_table_root ;
837 struct ctl_table_header ;
838 struct ctl_dir ;
39 typedef int proc_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
59 struct ctl_table_poll { atomic_t event; wait_queue_head_t wait; } ;
98 struct ctl_table { const char *procname; void *data; int maxlen; umode_t mode; struct ctl_table *child; proc_handler *proc_handler; struct ctl_table_poll *poll; void *extra1; void *extra2; } ;
119 struct ctl_node { struct rb_node node; struct ctl_table_header *header; } ;
124 struct __anonstruct_ldv_14315_129 { struct ctl_table *ctl_table; int used; int count; int nreg; } ;
124 union __anonunion_ldv_14317_128 { struct __anonstruct_ldv_14315_129 ldv_14315; struct callback_head rcu; } ;
124 struct ctl_table_set ;
124 struct ctl_table_header { union __anonunion_ldv_14317_128 ldv_14317; struct completion *unregistering; struct ctl_table *ctl_table_arg; struct ctl_table_root *root; struct ctl_table_set *set; struct ctl_dir *parent; struct ctl_node *node; } ;
145 struct ctl_dir { struct ctl_table_header header; struct rb_root root; } ;
151 struct ctl_table_set { int (*is_seen)(struct ctl_table_set *); struct ctl_dir dir; } ;
156 struct ctl_table_root { struct ctl_table_set default_set; struct ctl_table_set * (*lookup)(struct ctl_table_root *, struct nsproxy *); int (*permissions)(struct ctl_table_header *, struct ctl_table *); } ;
37 struct cred ;
24 struct inode ;
58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ;
66 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ;
73 struct __anonstruct_ldv_14561_136 { struct arch_uprobe_task autask; unsigned long vaddr; } ;
73 struct __anonstruct_ldv_14565_137 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ;
73 union __anonunion_ldv_14566_135 { struct __anonstruct_ldv_14561_136 ldv_14561; struct __anonstruct_ldv_14565_137 ldv_14565; } ;
73 struct uprobe ;
73 struct return_instance ;
73 struct uprobe_task { enum uprobe_task_state state; union __anonunion_ldv_14566_135 ldv_14566; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ;
94 struct xol_area ;
95 struct uprobes_state { struct xol_area *xol_area; } ;
133 struct address_space ;
134 union __anonunion_ldv_14675_138 { struct address_space *mapping; void *s_mem; } ;
134 union __anonunion_ldv_14681_140 { unsigned long index; void *freelist; bool pfmemalloc; } ;
134 struct __anonstruct_ldv_14691_144 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ;
134 union __anonunion_ldv_14693_143 { atomic_t _mapcount; struct __anonstruct_ldv_14691_144 ldv_14691; int units; } ;
134 struct __anonstruct_ldv_14695_142 { union __anonunion_ldv_14693_143 ldv_14693; atomic_t _count; } ;
134 union __anonunion_ldv_14697_141 { unsigned long counters; struct __anonstruct_ldv_14695_142 ldv_14695; unsigned int active; } ;
134 struct __anonstruct_ldv_14698_139 { union __anonunion_ldv_14681_140 ldv_14681; union __anonunion_ldv_14697_141 ldv_14697; } ;
134 struct __anonstruct_ldv_14705_146 { struct page *next; int pages; int pobjects; } ;
134 struct slab ;
134 union __anonunion_ldv_14710_145 { struct list_head lru; struct __anonstruct_ldv_14705_146 ldv_14705; struct slab *slab_page; struct callback_head callback_head; pgtable_t pmd_huge_pte; } ;
134 union __anonunion_ldv_14716_147 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; struct page *first_page; } ;
134 struct page { unsigned long flags; union __anonunion_ldv_14675_138 ldv_14675; struct __anonstruct_ldv_14698_139 ldv_14698; union __anonunion_ldv_14710_145 ldv_14710; union __anonunion_ldv_14716_147 ldv_14716; unsigned long debug_flags; } ;
187 struct page_frag { struct page *page; __u32 offset; __u32 size; } ;
239 struct __anonstruct_linear_149 { struct rb_node rb; unsigned long rb_subtree_last; } ;
239 union __anonunion_shared_148 { struct __anonstruct_linear_149 linear; struct list_head nonlinear; } ;
239 struct anon_vma ;
239 struct vm_operations_struct ;
239 struct mempolicy ;
239 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; union __anonunion_shared_148 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; } ;
311 struct core_thread { struct task_struct *task; struct core_thread *next; } ;
317 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ;
330 struct task_rss_stat { int events; int count[3U]; } ;
338 struct mm_rss_stat { atomic_long_t count[3U]; } ;
343 struct kioctx_table ;
344 struct linux_binfmt ;
344 struct mmu_notifier_mm ;
344 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long shared_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; } ;
15 typedef __u64 Elf64_Addr;
16 typedef __u16 Elf64_Half;
20 typedef __u32 Elf64_Word;
21 typedef __u64 Elf64_Xword;
190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ;
198 typedef struct elf64_sym Elf64_Sym;
48 union __anonunion_ldv_15079_153 { unsigned long bitmap[4U]; struct callback_head callback_head; } ;
48 struct idr_layer { int prefix; int layer; struct idr_layer *ary[256U]; int count; union __anonunion_ldv_15079_153 ldv_15079; } ;
41 struct idr { struct idr_layer *hint; struct idr_layer *top; int layers; int cur; spinlock_t lock; int id_free_cnt; struct idr_layer *id_free; } ;
124 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ;
153 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ;
185 struct dentry ;
186 struct iattr ;
187 struct super_block ;
188 struct file_system_type ;
189 struct kernfs_open_node ;
190 struct kernfs_iattrs ;
213 struct kernfs_root ;
213 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ;
85 struct kernfs_node ;
85 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ;
89 struct kernfs_ops ;
89 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; } ;
95 union __anonunion_ldv_15223_154 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ;
95 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion_ldv_15223_154 ldv_15223; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ;
137 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); } ;
154 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ;
170 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; void *priv; struct mutex mutex; int event; struct list_head list; size_t atomic_write_len; bool mmapped; const struct vm_operations_struct *vm_ops; } ;
186 struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ;
462 struct sock ;
463 struct kobject ;
464 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ;
470 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ;
59 struct bin_attribute ;
60 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ;
37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ;
67 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ;
131 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ;
470 struct kref { atomic_t refcount; } ;
52 struct kset ;
52 struct kobj_type ;
52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ;
114 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ;
122 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ;
130 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ;
147 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ;
222 struct kernel_param ;
227 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ;
58 struct kparam_string ;
58 struct kparam_array ;
58 union __anonunion_ldv_15898_155 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ;
58 struct kernel_param { const char *name; const struct kernel_param_ops *ops; u16 perm; s16 level; union __anonunion_ldv_15898_155 ldv_15898; } ;
70 struct kparam_string { unsigned int maxlen; char *string; } ;
76 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ;
461 struct mod_arch_specific { } ;
36 struct module_param_attrs ;
36 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ;
46 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ;
72 struct exception_table_entry ;
205 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ;
212 struct module_ref { unsigned long incs; unsigned long decs; } ;
226 struct module_sect_attrs ;
226 struct module_notes_attrs ;
226 struct tracepoint ;
226 struct ftrace_event_call ;
226 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); void *module_init; void *module_core; unsigned int init_size; unsigned int core_size; unsigned int init_text_size; unsigned int core_text_size; unsigned int init_ro_size; unsigned int core_ro_size; struct mod_arch_specific arch; unsigned int taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; Elf64_Sym *symtab; Elf64_Sym *core_symtab; unsigned int num_symtab; unsigned int core_num_syms; char *strtab; char *core_strtab; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct ftrace_event_call **trace_events; unsigned int num_trace_events; struct list_head source_list; struct list_head target_list; void (*exit)(); struct module_ref *refptr; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ;
217 struct iovec { void *iov_base; __kernel_size_t iov_len; } ;
27 union __anonunion_ldv_16475_156 { const struct iovec *iov; const struct bio_vec *bvec; } ;
27 struct iov_iter { int type; size_t iov_offset; size_t count; union __anonunion_ldv_16475_156 ldv_16475; unsigned long nr_segs; } ;
11 typedef unsigned short __kernel_sa_family_t;
23 typedef __kernel_sa_family_t sa_family_t;
24 struct sockaddr { sa_family_t sa_family; char sa_data[14U]; } ;
54 struct poll_table_struct ;
55 struct pipe_inode_info ;
56 struct net ;
73 struct fasync_struct ;
120 struct kiocb ;
57 struct mem_cgroup ;
368 struct kmem_cache_cpu { void **freelist; unsigned long tid; struct page *page; struct page *partial; unsigned int stat[26U]; } ;
48 struct kmem_cache_order_objects { unsigned long x; } ;
58 struct memcg_cache_params ;
58 struct kmem_cache_node ;
58 struct kmem_cache { struct kmem_cache_cpu *cpu_slab; unsigned long flags; unsigned long min_partial; int size; int object_size; int offset; int cpu_partial; struct kmem_cache_order_objects oo; struct kmem_cache_order_objects max; struct kmem_cache_order_objects min; gfp_t allocflags; int refcount; void (*ctor)(void *); int inuse; int align; int reserved; const char *name; struct list_head list; struct kobject kobj; struct memcg_cache_params *memcg_params; int max_attr_size; struct kset *memcg_kset; int remote_node_defrag_ratio; struct kmem_cache_node *node[1024U]; } ;
501 struct __anonstruct_ldv_17129_158 { struct callback_head callback_head; struct kmem_cache *memcg_caches[0U]; } ;
501 struct __anonstruct_ldv_17135_159 { struct mem_cgroup *memcg; struct list_head list; struct kmem_cache *root_cache; atomic_t nr_pages; } ;
501 union __anonunion_ldv_17136_157 { struct __anonstruct_ldv_17129_158 ldv_17129; struct __anonstruct_ldv_17135_159 ldv_17135; } ;
501 struct memcg_cache_params { bool is_root_cache; union __anonunion_ldv_17136_157 ldv_17136; } ;
62 struct exception_table_entry { int insn; int fixup; } ;
140 struct sk_buff ;
155 struct klist_node ;
37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ;
67 struct path ;
68 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; struct user_namespace *user_ns; void *private; } ;
35 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ;
196 struct pinctrl ;
197 struct pinctrl_state ;
194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ;
42 struct dma_map_ops ;
42 struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; } ;
11 struct pdev_archdata { } ;
14 struct device_private ;
15 struct device_driver ;
16 struct driver_private ;
17 struct class ;
18 struct subsys_private ;
19 struct bus_type ;
20 struct device_node ;
21 struct iommu_ops ;
22 struct iommu_group ;
60 struct device_attribute ;
60 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ;
138 struct device_type ;
195 struct of_device_id ;
195 struct acpi_device_id ;
195 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ;
321 struct class_attribute ;
321 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ;
414 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ;
482 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ;
510 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ;
640 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ;
649 struct acpi_device ;
650 struct acpi_dev_node { struct acpi_device *companion; } ;
656 struct dma_coherent_mem ;
656 struct cma ;
656 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct dev_pin_info *pins; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct acpi_dev_node acpi_node; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; bool offline_disabled; bool offline; } ;
803 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ;
93 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; nodemask_t nodes_to_scan; int nid; } ;
26 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ;
71 struct file_ra_state ;
72 struct user_struct ;
73 struct writeback_control ;
188 struct vm_fault { unsigned int flags; unsigned long pgoff; void *virtual_address; struct page *page; unsigned long max_pgoff; pte_t *pte; } ;
221 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*fault)(struct vm_area_struct *, struct vm_fault *); void (*map_pages)(struct vm_area_struct *, struct vm_fault *); int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); int (*migrate)(struct vm_area_struct *, const nodemask_t *, const nodemask_t *, unsigned long); int (*remap_pages)(struct vm_area_struct *, unsigned long, unsigned long, unsigned long); } ;
2112 struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; } ;
17 struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; } ;
38 typedef s32 dma_cookie_t;
1153 struct timerqueue_node { struct rb_node node; ktime_t expires; } ;
12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ;
50 struct hrtimer_clock_base ;
51 struct hrtimer_cpu_base ;
60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ;
65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; unsigned long state; int start_pid; void *start_site; char start_comm[16U]; } ;
132 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t resolution; ktime_t (*get_time)(); ktime_t softirq_time; ktime_t offset; } ;
163 struct hrtimer_cpu_base { raw_spinlock_t lock; unsigned int active_bases; unsigned int clock_was_set; ktime_t expires_next; int hres_active; int hang_detected; unsigned long nr_events; unsigned long nr_retries; unsigned long nr_hangs; ktime_t max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ;
474 struct dma_attrs { unsigned long flags[1U]; } ;
70 enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ;
77 struct dma_map_ops { void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , struct dma_attrs *); void (*free)(struct device *, size_t , void *, dma_addr_t , struct dma_attrs *); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , struct dma_attrs *); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , struct dma_attrs *); dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , struct dma_attrs *); void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs *); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *); void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction ); int (*mapping_error)(struct device *, dma_addr_t ); int (*dma_supported)(struct device *, u64 ); int (*set_dma_mask)(struct device *, u64 ); int is_phys; } ;
15 typedef u64 netdev_features_t;
22 struct kernel_cap_struct { __u32 cap[2U]; } ;
25 typedef struct kernel_cap_struct kernel_cap_t;
218 struct plist_head { struct list_head node_list; } ;
84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ;
4 typedef unsigned long cputime_t;
25 struct sem_undo_list ;
25 struct sysv_sem { struct sem_undo_list *undo_list; } ;
24 struct __anonstruct_sigset_t_162 { unsigned long sig[1U]; } ;
24 typedef struct __anonstruct_sigset_t_162 sigset_t;
25 struct siginfo ;
17 typedef void __signalfn_t(int);
18 typedef __signalfn_t *__sighandler_t;
20 typedef void __restorefn_t();
21 typedef __restorefn_t *__sigrestore_t;
34 union sigval { int sival_int; void *sival_ptr; } ;
10 typedef union sigval sigval_t;
11 struct __anonstruct__kill_164 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ;
11 struct __anonstruct__timer_165 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ;
11 struct __anonstruct__rt_166 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ;
11 struct __anonstruct__sigchld_167 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ;
11 struct __anonstruct__sigfault_168 { void *_addr; short _addr_lsb; } ;
11 struct __anonstruct__sigpoll_169 { long _band; int _fd; } ;
11 struct __anonstruct__sigsys_170 { void *_call_addr; int _syscall; unsigned int _arch; } ;
11 union __anonunion__sifields_163 { int _pad[28U]; struct __anonstruct__kill_164 _kill; struct __anonstruct__timer_165 _timer; struct __anonstruct__rt_166 _rt; struct __anonstruct__sigchld_167 _sigchld; struct __anonstruct__sigfault_168 _sigfault; struct __anonstruct__sigpoll_169 _sigpoll; struct __anonstruct__sigsys_170 _sigsys; } ;
11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_163 _sifields; } ;
109 typedef struct siginfo siginfo_t;
21 struct sigpending { struct list_head list; sigset_t signal; } ;
246 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; } ;
260 struct k_sigaction { struct sigaction sa; } ;
459 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ;
466 struct pid_namespace ;
466 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ;
56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ;
68 struct pid_link { struct hlist_node node; struct pid *pid; } ;
174 struct percpu_counter { raw_spinlock_t lock; s64 count; struct list_head list; s32 *counters; } ;
46 struct seccomp_filter ;
47 struct seccomp { int mode; struct seccomp_filter *filter; } ;
40 struct rt_mutex_waiter ;
41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ;
11 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ;
45 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ;
39 struct assoc_array_ptr ;
39 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ;
31 typedef int32_t key_serial_t;
34 typedef uint32_t key_perm_t;
35 struct key ;
36 struct signal_struct ;
37 struct key_type ;
41 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ;
123 union __anonunion_ldv_24417_173 { struct list_head graveyard_link; struct rb_node serial_node; } ;
123 struct key_user ;
123 union __anonunion_ldv_24425_174 { time_t expiry; time_t revoked_at; } ;
123 struct __anonstruct_ldv_24438_176 { struct key_type *type; char *description; } ;
123 union __anonunion_ldv_24439_175 { struct keyring_index_key index_key; struct __anonstruct_ldv_24438_176 ldv_24438; } ;
123 union __anonunion_type_data_177 { struct list_head link; unsigned long x[2U]; void *p[2U]; int reject_error; } ;
123 union __anonunion_payload_179 { unsigned long value; void *rcudata; void *data; void *data2[2U]; } ;
123 union __anonunion_ldv_24454_178 { union __anonunion_payload_179 payload; struct assoc_array keys; } ;
123 struct key { atomic_t usage; key_serial_t serial; union __anonunion_ldv_24417_173 ldv_24417; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion_ldv_24425_174 ldv_24425; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion_ldv_24439_175 ldv_24439; union __anonunion_type_data_177 type_data; union __anonunion_ldv_24454_178 ldv_24454; } ;
356 struct audit_context ;
27 struct group_info { atomic_t usage; int ngroups; int nblocks; kgid_t small_block[32U]; kgid_t *blocks[0U]; } ;
78 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ;
125 struct futex_pi_state ;
126 struct robust_list_head ;
127 struct bio_list ;
128 struct fs_struct ;
129 struct perf_event_context ;
130 struct blk_plug ;
180 struct cfs_rq ;
181 struct task_group ;
426 struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ;
465 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ;
473 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ;
480 struct cputime { cputime_t utime; cputime_t stime; } ;
492 struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; } ;
512 struct thread_group_cputimer { struct task_cputime cputime; int running; raw_spinlock_t lock; } ;
554 struct autogroup ;
555 struct tty_struct ;
555 struct taskstats ;
555 struct tty_audit_buf ;
555 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; unsigned int audit_tty_log_passwd; struct tty_audit_buf *tty_audit_buf; struct rw_semaphore group_rwsem; oom_flags_t oom_flags; short oom_score_adj; short oom_score_adj_min; struct mutex cred_guard_mutex; } ;
735 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ;
778 struct backing_dev_info ;
779 struct reclaim_state ;
780 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ;
794 struct task_delay_info { spinlock_t lock; unsigned int flags; struct timespec blkio_start; struct timespec blkio_end; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; struct timespec freepages_start; struct timespec freepages_end; u64 freepages_delay; u32 freepages_count; } ;
1026 struct io_context ;
1060 struct uts_namespace ;
1061 struct load_weight { unsigned long weight; u32 inv_weight; } ;
1069 struct sched_avg { u32 runnable_avg_sum; u32 runnable_avg_period; u64 last_runnable_update; s64 decay_count; unsigned long load_avg_contrib; } ;
1081 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ;
1116 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ;
1148 struct rt_rq ;
1148 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ;
1164 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_new; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ;
1222 struct memcg_batch_info { int do_batch; struct mem_cgroup *memcg; unsigned long nr_pages; unsigned long memsw_nr_pages; } ;
1643 struct memcg_oom_info { struct mem_cgroup *memcg; gfp_t gfp_mask; int order; unsigned char may_oom; } ;
1650 struct sched_class ;
1650 struct files_struct ;
1650 struct css_set ;
1650 struct compat_robust_list_head ;
1650 struct numa_group ;
1650 struct task_struct { volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; struct task_struct *last_wakee; unsigned long wakee_flips; unsigned long wakee_flip_decay_ts; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; unsigned char brk_randomized; u32 vmacache_seqnum; struct vm_area_struct *vmacache[4U]; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned int jobctl; unsigned int personality; unsigned char in_execve; unsigned char in_iowait; unsigned char no_new_privs; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t utimescaled; cputime_t stimescaled; cputime_t gtime; struct cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; struct timespec start_time; struct timespec real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *real_cred; const struct cred *cred; char comm[16U]; int link_count; int 
total_link_count; struct sysv_sem sysvsem; unsigned long last_switch_count; struct thread_struct thread; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; int (*notifier)(void *); void *notifier_data; sigset_t *notifier_mask; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct task_struct *pi_top_task; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t acct_timexpd; nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state 
*pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults_memory; unsigned long total_numa_faults; unsigned long *numa_faults_buffer_memory; unsigned long *numa_faults_cpu; unsigned long *numa_faults_buffer_cpu; unsigned long numa_faults_locality[2U]; unsigned long numa_pages_migrated; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; unsigned long timer_slack_ns; unsigned long default_timer_slack_ns; unsigned long trace; unsigned long trace_recursion; struct memcg_batch_info memcg_batch; unsigned int memcg_kmem_skip_account; struct memcg_oom_info memcg_oom; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; } ;
18 struct nf_conntrack { atomic_t use; } ;
137 struct nf_bridge_info { atomic_t use; unsigned int mask; struct net_device *physindev; struct net_device *physoutdev; unsigned long data[4U]; } ;
147 struct sk_buff_head { struct sk_buff *next; struct sk_buff *prev; __u32 qlen; spinlock_t lock; } ;
360 typedef unsigned int sk_buff_data_t;
361 struct __anonstruct_ldv_26193_184 { u32 stamp_us; u32 stamp_jiffies; } ;
361 union __anonunion_ldv_26194_183 { u64 v64; struct __anonstruct_ldv_26193_184 ldv_26193; } ;
361 struct skb_mstamp { union __anonunion_ldv_26194_183 ldv_26194; } ;
414 union __anonunion_ldv_26213_185 { ktime_t tstamp; struct skb_mstamp skb_mstamp; } ;
414 struct sec_path ;
414 struct __anonstruct_ldv_26229_187 { __u16 csum_start; __u16 csum_offset; } ;
414 union __anonunion_ldv_26230_186 { __wsum csum; struct __anonstruct_ldv_26229_187 ldv_26229; } ;
414 union __anonunion_ldv_26269_188 { unsigned int napi_id; dma_cookie_t dma_cookie; } ;
414 union __anonunion_ldv_26275_189 { __u32 mark; __u32 dropcount; __u32 reserved_tailroom; } ;
414 struct sk_buff { struct sk_buff *next; struct sk_buff *prev; union __anonunion_ldv_26213_185 ldv_26213; struct sock *sk; struct net_device *dev; char cb[48U]; unsigned long _skb_refdst; struct sec_path *sp; unsigned int len; unsigned int data_len; __u16 mac_len; __u16 hdr_len; union __anonunion_ldv_26230_186 ldv_26230; __u32 priority; unsigned char ignore_df; unsigned char cloned; unsigned char ip_summed; unsigned char nohdr; unsigned char nfctinfo; unsigned char pkt_type; unsigned char fclone; unsigned char ipvs_property; unsigned char peeked; unsigned char nf_trace; __be16 protocol; void (*destructor)(struct sk_buff *); struct nf_conntrack *nfct; struct nf_bridge_info *nf_bridge; int skb_iif; __u32 hash; __be16 vlan_proto; __u16 vlan_tci; __u16 tc_index; __u16 tc_verd; __u16 queue_mapping; unsigned char ndisc_nodetype; unsigned char pfmemalloc; unsigned char ooo_okay; unsigned char l4_hash; unsigned char wifi_acked_valid; unsigned char wifi_acked; unsigned char no_fcs; unsigned char head_frag; unsigned char encapsulation; unsigned char encap_hdr_csum; unsigned char csum_valid; unsigned char csum_complete_sw; union __anonunion_ldv_26269_188 ldv_26269; __u32 secmark; union __anonunion_ldv_26275_189 ldv_26275; __be16 inner_protocol; __u16 inner_transport_header; __u16 inner_network_header; __u16 inner_mac_header; __u16 transport_header; __u16 network_header; __u16 mac_header; sk_buff_data_t tail; sk_buff_data_t end; unsigned char *head; unsigned char *data; unsigned int truesize; atomic_t users; } ;
641 struct dst_entry ;
84 struct pm_qos_request { struct plist_node node; int pm_qos_class; struct delayed_work work; } ;
48 struct pm_qos_flags_request { struct list_head node; s32 flags; } ;
53 enum dev_pm_qos_req_type { DEV_PM_QOS_RESUME_LATENCY = 1, DEV_PM_QOS_LATENCY_TOLERANCE = 2, DEV_PM_QOS_FLAGS = 3 } ;
59 union __anonunion_data_190 { struct plist_node pnode; struct pm_qos_flags_request flr; } ;
59 struct dev_pm_qos_request { enum dev_pm_qos_req_type type; union __anonunion_data_190 data; struct device *dev; } ;
68 enum pm_qos_type { PM_QOS_UNITIALIZED = 0, PM_QOS_MAX = 1, PM_QOS_MIN = 2 } ;
74 struct pm_qos_constraints { struct plist_head list; s32 target_value; s32 default_value; s32 no_constraint_value; enum pm_qos_type type; struct blocking_notifier_head *notifiers; } ;
88 struct pm_qos_flags { struct list_head list; s32 effective_flags; } ;
93 struct dev_pm_qos { struct pm_qos_constraints resume_latency; struct pm_qos_constraints latency_tolerance; struct pm_qos_flags flags; struct dev_pm_qos_request *resume_latency_req; struct dev_pm_qos_request *latency_tolerance_req; struct dev_pm_qos_request *flags_req; } ;
54 struct dql { unsigned int num_queued; unsigned int adj_limit; unsigned int last_obj_cnt; unsigned int limit; unsigned int num_completed; unsigned int prev_ovlimit; unsigned int prev_num_queued; unsigned int prev_last_obj_cnt; unsigned int lowest_slack; unsigned long slack_start_time; unsigned int max_limit; unsigned int min_limit; unsigned int slack_hold_time; } ;
43 struct __anonstruct_sync_serial_settings_191 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; } ;
43 typedef struct __anonstruct_sync_serial_settings_191 sync_serial_settings;
50 struct __anonstruct_te1_settings_192 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; unsigned int slot_map; } ;
50 typedef struct __anonstruct_te1_settings_192 te1_settings;
55 struct __anonstruct_raw_hdlc_proto_193 { unsigned short encoding; unsigned short parity; } ;
55 typedef struct __anonstruct_raw_hdlc_proto_193 raw_hdlc_proto;
65 struct __anonstruct_fr_proto_194 { unsigned int t391; unsigned int t392; unsigned int n391; unsigned int n392; unsigned int n393; unsigned short lmi; unsigned short dce; } ;
65 typedef struct __anonstruct_fr_proto_194 fr_proto;
69 struct __anonstruct_fr_proto_pvc_195 { unsigned int dlci; } ;
69 typedef struct __anonstruct_fr_proto_pvc_195 fr_proto_pvc;
74 struct __anonstruct_fr_proto_pvc_info_196 { unsigned int dlci; char master[16U]; } ;
74 typedef struct __anonstruct_fr_proto_pvc_info_196 fr_proto_pvc_info;
79 struct __anonstruct_cisco_proto_197 { unsigned int interval; unsigned int timeout; } ;
79 typedef struct __anonstruct_cisco_proto_197 cisco_proto;
117 struct ifmap { unsigned long mem_start; unsigned long mem_end; unsigned short base_addr; unsigned char irq; unsigned char dma; unsigned char port; } ;
177 union __anonunion_ifs_ifsu_198 { raw_hdlc_proto *raw_hdlc; cisco_proto *cisco; fr_proto *fr; fr_proto_pvc *fr_pvc; fr_proto_pvc_info *fr_pvc_info; sync_serial_settings *sync; te1_settings *te1; } ;
177 struct if_settings { unsigned int type; unsigned int size; union __anonunion_ifs_ifsu_198 ifs_ifsu; } ;
195 union __anonunion_ifr_ifrn_199 { char ifrn_name[16U]; } ;
195 union __anonunion_ifr_ifru_200 { struct sockaddr ifru_addr; struct sockaddr ifru_dstaddr; struct sockaddr ifru_broadaddr; struct sockaddr ifru_netmask; struct sockaddr ifru_hwaddr; short ifru_flags; int ifru_ivalue; int ifru_mtu; struct ifmap ifru_map; char ifru_slave[16U]; char ifru_newname[16U]; void *ifru_data; struct if_settings ifru_settings; } ;
195 struct ifreq { union __anonunion_ifr_ifrn_199 ifr_ifrn; union __anonunion_ifr_ifru_200 ifr_ifru; } ;
91 struct hlist_bl_node ;
91 struct hlist_bl_head { struct hlist_bl_node *first; } ;
36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ;
114 struct __anonstruct_ldv_27896_203 { spinlock_t lock; unsigned int count; } ;
114 union __anonunion_ldv_27897_202 { struct __anonstruct_ldv_27896_203 ldv_27896; } ;
114 struct lockref { union __anonunion_ldv_27897_202 ldv_27897; } ;
49 struct nameidata ;
50 struct vfsmount ;
51 struct __anonstruct_ldv_27920_205 { u32 hash; u32 len; } ;
51 union __anonunion_ldv_27922_204 { struct __anonstruct_ldv_27920_205 ldv_27920; u64 hash_len; } ;
51 struct qstr { union __anonunion_ldv_27922_204 ldv_27922; const unsigned char *name; } ;
90 struct dentry_operations ;
90 union __anonunion_d_u_206 { struct list_head d_child; struct callback_head d_rcu; } ;
90 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; struct list_head d_lru; union __anonunion_d_u_206 d_u; struct list_head d_subdirs; struct hlist_node d_alias; } ;
142 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(struct dentry *, bool ); } ;
477 struct path { struct vfsmount *mnt; struct dentry *dentry; } ;
27 struct list_lru_node { spinlock_t lock; struct list_head list; long nr_items; } ;
30 struct list_lru { struct list_lru_node *node; nodemask_t active_nodes; } ;
58 struct __anonstruct_ldv_28283_208 { struct radix_tree_node *parent; void *private_data; } ;
58 union __anonunion_ldv_28285_207 { struct __anonstruct_ldv_28283_208 ldv_28283; struct callback_head callback_head; } ;
58 struct radix_tree_node { unsigned int path; unsigned int count; union __anonunion_ldv_28285_207 ldv_28285; struct list_head private_list; void *slots[64U]; unsigned long tags[3U][1U]; } ;
105 struct radix_tree_root { unsigned int height; gfp_t gfp_mask; struct radix_tree_node *rnode; } ;
45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ;
38 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ;
30 struct block_device ;
31 struct cgroup_subsys_state ;
19 struct bio_vec { struct page *bv_page; unsigned int bv_len; unsigned int bv_offset; } ;
59 struct export_operations ;
61 struct kstatfs ;
62 struct swap_info_struct ;
69 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ;
253 struct fs_disk_quota { __s8 d_version; __s8 d_flags; __u16 d_fieldmask; __u32 d_id; __u64 d_blk_hardlimit; __u64 d_blk_softlimit; __u64 d_ino_hardlimit; __u64 d_ino_softlimit; __u64 d_bcount; __u64 d_icount; __s32 d_itimer; __s32 d_btimer; __u16 d_iwarns; __u16 d_bwarns; __s32 d_padding2; __u64 d_rtb_hardlimit; __u64 d_rtb_softlimit; __u64 d_rtbcount; __s32 d_rtbtimer; __u16 d_rtbwarns; __s16 d_padding3; char d_padding4[8U]; } ;
76 struct fs_qfilestat { __u64 qfs_ino; __u64 qfs_nblks; __u32 qfs_nextents; } ;
151 typedef struct fs_qfilestat fs_qfilestat_t;
152 struct fs_quota_stat { __s8 qs_version; __u16 qs_flags; __s8 qs_pad; fs_qfilestat_t qs_uquota; fs_qfilestat_t qs_gquota; __u32 qs_incoredqs; __s32 qs_btimelimit; __s32 qs_itimelimit; __s32 qs_rtbtimelimit; __u16 qs_bwarnlimit; __u16 qs_iwarnlimit; } ;
166 struct fs_qfilestatv { __u64 qfs_ino; __u64 qfs_nblks; __u32 qfs_nextents; __u32 qfs_pad; } ;
196 struct fs_quota_statv { __s8 qs_version; __u8 qs_pad1; __u16 qs_flags; __u32 qs_incoredqs; struct fs_qfilestatv qs_uquota; struct fs_qfilestatv qs_gquota; struct fs_qfilestatv qs_pquota; __s32 qs_btimelimit; __s32 qs_itimelimit; __s32 qs_rtbtimelimit; __u16 qs_bwarnlimit; __u16 qs_iwarnlimit; __u64 qs_pad2[8U]; } ;
212 struct dquot ;
19 typedef __kernel_uid32_t projid_t;
23 struct __anonstruct_kprojid_t_209 { projid_t val; } ;
23 typedef struct __anonstruct_kprojid_t_209 kprojid_t;
119 struct if_dqinfo { __u64 dqi_bgrace; __u64 dqi_igrace; __u32 dqi_flags; __u32 dqi_valid; } ;
152 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ;
60 typedef long long qsize_t;
61 union __anonunion_ldv_28810_210 { kuid_t uid; kgid_t gid; kprojid_t projid; } ;
61 struct kqid { union __anonunion_ldv_28810_210 ldv_28810; enum quota_type type; } ;
178 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time_t dqb_btime; time_t dqb_itime; } ;
200 struct quota_format_type ;
201 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_maxblimit; qsize_t dqi_maxilimit; void *dqi_priv; } ;
264 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ;
291 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); } ;
302 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); } ;
316 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, struct path *); int (*quota_on_meta)(struct super_block *, int, int); int (*quota_off)(struct super_block *, int); int (*quota_sync)(struct super_block *, int); int (*get_info)(struct super_block *, int, struct if_dqinfo *); int (*set_info)(struct super_block *, int, struct if_dqinfo *); int (*get_dqblk)(struct super_block *, struct kqid , struct fs_disk_quota *); int (*set_dqblk)(struct super_block *, struct kqid , struct fs_disk_quota *); int (*get_xstate)(struct super_block *, struct fs_quota_stat *); int (*set_xstate)(struct super_block *, unsigned int, int); int (*get_xstatev)(struct super_block *, struct fs_quota_statv *); int (*rm_xquota)(struct super_block *, unsigned int); } ;
334 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ;
380 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct mutex dqonoff_mutex; struct rw_semaphore dqptr_sem; struct inode *files[2U]; struct mem_dqinfo info[2U]; const struct quota_format_ops *ops[2U]; } ;
323 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *, loff_t ); int (*get_xip_mem)(struct address_space *, unsigned long, int, void **, unsigned long *); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ;
382 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; unsigned int i_mmap_writable; struct rb_root i_mmap; struct list_head i_mmap_nonlinear; struct mutex i_mmap_mutex; unsigned long nrpages; unsigned long nrshadows; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; struct backing_dev_info *backing_dev_info; spinlock_t private_lock; struct list_head private_list; void *private_data; } ;
405 struct request_queue ;
406 struct hd_struct ;
406 struct gendisk ;
406 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; struct list_head bd_inodes; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct list_head bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; } ;
478 struct posix_acl ;
479 struct inode_operations ;
479 union __anonunion_ldv_29224_213 { const unsigned int i_nlink; unsigned int __i_nlink; } ;
479 union __anonunion_ldv_29244_214 { struct hlist_head i_dentry; struct callback_head i_rcu; } ;
479 struct file_lock ;
479 struct cdev ;
479 union __anonunion_ldv_29261_215 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; } ;
479 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion_ldv_29224_213 ldv_29224; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct mutex i_mutex; unsigned long dirtied_when; struct hlist_node i_hash; struct list_head i_wb_list; struct list_head i_lru; struct list_head i_sb_list; union __anonunion_ldv_29244_214 ldv_29244; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; const struct file_operations *i_fop; struct file_lock *i_flock; struct address_space i_data; struct dquot *i_dquot[2U]; struct list_head i_devices; union __anonunion_ldv_29261_215 ldv_29261; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; void *i_private; } ;
715 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ;
723 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ;
746 union __anonunion_f_u_216 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ;
746 struct file { union __anonunion_f_u_216 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; } ;
836 typedef struct files_struct *fl_owner_t;
837 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ;
842 struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, struct file_lock *, int); void (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock **, int); } ;
860 struct nlm_lockowner ;
861 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ;
14 struct nfs4_lock_state ;
15 struct nfs4_lock_info { struct nfs4_lock_state *owner; } ;
19 struct __anonstruct_afs_218 { struct list_head link; int state; } ;
19 union __anonunion_fl_u_217 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_218 afs; } ;
19 struct file_lock { struct file_lock *fl_next; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_217 fl_u; } ;
963 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ;
1157 struct sb_writers { struct percpu_counter counter[3U]; wait_queue_head_t wait; int frozen; wait_queue_head_t wait_unfrozen; struct lockdep_map lock_map[3U]; } ;
1173 struct super_operations ;
1173 struct xattr_handler ;
1173 struct mtd_info ;
1173 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; struct list_head s_inodes; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; } ;
1403 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ;
1441 struct dir_context { int (*actor)(void *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ;
1446 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*aio_read)(struct kiocb *, const struct iovec *, unsigned long, loff_t ); ssize_t (*aio_write)(struct kiocb *, const struct iovec *, unsigned long, loff_t ); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iterate)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*aio_fsync)(struct kiocb *, int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); int (*show_fdinfo)(struct seq_file *, struct file *); } ;
1488 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); void * (*follow_link)(struct dentry *, struct nameidata *); int (*permission)(struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); void (*put_link)(struct dentry *, struct nameidata *, void *); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *); int (*rename2)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *); int (*setxattr)(struct dentry *, const char *, const void *, size_t , int); ssize_t (*getxattr)(struct dentry *, const char *, void *, size_t ); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*removexattr)(struct dentry *, const char *); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ;
1535 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_fs)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, int); long int (*free_cached_objects)(struct super_block *, long, int); } ;
1749 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ;
39 typedef s32 compat_long_t;
44 typedef u32 compat_uptr_t;
276 struct compat_robust_list { compat_uptr_t next; } ;
280 struct compat_robust_list_head { struct compat_robust_list list; compat_long_t futex_offset; compat_uptr_t list_op_pending; } ;
703 struct ethhdr { unsigned char h_dest[6U]; unsigned char h_source[6U]; __be16 h_proto; } ;
34 struct ethtool_cmd { __u32 cmd; __u32 supported; __u32 advertising; __u16 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 transceiver; __u8 autoneg; __u8 mdio_support; __u32 maxtxpkt; __u32 maxrxpkt; __u16 speed_hi; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __u32 lp_advertising; __u32 reserved[2U]; } ;
125 struct ethtool_drvinfo { __u32 cmd; char driver[32U]; char version[32U]; char fw_version[32U]; char bus_info[32U]; char reserved1[32U]; char reserved2[12U]; __u32 n_priv_flags; __u32 n_stats; __u32 testinfo_len; __u32 eedump_len; __u32 regdump_len; } ;
187 struct ethtool_wolinfo { __u32 cmd; __u32 supported; __u32 wolopts; __u8 sopass[6U]; } ;
211 struct ethtool_regs { __u32 cmd; __u32 version; __u32 len; __u8 data[0U]; } ;
233 struct ethtool_eeprom { __u32 cmd; __u32 magic; __u32 offset; __u32 len; __u8 data[0U]; } ;
259 struct ethtool_eee { __u32 cmd; __u32 supported; __u32 advertised; __u32 lp_advertised; __u32 eee_active; __u32 eee_enabled; __u32 tx_lpi_enabled; __u32 tx_lpi_timer; __u32 reserved[2U]; } ;
288 struct ethtool_modinfo { __u32 cmd; __u32 type; __u32 eeprom_len; __u32 reserved[8U]; } ;
305 struct ethtool_coalesce { __u32 cmd; __u32 rx_coalesce_usecs; __u32 rx_max_coalesced_frames; __u32 rx_coalesce_usecs_irq; __u32 rx_max_coalesced_frames_irq; __u32 tx_coalesce_usecs; __u32 tx_max_coalesced_frames; __u32 tx_coalesce_usecs_irq; __u32 tx_max_coalesced_frames_irq; __u32 stats_block_coalesce_usecs; __u32 use_adaptive_rx_coalesce; __u32 use_adaptive_tx_coalesce; __u32 pkt_rate_low; __u32 rx_coalesce_usecs_low; __u32 rx_max_coalesced_frames_low; __u32 tx_coalesce_usecs_low; __u32 tx_max_coalesced_frames_low; __u32 pkt_rate_high; __u32 rx_coalesce_usecs_high; __u32 rx_max_coalesced_frames_high; __u32 tx_coalesce_usecs_high; __u32 tx_max_coalesced_frames_high; __u32 rate_sample_interval; } ;
404 struct ethtool_ringparam { __u32 cmd; __u32 rx_max_pending; __u32 rx_mini_max_pending; __u32 rx_jumbo_max_pending; __u32 tx_max_pending; __u32 rx_pending; __u32 rx_mini_pending; __u32 rx_jumbo_pending; __u32 tx_pending; } ;
441 struct ethtool_channels { __u32 cmd; __u32 max_rx; __u32 max_tx; __u32 max_other; __u32 max_combined; __u32 rx_count; __u32 tx_count; __u32 other_count; __u32 combined_count; } ;
469 struct ethtool_pauseparam { __u32 cmd; __u32 autoneg; __u32 rx_pause; __u32 tx_pause; } ;
568 struct ethtool_test { __u32 cmd; __u32 flags; __u32 reserved; __u32 len; __u64 data[0U]; } ;
600 struct ethtool_stats { __u32 cmd; __u32 n_stats; __u64 data[0U]; } ;
642 struct ethtool_tcpip4_spec { __be32 ip4src; __be32 ip4dst; __be16 psrc; __be16 pdst; __u8 tos; } ;
675 struct ethtool_ah_espip4_spec { __be32 ip4src; __be32 ip4dst; __be32 spi; __u8 tos; } ;
691 struct ethtool_usrip4_spec { __be32 ip4src; __be32 ip4dst; __be32 l4_4_bytes; __u8 tos; __u8 ip_ver; __u8 proto; } ;
711 union ethtool_flow_union { struct ethtool_tcpip4_spec tcp_ip4_spec; struct ethtool_tcpip4_spec udp_ip4_spec; struct ethtool_tcpip4_spec sctp_ip4_spec; struct ethtool_ah_espip4_spec ah_ip4_spec; struct ethtool_ah_espip4_spec esp_ip4_spec; struct ethtool_usrip4_spec usr_ip4_spec; struct ethhdr ether_spec; __u8 hdata[52U]; } ;
722 struct ethtool_flow_ext { __u8 padding[2U]; unsigned char h_dest[6U]; __be16 vlan_etype; __be16 vlan_tci; __be32 data[2U]; } ;
741 struct ethtool_rx_flow_spec { __u32 flow_type; union ethtool_flow_union h_u; struct ethtool_flow_ext h_ext; union ethtool_flow_union m_u; struct ethtool_flow_ext m_ext; __u64 ring_cookie; __u32 location; } ;
767 struct ethtool_rxnfc { __u32 cmd; __u32 flow_type; __u64 data; struct ethtool_rx_flow_spec fs; __u32 rule_cnt; __u32 rule_locs[0U]; } ;
933 struct ethtool_flash { __u32 cmd; __u32 region; char data[128U]; } ;
941 struct ethtool_dump { __u32 cmd; __u32 version; __u32 flag; __u32 len; __u8 data[0U]; } ;
1017 struct ethtool_ts_info { __u32 cmd; __u32 so_timestamping; __s32 phc_index; __u32 tx_types; __u32 tx_reserved[3U]; __u32 rx_filters; __u32 rx_reserved[3U]; } ;
44 enum ethtool_phys_id_state { ETHTOOL_ID_INACTIVE = 0, ETHTOOL_ID_ACTIVE = 1, ETHTOOL_ID_ON = 2, ETHTOOL_ID_OFF = 3 } ;
79 struct ethtool_ops { int (*get_settings)(struct net_device *, struct ethtool_cmd *); int (*set_settings)(struct net_device *, struct ethtool_cmd *); void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); int (*get_regs_len)(struct net_device *); void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); u32 (*get_msglevel)(struct net_device *); void (*set_msglevel)(struct net_device *, u32 ); int (*nway_reset)(struct net_device *); u32 (*get_link)(struct net_device *); int (*get_eeprom_len)(struct net_device *); int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *); int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *); void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); void (*get_strings)(struct net_device *, u32 , u8 *); int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state ); void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); int (*begin)(struct net_device *); void (*complete)(struct net_device *); u32 (*get_priv_flags)(struct net_device *); int (*set_priv_flags)(struct net_device *, u32 ); int (*get_sset_count)(struct net_device *, int); int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *); int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); int (*flash_device)(struct net_device *, struct ethtool_flash *); int (*reset)(struct net_device *, u32 *); u32 
(*get_rxfh_key_size)(struct net_device *); u32 (*get_rxfh_indir_size)(struct net_device *); int (*get_rxfh)(struct net_device *, u32 *, u8 *); int (*set_rxfh)(struct net_device *, const u32 *, const u8 *); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); int (*set_dump)(struct net_device *, struct ethtool_dump *); int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *); int (*get_module_info)(struct net_device *, struct ethtool_modinfo *); int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_eee)(struct net_device *, struct ethtool_eee *); int (*set_eee)(struct net_device *, struct ethtool_eee *); } ;
235 struct prot_inuse ;
236 struct netns_core { struct ctl_table_header *sysctl_hdr; int sysctl_somaxconn; struct prot_inuse *inuse; } ;
38 struct u64_stats_sync { } ;
145 struct ipstats_mib { u64 mibs[36U]; struct u64_stats_sync syncp; } ;
61 struct icmp_mib { unsigned long mibs[28U]; } ;
67 struct icmpmsg_mib { atomic_long_t mibs[512U]; } ;
72 struct icmpv6_mib { unsigned long mibs[6U]; } ;
83 struct icmpv6msg_mib { atomic_long_t mibs[512U]; } ;
93 struct tcp_mib { unsigned long mibs[16U]; } ;
100 struct udp_mib { unsigned long mibs[8U]; } ;
106 struct linux_mib { unsigned long mibs[103U]; } ;
112 struct linux_xfrm_mib { unsigned long mibs[29U]; } ;
118 struct proc_dir_entry ;
118 struct netns_mib { struct tcp_mib *tcp_statistics; struct ipstats_mib *ip_statistics; struct linux_mib *net_statistics; struct udp_mib *udp_statistics; struct udp_mib *udplite_statistics; struct icmp_mib *icmp_statistics; struct icmpmsg_mib *icmpmsg_statistics; struct proc_dir_entry *proc_net_devsnmp6; struct udp_mib *udp_stats_in6; struct udp_mib *udplite_stats_in6; struct ipstats_mib *ipv6_statistics; struct icmpv6_mib *icmpv6_statistics; struct icmpv6msg_mib *icmpv6msg_statistics; struct linux_xfrm_mib *xfrm_statistics; } ;
26 struct netns_unix { int sysctl_max_dgram_qlen; struct ctl_table_header *ctl; } ;
12 struct netns_packet { struct mutex sklist_lock; struct hlist_head sklist; } ;
14 struct netns_frags { int nqueues; struct list_head lru_list; spinlock_t lru_lock; struct percpu_counter mem; int timeout; int high_thresh; int low_thresh; } ;
180 struct tcpm_hash_bucket ;
181 struct ipv4_devconf ;
182 struct fib_rules_ops ;
183 struct fib_table ;
184 struct local_ports { seqlock_t lock; int range[2U]; } ;
22 struct ping_group_range { seqlock_t lock; kgid_t range[2U]; } ;
27 struct inet_peer_base ;
27 struct xt_table ;
27 struct netns_ipv4 { struct ctl_table_header *forw_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *ipv4_hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *xfrm4_hdr; struct ipv4_devconf *devconf_all; struct ipv4_devconf *devconf_dflt; struct fib_rules_ops *rules_ops; bool fib_has_custom_rules; struct fib_table *fib_local; struct fib_table *fib_main; struct fib_table *fib_default; int fib_num_tclassid_users; struct hlist_head *fib_table_hash; struct sock *fibnl; struct sock **icmp_sk; struct inet_peer_base *peers; struct tcpm_hash_bucket *tcp_metrics_hash; unsigned int tcp_metrics_hash_log; struct netns_frags frags; struct xt_table *iptable_filter; struct xt_table *iptable_mangle; struct xt_table *iptable_raw; struct xt_table *arptable_filter; struct xt_table *iptable_security; struct xt_table *nat_table; int sysctl_icmp_echo_ignore_all; int sysctl_icmp_echo_ignore_broadcasts; int sysctl_icmp_ignore_bogus_error_responses; int sysctl_icmp_ratelimit; int sysctl_icmp_ratemask; int sysctl_icmp_errors_use_inbound_ifaddr; struct local_ports ip_local_ports; int sysctl_tcp_ecn; int sysctl_ip_no_pmtu_disc; int sysctl_ip_fwd_use_pmtu; int sysctl_fwmark_reflect; int sysctl_tcp_fwmark_accept; struct ping_group_range ping_group_range; atomic_t dev_addr_genid; unsigned long *sysctl_local_reserved_ports; struct list_head mr_tables; struct fib_rules_ops *mr_rules_ops; atomic_t rt_genid; } ;
102 struct neighbour ;
102 struct dst_ops { unsigned short family; __be16 protocol; unsigned int gc_thresh; int (*gc)(struct dst_ops *); struct dst_entry * (*check)(struct dst_entry *, __u32 ); unsigned int (*default_advmss)(const struct dst_entry *); unsigned int (*mtu)(const struct dst_entry *); u32 * (*cow_metrics)(struct dst_entry *, unsigned long); void (*destroy)(struct dst_entry *); void (*ifdown)(struct dst_entry *, struct net_device *, int); struct dst_entry * (*negative_advice)(struct dst_entry *); void (*link_failure)(struct sk_buff *); void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32 ); void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *); int (*local_out)(struct sk_buff *); struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *); struct kmem_cache *kmem_cachep; struct percpu_counter pcpuc_entries; } ;
73 struct netns_sysctl_ipv6 { struct ctl_table_header *hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *icmp_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *xfrm6_hdr; int bindv6only; int flush_delay; int ip6_rt_max_size; int ip6_rt_gc_min_interval; int ip6_rt_gc_timeout; int ip6_rt_gc_interval; int ip6_rt_gc_elasticity; int ip6_rt_mtu_expires; int ip6_rt_min_advmss; int flowlabel_consistency; int icmpv6_time; int anycast_src_echo_reply; int fwmark_reflect; } ;
35 struct ipv6_devconf ;
35 struct rt6_info ;
35 struct rt6_statistics ;
35 struct fib6_table ;
35 struct netns_ipv6 { struct netns_sysctl_ipv6 sysctl; struct ipv6_devconf *devconf_all; struct ipv6_devconf *devconf_dflt; struct inet_peer_base *peers; struct netns_frags frags; struct xt_table *ip6table_filter; struct xt_table *ip6table_mangle; struct xt_table *ip6table_raw; struct xt_table *ip6table_security; struct xt_table *ip6table_nat; struct rt6_info *ip6_null_entry; struct rt6_statistics *rt6_stats; struct timer_list ip6_fib_timer; struct hlist_head *fib_table_hash; struct fib6_table *fib6_main_tbl; struct dst_ops ip6_dst_ops; unsigned int ip6_rt_gc_expire; unsigned long ip6_rt_last_gc; struct rt6_info *ip6_prohibit_entry; struct rt6_info *ip6_blk_hole_entry; struct fib6_table *fib6_local_tbl; struct fib_rules_ops *fib6_rules_ops; struct sock **icmp_sk; struct sock *ndisc_sk; struct sock *tcp_sk; struct sock *igmp_sk; struct list_head mr6_tables; struct fib_rules_ops *mr6_rules_ops; atomic_t dev_addr_genid; atomic_t rt_genid; } ;
80 struct netns_nf_frag { struct netns_sysctl_ipv6 sysctl; struct netns_frags frags; } ;
86 struct netns_sysctl_lowpan { struct ctl_table_header *frags_hdr; } ;
14 struct netns_ieee802154_lowpan { struct netns_sysctl_lowpan sysctl; struct netns_frags frags; u16 max_dsize; } ;
21 struct sctp_mib ;
22 struct netns_sctp { struct sctp_mib *sctp_statistics; struct proc_dir_entry *proc_net_sctp; struct ctl_table_header *sysctl_header; struct sock *ctl_sock; struct list_head local_addr_list; struct list_head addr_waitq; struct timer_list addr_wq_timer; struct list_head auto_asconf_splist; spinlock_t addr_wq_lock; spinlock_t local_addr_lock; unsigned int rto_initial; unsigned int rto_min; unsigned int rto_max; int rto_alpha; int rto_beta; int max_burst; int cookie_preserve_enable; char *sctp_hmac_alg; unsigned int valid_cookie_life; unsigned int sack_timeout; unsigned int hb_interval; int max_retrans_association; int max_retrans_path; int max_retrans_init; int pf_retrans; int sndbuf_policy; int rcvbuf_policy; int default_auto_asconf; int addip_enable; int addip_noauth; int prsctp_enable; int auth_enable; int scope_policy; int rwnd_upd_shift; unsigned long max_autoclose; } ;
133 struct netns_dccp { struct sock *v4_ctl_sk; struct sock *v6_ctl_sk; } ;
324 struct nlattr ;
337 struct nf_logger ;
338 struct netns_nf { struct proc_dir_entry *proc_netfilter; const struct nf_logger *nf_loggers[13U]; struct ctl_table_header *nf_log_dir_header; } ;
17 struct ebt_table ;
18 struct netns_xt { struct list_head tables[13U]; bool notrack_deprecated_warning; struct ebt_table *broute_table; struct ebt_table *frame_filter; struct ebt_table *frame_nat; bool ulog_warn_deprecated; bool ebt_ulog_warn_deprecated; } ;
24 struct hlist_nulls_node ;
24 struct hlist_nulls_head { struct hlist_nulls_node *first; } ;
20 struct hlist_nulls_node { struct hlist_nulls_node *next; struct hlist_nulls_node **pprev; } ;
32 struct nf_proto_net { struct ctl_table_header *ctl_table_header; struct ctl_table *ctl_table; struct ctl_table_header *ctl_compat_header; struct ctl_table *ctl_compat_table; unsigned int users; } ;
24 struct nf_generic_net { struct nf_proto_net pn; unsigned int timeout; } ;
29 struct nf_tcp_net { struct nf_proto_net pn; unsigned int timeouts[14U]; unsigned int tcp_loose; unsigned int tcp_be_liberal; unsigned int tcp_max_retrans; } ;
43 struct nf_udp_net { struct nf_proto_net pn; unsigned int timeouts[2U]; } ;
48 struct nf_icmp_net { struct nf_proto_net pn; unsigned int timeout; } ;
53 struct nf_ip_net { struct nf_generic_net generic; struct nf_tcp_net tcp; struct nf_udp_net udp; struct nf_icmp_net icmp; struct nf_icmp_net icmpv6; struct ctl_table_header *ctl_table_header; struct ctl_table *ctl_table; } ;
64 struct ct_pcpu { spinlock_t lock; struct hlist_nulls_head unconfirmed; struct hlist_nulls_head dying; struct hlist_nulls_head tmpl; } ;
72 struct ip_conntrack_stat ;
72 struct nf_ct_event_notifier ;
72 struct nf_exp_event_notifier ;
72 struct netns_ct { atomic_t count; unsigned int expect_count; struct ctl_table_header *sysctl_header; struct ctl_table_header *acct_sysctl_header; struct ctl_table_header *tstamp_sysctl_header; struct ctl_table_header *event_sysctl_header; struct ctl_table_header *helper_sysctl_header; char *slabname; unsigned int sysctl_log_invalid; unsigned int sysctl_events_retry_timeout; int sysctl_events; int sysctl_acct; int sysctl_auto_assign_helper; bool auto_assign_helper_warned; int sysctl_tstamp; int sysctl_checksum; unsigned int htable_size; seqcount_t generation; struct kmem_cache *nf_conntrack_cachep; struct hlist_nulls_head *hash; struct hlist_head *expect_hash; struct ct_pcpu *pcpu_lists; struct ip_conntrack_stat *stat; struct nf_ct_event_notifier *nf_conntrack_event_cb; struct nf_exp_event_notifier *nf_expect_event_cb; struct nf_ip_net nf_ct_proto; unsigned int labels_used; u8 label_words; struct hlist_head *nat_bysource; unsigned int nat_htable_size; } ;
111 struct nft_af_info ;
112 struct netns_nftables { struct list_head af_info; struct list_head commit_list; struct nft_af_info *ipv4; struct nft_af_info *ipv6; struct nft_af_info *inet; struct nft_af_info *arp; struct nft_af_info *bridge; u8 gencursor; u8 genctr; } ;
499 enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 } ;
16 typedef enum irqreturn irqreturn_t;
450 struct tasklet_struct { struct tasklet_struct *next; unsigned long state; atomic_t count; void (*func)(unsigned long); unsigned long data; } ;
663 struct flow_cache_percpu { struct hlist_head *hash_table; int hash_count; u32 hash_rnd; int hash_rnd_recalc; struct tasklet_struct flush_tasklet; } ;
16 struct flow_cache { u32 hash_shift; struct flow_cache_percpu *percpu; struct notifier_block hotcpu_notifier; int low_watermark; int high_watermark; struct timer_list rnd_timer; } ;
25 struct xfrm_policy_hash { struct hlist_head *table; unsigned int hmask; } ;
17 struct netns_xfrm { struct list_head state_all; struct hlist_head *state_bydst; struct hlist_head *state_bysrc; struct hlist_head *state_byspi; unsigned int state_hmask; unsigned int state_num; struct work_struct state_hash_work; struct hlist_head state_gc_list; struct work_struct state_gc_work; struct list_head policy_all; struct hlist_head *policy_byidx; unsigned int policy_idx_hmask; struct hlist_head policy_inexact[6U]; struct xfrm_policy_hash policy_bydst[6U]; unsigned int policy_count[6U]; struct work_struct policy_hash_work; struct sock *nlsk; struct sock *nlsk_stash; u32 sysctl_aevent_etime; u32 sysctl_aevent_rseqth; int sysctl_larval_drop; u32 sysctl_acq_expires; struct ctl_table_header *sysctl_hdr; struct dst_ops xfrm4_dst_ops; struct dst_ops xfrm6_dst_ops; spinlock_t xfrm_state_lock; rwlock_t xfrm_policy_lock; struct mutex xfrm_cfg_mutex; struct flow_cache flow_cache_global; atomic_t flow_cache_genid; struct list_head flow_cache_gc_list; spinlock_t flow_cache_gc_lock; struct work_struct flow_cache_gc_work; struct work_struct flow_cache_flush_work; struct mutex flow_flush_sem; } ;
74 struct net_generic ;
75 struct netns_ipvs ;
76 struct net { atomic_t passive; atomic_t count; spinlock_t rules_mod_lock; struct list_head list; struct list_head cleanup_list; struct list_head exit_list; struct user_namespace *user_ns; unsigned int proc_inum; struct proc_dir_entry *proc_net; struct proc_dir_entry *proc_net_stat; struct ctl_table_set sysctls; struct sock *rtnl; struct sock *genl_sock; struct list_head dev_base_head; struct hlist_head *dev_name_head; struct hlist_head *dev_index_head; unsigned int dev_base_seq; int ifindex; unsigned int dev_unreg_count; struct list_head rules_ops; struct net_device *loopback_dev; struct netns_core core; struct netns_mib mib; struct netns_packet packet; struct netns_unix unx; struct netns_ipv4 ipv4; struct netns_ipv6 ipv6; struct netns_ieee802154_lowpan ieee802154_lowpan; struct netns_sctp sctp; struct netns_dccp dccp; struct netns_nf nf; struct netns_xt xt; struct netns_ct ct; struct netns_nftables nft; struct netns_nf_frag nf_frag; struct sock *nfnl; struct sock *nfnl_stash; struct sk_buff_head wext_nlevents; struct net_generic *gen; struct netns_xfrm xfrm; struct netns_ipvs *ipvs; struct sock *diag_nlsk; atomic_t fnhe_genid; } ;
400 struct dsa_chip_data { struct device *mii_bus; int sw_addr; char *port_names[12U]; s8 *rtable; } ;
46 struct dsa_platform_data { struct device *netdev; int nr_chips; struct dsa_chip_data *chip; } ;
61 struct dsa_switch ;
61 struct dsa_switch_tree { struct dsa_platform_data *pd; struct net_device *master_netdev; __be16 tag_protocol; s8 cpu_switch; s8 cpu_port; int link_poll_needed; struct work_struct link_poll_work; struct timer_list link_poll_timer; struct dsa_switch *ds[4U]; } ;
94 struct dsa_switch_driver ;
94 struct mii_bus ;
94 struct dsa_switch { struct dsa_switch_tree *dst; int index; struct dsa_chip_data *pd; struct dsa_switch_driver *drv; struct mii_bus *master_mii_bus; u32 dsa_port_mask; u32 phys_port_mask; struct mii_bus *slave_mii_bus; struct net_device *ports[12U]; } ;
146 struct dsa_switch_driver { struct list_head list; __be16 tag_protocol; int priv_size; char * (*probe)(struct mii_bus *, int); int (*setup)(struct dsa_switch *); int (*set_addr)(struct dsa_switch *, u8 *); int (*phy_read)(struct dsa_switch *, int, int); int (*phy_write)(struct dsa_switch *, int, int, u16 ); void (*poll_link)(struct dsa_switch *); void (*get_strings)(struct dsa_switch *, int, uint8_t *); void (*get_ethtool_stats)(struct dsa_switch *, int, uint64_t *); int (*get_sset_count)(struct dsa_switch *); } ;
205 struct ieee_ets { __u8 willing; __u8 ets_cap; __u8 cbs; __u8 tc_tx_bw[8U]; __u8 tc_rx_bw[8U]; __u8 tc_tsa[8U]; __u8 prio_tc[8U]; __u8 tc_reco_bw[8U]; __u8 tc_reco_tsa[8U]; __u8 reco_prio_tc[8U]; } ;
69 struct ieee_maxrate { __u64 tc_maxrate[8U]; } ;
80 struct ieee_pfc { __u8 pfc_cap; __u8 pfc_en; __u8 mbc; __u16 delay; __u64 requests[8U]; __u64 indications[8U]; } ;
100 struct cee_pg { __u8 willing; __u8 error; __u8 pg_en; __u8 tcs_supported; __u8 pg_bw[8U]; __u8 prio_pg[8U]; } ;
123 struct cee_pfc { __u8 willing; __u8 error; __u8 pfc_en; __u8 tcs_supported; } ;
138 struct dcb_app { __u8 selector; __u8 priority; __u16 protocol; } ;
167 struct dcb_peer_app_info { __u8 willing; __u8 error; } ;
40 struct dcbnl_rtnl_ops { int (*ieee_getets)(struct net_device *, struct ieee_ets *); int (*ieee_setets)(struct net_device *, struct ieee_ets *); int (*ieee_getmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_setmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_getpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_setpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_getapp)(struct net_device *, struct dcb_app *); int (*ieee_setapp)(struct net_device *, struct dcb_app *); int (*ieee_delapp)(struct net_device *, struct dcb_app *); int (*ieee_peer_getets)(struct net_device *, struct ieee_ets *); int (*ieee_peer_getpfc)(struct net_device *, struct ieee_pfc *); u8 (*getstate)(struct net_device *); u8 (*setstate)(struct net_device *, u8 ); void (*getpermhwaddr)(struct net_device *, u8 *); void (*setpgtccfgtx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgtx)(struct net_device *, int, u8 ); void (*setpgtccfgrx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgrx)(struct net_device *, int, u8 ); void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgtx)(struct net_device *, int, u8 *); void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgrx)(struct net_device *, int, u8 *); void (*setpfccfg)(struct net_device *, int, u8 ); void (*getpfccfg)(struct net_device *, int, u8 *); u8 (*setall)(struct net_device *); u8 (*getcap)(struct net_device *, int, u8 *); int (*getnumtcs)(struct net_device *, int, u8 *); int (*setnumtcs)(struct net_device *, int, u8 ); u8 (*getpfcstate)(struct net_device *); void (*setpfcstate)(struct net_device *, u8 ); void (*getbcncfg)(struct net_device *, int, u32 *); void (*setbcncfg)(struct net_device *, int, u32 ); void (*getbcnrp)(struct net_device *, int, u8 *); void (*setbcnrp)(struct net_device *, int, u8 ); u8 (*setapp)(struct net_device *, u8 , u16 , u8 ); u8 (*getapp)(struct net_device *, u8 , 
u16 ); u8 (*getfeatcfg)(struct net_device *, int, u8 *); u8 (*setfeatcfg)(struct net_device *, int, u8 ); u8 (*getdcbx)(struct net_device *); u8 (*setdcbx)(struct net_device *, u8 ); int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, u16 *); int (*peer_getapptable)(struct net_device *, struct dcb_app *); int (*cee_peer_getpg)(struct net_device *, struct cee_pg *); int (*cee_peer_getpfc)(struct net_device *, struct cee_pfc *); } ;
102 struct taskstats { __u16 version; __u32 ac_exitcode; __u8 ac_flag; __u8 ac_nice; __u64 cpu_count; __u64 cpu_delay_total; __u64 blkio_count; __u64 blkio_delay_total; __u64 swapin_count; __u64 swapin_delay_total; __u64 cpu_run_real_total; __u64 cpu_run_virtual_total; char ac_comm[32U]; __u8 ac_sched; __u8 ac_pad[3U]; __u32 ac_uid; __u32 ac_gid; __u32 ac_pid; __u32 ac_ppid; __u32 ac_btime; __u64 ac_etime; __u64 ac_utime; __u64 ac_stime; __u64 ac_minflt; __u64 ac_majflt; __u64 coremem; __u64 virtmem; __u64 hiwater_rss; __u64 hiwater_vm; __u64 read_char; __u64 write_char; __u64 read_syscalls; __u64 write_syscalls; __u64 read_bytes; __u64 write_bytes; __u64 cancelled_write_bytes; __u64 nvcsw; __u64 nivcsw; __u64 ac_utimescaled; __u64 ac_stimescaled; __u64 cpu_scaled_run_real_total; __u64 freepages_count; __u64 freepages_delay_total; } ;
58 struct percpu_ref ;
54 typedef void percpu_ref_func_t(struct percpu_ref *);
55 struct percpu_ref { atomic_t count; unsigned int *pcpu_count; percpu_ref_func_t *release; percpu_ref_func_t *confirm_kill; struct callback_head rcu; } ;
205 struct cgroup_root ;
206 struct cgroup_subsys ;
207 struct cgroup ;
58 struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; struct percpu_ref refcnt; struct cgroup_subsys_state *parent; struct list_head sibling; struct list_head children; int id; unsigned int flags; u64 serial_nr; struct callback_head callback_head; struct work_struct destroy_work; } ;
167 struct cgroup { struct cgroup_subsys_state self; unsigned long flags; int id; int populated_cnt; struct kernfs_node *kn; struct kernfs_node *populated_kn; unsigned int child_subsys_mask; struct cgroup_subsys_state *subsys[12U]; struct cgroup_root *root; struct list_head cset_links; struct list_head e_csets[12U]; struct list_head release_list; struct list_head pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; } ;
253 struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int hierarchy_id; struct cgroup cgrp; atomic_t nr_cgrps; struct list_head root_list; unsigned int flags; struct idr cgroup_idr; char release_agent_path[4096U]; char name[64U]; } ;
355 struct css_set { atomic_t refcount; struct hlist_node hlist; struct list_head tasks; struct list_head mg_tasks; struct list_head cgrp_links; struct cgroup *dfl_cgrp; struct cgroup_subsys_state *subsys[12U]; struct list_head mg_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct css_set *mg_dst_cset; struct list_head e_cset_node[12U]; struct callback_head callback_head; } ;
438 struct cftype { char name[64U]; int private; umode_t mode; size_t max_write_len; unsigned int flags; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 ); int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 ); ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); struct lock_class_key lockdep_key; } ;
609 struct cgroup_taskset ;
617 struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int (*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_subsys_state *, struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_subsys_state *, struct cgroup_taskset *); void (*attach)(struct cgroup_subsys_state *, struct cgroup_taskset *); void (*fork)(struct task_struct *); void (*exit)(struct cgroup_subsys_state *, struct cgroup_subsys_state *, struct task_struct *); void (*bind)(struct cgroup_subsys_state *); int disabled; int early_init; bool broken_hierarchy; bool warned_broken_hierarchy; int id; const char *name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *base_cftypes; } ;
919 struct netprio_map { struct callback_head rcu; u32 priomap_len; u32 priomap[]; } ;
3161 struct mnt_namespace ;
3162 struct ipc_namespace ;
3163 struct nsproxy { atomic_t count; struct uts_namespace *uts_ns; struct ipc_namespace *ipc_ns; struct mnt_namespace *mnt_ns; struct pid_namespace *pid_ns_for_children; struct net *net_ns; } ;
41 struct nlmsghdr { __u32 nlmsg_len; __u16 nlmsg_type; __u16 nlmsg_flags; __u32 nlmsg_seq; __u32 nlmsg_pid; } ;
145 struct nlattr { __u16 nla_len; __u16 nla_type; } ;
104 struct netlink_callback { struct sk_buff *skb; const struct nlmsghdr *nlh; int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); void *data; struct module *module; u16 family; u16 min_dump_alloc; unsigned int prev_seq; unsigned int seq; long args[6U]; } ;
180 struct ndmsg { __u8 ndm_family; __u8 ndm_pad1; __u16 ndm_pad2; __s32 ndm_ifindex; __u16 ndm_state; __u8 ndm_flags; __u8 ndm_type; } ;
39 struct rtnl_link_stats64 { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 rx_errors; __u64 tx_errors; __u64 rx_dropped; __u64 tx_dropped; __u64 multicast; __u64 collisions; __u64 rx_length_errors; __u64 rx_over_errors; __u64 rx_crc_errors; __u64 rx_frame_errors; __u64 rx_fifo_errors; __u64 rx_missed_errors; __u64 tx_aborted_errors; __u64 tx_carrier_errors; __u64 tx_fifo_errors; __u64 tx_heartbeat_errors; __u64 tx_window_errors; __u64 rx_compressed; __u64 tx_compressed; } ;
547 struct ifla_vf_info { __u32 vf; __u8 mac[32U]; __u32 vlan; __u32 qos; __u32 spoofchk; __u32 linkstate; __u32 min_tx_rate; __u32 max_tx_rate; } ;
28 struct netpoll_info ;
29 struct phy_device ;
30 struct wireless_dev ;
61 enum netdev_tx { __NETDEV_TX_MIN = -2147483648, NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 16, NETDEV_TX_LOCKED = 32 } ;
106 typedef enum netdev_tx netdev_tx_t;
125 struct net_device_stats { unsigned long rx_packets; unsigned long tx_packets; unsigned long rx_bytes; unsigned long tx_bytes; unsigned long rx_errors; unsigned long tx_errors; unsigned long rx_dropped; unsigned long tx_dropped; unsigned long multicast; unsigned long collisions; unsigned long rx_length_errors; unsigned long rx_over_errors; unsigned long rx_crc_errors; unsigned long rx_frame_errors; unsigned long rx_fifo_errors; unsigned long rx_missed_errors; unsigned long tx_aborted_errors; unsigned long tx_carrier_errors; unsigned long tx_fifo_errors; unsigned long tx_heartbeat_errors; unsigned long tx_window_errors; unsigned long rx_compressed; unsigned long tx_compressed; } ;
186 struct neigh_parms ;
207 struct netdev_hw_addr_list { struct list_head list; int count; } ;
212 struct hh_cache { u16 hh_len; u16 __pad; seqlock_t hh_lock; unsigned long hh_data[16U]; } ;
241 struct header_ops { int (*create)(struct sk_buff *, struct net_device *, unsigned short, const void *, const void *, unsigned int); int (*parse)(const struct sk_buff *, unsigned char *); int (*rebuild)(struct sk_buff *); int (*cache)(const struct neighbour *, struct hh_cache *, __be16 ); void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *); } ;
292 struct napi_struct { struct list_head poll_list; unsigned long state; int weight; unsigned int gro_count; int (*poll)(struct napi_struct *, int); spinlock_t poll_lock; int poll_owner; struct net_device *dev; struct sk_buff *gro_list; struct sk_buff *skb; struct list_head dev_list; struct hlist_node napi_hash_node; unsigned int napi_id; } ;
336 enum rx_handler_result { RX_HANDLER_CONSUMED = 0, RX_HANDLER_ANOTHER = 1, RX_HANDLER_EXACT = 2, RX_HANDLER_PASS = 3 } ;
384 typedef enum rx_handler_result rx_handler_result_t;
385 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **);
522 struct Qdisc ;
522 struct netdev_queue { struct net_device *dev; struct Qdisc *qdisc; struct Qdisc *qdisc_sleeping; struct kobject kobj; int numa_node; spinlock_t _xmit_lock; int xmit_lock_owner; unsigned long trans_start; unsigned long trans_timeout; unsigned long state; struct dql dql; } ;
591 struct rps_map { unsigned int len; struct callback_head rcu; u16 cpus[0U]; } ;
603 struct rps_dev_flow { u16 cpu; u16 filter; unsigned int last_qtail; } ;
615 struct rps_dev_flow_table { unsigned int mask; struct callback_head rcu; struct rps_dev_flow flows[0U]; } ;
666 struct netdev_rx_queue { struct rps_map *rps_map; struct rps_dev_flow_table *rps_flow_table; struct kobject kobj; struct net_device *dev; } ;
689 struct xps_map { unsigned int len; unsigned int alloc_len; struct callback_head rcu; u16 queues[0U]; } ;
702 struct xps_dev_maps { struct callback_head rcu; struct xps_map *cpu_map[0U]; } ;
713 struct netdev_tc_txq { u16 count; u16 offset; } ;
724 struct netdev_fcoe_hbainfo { char manufacturer[64U]; char serial_number[64U]; char hardware_version[64U]; char driver_version[64U]; char optionrom_version[64U]; char firmware_version[64U]; char model[256U]; char model_description[256U]; } ;
740 struct netdev_phys_port_id { unsigned char id[32U]; unsigned char id_len; } ;
753 struct net_device_ops { int (*ndo_init)(struct net_device *); void (*ndo_uninit)(struct net_device *); int (*ndo_open)(struct net_device *); int (*ndo_stop)(struct net_device *); netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *); u16 (*ndo_select_queue)(struct net_device *, struct sk_buff *, void *, u16 (*)(struct net_device *, struct sk_buff *)); void (*ndo_change_rx_flags)(struct net_device *, int); void (*ndo_set_rx_mode)(struct net_device *); int (*ndo_set_mac_address)(struct net_device *, void *); int (*ndo_validate_addr)(struct net_device *); int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int); int (*ndo_set_config)(struct net_device *, struct ifmap *); int (*ndo_change_mtu)(struct net_device *, int); int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *); void (*ndo_tx_timeout)(struct net_device *); struct rtnl_link_stats64 * (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *); struct net_device_stats * (*ndo_get_stats)(struct net_device *); int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16 , u16 ); int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16 , u16 ); void (*ndo_poll_controller)(struct net_device *); int (*ndo_netpoll_setup)(struct net_device *, struct netpoll_info *); void (*ndo_netpoll_cleanup)(struct net_device *); int (*ndo_busy_poll)(struct napi_struct *); int (*ndo_set_vf_mac)(struct net_device *, int, u8 *); int (*ndo_set_vf_vlan)(struct net_device *, int, u16 , u8 ); int (*ndo_set_vf_rate)(struct net_device *, int, int, int); int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool ); int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *); int (*ndo_set_vf_link_state)(struct net_device *, int, int); int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **); int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *); int (*ndo_setup_tc)(struct net_device *, u8 ); int (*ndo_fcoe_enable)(struct net_device *); int (*ndo_fcoe_disable)(struct 
net_device *); int (*ndo_fcoe_ddp_setup)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_ddp_done)(struct net_device *, u16 ); int (*ndo_fcoe_ddp_target)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_get_hbainfo)(struct net_device *, struct netdev_fcoe_hbainfo *); int (*ndo_fcoe_get_wwn)(struct net_device *, u64 *, int); int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16 , u32 ); int (*ndo_add_slave)(struct net_device *, struct net_device *); int (*ndo_del_slave)(struct net_device *, struct net_device *); netdev_features_t (*ndo_fix_features)(struct net_device *, netdev_features_t ); int (*ndo_set_features)(struct net_device *, netdev_features_t ); int (*ndo_neigh_construct)(struct neighbour *); void (*ndo_neigh_destroy)(struct neighbour *); int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 ); int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *); int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, int); int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *); int (*ndo_bridge_getlink)(struct sk_buff *, u32 , u32 , struct net_device *, u32 ); int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *); int (*ndo_change_carrier)(struct net_device *, bool ); int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_port_id *); void (*ndo_add_vxlan_port)(struct net_device *, sa_family_t , __be16 ); void (*ndo_del_vxlan_port)(struct net_device *, sa_family_t , __be16 ); void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *); void (*ndo_dfwd_del_station)(struct net_device *, void *); netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *, struct net_device *, void *); int (*ndo_get_lock_subclass)(struct net_device *); } ;
1187 struct __anonstruct_adj_list_246 { struct list_head upper; struct list_head lower; } ;
1187 struct __anonstruct_all_adj_list_247 { struct list_head upper; struct list_head lower; } ;
1187 struct iw_handler_def ;
1187 struct iw_public_data ;
1187 struct forwarding_accel_ops ;
1187 struct vlan_info ;
1187 struct tipc_bearer ;
1187 struct in_device ;
1187 struct dn_dev ;
1187 struct inet6_dev ;
1187 struct cpu_rmap ;
1187 struct pcpu_lstats ;
1187 struct pcpu_sw_netstats ;
1187 struct pcpu_dstats ;
1187 struct pcpu_vstats ;
1187 union __anonunion_ldv_40376_248 { void *ml_priv; struct pcpu_lstats *lstats; struct pcpu_sw_netstats *tstats; struct pcpu_dstats *dstats; struct pcpu_vstats *vstats; } ;
1187 struct garp_port ;
1187 struct mrp_port ;
1187 struct rtnl_link_ops ;
1187 struct net_device { char name[16U]; struct hlist_node name_hlist; char *ifalias; unsigned long mem_end; unsigned long mem_start; unsigned long base_addr; int irq; unsigned long state; struct list_head dev_list; struct list_head napi_list; struct list_head unreg_list; struct list_head close_list; struct __anonstruct_adj_list_246 adj_list; struct __anonstruct_all_adj_list_247 all_adj_list; netdev_features_t features; netdev_features_t hw_features; netdev_features_t wanted_features; netdev_features_t vlan_features; netdev_features_t hw_enc_features; netdev_features_t mpls_features; int ifindex; int iflink; struct net_device_stats stats; atomic_long_t rx_dropped; atomic_long_t tx_dropped; atomic_t carrier_changes; const struct iw_handler_def *wireless_handlers; struct iw_public_data *wireless_data; const struct net_device_ops *netdev_ops; const struct ethtool_ops *ethtool_ops; const struct forwarding_accel_ops *fwd_ops; const struct header_ops *header_ops; unsigned int flags; unsigned int priv_flags; unsigned short gflags; unsigned short padded; unsigned char operstate; unsigned char link_mode; unsigned char if_port; unsigned char dma; unsigned int mtu; unsigned short type; unsigned short hard_header_len; unsigned short needed_headroom; unsigned short needed_tailroom; unsigned char perm_addr[32U]; unsigned char addr_assign_type; unsigned char addr_len; unsigned short neigh_priv_len; unsigned short dev_id; unsigned short dev_port; spinlock_t addr_list_lock; struct netdev_hw_addr_list uc; struct netdev_hw_addr_list mc; struct netdev_hw_addr_list dev_addrs; struct kset *queues_kset; bool uc_promisc; unsigned int promiscuity; unsigned int allmulti; struct vlan_info *vlan_info; struct dsa_switch_tree *dsa_ptr; struct tipc_bearer *tipc_ptr; void *atalk_ptr; struct in_device *ip_ptr; struct dn_dev *dn_ptr; struct inet6_dev *ip6_ptr; void *ax25_ptr; struct wireless_dev *ieee80211_ptr; unsigned long last_rx; unsigned char *dev_addr; struct netdev_rx_queue *_rx; unsigned 
int num_rx_queues; unsigned int real_num_rx_queues; rx_handler_func_t *rx_handler; void *rx_handler_data; struct netdev_queue *ingress_queue; unsigned char broadcast[32U]; struct netdev_queue *_tx; unsigned int num_tx_queues; unsigned int real_num_tx_queues; struct Qdisc *qdisc; unsigned long tx_queue_len; spinlock_t tx_global_lock; struct xps_dev_maps *xps_maps; struct cpu_rmap *rx_cpu_rmap; unsigned long trans_start; int watchdog_timeo; struct timer_list watchdog_timer; int *pcpu_refcnt; struct list_head todo_list; struct hlist_node index_hlist; struct list_head link_watch_list; unsigned char reg_state; bool dismantle; unsigned short rtnl_link_state; void (*destructor)(struct net_device *); struct netpoll_info *npinfo; struct net *nd_net; union __anonunion_ldv_40376_248 ldv_40376; struct garp_port *garp_port; struct mrp_port *mrp_port; struct device dev; const struct attribute_group *sysfs_groups[4U]; const struct attribute_group *sysfs_rx_queue_group; const struct rtnl_link_ops *rtnl_link_ops; unsigned int gso_max_size; u16 gso_max_segs; const struct dcbnl_rtnl_ops *dcbnl_ops; u8 num_tc; struct netdev_tc_txq tc_to_txq[16U]; u8 prio_tc_map[16U]; unsigned int fcoe_ddp_xid; struct netprio_map *priomap; struct phy_device *phydev; struct lock_class_key *qdisc_tx_busylock; int group; struct pm_qos_request pm_qos_req; } ;
1806 struct pcpu_sw_netstats { u64 rx_packets; u64 rx_bytes; u64 tx_packets; u64 tx_bytes; struct u64_stats_sync syncp; } ;
483 struct tcmsg { unsigned char tcm_family; unsigned char tcm__pad1; unsigned short tcm__pad2; int tcm_ifindex; __u32 tcm_handle; __u32 tcm_parent; __u32 tcm_info; } ;
13 typedef unsigned long kernel_ulong_t;
186 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; } ;
219 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ;
479 struct platform_device_id { char name[20U]; kernel_ulong_t driver_data; } ;
628 struct mfd_cell ;
629 struct platform_device { const char *name; int id; bool id_auto; struct device dev; u32 num_resources; struct resource *resource; const struct platform_device_id *id_entry; struct mfd_cell *mfd_cell; struct pdev_archdata archdata; } ;
172 struct platform_driver { int (*probe)(struct platform_device *); int (*remove)(struct platform_device *); void (*shutdown)(struct platform_device *); int (*suspend)(struct platform_device *, pm_message_t ); int (*resume)(struct platform_device *); struct device_driver driver; const struct platform_device_id *id_table; bool prevent_deferred_probe; } ;
6 typedef unsigned char cc_t;
7 typedef unsigned int speed_t;
8 typedef unsigned int tcflag_t;
30 struct ktermios { tcflag_t c_iflag; tcflag_t c_oflag; tcflag_t c_cflag; tcflag_t c_lflag; cc_t c_line; cc_t c_cc[19U]; speed_t c_ispeed; speed_t c_ospeed; } ;
41 struct winsize { unsigned short ws_row; unsigned short ws_col; unsigned short ws_xpixel; unsigned short ws_ypixel; } ;
93 struct termiox { __u16 x_hflag; __u16 x_cflag; __u16 x_rflag[5U]; __u16 x_sflag; } ;
16 struct cdev { struct kobject kobj; struct module *owner; const struct file_operations *ops; struct list_head list; dev_t dev; unsigned int count; } ;
34 struct tty_driver ;
35 struct serial_icounter_struct ;
36 struct tty_operations { struct tty_struct * (*lookup)(struct tty_driver *, struct inode *, int); int (*install)(struct tty_driver *, struct tty_struct *); void (*remove)(struct tty_driver *, struct tty_struct *); int (*open)(struct tty_struct *, struct file *); void (*close)(struct tty_struct *, struct file *); void (*shutdown)(struct tty_struct *); void (*cleanup)(struct tty_struct *); int (*write)(struct tty_struct *, const unsigned char *, int); int (*put_char)(struct tty_struct *, unsigned char); void (*flush_chars)(struct tty_struct *); int (*write_room)(struct tty_struct *); int (*chars_in_buffer)(struct tty_struct *); int (*ioctl)(struct tty_struct *, unsigned int, unsigned long); long int (*compat_ioctl)(struct tty_struct *, unsigned int, unsigned long); void (*set_termios)(struct tty_struct *, struct ktermios *); void (*throttle)(struct tty_struct *); void (*unthrottle)(struct tty_struct *); void (*stop)(struct tty_struct *); void (*start)(struct tty_struct *); void (*hangup)(struct tty_struct *); int (*break_ctl)(struct tty_struct *, int); void (*flush_buffer)(struct tty_struct *); void (*set_ldisc)(struct tty_struct *); void (*wait_until_sent)(struct tty_struct *, int); void (*send_xchar)(struct tty_struct *, char); int (*tiocmget)(struct tty_struct *); int (*tiocmset)(struct tty_struct *, unsigned int, unsigned int); int (*resize)(struct tty_struct *, struct winsize *); int (*set_termiox)(struct tty_struct *, struct termiox *); int (*get_icount)(struct tty_struct *, struct serial_icounter_struct *); int (*poll_init)(struct tty_driver *, int, char *); int (*poll_get_char)(struct tty_driver *, int); void (*poll_put_char)(struct tty_driver *, int, char); const struct file_operations *proc_fops; } ;
289 struct tty_port ;
289 struct tty_driver { int magic; struct kref kref; struct cdev *cdevs; struct module *owner; const char *driver_name; const char *name; int name_base; int major; int minor_start; unsigned int num; short type; short subtype; struct ktermios init_termios; unsigned long flags; struct proc_dir_entry *proc_entry; struct tty_driver *other; struct tty_struct **ttys; struct tty_port **ports; struct ktermios **termios; void *driver_state; const struct tty_operations *ops; struct list_head tty_drivers; } ;
356 struct ld_semaphore { long count; raw_spinlock_t wait_lock; unsigned int wait_readers; struct list_head read_wait; struct list_head write_wait; struct lockdep_map dep_map; } ;
170 struct tty_ldisc_ops { int magic; char *name; int num; int flags; int (*open)(struct tty_struct *); void (*close)(struct tty_struct *); void (*flush_buffer)(struct tty_struct *); ssize_t (*chars_in_buffer)(struct tty_struct *); ssize_t (*read)(struct tty_struct *, struct file *, unsigned char *, size_t ); ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t ); int (*ioctl)(struct tty_struct *, struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct tty_struct *, struct file *, unsigned int, unsigned long); void (*set_termios)(struct tty_struct *, struct ktermios *); unsigned int (*poll)(struct tty_struct *, struct file *, struct poll_table_struct *); int (*hangup)(struct tty_struct *); void (*receive_buf)(struct tty_struct *, const unsigned char *, char *, int); void (*write_wakeup)(struct tty_struct *); void (*dcd_change)(struct tty_struct *, unsigned int); void (*fasync)(struct tty_struct *, int); int (*receive_buf2)(struct tty_struct *, const unsigned char *, char *, int); struct module *owner; int refcount; } ;
220 struct tty_ldisc { struct tty_ldisc_ops *ops; struct tty_struct *tty; } ;
225 union __anonunion_ldv_42946_255 { struct tty_buffer *next; struct llist_node free; } ;
225 struct tty_buffer { union __anonunion_ldv_42946_255 ldv_42946; int used; int size; int commit; int read; int flags; unsigned long data[0U]; } ;
59 struct tty_bufhead { struct tty_buffer *head; struct work_struct work; struct mutex lock; atomic_t priority; struct tty_buffer sentinel; struct llist_head free; atomic_t mem_used; int mem_limit; struct tty_buffer *tail; } ;
71 struct tty_port_operations { int (*carrier_raised)(struct tty_port *); void (*dtr_rts)(struct tty_port *, int); void (*shutdown)(struct tty_port *); int (*activate)(struct tty_port *, struct tty_struct *); void (*destruct)(struct tty_port *); } ;
197 struct tty_port { struct tty_bufhead buf; struct tty_struct *tty; struct tty_struct *itty; const struct tty_port_operations *ops; spinlock_t lock; int blocked_open; int count; wait_queue_head_t open_wait; wait_queue_head_t close_wait; wait_queue_head_t delta_msr_wait; unsigned long flags; unsigned char console; unsigned char low_latency; struct mutex mutex; struct mutex buf_mutex; unsigned char *xmit_buf; unsigned int close_delay; unsigned int closing_wait; int drain_delay; struct kref kref; } ;
222 struct tty_struct { int magic; struct kref kref; struct device *dev; struct tty_driver *driver; const struct tty_operations *ops; int index; struct ld_semaphore ldisc_sem; struct tty_ldisc *ldisc; struct mutex atomic_write_lock; struct mutex legacy_mutex; struct mutex throttle_mutex; struct rw_semaphore termios_rwsem; struct mutex winsize_mutex; spinlock_t ctrl_lock; struct ktermios termios; struct ktermios termios_locked; struct termiox *termiox; char name[64U]; struct pid *pgrp; struct pid *session; unsigned long flags; int count; struct winsize winsize; unsigned char stopped; unsigned char hw_stopped; unsigned char flow_stopped; unsigned char packet; unsigned char ctrl_status; unsigned int receive_room; int flow_change; struct tty_struct *link; struct fasync_struct *fasync; int alt_speed; wait_queue_head_t write_wait; wait_queue_head_t read_wait; struct work_struct hangup_work; void *disc_data; void *driver_data; struct list_head tty_files; unsigned char closing; unsigned char *write_buf; int write_cnt; struct work_struct SAK_work; struct tty_port *port; } ;
162 struct if_irda_qos { unsigned long baudrate; unsigned short data_size; unsigned short window_size; unsigned short min_turn_time; unsigned short max_turn_time; unsigned char add_bofs; unsigned char link_disc; } ;
188 struct if_irda_line { __u8 dtr; __u8 rts; } ;
194 union __anonunion_ifr_ifrn_259 { char ifrn_name[16U]; } ;
194 union __anonunion_ifr_ifru_260 { struct if_irda_line ifru_line; struct if_irda_qos ifru_qos; unsigned short ifru_flags; unsigned int ifru_receiving; unsigned int ifru_mode; unsigned int ifru_dongle; } ;
194 struct if_irda_req { union __anonunion_ifr_ifrn_259 ifr_ifrn; union __anonunion_ifr_ifru_260 ifr_ifru; } ;
225 struct tc_stats { __u64 bytes; __u32 packets; __u32 drops; __u32 overlimits; __u32 bps; __u32 pps; __u32 qlen; __u32 backlog; } ;
92 struct tc_sizespec { unsigned char cell_log; unsigned char size_log; short cell_align; int overhead; unsigned int linklayer; unsigned int mpu; unsigned int mtu; unsigned int tsize; } ;
26 struct gnet_stats_basic_packed { __u64 bytes; __u32 packets; } ;
40 struct gnet_stats_rate_est64 { __u64 bps; __u64 pps; } ;
50 struct gnet_stats_queue { __u32 qlen; __u32 backlog; __u32 drops; __u32 requeues; __u32 overlimits; } ;
76 struct gnet_dump { spinlock_t *lock; struct sk_buff *skb; struct nlattr *tail; int compat_tc_stats; int compat_xstats; void *xstats; int xstats_len; struct tc_stats tc_stats; } ;
68 struct nla_policy { u16 type; u16 len; } ;
25 struct rtnl_link_ops { struct list_head list; const char *kind; size_t priv_size; void (*setup)(struct net_device *); int maxtype; const struct nla_policy *policy; int (*validate)(struct nlattr **, struct nlattr **); int (*newlink)(struct net *, struct net_device *, struct nlattr **, struct nlattr **); int (*changelink)(struct net_device *, struct nlattr **, struct nlattr **); void (*dellink)(struct net_device *, struct list_head *); size_t (*get_size)(const struct net_device *); int (*fill_info)(struct sk_buff *, const struct net_device *); size_t (*get_xstats_size)(const struct net_device *); int (*fill_xstats)(struct sk_buff *, const struct net_device *); unsigned int (*get_num_tx_queues)(); unsigned int (*get_num_rx_queues)(); int slave_maxtype; const struct nla_policy *slave_policy; int (*slave_validate)(struct nlattr **, struct nlattr **); int (*slave_changelink)(struct net_device *, struct net_device *, struct nlattr **, struct nlattr **); size_t (*get_slave_size)(const struct net_device *, const struct net_device *); int (*fill_slave_info)(struct sk_buff *, const struct net_device *, const struct net_device *); } ;
144 struct Qdisc_ops ;
145 struct qdisc_walker ;
146 struct tcf_walker ;
33 struct qdisc_size_table { struct callback_head rcu; struct list_head list; struct tc_sizespec szopts; int refcnt; u16 data[]; } ;
44 struct Qdisc { int (*enqueue)(struct sk_buff *, struct Qdisc *); struct sk_buff * (*dequeue)(struct Qdisc *); unsigned int flags; u32 limit; const struct Qdisc_ops *ops; struct qdisc_size_table *stab; struct list_head list; u32 handle; u32 parent; int (*reshape_fail)(struct sk_buff *, struct Qdisc *); void *u32_node; struct Qdisc *__parent; struct netdev_queue *dev_queue; struct gnet_stats_rate_est64 rate_est; struct Qdisc *next_sched; struct sk_buff *gso_skb; unsigned long state; struct sk_buff_head q; struct gnet_stats_basic_packed bstats; unsigned int __state; struct gnet_stats_queue qstats; struct callback_head callback_head; int padded; atomic_t refcnt; spinlock_t busylock; } ;
128 struct tcf_proto ;
128 struct Qdisc_class_ops { struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); int (*graft)(struct Qdisc *, unsigned long, struct Qdisc *, struct Qdisc **); struct Qdisc * (*leaf)(struct Qdisc *, unsigned long); void (*qlen_notify)(struct Qdisc *, unsigned long); unsigned long int (*get)(struct Qdisc *, u32 ); void (*put)(struct Qdisc *, unsigned long); int (*change)(struct Qdisc *, u32 , u32 , struct nlattr **, unsigned long *); int (*delete)(struct Qdisc *, unsigned long); void (*walk)(struct Qdisc *, struct qdisc_walker *); struct tcf_proto ** (*tcf_chain)(struct Qdisc *, unsigned long); unsigned long int (*bind_tcf)(struct Qdisc *, unsigned long, u32 ); void (*unbind_tcf)(struct Qdisc *, unsigned long); int (*dump)(struct Qdisc *, unsigned long, struct sk_buff *, struct tcmsg *); int (*dump_stats)(struct Qdisc *, unsigned long, struct gnet_dump *); } ;
156 struct Qdisc_ops { struct Qdisc_ops *next; const struct Qdisc_class_ops *cl_ops; char id[16U]; int priv_size; int (*enqueue)(struct sk_buff *, struct Qdisc *); struct sk_buff * (*dequeue)(struct Qdisc *); struct sk_buff * (*peek)(struct Qdisc *); unsigned int (*drop)(struct Qdisc *); int (*init)(struct Qdisc *, struct nlattr *); void (*reset)(struct Qdisc *); void (*destroy)(struct Qdisc *); int (*change)(struct Qdisc *, struct nlattr *); void (*attach)(struct Qdisc *); int (*dump)(struct Qdisc *, struct sk_buff *); int (*dump_stats)(struct Qdisc *, struct gnet_dump *); struct module *owner; } ;
180 struct tcf_result { unsigned long class; u32 classid; } ;
186 struct tcf_proto_ops { struct list_head head; char kind[16U]; int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); int (*init)(struct tcf_proto *); void (*destroy)(struct tcf_proto *); unsigned long int (*get)(struct tcf_proto *, u32 ); void (*put)(struct tcf_proto *, unsigned long); int (*change)(struct net *, struct sk_buff *, struct tcf_proto *, unsigned long, u32 , struct nlattr **, unsigned long *, bool ); int (*delete)(struct tcf_proto *, unsigned long); void (*walk)(struct tcf_proto *, struct tcf_walker *); int (*dump)(struct net *, struct tcf_proto *, unsigned long, struct sk_buff *, struct tcmsg *); struct module *owner; } ;
212 struct tcf_proto { struct tcf_proto *next; void *root; int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); __be16 protocol; u32 prio; u32 classid; struct Qdisc *q; void *data; const struct tcf_proto_ops *ops; } ;
722 struct qdisc_walker { int stop; int skip; int count; int (*fn)(struct Qdisc *, unsigned long, struct qdisc_walker *); } ;
34 typedef __u32 magic_t;
64 struct __anonstruct_qos_value_t_267 { __u32 value; __u16 bits; } ;
64 typedef struct __anonstruct_qos_value_t_267 qos_value_t;
65 struct qos_info { magic_t magic; qos_value_t baud_rate; qos_value_t max_turn_time; qos_value_t data_size; qos_value_t window_size; qos_value_t additional_bofs; qos_value_t min_turn_time; qos_value_t link_disc_time; qos_value_t power; } ;
93 struct irlap_cb ;
133 struct irda_skb_cb { unsigned int default_qdisc_pad; magic_t magic; __u32 next_speed; __u16 mtt; __u16 xbofs; __u16 next_xbofs; void *context; void (*destructor)(struct sk_buff *); __u16 xbofs_delay; __u8 line; } ;
170 struct __anonstruct_chipio_t_269 { int cfg_base; int sir_base; int fir_base; int mem_base; int sir_ext; int fir_ext; int irq; int irq2; int dma; int dma2; int fifo_size; int irqflags; int direction; int enabled; int suspended; __u32 speed; __u32 new_speed; int dongle_id; } ;
170 typedef struct __anonstruct_chipio_t_269 chipio_t;
185 struct __anonstruct_iobuff_t_270 { int state; int in_frame; __u8 *head; __u8 *data; int len; int truesize; __u16 fcs; struct sk_buff *skb; } ;
185 typedef struct __anonstruct_iobuff_t_270 iobuff_t;
56 struct ali_chip { char *name; int cfg[2U]; unsigned char entr1; unsigned char entr2; unsigned char cid_index; unsigned char cid_value; int (*probe)(struct ali_chip *, chipio_t *); int (*init)(struct ali_chip *, chipio_t *); } ;
144 typedef struct ali_chip ali_chip_t;
145 struct st_fifo_entry { int status; int len; } ;
165 struct st_fifo { struct st_fifo_entry entries[7U]; int pending_bytes; int head; int tail; int len; } ;
173 struct frame_cb { void *start; int len; } ;
178 struct tx_fifo { struct frame_cb queue[7U]; int ptr; int len; int free; void *tail; } ;
186 struct ali_ircc_cb { struct st_fifo st_fifo; struct tx_fifo tx_fifo; struct net_device *netdev; struct irlap_cb *irlap; struct qos_info qos; chipio_t io; iobuff_t tx_buff; iobuff_t rx_buff; dma_addr_t tx_buff_dma; dma_addr_t rx_buff_dma; __u8 ier; __u8 InterruptID; __u8 BusStatus; __u8 LineStatus; unsigned char rcvFramesOverflow; struct timeval stamp; struct timeval now; spinlock_t lock; __u32 new_speed; int index; unsigned char fifo_opti_buf; } ;
1 void * __builtin_memcpy(void *, const void *, unsigned long);
1 long int __builtin_expect(long exp, long c);
33 extern struct module __this_module;
358 extern struct pv_irq_ops pv_irq_ops;
72 void set_bit(long nr, volatile unsigned long *addr);
110 void clear_bit(long nr, volatile unsigned long *addr);
250 int test_and_clear_bit(long nr, volatile unsigned long *addr);
139 int printk(const char *, ...);
388 int sprintf(char *, const char *, ...);
88 void __bad_percpu_size();
71 void warn_slowpath_null(const char *, const int);
55 void * memset(void *, int, size_t );
802 unsigned long int arch_local_save_flags();
155 int arch_irqs_disabled_flags(unsigned long flags);
8 extern int __preempt_count;
20 int preempt_count();
93 void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *);
22 void _raw_spin_lock(raw_spinlock_t *);
39 void _raw_spin_unlock(raw_spinlock_t *);
43 void _raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long);
290 raw_spinlock_t * spinlock_check(spinlock_t *lock);
301 void spin_lock(spinlock_t *lock);
365 void ldv_spin_lock_59(spinlock_t *lock);
349 void spin_unlock(spinlock_t *lock);
409 void ldv_spin_unlock_60(spinlock_t *lock);
452 void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);
488 void ldv_spin_unlock_irqrestore_62(spinlock_t *lock, unsigned long flags);
5 void __ldv_spin_lock(spinlock_t *);
8 void ldv___ldv_spin_lock_7(spinlock_t *ldv_func_arg1);
12 void ldv___ldv_spin_lock_15(spinlock_t *ldv_func_arg1);
16 void ldv___ldv_spin_lock_17(spinlock_t *ldv_func_arg1);
20 void ldv___ldv_spin_lock_19(spinlock_t *ldv_func_arg1);
24 void ldv___ldv_spin_lock_22(spinlock_t *ldv_func_arg1);
28 void ldv___ldv_spin_lock_49(spinlock_t *ldv_func_arg1);
32 void ldv___ldv_spin_lock_53(spinlock_t *ldv_func_arg1);
36 void ldv___ldv_spin_lock_61(spinlock_t *ldv_func_arg1);
40 void ldv___ldv_spin_lock_63(spinlock_t *ldv_func_arg1);
44 void ldv___ldv_spin_lock_66(spinlock_t *ldv_func_arg1);
48 void ldv___ldv_spin_lock_69(spinlock_t *ldv_func_arg1);
52 void ldv___ldv_spin_lock_71(spinlock_t *ldv_func_arg1);
76 void ldv_spin_lock_addr_list_lock_of_net_device();
92 void ldv_spin_lock_dma_spin_lock();
108 void ldv_spin_lock_lock();
116 void ldv_spin_lock_lock_of_NOT_ARG_SIGN();
124 void ldv_spin_lock_lock_of_ali_ircc_cb();
125 void ldv_spin_unlock_lock_of_ali_ircc_cb();
140 void ldv_spin_lock_node_size_lock_of_pglist_data();
156 void ldv_spin_lock_siglock_of_sighand_struct();
156 void do_gettimeofday(struct timeval *);
138 extern struct resource ioport_resource;
192 struct resource * __request_region(struct resource *, resource_size_t , resource_size_t , const char *, int);
203 void __release_region(struct resource *, resource_size_t , resource_size_t );
77 extern volatile unsigned long jiffies;
309 void outb(unsigned char value, int port);
309 unsigned char inb(int port);
223 int net_ratelimit();
837 void * dev_get_drvdata(const struct device *dev);
76 int is_device_dma_capable(struct device *dev);
53 void debug_dma_alloc_coherent(struct device *, size_t , dma_addr_t , void *);
56 void debug_dma_free_coherent(struct device *, size_t , void *, dma_addr_t );
27 extern struct device x86_dma_fallback_dev;
30 extern struct dma_map_ops *dma_ops;
32 struct dma_map_ops * get_dma_ops(struct device *dev);
103 unsigned long int dma_alloc_coherent_mask(struct device *dev, gfp_t gfp);
115 gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp);
131 void * dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs);
160 void dma_free_attrs(struct device *dev, size_t size, void *vaddr, dma_addr_t bus, struct dma_attrs *attrs);
176 void * dma_zalloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag);
211 bool capable(int);
717 void consume_skb(struct sk_buff *);
1565 unsigned char * skb_put(struct sk_buff *, unsigned int);
1666 void skb_reserve(struct sk_buff *skb, int len);
1783 void skb_reset_mac_header(struct sk_buff *skb);
2016 struct sk_buff * __netdev_alloc_skb(struct net_device *, unsigned int, gfp_t );
2032 struct sk_buff * netdev_alloc_skb(struct net_device *dev, unsigned int length);
2046 struct sk_buff * dev_alloc_skb(unsigned int length);
2595 void skb_copy_from_linear_data(const struct sk_buff *skb, void *to, const unsigned int len);
2609 void skb_copy_to_linear_data(struct sk_buff *skb, const void *from, const unsigned int len);
8 void __udelay(unsigned long);
10 void __const_udelay(unsigned long);
123 int request_threaded_irq(unsigned int, irqreturn_t (*)(int, void *), irqreturn_t (*)(int, void *), unsigned long, const char *, void *);
128 int request_irq(unsigned int irq___0, irqreturn_t (*handler)(int, void *), unsigned long flags, const char *name, void *dev);
142 void free_irq(unsigned int, void *);
1621 struct netdev_queue * netdev_get_tx_queue(const struct net_device *dev, unsigned int index);
1687 void * netdev_priv(const struct net_device *dev);
1975 void free_netdev(struct net_device *);
2140 void __netif_schedule(struct Qdisc *);
2156 void netif_tx_start_queue(struct netdev_queue *dev_queue);
2167 void netif_start_queue(struct net_device *dev);
2182 void netif_tx_wake_queue(struct netdev_queue *dev_queue);
2195 void netif_wake_queue(struct net_device *dev);
2210 void netif_tx_stop_queue(struct netdev_queue *dev_queue);
2226 void netif_stop_queue(struct net_device *dev);
2596 int netif_rx(struct sk_buff *);
2998 int register_netdev(struct net_device *);
2999 void unregister_netdev(struct net_device *);
192 int __platform_driver_register(struct platform_driver *, struct module *);
194 void platform_driver_unregister(struct platform_driver *);
202 void * platform_get_drvdata(const struct platform_device *pdev);
174 void disable_dma(unsigned int dmanr);
305 int request_dma(unsigned int, const char *);
306 void free_dma(unsigned int);
402 bool qdisc_all_tx_empty(const struct net_device *dev);
59 extern unsigned int irda_debug;
83 void irda_init_max_qos_capabilies(struct qos_info *);
88 void irda_qos_bits_to_value(struct qos_info *);
214 struct irlap_cb * irlap_open(struct net_device *, struct qos_info *, const char *);
216 void irlap_close(struct irlap_cb *);
219 void irda_device_set_media_busy(struct net_device *, int);
224 int irda_device_txqueue_empty(const struct net_device *dev);
229 struct net_device * alloc_irdadev(int);
231 void irda_setup_dma(int, dma_addr_t , int, int);
239 __u16 irda_get_mtt(const struct sk_buff *skb);
252 __u32 irda_get_next_speed(const struct sk_buff *skb);
54 int async_wrap_skb(struct sk_buff *, __u8 *, int);
55 void async_unwrap_char(struct net_device *, struct net_device_stats *, iobuff_t *, __u8 );
223 void switch_bank(int iobase, int bank);
180 int ali_ircc_suspend(struct platform_device *dev, pm_message_t state);
181 int ali_ircc_resume(struct platform_device *dev);
183 struct platform_driver ali_ircc_driver = { 0, 0, 0, &ali_ircc_suspend, &ali_ircc_resume, { "ali-ircc", 0, &__this_module, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, 0, 0 };
193 int qos_mtt_bits = 7;
196 unsigned int io[4U] = { 4294967295U, 4294967295U, 4294967295U, 4294967295U };
197 unsigned int irq[4U] = { 0U, 0U, 0U, 0U };
198 unsigned int dma[4U] = { 0U, 0U, 0U, 0U };
200 int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info);
201 int ali_ircc_init_43(ali_chip_t *chip, chipio_t *info);
202 int ali_ircc_init_53(ali_chip_t *chip, chipio_t *info);
207 ali_chip_t chips[4U] = { { (char *)"M1543", { 1008, 880 }, 81U, 35U, 32U, 67U, &ali_ircc_probe_53, &ali_ircc_init_43 }, { (char *)"M1535", { 1008, 880 }, 81U, 35U, 32U, 83U, &ali_ircc_probe_53, &ali_ircc_init_53 }, { (char *)"M1563", { 1008, 880 }, 81U, 35U, 32U, 99U, &ali_ircc_probe_53, &ali_ircc_init_53 }, { (char *)0, { 0, 0 }, 0U, 0U, 0U, 0U, 0, 0 } };
216 struct ali_ircc_cb *dev_self[4U] = { (struct ali_ircc_cb *)0, (struct ali_ircc_cb *)0, (struct ali_ircc_cb *)0, (struct ali_ircc_cb *)0 };
219 char *dongle_types[4U] = { (char *)"TFDS6000", (char *)"HP HSDL-3600", (char *)"HP HSDL-1100", (char *)"No dongle connected" };
227 int ali_ircc_open(int i, chipio_t *info);
229 int ali_ircc_close(struct ali_ircc_cb *self);
231 int ali_ircc_setup(chipio_t *info);
232 int ali_ircc_is_receiving(struct ali_ircc_cb *self);
233 int ali_ircc_net_open(struct net_device *dev);
234 int ali_ircc_net_close(struct net_device *dev);
235 int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
236 void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud);
239 netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev);
241 irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self);
242 void ali_ircc_sir_receive(struct ali_ircc_cb *self);
243 void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self);
244 int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len);
245 void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed);
248 netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev);
250 void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud);
251 irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self);
252 int ali_ircc_dma_receive(struct ali_ircc_cb *self);
253 int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self);
254 int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self);
255 void ali_ircc_dma_xmit(struct ali_ircc_cb *self);
258 int ali_ircc_read_dongle_id(int i, chipio_t *info);
259 void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed);
262 void SIR2FIR(int iobase);
263 void FIR2SIR(int iobase);
264 void SetCOMInterrupts(struct ali_ircc_cb *self, unsigned char enable);
272 int ali_ircc_init();
373 void ali_ircc_cleanup();
389 const struct net_device_ops ali_ircc_sir_ops = { 0, 0, &ali_ircc_net_open, &ali_ircc_net_close, &ali_ircc_sir_hard_xmit, 0, 0, 0, 0, 0, &ali_ircc_net_ioctl, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
396 const struct net_device_ops ali_ircc_fir_ops = { 0, 0, &ali_ircc_net_open, &ali_ircc_net_close, &ali_ircc_fir_hard_xmit, 0, 0, 0, 0, 0, &ali_ircc_net_ioctl, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
794 irqreturn_t ali_ircc_interrupt(int irq___0, void *dev_id);
2413 void ldv_check_final_state();
2416 void ldv_check_return_value(int);
2422 void ldv_initialize();
2425 void ldv_handler_precall();
2428 int nondet_int();
2431 int LDV_IN_INTERRUPT = 0;
2434 void ldv_main0_sequence_infinite_withcheck_stateful();
10 void ldv_error();
25 int ldv_undef_int();
59 void __builtin_trap();
8 int ldv_spin_NOT_ARG_SIGN = 0;
11 void ldv_spin_lock_NOT_ARG_SIGN();
20 void ldv_spin_unlock_NOT_ARG_SIGN();
29 int ldv_spin_trylock_NOT_ARG_SIGN();
55 void ldv_spin_unlock_wait_NOT_ARG_SIGN();
62 int ldv_spin_is_locked_NOT_ARG_SIGN();
83 int ldv_spin_can_lock_NOT_ARG_SIGN();
90 int ldv_spin_is_contended_NOT_ARG_SIGN();
111 int ldv_atomic_dec_and_lock_NOT_ARG_SIGN();
133 int ldv_spin__xmit_lock_of_netdev_queue = 0;
136 void ldv_spin_lock__xmit_lock_of_netdev_queue();
145 void ldv_spin_unlock__xmit_lock_of_netdev_queue();
154 int ldv_spin_trylock__xmit_lock_of_netdev_queue();
180 void ldv_spin_unlock_wait__xmit_lock_of_netdev_queue();
187 int ldv_spin_is_locked__xmit_lock_of_netdev_queue();
208 int ldv_spin_can_lock__xmit_lock_of_netdev_queue();
215 int ldv_spin_is_contended__xmit_lock_of_netdev_queue();
236 int ldv_atomic_dec_and_lock__xmit_lock_of_netdev_queue();
258 int ldv_spin_addr_list_lock_of_net_device = 0;
270 void ldv_spin_unlock_addr_list_lock_of_net_device();
279 int ldv_spin_trylock_addr_list_lock_of_net_device();
305 void ldv_spin_unlock_wait_addr_list_lock_of_net_device();
312 int ldv_spin_is_locked_addr_list_lock_of_net_device();
333 int ldv_spin_can_lock_addr_list_lock_of_net_device();
340 int ldv_spin_is_contended_addr_list_lock_of_net_device();
361 int ldv_atomic_dec_and_lock_addr_list_lock_of_net_device();
383 int ldv_spin_alloc_lock_of_task_struct = 0;
386 void ldv_spin_lock_alloc_lock_of_task_struct();
395 void ldv_spin_unlock_alloc_lock_of_task_struct();
404 int ldv_spin_trylock_alloc_lock_of_task_struct();
430 void ldv_spin_unlock_wait_alloc_lock_of_task_struct();
437 int ldv_spin_is_locked_alloc_lock_of_task_struct();
458 int ldv_spin_can_lock_alloc_lock_of_task_struct();
465 int ldv_spin_is_contended_alloc_lock_of_task_struct();
486 int ldv_atomic_dec_and_lock_alloc_lock_of_task_struct();
508 int ldv_spin_dma_spin_lock = 0;
520 void ldv_spin_unlock_dma_spin_lock();
529 int ldv_spin_trylock_dma_spin_lock();
555 void ldv_spin_unlock_wait_dma_spin_lock();
562 int ldv_spin_is_locked_dma_spin_lock();
583 int ldv_spin_can_lock_dma_spin_lock();
590 int ldv_spin_is_contended_dma_spin_lock();
611 int ldv_atomic_dec_and_lock_dma_spin_lock();
633 int ldv_spin_i_lock_of_inode = 0;
636 void ldv_spin_lock_i_lock_of_inode();
645 void ldv_spin_unlock_i_lock_of_inode();
654 int ldv_spin_trylock_i_lock_of_inode();
680 void ldv_spin_unlock_wait_i_lock_of_inode();
687 int ldv_spin_is_locked_i_lock_of_inode();
708 int ldv_spin_can_lock_i_lock_of_inode();
715 int ldv_spin_is_contended_i_lock_of_inode();
736 int ldv_atomic_dec_and_lock_i_lock_of_inode();
758 int ldv_spin_lock = 0;
770 void ldv_spin_unlock_lock();
779 int ldv_spin_trylock_lock();
805 void ldv_spin_unlock_wait_lock();
812 int ldv_spin_is_locked_lock();
833 int ldv_spin_can_lock_lock();
840 int ldv_spin_is_contended_lock();
861 int ldv_atomic_dec_and_lock_lock();
883 int ldv_spin_lock_of_NOT_ARG_SIGN = 0;
895 void ldv_spin_unlock_lock_of_NOT_ARG_SIGN();
904 int ldv_spin_trylock_lock_of_NOT_ARG_SIGN();
930 void ldv_spin_unlock_wait_lock_of_NOT_ARG_SIGN();
937 int ldv_spin_is_locked_lock_of_NOT_ARG_SIGN();
958 int ldv_spin_can_lock_lock_of_NOT_ARG_SIGN();
965 int ldv_spin_is_contended_lock_of_NOT_ARG_SIGN();
986 int ldv_atomic_dec_and_lock_lock_of_NOT_ARG_SIGN();
1008 int ldv_spin_lock_of_ali_ircc_cb = 0;
1029 int ldv_spin_trylock_lock_of_ali_ircc_cb();
1055 void ldv_spin_unlock_wait_lock_of_ali_ircc_cb();
1062 int ldv_spin_is_locked_lock_of_ali_ircc_cb();
1083 int ldv_spin_can_lock_lock_of_ali_ircc_cb();
1090 int ldv_spin_is_contended_lock_of_ali_ircc_cb();
1111 int ldv_atomic_dec_and_lock_lock_of_ali_ircc_cb();
1133 int ldv_spin_lru_lock_of_netns_frags = 0;
1136 void ldv_spin_lock_lru_lock_of_netns_frags();
1145 void ldv_spin_unlock_lru_lock_of_netns_frags();
1154 int ldv_spin_trylock_lru_lock_of_netns_frags();
1180 void ldv_spin_unlock_wait_lru_lock_of_netns_frags();
1187 int ldv_spin_is_locked_lru_lock_of_netns_frags();
1208 int ldv_spin_can_lock_lru_lock_of_netns_frags();
1215 int ldv_spin_is_contended_lru_lock_of_netns_frags();
1236 int ldv_atomic_dec_and_lock_lru_lock_of_netns_frags();
1258 int ldv_spin_node_size_lock_of_pglist_data = 0;
1270 void ldv_spin_unlock_node_size_lock_of_pglist_data();
1279 int ldv_spin_trylock_node_size_lock_of_pglist_data();
1305 void ldv_spin_unlock_wait_node_size_lock_of_pglist_data();
1312 int ldv_spin_is_locked_node_size_lock_of_pglist_data();
1333 int ldv_spin_can_lock_node_size_lock_of_pglist_data();
1340 int ldv_spin_is_contended_node_size_lock_of_pglist_data();
1361 int ldv_atomic_dec_and_lock_node_size_lock_of_pglist_data();
1383 int ldv_spin_ptl = 0;
1386 void ldv_spin_lock_ptl();
1395 void ldv_spin_unlock_ptl();
1404 int ldv_spin_trylock_ptl();
1430 void ldv_spin_unlock_wait_ptl();
1437 int ldv_spin_is_locked_ptl();
1458 int ldv_spin_can_lock_ptl();
1465 int ldv_spin_is_contended_ptl();
1486 int ldv_atomic_dec_and_lock_ptl();
1508 int ldv_spin_siglock_of_sighand_struct = 0;
1520 void ldv_spin_unlock_siglock_of_sighand_struct();
1529 int ldv_spin_trylock_siglock_of_sighand_struct();
1555 void ldv_spin_unlock_wait_siglock_of_sighand_struct();
1562 int ldv_spin_is_locked_siglock_of_sighand_struct();
1583 int ldv_spin_can_lock_siglock_of_sighand_struct();
1590 int ldv_spin_is_contended_siglock_of_sighand_struct();
1611 int ldv_atomic_dec_and_lock_siglock_of_sighand_struct();
1633 int ldv_spin_tx_global_lock_of_net_device = 0;
1636 void ldv_spin_lock_tx_global_lock_of_net_device();
1645 void ldv_spin_unlock_tx_global_lock_of_net_device();
1654 int ldv_spin_trylock_tx_global_lock_of_net_device();
1680 void ldv_spin_unlock_wait_tx_global_lock_of_net_device();
1687 int ldv_spin_is_locked_tx_global_lock_of_net_device();
1708 int ldv_spin_can_lock_tx_global_lock_of_net_device();
1715 int ldv_spin_is_contended_tx_global_lock_of_net_device();
1736 int ldv_atomic_dec_and_lock_tx_global_lock_of_net_device();
return ;
}
-entry_point
{
2436 struct platform_device *var_group1;
2437 pm_message_t var_ali_ircc_suspend_29_p1;
2438 struct net_device *var_group2;
2439 int res_ali_ircc_net_open_19;
2440 int res_ali_ircc_net_close_20;
2441 struct sk_buff *var_group3;
2442 struct ifreq *var_group4;
2443 int var_ali_ircc_net_ioctl_27_p2;
2444 int var_ali_ircc_interrupt_9_p0;
2445 void *var_ali_ircc_interrupt_9_p1;
2446 int ldv_s_ali_ircc_driver_platform_driver;
2447 int ldv_s_ali_ircc_sir_ops_net_device_ops;
2448 int ldv_s_ali_ircc_fir_ops_net_device_ops;
2449 int tmp;
2450 int tmp___0;
2451 int tmp___1;
2566 ldv_s_ali_ircc_driver_platform_driver = 0;
2568 ldv_s_ali_ircc_sir_ops_net_device_ops = 0;
2571 ldv_s_ali_ircc_fir_ops_net_device_ops = 0;
2544 LDV_IN_INTERRUPT = 1;
2553 -ldv_initialize()
{
1763 ldv_spin_NOT_ARG_SIGN = 1;
1765 ldv_spin__xmit_lock_of_netdev_queue = 1;
1767 ldv_spin_addr_list_lock_of_net_device = 1;
1769 ldv_spin_alloc_lock_of_task_struct = 1;
1771 ldv_spin_dma_spin_lock = 1;
1773 ldv_spin_i_lock_of_inode = 1;
1775 ldv_spin_lock = 1;
1777 ldv_spin_lock_of_NOT_ARG_SIGN = 1;
1779 ldv_spin_lock_of_ali_ircc_cb = 1;
1781 ldv_spin_lru_lock_of_netns_frags = 1;
1783 ldv_spin_node_size_lock_of_pglist_data = 1;
1785 ldv_spin_ptl = 1;
1787 ldv_spin_siglock_of_sighand_struct = 1;
1789 ldv_spin_tx_global_lock_of_net_device = 1;
1790 return ;;
}
2563 ldv_handler_precall() { /* Function call is skipped due to function is undefined */}
2564 -ali_ircc_init()
{
274 ali_chip_t *chip;
275 chipio_t info;
276 int ret;
277 int cfg;
278 int cfg_base;
279 int reg;
280 int revision;
281 int i;
282 int tmp;
283 unsigned char tmp___0;
284 unsigned char tmp___1;
285 int tmp___2;
279 i = 0;
281 assume(irda_debug > 1U);
281 printk("\017%s(), ---------------- Start ----------------\n", "ali_ircc_init") { /* Function call is skipped due to function is undefined */}
283 ret = __platform_driver_register(&ali_ircc_driver, &__this_module) { /* Function call is skipped due to function is undefined */}
284 assume(!(ret != 0));
290 ret = -19;
293 chip = (ali_chip_t *)(&chips);
293 goto ldv_45903;
293 unsigned long __CPAchecker_TMP_4 = (unsigned long)(chip->name);
293 assume(__CPAchecker_TMP_4 != 0UL);
295 goto ldv_45902;
294 ldv_45902:;
295 assume(irda_debug > 1U);
295 printk("\017%s(), Probing for %s ...\n", "ali_ircc_init", chip->name) { /* Function call is skipped due to function is undefined */}
298 cfg = 0;
298 goto ldv_45900;
298 assume(cfg <= 1);
300 goto ldv_45899;
299 ldv_45899:;
300 cfg_base = (chip->cfg)[cfg];
301 assume(!(cfg_base == 0));
304 memset((void *)(&info), 0, 72UL) { /* Function call is skipped due to function is undefined */}
305 info.cfg_base = cfg_base;
306 info.fir_base = (int)(io[i]);
307 info.dma = (int)(dma[i]);
308 info.irq = (int)(irq[i]);
312 int __CPAchecker_TMP_0 = (int)(chip->entr1);
312 -outb(__CPAchecker_TMP_0, cfg_base)
{
309 Ignored inline assembler code
310 return ;;
}
313 int __CPAchecker_TMP_1 = (int)(chip->entr2);
313 -outb(__CPAchecker_TMP_1, cfg_base)
{
309 Ignored inline assembler code
310 return ;;
}
316 -outb(7, cfg_base)
{
309 Ignored inline assembler code
310 return ;;
}
317 -outb(5, cfg_base + 1)
{
309 Ignored inline assembler code
310 return ;;
}
320 int __CPAchecker_TMP_2 = (int)(chip->cid_index);
320 -outb(__CPAchecker_TMP_2, cfg_base)
{
309 Ignored inline assembler code
310 return ;;
}
321 -inb(cfg_base + 1)
{
311 unsigned char value;
309 Ignored inline assembler code
309 return value;;
}
321 reg = (int)tmp___0;
323 int __CPAchecker_TMP_3 = (int)(chip->cid_value);
323 assume(!(__CPAchecker_TMP_3 == reg));
352 assume(irda_debug > 1U);
352 printk("\017%s(), No %s chip at 0x%03x\n", "ali_ircc_init", chip->name, cfg_base) { /* Function call is skipped due to function is undefined */}
355 -outb(187, cfg_base)
{
309 Ignored inline assembler code
310 return ;;
}
356 ldv_45898:;
298 cfg = cfg + 1;
299 ldv_45900:;
298 assume(cfg <= 1);
300 goto ldv_45899;
299 ldv_45899:;
300 cfg_base = (chip->cfg)[cfg];
301 assume(!(cfg_base == 0));
304 memset((void *)(&info), 0, 72UL) { /* Function call is skipped due to function is undefined */}
305 info.cfg_base = cfg_base;
306 info.fir_base = (int)(io[i]);
307 info.dma = (int)(dma[i]);
308 info.irq = (int)(irq[i]);
312 int __CPAchecker_TMP_0 = (int)(chip->entr1);
312 -outb(__CPAchecker_TMP_0, cfg_base)
{
309 Ignored inline assembler code
310 return ;;
}
313 int __CPAchecker_TMP_1 = (int)(chip->entr2);
313 -outb(__CPAchecker_TMP_1, cfg_base)
{
309 Ignored inline assembler code
310 return ;;
}
316 -outb(7, cfg_base)
{
309 Ignored inline assembler code
310 return ;;
}
317 -outb(5, cfg_base + 1)
{
309 Ignored inline assembler code
310 return ;;
}
320 int __CPAchecker_TMP_2 = (int)(chip->cid_index);
320 -outb(__CPAchecker_TMP_2, cfg_base)
{
309 Ignored inline assembler code
310 return ;;
}
321 -inb(cfg_base + 1)
{
311 unsigned char value;
309 Ignored inline assembler code
309 return value;;
}
321 reg = (int)tmp___0;
323 int __CPAchecker_TMP_3 = (int)(chip->cid_value);
323 assume(__CPAchecker_TMP_3 == reg);
325 assume(irda_debug > 1U);
325 printk("\017%s(), Chip found at 0x%03x\n", "ali_ircc_init", cfg_base) { /* Function call is skipped due to function is undefined */}
327 -outb(31, cfg_base)
{
309 Ignored inline assembler code
310 return ;;
}
328 -inb(cfg_base + 1)
{
311 unsigned char value;
309 Ignored inline assembler code
309 return value;;
}
328 revision = (int)tmp___1;
329 assume(irda_debug > 1U);
329 printk("\017%s(), Found %s chip, revision=%d\n", "ali_ircc_init", chip->name, revision) { /* Function call is skipped due to function is undefined */}
337 assume(!((io[i]) <= 1999U));
343 assume(!((*(chip->probe)) == (&ali_ircc_sir_hard_xmit)));
343 assume(!((*(chip->probe)) == (&ali_ircc_probe_53)));
343 assume(!((*(chip->probe)) == (&ali_ircc_fir_hard_xmit)));
343 assume(!((*(chip->probe)) == (&ali_ircc_init_53)));
343 assume(!((*(chip->probe)) == (&ali_ircc_init_43)));
343 (*(chip->probe))(chip, &info);
346 -ali_ircc_open(i, &info)
{
411 struct net_device *dev;
412 struct ali_ircc_cb *self;
413 int dongle_id;
414 int err;
415 int tmp;
416 int tmp___0;
417 int tmp___1;
418 void *tmp___2;
419 struct lock_class_key __key;
420 int tmp___3;
421 struct resource *tmp___4;
422 void *tmp___5;
423 void *tmp___6;
424 int tmp___7;
425 int tmp___8;
426 int tmp___9;
427 int tmp___10;
428 int tmp___11;
416 assume(irda_debug > 1U);
416 printk("\017%s(), ---------------- Start ----------------\n", "ali_ircc_open") { /* Function call is skipped due to function is undefined */}
418 assume(!(((unsigned int)i) > 3U));
425 -ali_ircc_setup(info)
{
681 unsigned char tmp;
682 int version;
683 int iobase;
684 unsigned char tmp___0;
685 int tmp___1;
686 unsigned char tmp___2;
687 int tmp___3;
683 iobase = info->fir_base;
685 assume(irda_debug > 1U);
685 printk("\017%s(), ---------------- Start ----------------\n", "ali_ircc_setup") { /* Function call is skipped due to function is undefined */}
693 -SIR2FIR(iobase)
{
2339 assume(irda_debug != 0U);
2339 printk("\017%s(), ---------------- Start ----------------\n", "SIR2FIR") { /* Function call is skipped due to function is undefined */}
2344 -outb(40, iobase + 4)
{
309 Ignored inline assembler code
310 return ;;
}
2345 -outb(104, iobase + 4)
{
309 Ignored inline assembler code
310 return ;;
}
2346 -outb(136, iobase + 4)
{
309 Ignored inline assembler code
310 return ;;
}
2348 -outb(96, iobase + 7)
{
309 Ignored inline assembler code
310 return ;;
}
2349 -outb(32, iobase + 7)
{
309 Ignored inline assembler code
310 return ;;
}
2355 assume(irda_debug != 0U);
2355 printk("\017%s(), ----------------- End ------------------\n", "SIR2FIR") { /* Function call is skipped due to function is undefined */}
2356 return ;;
}
696 -outb(64, iobase + 7)
{
309 Ignored inline assembler code
310 return ;;
}
699 -switch_bank(iobase, 35)
{
225 -outb((int)((unsigned char)bank), iobase + 7)
{
309 Ignored inline assembler code
310 return ;;
}
226 return ;;
}
700 -inb(iobase)
{
311 unsigned char value;
309 Ignored inline assembler code
309 return value;;
}
700 version = (int)tmp___0;
703 assume(!(version != 0));
711 -switch_bank(iobase, 33)
{
225 -outb((int)((unsigned char)bank), iobase + 7)
{
309 Ignored inline assembler code
310 return ;;
}
226 return ;;
}
712 -outb(1, iobase + 1)
{
309 Ignored inline assembler code
310 return ;;
}
715 -outb(1, iobase + 2)
{
309 Ignored inline assembler code
310 return ;;
}
718 -switch_bank(iobase, 34)
{
225 -outb((int)((unsigned char)bank), iobase + 7)
{
309 Ignored inline assembler code
310 return ;;
}
226 return ;;
}
719 -inb(iobase)
{
311 unsigned char value;
309 Ignored inline assembler code
309 return value;;
}
719 -outb((int)(((unsigned int)tmp___2) | 64U), iobase)
{
309 Ignored inline assembler code
310 return ;;
}
724 -switch_bank(iobase, 32)
{
225 -outb((int)((unsigned char)bank), iobase + 7)
{
309 Ignored inline assembler code
310 return ;;
}
226 return ;;
}
726 -inb(iobase + 4)
{
311 unsigned char value;
309 Ignored inline assembler code
309 return value;;
}
727 tmp = ((unsigned int)tmp) & 223U;
728 tmp = ((unsigned int)tmp) | 128U;
729 tmp = ((unsigned int)tmp) & 191U;
730 -outb((int)tmp, iobase + 4)
{
309 Ignored inline assembler code
310 return ;;
}
733 -outb(0, iobase + 1)
{
309 Ignored inline assembler code
310 return ;;
}
737 -FIR2SIR(iobase)
{
2360 unsigned char val;
2362 assume(irda_debug != 0U);
2362 printk("\017%s(), ---------------- Start ----------------\n", "FIR2SIR") { /* Function call is skipped due to function is undefined */}
2367 -outb(32, iobase + 7)
{
309 Ignored inline assembler code
310 return ;;
}
2368 -outb(0, iobase + 1)
{
309 Ignored inline assembler code
310 return ;;
}
2370 -outb(160, iobase + 7)
{
309 Ignored inline assembler code
310 return ;;
}
2371 -outb(0, iobase + 2)
{
309 Ignored inline assembler code
310 return ;;
}
2372 -outb(7, iobase + 2)
{
309 Ignored inline assembler code
310 return ;;
}
2374 -inb(iobase)
{
311 unsigned char value;
309 Ignored inline assembler code
309 return value;;
}
2375 -inb(iobase + 5)
{
311 unsigned char value;
309 Ignored inline assembler code
309 return value;;
}
2376 -inb(iobase + 6)
{
311 unsigned char value;
309 Ignored inline assembler code
309 return value;;
}
2378 assume(irda_debug != 0U);
2378 printk("\017%s(), ----------------- End ------------------\n", "FIR2SIR") { /* Function call is skipped due to function is undefined */}
2379 return ;;
}
739 tmp___3 = net_ratelimit() { /* Function call is skipped due to function is undefined */}
739 assume(!(tmp___3 != 0));
746 assume(irda_debug > 1U);
746 printk("\017%s(), ----------------- End ------------------\n", "ali_ircc_setup") { /* Function call is skipped due to function is undefined */}
748 return 0;;
}
425 assume(!(tmp___0 == -1));
428 dev = alloc_irdadev(608) { /* Function call is skipped due to function is undefined */}
429 assume(!(((unsigned long)dev) == 0UL));
435 -netdev_priv((const struct net_device *)dev)
{
1689 return ((void *)dev) + 3264U;;
}
435 self = (struct ali_ircc_cb *)tmp___2;
436 self->netdev = dev;
437 -spinlock_check(&(self->lock))
{
292 return &(lock->ldv_6306.rlock);;
}
437 __raw_spin_lock_init(&(self->lock.ldv_6306.rlock), "&(&self->lock)->rlock", &__key) { /* Function call is skipped due to function is undefined */}
440 dev_self[i] = self;
441 self->index = i;
444 self->io.cfg_base = info->cfg_base;
445 self->io.fir_base = info->fir_base;
446 self->io.sir_base = info->sir_base;
447 self->io.irq = info->irq;
448 self->io.fir_ext = 8;
449 self->io.dma = info->dma;
450 self->io.fifo_size = 16;
453 tmp___4 = __request_region(&ioport_resource, (resource_size_t )(self->io.fir_base), (resource_size_t )(self->io.fir_ext), "ali-ircc", 0) { /* Function call is skipped due to function is undefined */}
453 assume(!(((unsigned long)tmp___4) == 0UL));
462 irda_init_max_qos_capabilies(&(self->qos)) { /* Function call is skipped due to function is undefined */}
465 self->qos.baud_rate.bits = 510U;
468 self->qos.min_turn_time.bits = (__u16 )qos_mtt_bits;
470 irda_qos_bits_to_value(&(self->qos)) { /* Function call is skipped due to function is undefined */}
473 self->rx_buff.truesize = 14384;
474 self->tx_buff.truesize = 14384;
477 -dma_zalloc_coherent((struct device *)0, (size_t )(self->rx_buff.truesize), &(self->rx_buff_dma), 208U)
{
179 void *ret;
180 void *tmp;
179 -dma_alloc_attrs(dev, size, dma_handle, flag | 32768U, (struct dma_attrs *)0)
{
134 struct dma_map_ops *ops;
135 struct dma_map_ops *tmp;
136 void *memory;
137 int tmp___0;
138 gfp_t tmp___1;
134 -get_dma_ops(dev)
{
34 long tmp;
37 -__builtin_expect(((unsigned long)dev) == 0UL, 0L)
{
51 return exp;;
}
37 assume(tmp != 0L);
38 return dma_ops;;
}
134 ops = tmp;
137 gfp = gfp & 4294967288U;
142 assume(((unsigned long)dev) == 0UL);
143 dev = &x86_dma_fallback_dev;
145 -is_device_dma_capable(dev)
{
78 int __CPAchecker_TMP_0;
78 unsigned long __CPAchecker_TMP_1 = (unsigned long)(dev->dma_mask);
78 assume(__CPAchecker_TMP_1 != 0UL);
78 assume((*(dev->dma_mask)) != 0ULL);
78 __CPAchecker_TMP_0 = 1;
78 return __CPAchecker_TMP_0;;
}
145 assume(!(tmp___0 == 0));
148 unsigned long __CPAchecker_TMP_0 = (unsigned long)(ops->alloc);
148 assume(!(__CPAchecker_TMP_0 == 0UL));
151 -dma_alloc_coherent_gfp_flags(dev, gfp)
{
117 unsigned long dma_mask;
118 unsigned long tmp;
117 -dma_alloc_coherent_mask(dev, gfp)
{
105 unsigned long dma_mask;
106 dma_mask = 0UL;
108 unsigned long __CPAchecker_TMP_0 = (unsigned long)(dev->coherent_dma_mask);
108 dma_mask = __CPAchecker_TMP_0;
109 assume(dma_mask == 0UL);
110 unsigned long int __CPAchecker_TMP_1;
110 assume(!((((int)gfp) & 1) == 0));
110 __CPAchecker_TMP_1 = 16777215UL;
110 dma_mask = __CPAchecker_TMP_1;
112 return dma_mask;;
}
117 dma_mask = tmp;
119 assume(((unsigned long long)dma_mask) <= 16777215ULL);
120 gfp = gfp | 1U;
122 assume(((unsigned long long)dma_mask) <= 4294967295ULL);
122 assume((gfp & 1U) == 0U);
123 gfp = gfp | 4U;
125 return gfp;;
}
151 memory = (*(ops->alloc))(dev, size, dma_handle, tmp___1, attrs);
153 debug_dma_alloc_coherent(dev, size, *dma_handle, memory) { /* Function call is skipped due to function is undefined */}
155 return memory;;
}
179 ret = tmp;
181 return ret;;
}
477 self->rx_buff.head = (__u8 *)tmp___5;
480 assume(!(((unsigned long)(self->rx_buff.head)) == 0UL));
485 -dma_zalloc_coherent((struct device *)0, (size_t )(self->tx_buff.truesize), &(self->tx_buff_dma), 208U)
{
179 void *ret;
180 void *tmp;
179 -dma_alloc_attrs(dev, size, dma_handle, flag | 32768U, (struct dma_attrs *)0)
{
134 struct dma_map_ops *ops;
135 struct dma_map_ops *tmp;
136 void *memory;
137 int tmp___0;
138 gfp_t tmp___1;
134 -get_dma_ops(dev)
{
34 long tmp;
37 -__builtin_expect(((unsigned long)dev) == 0UL, 0L)
{
51 return exp;;
}
37 assume(tmp != 0L);
38 return dma_ops;;
}
134 ops = tmp;
137 gfp = gfp & 4294967288U;
142 assume(((unsigned long)dev) == 0UL);
143 dev = &x86_dma_fallback_dev;
145 -is_device_dma_capable(dev)
{
78 int __CPAchecker_TMP_0;
78 unsigned long __CPAchecker_TMP_1 = (unsigned long)(dev->dma_mask);
78 assume(__CPAchecker_TMP_1 != 0UL);
78 assume((*(dev->dma_mask)) != 0ULL);
78 __CPAchecker_TMP_0 = 1;
78 return __CPAchecker_TMP_0;;
}
145 assume(!(tmp___0 == 0));
148 unsigned long __CPAchecker_TMP_0 = (unsigned long)(ops->alloc);
148 assume(!(__CPAchecker_TMP_0 == 0UL));
151 -dma_alloc_coherent_gfp_flags(dev, gfp)
{
117 unsigned long dma_mask;
118 unsigned long tmp;
117 -dma_alloc_coherent_mask(dev, gfp)
{
105 unsigned long dma_mask;
106 dma_mask = 0UL;
108 unsigned long __CPAchecker_TMP_0 = (unsigned long)(dev->coherent_dma_mask);
108 dma_mask = __CPAchecker_TMP_0;
109 assume(dma_mask == 0UL);
110 unsigned long int __CPAchecker_TMP_1;
110 assume(!((((int)gfp) & 1) == 0));
110 __CPAchecker_TMP_1 = 16777215UL;
110 dma_mask = __CPAchecker_TMP_1;
112 return dma_mask;;
}
117 dma_mask = tmp;
119 assume(((unsigned long long)dma_mask) <= 16777215ULL);
120 gfp = gfp | 1U;
122 assume(((unsigned long long)dma_mask) <= 4294967295ULL);
122 assume((gfp & 1U) == 0U);
123 gfp = gfp | 4U;
125 return gfp;;
}
151 memory = (*(ops->alloc))(dev, size, dma_handle, tmp___1, attrs);
153 debug_dma_alloc_coherent(dev, size, *dma_handle, memory) { /* Function call is skipped due to function is undefined */}
155 return memory;;
}
179 ret = tmp;
181 return ret;;
}
485 self->tx_buff.head = (__u8 *)tmp___6;
488 assume(!(((unsigned long)(self->tx_buff.head)) == 0UL));
493 self->rx_buff.in_frame = 0;
494 self->rx_buff.state = 0;
495 self->tx_buff.data = self->tx_buff.head;
496 self->rx_buff.data = self->rx_buff.head;
499 tmp___8 = 0;
499 self->tx_fifo.free = tmp___8;
499 tmp___7 = tmp___8;
499 self->tx_fifo.ptr = tmp___7;
499 self->tx_fifo.len = tmp___7;
500 self->tx_fifo.tail = (void *)(self->tx_buff.head);
503 dev->netdev_ops = &ali_ircc_sir_ops;
505 err = register_netdev(dev) { /* Function call is skipped due to function is undefined */}
506 assume(!(err != 0));
510 tmp___10 = net_ratelimit() { /* Function call is skipped due to function is undefined */}
510 assume(!(tmp___10 != 0));
513 -ali_ircc_read_dongle_id(i, info)
{
760 int dongle_id;
761 int reg;
762 int cfg_base;
763 unsigned char tmp;
761 cfg_base = info->cfg_base;
763 assume(irda_debug > 1U);
763 printk("\017%s(), ---------------- Start ----------------\n", "ali_ircc_read_dongle_id") { /* Function call is skipped due to function is undefined */}
766 -outb((int)((chips[i]).entr1), cfg_base)
{
309 Ignored inline assembler code
310 return ;;
}
767 -outb((int)((chips[i]).entr2), cfg_base)
{
309 Ignored inline assembler code
310 return ;;
}
770 -outb(7, cfg_base)
{
309 Ignored inline assembler code
310 return ;;
}
771 -outb(5, cfg_base + 1)
{
309 Ignored inline assembler code
310 return ;;
}
774 -outb(240, cfg_base)
{
309 Ignored inline assembler code
310 return ;;
}
775 -inb(cfg_base + 1)
{
311 unsigned char value;
309 Ignored inline assembler code
309 return value;;
}
775 reg = (int)tmp;
776 dongle_id = ((reg >> 6) & 2) | ((reg >> 5) & 1);
777 assume(irda_debug > 1U);
777 printk("\017%s(), probing dongle_id=%d, dongle_types=%s\n", "ali_ircc_read_dongle_id", dongle_id, dongle_types[dongle_id]) { /* Function call is skipped due to function is undefined */}
781 -outb(187, cfg_base)
{
309 Ignored inline assembler code
310 return ;;
}
783 assume(irda_debug > 1U);
783 printk("\017%s(), ----------------- End ------------------\n", "ali_ircc_read_dongle_id") { /* Function call is skipped due to function is undefined */}
785 return dongle_id;;
}
514 tmp___11 = net_ratelimit() { /* Function call is skipped due to function is undefined */}
514 assume(!(tmp___11 != 0));
517 self->io.dongle_id = dongle_id;
519 assume(irda_debug > 1U);
519 printk("\017%s(), ----------------- End -----------------\n", "ali_ircc_open") { /* Function call is skipped due to function is undefined */}
521 return 0;;
}
346 assume(tmp___2 == 0);
347 ret = 0;
348 i = i + 1;
355 -outb(187, cfg_base)
{
309 Ignored inline assembler code
310 return ;;
}
356 ldv_45898:;
298 cfg = cfg + 1;
299 ldv_45900:;
298 assume(!(cfg <= 1));
293 chip = chip + 1;
293 i = i + 1;
294 ldv_45903:;
293 unsigned long __CPAchecker_TMP_4 = (unsigned long)(chip->name);
293 assume(!(__CPAchecker_TMP_4 != 0UL));
359 assume(irda_debug > 1U);
359 printk("\017%s(), ----------------- End -----------------\n", "ali_ircc_init") { /* Function call is skipped due to function is undefined */}
361 assume(!(ret != 0));
364 return ret;;
}
2564 assume(!(tmp != 0));
2577 goto ldv_46302;
2577 tmp___1 = nondet_int() { /* Function call is skipped due to function is undefined */}
2577 assume(tmp___1 != 0);
2582 goto ldv_46301;
2578 ldv_46301:;
2583 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}
2583 switch (tmp___0)
2584 assume(!(tmp___0 == 0));
2606 assume(!(tmp___0 == 1));
2627 assume(!(tmp___0 == 2));
2651 assume(!(tmp___0 == 3));
2675 assume(!(tmp___0 == 4));
2696 assume(!(tmp___0 == 5));
2717 assume(!(tmp___0 == 6));
2741 assume(!(tmp___0 == 7));
2765 assume(!(tmp___0 == 8));
2786 assume(!(tmp___0 == 9));
2807 assume(tmp___0 == 10);
2810 LDV_IN_INTERRUPT = 2;
2819 ldv_handler_precall() { /* Function call is skipped due to function is undefined */}
2820 -ali_ircc_interrupt(var_ali_ircc_interrupt_9_p0, var_ali_ircc_interrupt_9_p1)
{
796 struct net_device *dev;
797 struct ali_ircc_cb *self;
798 int ret;
799 void *tmp;
800 irqreturn_t tmp___0;
801 irqreturn_t tmp___1;
796 dev = (struct net_device *)dev_id;
800 assume(irda_debug > 1U);
800 printk("\017%s(), ---------------- Start ----------------\n", "ali_ircc_interrupt") { /* Function call is skipped due to function is undefined */}
802 -netdev_priv((const struct net_device *)dev)
{
1689 return ((void *)dev) + 3264U;;
}
802 self = (struct ali_ircc_cb *)tmp;
804 -ldv_spin_lock_59(&(self->lock))
{
3325 -ldv_spin_lock_lock_of_ali_ircc_cb()
{
1014 assume(ldv_spin_lock_of_ali_ircc_cb == 1);
1016 ldv_spin_lock_of_ali_ircc_cb = 2;
1017 return ;;
}
3327 -spin_lock(lock)
{
303 _raw_spin_lock(&(lock->ldv_6306.rlock)) { /* Function call is skipped due to function is undefined */}
304 return ;;
}
3328 return ;;
}
807 assume(!((self->io.speed) > 115200U));
810 -ali_ircc_sir_interrupt(self)
{
947 int iobase;
948 int iir;
949 int lsr;
950 unsigned char tmp;
951 unsigned char tmp___0;
950 assume(irda_debug > 1U);
950 printk("\017%s(), ---------------- Start ----------------\n", "ali_ircc_sir_interrupt") { /* Function call is skipped due to function is undefined */}
952 iobase = self->io.sir_base;
954 -inb(iobase + 2)
{
311 unsigned char value;
309 Ignored inline assembler code
309 return value;;
}
954 iir = ((int)tmp) & 14;
955 assume(iir != 0);
957 -inb(iobase + 5)
{
311 unsigned char value;
309 Ignored inline assembler code
309 return value;;
}
957 lsr = (int)tmp___0;
959 assume(irda_debug > 3U);
959 printk("\017%s(), iir=%02x, lsr=%02x, iobase=%#x\n", "ali_ircc_sir_interrupt", iir, lsr, iobase) { /* Function call is skipped due to function is undefined */}
962 switch (iir)
963 assume(!(iir == 6));
967 assume(!(iir == 4));
971 assume(iir == 2);
972 assume((lsr & 32) != 0);
975 -ali_ircc_sir_write_wakeup(self)
{
1035 int actual;
1036 int iobase;
1037 unsigned char tmp;
1035 actual = 0;
1038 assume(!(((unsigned long)self) == 0UL));
1040 assume(irda_debug > 1U);
1040 printk("\017%s(), ---------------- Start ----------------\n", "ali_ircc_sir_write_wakeup") { /* Function call is skipped due to function is undefined */}
1042 iobase = self->io.sir_base;
1045 assume(!((self->tx_buff.len) > 0));
1055 assume((self->new_speed) != 0U);
1058 goto ldv_46013;
1058 -inb(iobase + 5)
{
311 unsigned char value;
309 Ignored inline assembler code
309 return value;;
}
1058 assume(!((((int)tmp) & 64) == 0));
1061 assume(irda_debug != 0U);
1061 printk("\017%s(), Changing speed! self->new_speed = %d\n", "ali_ircc_sir_write_wakeup", self->new_speed) { /* Function call is skipped due to function is undefined */}
1062 -ali_ircc_change_speed(self, self->new_speed)
{
1091 struct net_device *dev;
1092 int iobase;
1091 dev = self->netdev;
1094 assume(irda_debug != 0U);
1094 printk("\017%s(), ---------------- Start ----------------\n", "ali_ircc_change_speed") { /* Function call is skipped due to function is undefined */}
1096 assume(irda_debug > 1U);
1096 printk("\017%s(), setting speed = %d\n", "ali_ircc_change_speed", baud) { /* Function call is skipped due to function is undefined */}
1101 iobase = self->io.fir_base;
1103 -SetCOMInterrupts(self, 0)
{
2286 unsigned char newMask;
2287 int iobase;
2289 iobase = self->io.fir_base;
2291 assume(irda_debug > 1U);
2291 printk("\017%s(), -------- Start -------- ( Enable = %d )\n", "SetCOMInterrupts", (int)enable) { /* Function call is skipped due to function is undefined */}
2294 assume(!(((unsigned int)enable) != 0U));
2319 newMask = 0U;
2324 assume(!((self->io.speed) > 115200U));
2330 -outb((int)newMask, iobase + 1)
{
309 Ignored inline assembler code
310 return ;;
}
2332 assume(irda_debug > 1U);
2332 printk("\017%s(), ----------------- End ------------------\n", "SetCOMInterrupts") { /* Function call is skipped due to function is undefined */}
2333 return ;;
}
1106 assume(!(baud > 115200U));
1124 -ali_ircc_sir_change_speed(self, baud)
{
1177 struct ali_ircc_cb *self;
1178 unsigned long flags;
1179 int iobase;
1180 int fcr;
1181 int lcr;
1182 int divisor;
1177 self = priv;
1184 assume(irda_debug != 0U);
1184 printk("\017%s(), ---------------- Start ----------------\n", "ali_ircc_sir_change_speed") { /* Function call is skipped due to function is undefined */}
1186 assume(irda_debug != 0U);
1186 printk("\017%s(), Setting speed to: %d\n", "ali_ircc_sir_change_speed", speed) { /* Function call is skipped due to function is undefined */}
1188 assume(!(((unsigned long)self) == 0UL));
1190 iobase = self->io.sir_base;
1193 assume(!((self->io.speed) > 115200U));
1203 -inb(iobase + 5)
{
311 unsigned char value;
309 Ignored inline assembler code
309 return value;;
}
1204 -inb(iobase + 7)
{
311 unsigned char value;
309 Ignored inline assembler code
309 return value;;
}
1207 self->io.speed = speed;
1209 -ldv___ldv_spin_lock_61(&(self->lock))
{
3341 -ldv_spin_lock_lock_of_ali_ircc_cb()
{
1014 assume(!(ldv_spin_lock_of_ali_ircc_cb == 1));
1014 -ldv_error()
{
15 LDV_ERROR:;
12 goto LDV_ERROR;
}
}
}
}
}
}
}
}
}
Source code
1 #ifndef _ASM_X86_DMA_MAPPING_H 2 #define _ASM_X86_DMA_MAPPING_H 3 4 /* 5 * IOMMU interface. See Documentation/DMA-API-HOWTO.txt and 6 * Documentation/DMA-API.txt for documentation. 7 */ 8 9 #include <linux/kmemcheck.h> 10 #include <linux/scatterlist.h> 11 #include <linux/dma-debug.h> 12 #include <linux/dma-attrs.h> 13 #include <asm/io.h> 14 #include <asm/swiotlb.h> 15 #include <asm-generic/dma-coherent.h> 16 #include <linux/dma-contiguous.h> 17 18 #ifdef CONFIG_ISA 19 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(24) 20 #else 21 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(32) 22 #endif 23 24 #define DMA_ERROR_CODE 0 25 26 extern int iommu_merge; 27 extern struct device x86_dma_fallback_dev; 28 extern int panic_on_overflow; 29 30 extern struct dma_map_ops *dma_ops; 31 32 static inline struct dma_map_ops *get_dma_ops(struct device *dev) 33 { 34 #ifndef CONFIG_X86_DEV_DMA_OPS 35 return dma_ops; 36 #else 37 if (unlikely(!dev) || !dev->archdata.dma_ops) 38 return dma_ops; 39 else 40 return dev->archdata.dma_ops; 41 #endif 42 } 43 44 #include <asm-generic/dma-mapping-common.h> 45 46 /* Make sure we keep the same behaviour */ 47 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 48 { 49 struct dma_map_ops *ops = get_dma_ops(dev); 50 debug_dma_mapping_error(dev, dma_addr); 51 if (ops->mapping_error) 52 return ops->mapping_error(dev, dma_addr); 53 54 return (dma_addr == DMA_ERROR_CODE); 55 } 56 57 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 58 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 59 60 extern int dma_supported(struct device *hwdev, u64 mask); 61 extern int dma_set_mask(struct device *dev, u64 mask); 62 63 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, 64 dma_addr_t *dma_addr, gfp_t flag, 65 struct dma_attrs *attrs); 66 67 extern void dma_generic_free_coherent(struct device *dev, size_t size, 68 void *vaddr, dma_addr_t dma_addr, 69 struct dma_attrs *attrs); 70 71 #ifdef 
CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */ 72 extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size); 73 extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); 74 extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr); 75 #else 76 77 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 78 { 79 if (!dev->dma_mask) 80 return 0; 81 82 return addr + size - 1 <= *dev->dma_mask; 83 } 84 85 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 86 { 87 return paddr; 88 } 89 90 static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) 91 { 92 return daddr; 93 } 94 #endif /* CONFIG_X86_DMA_REMAP */ 95 96 static inline void 97 dma_cache_sync(struct device *dev, void *vaddr, size_t size, 98 enum dma_data_direction dir) 99 { 100 flush_write_buffers(); 101 } 102 103 static inline unsigned long dma_alloc_coherent_mask(struct device *dev, 104 gfp_t gfp) 105 { 106 unsigned long dma_mask = 0; 107 108 dma_mask = dev->coherent_dma_mask; 109 if (!dma_mask) 110 dma_mask = (gfp & GFP_DMA) ? 
DMA_BIT_MASK(24) : DMA_BIT_MASK(32); 111 112 return dma_mask; 113 } 114 115 static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp) 116 { 117 unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp); 118 119 if (dma_mask <= DMA_BIT_MASK(24)) 120 gfp |= GFP_DMA; 121 #ifdef CONFIG_X86_64 122 if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) 123 gfp |= GFP_DMA32; 124 #endif 125 return gfp; 126 } 127 128 #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) 129 130 static inline void * 131 dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, 132 gfp_t gfp, struct dma_attrs *attrs) 133 { 134 struct dma_map_ops *ops = get_dma_ops(dev); 135 void *memory; 136 137 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); 138 139 if (dma_alloc_from_coherent(dev, size, dma_handle, &memory)) 140 return memory; 141 142 if (!dev) 143 dev = &x86_dma_fallback_dev; 144 145 if (!is_device_dma_capable(dev)) 146 return NULL; 147 148 if (!ops->alloc) 149 return NULL; 150 151 memory = ops->alloc(dev, size, dma_handle, 152 dma_alloc_coherent_gfp_flags(dev, gfp), attrs); 153 debug_dma_alloc_coherent(dev, size, *dma_handle, memory); 154 155 return memory; 156 } 157 158 #define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) 159 160 static inline void dma_free_attrs(struct device *dev, size_t size, 161 void *vaddr, dma_addr_t bus, 162 struct dma_attrs *attrs) 163 { 164 struct dma_map_ops *ops = get_dma_ops(dev); 165 166 WARN_ON(irqs_disabled()); /* for portability */ 167 168 if (dma_release_from_coherent(dev, get_order(size), vaddr)) 169 return; 170 171 debug_dma_free_coherent(dev, size, vaddr, bus); 172 if (ops->free) 173 ops->free(dev, size, vaddr, bus, attrs); 174 } 175 176 #endif
1 #ifndef _ASM_X86_IO_H 2 #define _ASM_X86_IO_H 3 4 /* 5 * This file contains the definitions for the x86 IO instructions 6 * inb/inw/inl/outb/outw/outl and the "string versions" of the same 7 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing" 8 * versions of the single-IO instructions (inb_p/inw_p/..). 9 * 10 * This file is not meant to be obfuscating: it's just complicated 11 * to (a) handle it all in a way that makes gcc able to optimize it 12 * as well as possible and (b) trying to avoid writing the same thing 13 * over and over again with slight variations and possibly making a 14 * mistake somewhere. 15 */ 16 17 /* 18 * Thanks to James van Artsdalen for a better timing-fix than 19 * the two short jumps: using outb's to a nonexistent port seems 20 * to guarantee better timings even on fast machines. 21 * 22 * On the other hand, I'd like to be sure of a non-existent port: 23 * I feel a bit unsafe about using 0x80 (should be safe, though) 24 * 25 * Linus 26 */ 27 28 /* 29 * Bit simplified and optimized by Jan Hubicka 30 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999. 
31 * 32 * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added, 33 * isa_read[wl] and isa_write[wl] fixed 34 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br> 35 */ 36 37 #define ARCH_HAS_IOREMAP_WC 38 39 #include <linux/string.h> 40 #include <linux/compiler.h> 41 #include <asm/page.h> 42 #include <asm/early_ioremap.h> 43 44 #define build_mmio_read(name, size, type, reg, barrier) \ 45 static inline type name(const volatile void __iomem *addr) \ 46 { type ret; asm volatile("mov" size " %1,%0":reg (ret) \ 47 :"m" (*(volatile type __force *)addr) barrier); return ret; } 48 49 #define build_mmio_write(name, size, type, reg, barrier) \ 50 static inline void name(type val, volatile void __iomem *addr) \ 51 { asm volatile("mov" size " %0,%1": :reg (val), \ 52 "m" (*(volatile type __force *)addr) barrier); } 53 54 build_mmio_read(readb, "b", unsigned char, "=q", :"memory") 55 build_mmio_read(readw, "w", unsigned short, "=r", :"memory") 56 build_mmio_read(readl, "l", unsigned int, "=r", :"memory") 57 58 build_mmio_read(__readb, "b", unsigned char, "=q", ) 59 build_mmio_read(__readw, "w", unsigned short, "=r", ) 60 build_mmio_read(__readl, "l", unsigned int, "=r", ) 61 62 build_mmio_write(writeb, "b", unsigned char, "q", :"memory") 63 build_mmio_write(writew, "w", unsigned short, "r", :"memory") 64 build_mmio_write(writel, "l", unsigned int, "r", :"memory") 65 66 build_mmio_write(__writeb, "b", unsigned char, "q", ) 67 build_mmio_write(__writew, "w", unsigned short, "r", ) 68 build_mmio_write(__writel, "l", unsigned int, "r", ) 69 70 #define readb_relaxed(a) __readb(a) 71 #define readw_relaxed(a) __readw(a) 72 #define readl_relaxed(a) __readl(a) 73 #define __raw_readb __readb 74 #define __raw_readw __readw 75 #define __raw_readl __readl 76 77 #define __raw_writeb __writeb 78 #define __raw_writew __writew 79 #define __raw_writel __writel 80 81 #define mmiowb() barrier() 82 83 #ifdef CONFIG_X86_64 84 85 build_mmio_read(readq, "q", unsigned long, "=r", :"memory") 86 
build_mmio_write(writeq, "q", unsigned long, "r", :"memory") 87 88 #define readq_relaxed(a) readq(a) 89 90 #define __raw_readq(a) readq(a) 91 #define __raw_writeq(val, addr) writeq(val, addr) 92 93 /* Let people know that we have them */ 94 #define readq readq 95 #define writeq writeq 96 97 #endif 98 99 /** 100 * virt_to_phys - map virtual addresses to physical 101 * @address: address to remap 102 * 103 * The returned physical address is the physical (CPU) mapping for 104 * the memory address given. It is only valid to use this function on 105 * addresses directly mapped or allocated via kmalloc. 106 * 107 * This function does not give bus mappings for DMA transfers. In 108 * almost all conceivable cases a device driver should not be using 109 * this function 110 */ 111 112 static inline phys_addr_t virt_to_phys(volatile void *address) 113 { 114 return __pa(address); 115 } 116 117 /** 118 * phys_to_virt - map physical address to virtual 119 * @address: address to remap 120 * 121 * The returned virtual address is a current CPU mapping for 122 * the memory address given. It is only valid to use this function on 123 * addresses that have a kernel mapping 124 * 125 * This function does not handle bus mappings for DMA transfers. In 126 * almost all conceivable cases a device driver should not be using 127 * this function 128 */ 129 130 static inline void *phys_to_virt(phys_addr_t address) 131 { 132 return __va(address); 133 } 134 135 /* 136 * Change "struct page" to physical address. 137 */ 138 #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) 139 140 /* 141 * ISA I/O bus memory addresses are 1:1 with the physical address. 142 * However, we truncate the address to unsigned int to avoid undesirable 143 * promitions in legacy drivers. 
144 */ 145 static inline unsigned int isa_virt_to_bus(volatile void *address) 146 { 147 return (unsigned int)virt_to_phys(address); 148 } 149 #define isa_page_to_bus(page) ((unsigned int)page_to_phys(page)) 150 #define isa_bus_to_virt phys_to_virt 151 152 /* 153 * However PCI ones are not necessarily 1:1 and therefore these interfaces 154 * are forbidden in portable PCI drivers. 155 * 156 * Allow them on x86 for legacy drivers, though. 157 */ 158 #define virt_to_bus virt_to_phys 159 #define bus_to_virt phys_to_virt 160 161 /** 162 * ioremap - map bus memory into CPU space 163 * @offset: bus address of the memory 164 * @size: size of the resource to map 165 * 166 * ioremap performs a platform specific sequence of operations to 167 * make bus memory CPU accessible via the readb/readw/readl/writeb/ 168 * writew/writel functions and the other mmio helpers. The returned 169 * address is not guaranteed to be usable directly as a virtual 170 * address. 171 * 172 * If the area you are trying to map is a PCI BAR you should have a 173 * look at pci_iomap(). 
174 */ 175 extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size); 176 extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); 177 extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, 178 unsigned long prot_val); 179 180 /* 181 * The default ioremap() behavior is non-cached: 182 */ 183 static inline void __iomem *ioremap(resource_size_t offset, unsigned long size) 184 { 185 return ioremap_nocache(offset, size); 186 } 187 188 extern void iounmap(volatile void __iomem *addr); 189 190 extern void set_iounmap_nonlazy(void); 191 192 #ifdef __KERNEL__ 193 194 #include <asm-generic/iomap.h> 195 196 #include <linux/vmalloc.h> 197 198 /* 199 * Convert a virtual cached pointer to an uncached pointer 200 */ 201 #define xlate_dev_kmem_ptr(p) p 202 203 static inline void 204 memset_io(volatile void __iomem *addr, unsigned char val, size_t count) 205 { 206 memset((void __force *)addr, val, count); 207 } 208 209 static inline void 210 memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count) 211 { 212 memcpy(dst, (const void __force *)src, count); 213 } 214 215 static inline void 216 memcpy_toio(volatile void __iomem *dst, const void *src, size_t count) 217 { 218 memcpy((void __force *)dst, src, count); 219 } 220 221 /* 222 * ISA space is 'always mapped' on a typical x86 system, no need to 223 * explicitly ioremap() it. The fact that the ISA IO space is mapped 224 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values 225 * are physical addresses. The following constant pointer can be 226 * used as the IO-area pointer (it can be iounmapped as well, so the 227 * analogy with PCI is quite large): 228 */ 229 #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) 230 231 /* 232 * Cache management 233 * 234 * This needed for two cases 235 * 1. Out of order aware processors 236 * 2. 
Accidentally out of order processors (PPro errata #51) 237 */ 238 239 static inline void flush_write_buffers(void) 240 { 241 #if defined(CONFIG_X86_PPRO_FENCE) 242 asm volatile("lock; addl $0,0(%%esp)": : :"memory"); 243 #endif 244 } 245 246 #endif /* __KERNEL__ */ 247 248 extern void native_io_delay(void); 249 250 extern int io_delay_type; 251 extern void io_delay_init(void); 252 253 #if defined(CONFIG_PARAVIRT) 254 #include <asm/paravirt.h> 255 #else 256 257 static inline void slow_down_io(void) 258 { 259 native_io_delay(); 260 #ifdef REALLY_SLOW_IO 261 native_io_delay(); 262 native_io_delay(); 263 native_io_delay(); 264 #endif 265 } 266 267 #endif 268 269 #define BUILDIO(bwl, bw, type) \ 270 static inline void out##bwl(unsigned type value, int port) \ 271 { \ 272 asm volatile("out" #bwl " %" #bw "0, %w1" \ 273 : : "a"(value), "Nd"(port)); \ 274 } \ 275 \ 276 static inline unsigned type in##bwl(int port) \ 277 { \ 278 unsigned type value; \ 279 asm volatile("in" #bwl " %w1, %" #bw "0" \ 280 : "=a"(value) : "Nd"(port)); \ 281 return value; \ 282 } \ 283 \ 284 static inline void out##bwl##_p(unsigned type value, int port) \ 285 { \ 286 out##bwl(value, port); \ 287 slow_down_io(); \ 288 } \ 289 \ 290 static inline unsigned type in##bwl##_p(int port) \ 291 { \ 292 unsigned type value = in##bwl(port); \ 293 slow_down_io(); \ 294 return value; \ 295 } \ 296 \ 297 static inline void outs##bwl(int port, const void *addr, unsigned long count) \ 298 { \ 299 asm volatile("rep; outs" #bwl \ 300 : "+S"(addr), "+c"(count) : "d"(port)); \ 301 } \ 302 \ 303 static inline void ins##bwl(int port, void *addr, unsigned long count) \ 304 { \ 305 asm volatile("rep; ins" #bwl \ 306 : "+D"(addr), "+c"(count) : "d"(port)); \ 307 } 308 309 BUILDIO(b, b, char) 310 BUILDIO(w, w, short) 311 BUILDIO(l, , int) 312 313 extern void *xlate_dev_mem_ptr(unsigned long phys); 314 extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr); 315 316 extern int ioremap_change_attr(unsigned long 
vaddr, unsigned long size, 317 unsigned long prot_val); 318 extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size); 319 320 extern bool is_early_ioremap_ptep(pte_t *ptep); 321 322 #ifdef CONFIG_XEN 323 #include <xen/xen.h> 324 struct bio_vec; 325 326 extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, 327 const struct bio_vec *vec2); 328 329 #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ 330 (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \ 331 (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2))) 332 #endif /* CONFIG_XEN */ 333 334 #define IO_SPACE_LIMIT 0xffff 335 336 #ifdef CONFIG_MTRR 337 extern int __must_check arch_phys_wc_add(unsigned long base, 338 unsigned long size); 339 extern void arch_phys_wc_del(int handle); 340 #define arch_phys_wc_add arch_phys_wc_add 341 #endif 342 343 #endif /* _ASM_X86_IO_H */
1 2 #include <linux/kernel.h> 3 #include <linux/spinlock.h> 4 5 extern void __ldv_spin_lock(spinlock_t *lock); 6 extern void __ldv_spin_unlock(spinlock_t *lock); 7 extern int __ldv_spin_trylock(spinlock_t *lock); 8 extern void __ldv_spin_unlock_wait(spinlock_t *lock); 9 extern void __ldv_spin_can_lock(spinlock_t *lock); 10 extern int __ldv_atomic_dec_and_lock(spinlock_t *lock); 11 12 extern void ldv_spin_lock_NOT_ARG_SIGN(void); 13 extern void ldv_spin_unlock_NOT_ARG_SIGN(void); 14 extern int ldv_spin_trylock_NOT_ARG_SIGN(void); 15 extern void ldv_spin_unlock_wait_NOT_ARG_SIGN(void); 16 extern int ldv_spin_is_locked_NOT_ARG_SIGN(void); 17 extern int ldv_spin_can_lock_NOT_ARG_SIGN(void); 18 extern int ldv_spin_is_contended_NOT_ARG_SIGN(void); 19 extern int ldv_atomic_dec_and_lock_NOT_ARG_SIGN(void); 20 extern void ldv_spin_lock__xmit_lock_of_netdev_queue(void); 21 extern void ldv_spin_unlock__xmit_lock_of_netdev_queue(void); 22 extern int ldv_spin_trylock__xmit_lock_of_netdev_queue(void); 23 extern void ldv_spin_unlock_wait__xmit_lock_of_netdev_queue(void); 24 extern int ldv_spin_is_locked__xmit_lock_of_netdev_queue(void); 25 extern int ldv_spin_can_lock__xmit_lock_of_netdev_queue(void); 26 extern int ldv_spin_is_contended__xmit_lock_of_netdev_queue(void); 27 extern int ldv_atomic_dec_and_lock__xmit_lock_of_netdev_queue(void); 28 extern void ldv_spin_lock_addr_list_lock_of_net_device(void); 29 extern void ldv_spin_unlock_addr_list_lock_of_net_device(void); 30 extern int ldv_spin_trylock_addr_list_lock_of_net_device(void); 31 extern void ldv_spin_unlock_wait_addr_list_lock_of_net_device(void); 32 extern int ldv_spin_is_locked_addr_list_lock_of_net_device(void); 33 extern int ldv_spin_can_lock_addr_list_lock_of_net_device(void); 34 extern int ldv_spin_is_contended_addr_list_lock_of_net_device(void); 35 extern int ldv_atomic_dec_and_lock_addr_list_lock_of_net_device(void); 36 extern void ldv_spin_lock_alloc_lock_of_task_struct(void); 37 extern void 
ldv_spin_unlock_alloc_lock_of_task_struct(void); 38 extern int ldv_spin_trylock_alloc_lock_of_task_struct(void); 39 extern void ldv_spin_unlock_wait_alloc_lock_of_task_struct(void); 40 extern int ldv_spin_is_locked_alloc_lock_of_task_struct(void); 41 extern int ldv_spin_can_lock_alloc_lock_of_task_struct(void); 42 extern int ldv_spin_is_contended_alloc_lock_of_task_struct(void); 43 extern int ldv_atomic_dec_and_lock_alloc_lock_of_task_struct(void); 44 extern void ldv_spin_lock_dma_spin_lock(void); 45 extern void ldv_spin_unlock_dma_spin_lock(void); 46 extern int ldv_spin_trylock_dma_spin_lock(void); 47 extern void ldv_spin_unlock_wait_dma_spin_lock(void); 48 extern int ldv_spin_is_locked_dma_spin_lock(void); 49 extern int ldv_spin_can_lock_dma_spin_lock(void); 50 extern int ldv_spin_is_contended_dma_spin_lock(void); 51 extern int ldv_atomic_dec_and_lock_dma_spin_lock(void); 52 extern void ldv_spin_lock_i_lock_of_inode(void); 53 extern void ldv_spin_unlock_i_lock_of_inode(void); 54 extern int ldv_spin_trylock_i_lock_of_inode(void); 55 extern void ldv_spin_unlock_wait_i_lock_of_inode(void); 56 extern int ldv_spin_is_locked_i_lock_of_inode(void); 57 extern int ldv_spin_can_lock_i_lock_of_inode(void); 58 extern int ldv_spin_is_contended_i_lock_of_inode(void); 59 extern int ldv_atomic_dec_and_lock_i_lock_of_inode(void); 60 extern void ldv_spin_lock_lock(void); 61 extern void ldv_spin_unlock_lock(void); 62 extern int ldv_spin_trylock_lock(void); 63 extern void ldv_spin_unlock_wait_lock(void); 64 extern int ldv_spin_is_locked_lock(void); 65 extern int ldv_spin_can_lock_lock(void); 66 extern int ldv_spin_is_contended_lock(void); 67 extern int ldv_atomic_dec_and_lock_lock(void); 68 extern void ldv_spin_lock_lock_of_NOT_ARG_SIGN(void); 69 extern void ldv_spin_unlock_lock_of_NOT_ARG_SIGN(void); 70 extern int ldv_spin_trylock_lock_of_NOT_ARG_SIGN(void); 71 extern void ldv_spin_unlock_wait_lock_of_NOT_ARG_SIGN(void); 72 extern int ldv_spin_is_locked_lock_of_NOT_ARG_SIGN(void); 
73 extern int ldv_spin_can_lock_lock_of_NOT_ARG_SIGN(void); 74 extern int ldv_spin_is_contended_lock_of_NOT_ARG_SIGN(void); 75 extern int ldv_atomic_dec_and_lock_lock_of_NOT_ARG_SIGN(void); 76 extern void ldv_spin_lock_lock_of_ali_ircc_cb(void); 77 extern void ldv_spin_unlock_lock_of_ali_ircc_cb(void); 78 extern int ldv_spin_trylock_lock_of_ali_ircc_cb(void); 79 extern void ldv_spin_unlock_wait_lock_of_ali_ircc_cb(void); 80 extern int ldv_spin_is_locked_lock_of_ali_ircc_cb(void); 81 extern int ldv_spin_can_lock_lock_of_ali_ircc_cb(void); 82 extern int ldv_spin_is_contended_lock_of_ali_ircc_cb(void); 83 extern int ldv_atomic_dec_and_lock_lock_of_ali_ircc_cb(void); 84 extern void ldv_spin_lock_lru_lock_of_netns_frags(void); 85 extern void ldv_spin_unlock_lru_lock_of_netns_frags(void); 86 extern int ldv_spin_trylock_lru_lock_of_netns_frags(void); 87 extern void ldv_spin_unlock_wait_lru_lock_of_netns_frags(void); 88 extern int ldv_spin_is_locked_lru_lock_of_netns_frags(void); 89 extern int ldv_spin_can_lock_lru_lock_of_netns_frags(void); 90 extern int ldv_spin_is_contended_lru_lock_of_netns_frags(void); 91 extern int ldv_atomic_dec_and_lock_lru_lock_of_netns_frags(void); 92 extern void ldv_spin_lock_node_size_lock_of_pglist_data(void); 93 extern void ldv_spin_unlock_node_size_lock_of_pglist_data(void); 94 extern int ldv_spin_trylock_node_size_lock_of_pglist_data(void); 95 extern void ldv_spin_unlock_wait_node_size_lock_of_pglist_data(void); 96 extern int ldv_spin_is_locked_node_size_lock_of_pglist_data(void); 97 extern int ldv_spin_can_lock_node_size_lock_of_pglist_data(void); 98 extern int ldv_spin_is_contended_node_size_lock_of_pglist_data(void); 99 extern int ldv_atomic_dec_and_lock_node_size_lock_of_pglist_data(void); 100 extern void ldv_spin_lock_ptl(void); 101 extern void ldv_spin_unlock_ptl(void); 102 extern int ldv_spin_trylock_ptl(void); 103 extern void ldv_spin_unlock_wait_ptl(void); 104 extern int ldv_spin_is_locked_ptl(void); 105 extern int 
ldv_spin_can_lock_ptl(void); 106 extern int ldv_spin_is_contended_ptl(void); 107 extern int ldv_atomic_dec_and_lock_ptl(void); 108 extern void ldv_spin_lock_siglock_of_sighand_struct(void); 109 extern void ldv_spin_unlock_siglock_of_sighand_struct(void); 110 extern int ldv_spin_trylock_siglock_of_sighand_struct(void); 111 extern void ldv_spin_unlock_wait_siglock_of_sighand_struct(void); 112 extern int ldv_spin_is_locked_siglock_of_sighand_struct(void); 113 extern int ldv_spin_can_lock_siglock_of_sighand_struct(void); 114 extern int ldv_spin_is_contended_siglock_of_sighand_struct(void); 115 extern int ldv_atomic_dec_and_lock_siglock_of_sighand_struct(void); 116 extern void ldv_spin_lock_tx_global_lock_of_net_device(void); 117 extern void ldv_spin_unlock_tx_global_lock_of_net_device(void); 118 extern int ldv_spin_trylock_tx_global_lock_of_net_device(void); 119 extern void ldv_spin_unlock_wait_tx_global_lock_of_net_device(void); 120 extern int ldv_spin_is_locked_tx_global_lock_of_net_device(void); 121 extern int ldv_spin_can_lock_tx_global_lock_of_net_device(void); 122 extern int ldv_spin_is_contended_tx_global_lock_of_net_device(void); 123 extern int ldv_atomic_dec_and_lock_tx_global_lock_of_net_device(void); 124 125 /********************************************************************* 126 * 127 * Filename: ali-ircc.h 128 * Version: 0.5 129 * Description: Driver for the ALI M1535D and M1543C FIR Controller 130 * Status: Experimental. 
131 * Author: Benjamin Kong <benjamin_kong@ali.com.tw> 132 * Created at: 2000/10/16 03:46PM 133 * Modified at: 2001/1/3 02:55PM 134 * Modified by: Benjamin Kong <benjamin_kong@ali.com.tw> 135 * Modified at: 2003/11/6 and support for ALi south-bridge chipsets M1563 136 * Modified by: Clear Zhang <clear_zhang@ali.com.tw> 137 * 138 * Copyright (c) 2000 Benjamin Kong <benjamin_kong@ali.com.tw> 139 * All Rights Reserved 140 * 141 * This program is free software; you can redistribute it and/or 142 * modify it under the terms of the GNU General Public License as 143 * published by the Free Software Foundation; either version 2 of 144 * the License, or (at your option) any later version. 145 * 146 ********************************************************************/ 147 148 #include <linux/module.h> 149 #include <linux/gfp.h> 150 151 #include <linux/kernel.h> 152 #include <linux/types.h> 153 #include <linux/skbuff.h> 154 #include <linux/netdevice.h> 155 #include <linux/ioport.h> 156 #include <linux/delay.h> 157 #include <linux/init.h> 158 #include <linux/interrupt.h> 159 #include <linux/rtnetlink.h> 160 #include <linux/serial_reg.h> 161 #include <linux/dma-mapping.h> 162 #include <linux/platform_device.h> 163 164 #include <asm/io.h> 165 #include <asm/dma.h> 166 #include <asm/byteorder.h> 167 168 #include <net/irda/wrapper.h> 169 #include <net/irda/irda.h> 170 #include <net/irda/irda_device.h> 171 172 #include "ali-ircc.h" 173 174 #define CHIP_IO_EXTENT 8 175 #define BROKEN_DONGLE_ID 176 177 #define ALI_IRCC_DRIVER_NAME "ali-ircc" 178 179 /* Power Management */ 180 static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state); 181 static int ali_ircc_resume(struct platform_device *dev); 182 183 static struct platform_driver ali_ircc_driver = { 184 .suspend = ali_ircc_suspend, 185 .resume = ali_ircc_resume, 186 .driver = { 187 .name = ALI_IRCC_DRIVER_NAME, 188 .owner = THIS_MODULE, 189 }, 190 }; 191 192 /* Module parameters */ 193 static int qos_mtt_bits = 
0x07; /* 1 ms or more */ 194 195 /* Use BIOS settions by default, but user may supply module parameters */ 196 static unsigned int io[] = { ~0, ~0, ~0, ~0 }; 197 static unsigned int irq[] = { 0, 0, 0, 0 }; 198 static unsigned int dma[] = { 0, 0, 0, 0 }; 199 200 static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info); 201 static int ali_ircc_init_43(ali_chip_t *chip, chipio_t *info); 202 static int ali_ircc_init_53(ali_chip_t *chip, chipio_t *info); 203 204 /* These are the currently known ALi south-bridge chipsets, the only one difference 205 * is that M1543C doesn't support HP HDSL-3600 206 */ 207 static ali_chip_t chips[] = 208 { 209 { "M1543", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x43, ali_ircc_probe_53, ali_ircc_init_43 }, 210 { "M1535", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x53, ali_ircc_probe_53, ali_ircc_init_53 }, 211 { "M1563", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x63, ali_ircc_probe_53, ali_ircc_init_53 }, 212 { NULL } 213 }; 214 215 /* Max 4 instances for now */ 216 static struct ali_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL }; 217 218 /* Dongle Types */ 219 static char *dongle_types[] = { 220 "TFDS6000", 221 "HP HSDL-3600", 222 "HP HSDL-1100", 223 "No dongle connected", 224 }; 225 226 /* Some prototypes */ 227 static int ali_ircc_open(int i, chipio_t *info); 228 229 static int ali_ircc_close(struct ali_ircc_cb *self); 230 231 static int ali_ircc_setup(chipio_t *info); 232 static int ali_ircc_is_receiving(struct ali_ircc_cb *self); 233 static int ali_ircc_net_open(struct net_device *dev); 234 static int ali_ircc_net_close(struct net_device *dev); 235 static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 236 static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud); 237 238 /* SIR function */ 239 static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb, 240 struct net_device *dev); 241 static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self); 242 static void ali_ircc_sir_receive(struct 
ali_ircc_cb *self); 243 static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self); 244 static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len); 245 static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed); 246 247 /* FIR function */ 248 static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb, 249 struct net_device *dev); 250 static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 speed); 251 static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self); 252 static int ali_ircc_dma_receive(struct ali_ircc_cb *self); 253 static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self); 254 static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self); 255 static void ali_ircc_dma_xmit(struct ali_ircc_cb *self); 256 257 /* My Function */ 258 static int ali_ircc_read_dongle_id (int i, chipio_t *info); 259 static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed); 260 261 /* ALi chip function */ 262 static void SIR2FIR(int iobase); 263 static void FIR2SIR(int iobase); 264 static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable); 265 266 /* 267 * Function ali_ircc_init () 268 * 269 * Initialize chip. 
Find out whay kinds of chips we are dealing with 270 * and their configuration registers address 271 */ 272 static int __init ali_ircc_init(void) 273 { 274 ali_chip_t *chip; 275 chipio_t info; 276 int ret; 277 int cfg, cfg_base; 278 int reg, revision; 279 int i = 0; 280 281 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); 282 283 ret = platform_driver_register(&ali_ircc_driver); 284 if (ret) { 285 IRDA_ERROR("%s, Can't register driver!\n", 286 ALI_IRCC_DRIVER_NAME); 287 return ret; 288 } 289 290 ret = -ENODEV; 291 292 /* Probe for all the ALi chipsets we know about */ 293 for (chip= chips; chip->name; chip++, i++) 294 { 295 IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __func__, chip->name); 296 297 /* Try all config registers for this chip */ 298 for (cfg=0; cfg<2; cfg++) 299 { 300 cfg_base = chip->cfg[cfg]; 301 if (!cfg_base) 302 continue; 303 304 memset(&info, 0, sizeof(chipio_t)); 305 info.cfg_base = cfg_base; 306 info.fir_base = io[i]; 307 info.dma = dma[i]; 308 info.irq = irq[i]; 309 310 311 /* Enter Configuration */ 312 outb(chip->entr1, cfg_base); 313 outb(chip->entr2, cfg_base); 314 315 /* Select Logical Device 5 Registers (UART2) */ 316 outb(0x07, cfg_base); 317 outb(0x05, cfg_base+1); 318 319 /* Read Chip Identification Register */ 320 outb(chip->cid_index, cfg_base); 321 reg = inb(cfg_base+1); 322 323 if (reg == chip->cid_value) 324 { 325 IRDA_DEBUG(2, "%s(), Chip found at 0x%03x\n", __func__, cfg_base); 326 327 outb(0x1F, cfg_base); 328 revision = inb(cfg_base+1); 329 IRDA_DEBUG(2, "%s(), Found %s chip, revision=%d\n", __func__, 330 chip->name, revision); 331 332 /* 333 * If the user supplies the base address, then 334 * we init the chip, if not we probe the values 335 * set by the BIOS 336 */ 337 if (io[i] < 2000) 338 { 339 chip->init(chip, &info); 340 } 341 else 342 { 343 chip->probe(chip, &info); 344 } 345 346 if (ali_ircc_open(i, &info) == 0) 347 ret = 0; 348 i++; 349 } 350 else 351 { 352 IRDA_DEBUG(2, "%s(), No %s chip at 
0x%03x\n", __func__, chip->name, cfg_base); 353 } 354 /* Exit configuration */ 355 outb(0xbb, cfg_base); 356 } 357 } 358 359 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__); 360 361 if (ret) 362 platform_driver_unregister(&ali_ircc_driver); 363 364 return ret; 365 } 366 367 /* 368 * Function ali_ircc_cleanup () 369 * 370 * Close all configured chips 371 * 372 */ 373 static void __exit ali_ircc_cleanup(void) 374 { 375 int i; 376 377 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); 378 379 for (i=0; i < ARRAY_SIZE(dev_self); i++) { 380 if (dev_self[i]) 381 ali_ircc_close(dev_self[i]); 382 } 383 384 platform_driver_unregister(&ali_ircc_driver); 385 386 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__); 387 } 388 389 static const struct net_device_ops ali_ircc_sir_ops = { 390 .ndo_open = ali_ircc_net_open, 391 .ndo_stop = ali_ircc_net_close, 392 .ndo_start_xmit = ali_ircc_sir_hard_xmit, 393 .ndo_do_ioctl = ali_ircc_net_ioctl, 394 }; 395 396 static const struct net_device_ops ali_ircc_fir_ops = { 397 .ndo_open = ali_ircc_net_open, 398 .ndo_stop = ali_ircc_net_close, 399 .ndo_start_xmit = ali_ircc_fir_hard_xmit, 400 .ndo_do_ioctl = ali_ircc_net_ioctl, 401 }; 402 403 /* 404 * Function ali_ircc_open (int i, chipio_t *inf) 405 * 406 * Open driver instance 407 * 408 */ 409 static int ali_ircc_open(int i, chipio_t *info) 410 { 411 struct net_device *dev; 412 struct ali_ircc_cb *self; 413 int dongle_id; 414 int err; 415 416 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); 417 418 if (i >= ARRAY_SIZE(dev_self)) { 419 IRDA_ERROR("%s(), maximum number of supported chips reached!\n", 420 __func__); 421 return -ENOMEM; 422 } 423 424 /* Set FIR FIFO and DMA Threshold */ 425 if ((ali_ircc_setup(info)) == -1) 426 return -1; 427 428 dev = alloc_irdadev(sizeof(*self)); 429 if (dev == NULL) { 430 IRDA_ERROR("%s(), can't allocate memory for control block!\n", 431 __func__); 432 return 
-ENOMEM; 433 } 434 435 self = netdev_priv(dev); 436 self->netdev = dev; 437 spin_lock_init(&self->lock); 438 439 /* Need to store self somewhere */ 440 dev_self[i] = self; 441 self->index = i; 442 443 /* Initialize IO */ 444 self->io.cfg_base = info->cfg_base; /* In ali_ircc_probe_53 assign */ 445 self->io.fir_base = info->fir_base; /* info->sir_base = info->fir_base */ 446 self->io.sir_base = info->sir_base; /* ALi SIR and FIR use the same address */ 447 self->io.irq = info->irq; 448 self->io.fir_ext = CHIP_IO_EXTENT; 449 self->io.dma = info->dma; 450 self->io.fifo_size = 16; /* SIR: 16, FIR: 32 Benjamin 2000/11/1 */ 451 452 /* Reserve the ioports that we need */ 453 if (!request_region(self->io.fir_base, self->io.fir_ext, 454 ALI_IRCC_DRIVER_NAME)) { 455 IRDA_WARNING("%s(), can't get iobase of 0x%03x\n", __func__, 456 self->io.fir_base); 457 err = -ENODEV; 458 goto err_out1; 459 } 460 461 /* Initialize QoS for this device */ 462 irda_init_max_qos_capabilies(&self->qos); 463 464 /* The only value we must override it the baudrate */ 465 self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600| 466 IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8); // benjamin 2000/11/8 05:27PM 467 468 self->qos.min_turn_time.bits = qos_mtt_bits; 469 470 irda_qos_bits_to_value(&self->qos); 471 472 /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */ 473 self->rx_buff.truesize = 14384; 474 self->tx_buff.truesize = 14384; 475 476 /* Allocate memory if needed */ 477 self->rx_buff.head = 478 dma_zalloc_coherent(NULL, self->rx_buff.truesize, 479 &self->rx_buff_dma, GFP_KERNEL); 480 if (self->rx_buff.head == NULL) { 481 err = -ENOMEM; 482 goto err_out2; 483 } 484 485 self->tx_buff.head = 486 dma_zalloc_coherent(NULL, self->tx_buff.truesize, 487 &self->tx_buff_dma, GFP_KERNEL); 488 if (self->tx_buff.head == NULL) { 489 err = -ENOMEM; 490 goto err_out3; 491 } 492 493 self->rx_buff.in_frame = FALSE; 494 self->rx_buff.state = OUTSIDE_FRAME; 495 self->tx_buff.data = 
self->tx_buff.head; 496 self->rx_buff.data = self->rx_buff.head; 497 498 /* Reset Tx queue info */ 499 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0; 500 self->tx_fifo.tail = self->tx_buff.head; 501 502 /* Override the network functions we need to use */ 503 dev->netdev_ops = &ali_ircc_sir_ops; 504 505 err = register_netdev(dev); 506 if (err) { 507 IRDA_ERROR("%s(), register_netdev() failed!\n", __func__); 508 goto err_out4; 509 } 510 IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name); 511 512 /* Check dongle id */ 513 dongle_id = ali_ircc_read_dongle_id(i, info); 514 IRDA_MESSAGE("%s(), %s, Found dongle: %s\n", __func__, 515 ALI_IRCC_DRIVER_NAME, dongle_types[dongle_id]); 516 517 self->io.dongle_id = dongle_id; 518 519 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__); 520 521 return 0; 522 523 err_out4: 524 dma_free_coherent(NULL, self->tx_buff.truesize, 525 self->tx_buff.head, self->tx_buff_dma); 526 err_out3: 527 dma_free_coherent(NULL, self->rx_buff.truesize, 528 self->rx_buff.head, self->rx_buff_dma); 529 err_out2: 530 release_region(self->io.fir_base, self->io.fir_ext); 531 err_out1: 532 dev_self[i] = NULL; 533 free_netdev(dev); 534 return err; 535 } 536 537 538 /* 539 * Function ali_ircc_close (self) 540 * 541 * Close driver instance 542 * 543 */ 544 static int __exit ali_ircc_close(struct ali_ircc_cb *self) 545 { 546 int iobase; 547 548 IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __func__); 549 550 IRDA_ASSERT(self != NULL, return -1;); 551 552 iobase = self->io.fir_base; 553 554 /* Remove netdevice */ 555 unregister_netdev(self->netdev); 556 557 /* Release the PORT that this driver is using */ 558 IRDA_DEBUG(4, "%s(), Releasing Region %03x\n", __func__, self->io.fir_base); 559 release_region(self->io.fir_base, self->io.fir_ext); 560 561 if (self->tx_buff.head) 562 dma_free_coherent(NULL, self->tx_buff.truesize, 563 self->tx_buff.head, self->tx_buff_dma); 564 565 if (self->rx_buff.head) 
566 dma_free_coherent(NULL, self->rx_buff.truesize, 567 self->rx_buff.head, self->rx_buff_dma); 568 569 dev_self[self->index] = NULL; 570 free_netdev(self->netdev); 571 572 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__); 573 574 return 0; 575 } 576 577 /* 578 * Function ali_ircc_init_43 (chip, info) 579 * 580 * Initialize the ALi M1543 chip. 581 */ 582 static int ali_ircc_init_43(ali_chip_t *chip, chipio_t *info) 583 { 584 /* All controller information like I/O address, DMA channel, IRQ 585 * are set by BIOS 586 */ 587 588 return 0; 589 } 590 591 /* 592 * Function ali_ircc_init_53 (chip, info) 593 * 594 * Initialize the ALi M1535 chip. 595 */ 596 static int ali_ircc_init_53(ali_chip_t *chip, chipio_t *info) 597 { 598 /* All controller information like I/O address, DMA channel, IRQ 599 * are set by BIOS 600 */ 601 602 return 0; 603 } 604 605 /* 606 * Function ali_ircc_probe_53 (chip, info) 607 * 608 * Probes for the ALi M1535D or M1535 609 */ 610 static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info) 611 { 612 int cfg_base = info->cfg_base; 613 int hi, low, reg; 614 615 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); 616 617 /* Enter Configuration */ 618 outb(chip->entr1, cfg_base); 619 outb(chip->entr2, cfg_base); 620 621 /* Select Logical Device 5 Registers (UART2) */ 622 outb(0x07, cfg_base); 623 outb(0x05, cfg_base+1); 624 625 /* Read address control register */ 626 outb(0x60, cfg_base); 627 hi = inb(cfg_base+1); 628 outb(0x61, cfg_base); 629 low = inb(cfg_base+1); 630 info->fir_base = (hi<<8) + low; 631 632 info->sir_base = info->fir_base; 633 634 IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __func__, info->fir_base); 635 636 /* Read IRQ control register */ 637 outb(0x70, cfg_base); 638 reg = inb(cfg_base+1); 639 info->irq = reg & 0x0f; 640 IRDA_DEBUG(2, "%s(), probing irq=%d\n", __func__, info->irq); 641 642 /* Read DMA channel */ 643 outb(0x74, cfg_base); 644 reg = inb(cfg_base+1); 645 
info->dma = reg & 0x07; 646 647 if(info->dma == 0x04) 648 IRDA_WARNING("%s(), No DMA channel assigned !\n", __func__); 649 else 650 IRDA_DEBUG(2, "%s(), probing dma=%d\n", __func__, info->dma); 651 652 /* Read Enabled Status */ 653 outb(0x30, cfg_base); 654 reg = inb(cfg_base+1); 655 info->enabled = (reg & 0x80) && (reg & 0x01); 656 IRDA_DEBUG(2, "%s(), probing enabled=%d\n", __func__, info->enabled); 657 658 /* Read Power Status */ 659 outb(0x22, cfg_base); 660 reg = inb(cfg_base+1); 661 info->suspended = (reg & 0x20); 662 IRDA_DEBUG(2, "%s(), probing suspended=%d\n", __func__, info->suspended); 663 664 /* Exit configuration */ 665 outb(0xbb, cfg_base); 666 667 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__); 668 669 return 0; 670 } 671 672 /* 673 * Function ali_ircc_setup (info) 674 * 675 * Set FIR FIFO and DMA Threshold 676 * Returns non-negative on success. 677 * 678 */ 679 static int ali_ircc_setup(chipio_t *info) 680 { 681 unsigned char tmp; 682 int version; 683 int iobase = info->fir_base; 684 685 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); 686 687 /* Locking comments : 688 * Most operations here need to be protected. 
We are called before 689 * the device instance is created in ali_ircc_open(), therefore 690 * nobody can bother us - Jean II */ 691 692 /* Switch to FIR space */ 693 SIR2FIR(iobase); 694 695 /* Master Reset */ 696 outb(0x40, iobase+FIR_MCR); // benjamin 2000/11/30 11:45AM 697 698 /* Read FIR ID Version Register */ 699 switch_bank(iobase, BANK3); 700 version = inb(iobase+FIR_ID_VR); 701 702 /* Should be 0x00 in the M1535/M1535D */ 703 if(version != 0x00) 704 { 705 IRDA_ERROR("%s, Wrong chip version %02x\n", 706 ALI_IRCC_DRIVER_NAME, version); 707 return -1; 708 } 709 710 /* Set FIR FIFO Threshold Register */ 711 switch_bank(iobase, BANK1); 712 outb(RX_FIFO_Threshold, iobase+FIR_FIFO_TR); 713 714 /* Set FIR DMA Threshold Register */ 715 outb(RX_DMA_Threshold, iobase+FIR_DMA_TR); 716 717 /* CRC enable */ 718 switch_bank(iobase, BANK2); 719 outb(inb(iobase+FIR_IRDA_CR) | IRDA_CR_CRC, iobase+FIR_IRDA_CR); 720 721 /* NDIS driver set TX Length here BANK2 Alias 3, Alias4*/ 722 723 /* Switch to Bank 0 */ 724 switch_bank(iobase, BANK0); 725 726 tmp = inb(iobase+FIR_LCR_B); 727 tmp &=~0x20; // disable SIP 728 tmp |= 0x80; // these two steps make RX mode 729 tmp &= 0xbf; 730 outb(tmp, iobase+FIR_LCR_B); 731 732 /* Disable Interrupt */ 733 outb(0x00, iobase+FIR_IER); 734 735 736 /* Switch to SIR space */ 737 FIR2SIR(iobase); 738 739 IRDA_MESSAGE("%s, driver loaded (Benjamin Kong)\n", 740 ALI_IRCC_DRIVER_NAME); 741 742 /* Enable receive interrupts */ 743 // outb(UART_IER_RDI, iobase+UART_IER); //benjamin 2000/11/23 01:25PM 744 // Turn on the interrupts in ali_ircc_net_open 745 746 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__); 747 748 return 0; 749 } 750 751 /* 752 * Function ali_ircc_read_dongle_id (int index, info) 753 * 754 * Try to read dongle identification. This procedure needs to be executed 755 * once after power-on/reset. It also needs to be used whenever you suspect 756 * that the user may have plugged/unplugged the IrDA Dongle. 
757 */ 758 static int ali_ircc_read_dongle_id (int i, chipio_t *info) 759 { 760 int dongle_id, reg; 761 int cfg_base = info->cfg_base; 762 763 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); 764 765 /* Enter Configuration */ 766 outb(chips[i].entr1, cfg_base); 767 outb(chips[i].entr2, cfg_base); 768 769 /* Select Logical Device 5 Registers (UART2) */ 770 outb(0x07, cfg_base); 771 outb(0x05, cfg_base+1); 772 773 /* Read Dongle ID */ 774 outb(0xf0, cfg_base); 775 reg = inb(cfg_base+1); 776 dongle_id = ((reg>>6)&0x02) | ((reg>>5)&0x01); 777 IRDA_DEBUG(2, "%s(), probing dongle_id=%d, dongle_types=%s\n", __func__, 778 dongle_id, dongle_types[dongle_id]); 779 780 /* Exit configuration */ 781 outb(0xbb, cfg_base); 782 783 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__); 784 785 return dongle_id; 786 } 787 788 /* 789 * Function ali_ircc_interrupt (irq, dev_id, regs) 790 * 791 * An interrupt from the chip has arrived. Time to do some work 792 * 793 */ 794 static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id) 795 { 796 struct net_device *dev = dev_id; 797 struct ali_ircc_cb *self; 798 int ret; 799 800 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); 801 802 self = netdev_priv(dev); 803 804 spin_lock(&self->lock); 805 806 /* Dispatch interrupt handler for the current speed */ 807 if (self->io.speed > 115200) 808 ret = ali_ircc_fir_interrupt(self); 809 else 810 ret = ali_ircc_sir_interrupt(self); 811 812 spin_unlock(&self->lock); 813 814 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__); 815 return ret; 816 } 817 /* 818 * Function ali_ircc_fir_interrupt(irq, struct ali_ircc_cb *self) 819 * 820 * Handle MIR/FIR interrupt 821 * 822 */ 823 static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self) 824 { 825 __u8 eir, OldMessageCount; 826 int iobase, tmp; 827 828 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__); 829 830 iobase = 
self->io.fir_base; 831 832 switch_bank(iobase, BANK0); 833 self->InterruptID = inb(iobase+FIR_IIR); 834 self->BusStatus = inb(iobase+FIR_BSR); 835 836 OldMessageCount = (self->LineStatus + 1) & 0x07; 837 self->LineStatus = inb(iobase+FIR_LSR); 838 //self->ier = inb(iobase+FIR_IER); 2000/12/1 04:32PM 839 eir = self->InterruptID & self->ier; /* Mask out the interesting ones */ 840 841 IRDA_DEBUG(1, "%s(), self->InterruptID = %x\n", __func__,self->InterruptID); 842 IRDA_DEBUG(1, "%s(), self->LineStatus = %x\n", __func__,self->LineStatus); 843 IRDA_DEBUG(1, "%s(), self->ier = %x\n", __func__,self->ier); 844 IRDA_DEBUG(1, "%s(), eir = %x\n", __func__,eir); 845 846 /* Disable interrupts */ 847 SetCOMInterrupts(self, FALSE); 848 849 /* Tx or Rx Interrupt */ 850 851 if (eir & IIR_EOM) 852 { 853 if (self->io.direction == IO_XMIT) /* TX */ 854 { 855 IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Tx) *******\n", __func__); 856 857 if(ali_ircc_dma_xmit_complete(self)) 858 { 859 if (irda_device_txqueue_empty(self->netdev)) 860 { 861 /* Prepare for receive */ 862 ali_ircc_dma_receive(self); 863 self->ier = IER_EOM; 864 } 865 } 866 else 867 { 868 self->ier = IER_EOM; 869 } 870 871 } 872 else /* RX */ 873 { 874 IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Rx) *******\n", __func__); 875 876 if(OldMessageCount > ((self->LineStatus+1) & 0x07)) 877 { 878 self->rcvFramesOverflow = TRUE; 879 IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ********\n", __func__); 880 } 881 882 if (ali_ircc_dma_receive_complete(self)) 883 { 884 IRDA_DEBUG(1, "%s(), ******* receive complete ********\n", __func__); 885 886 self->ier = IER_EOM; 887 } 888 else 889 { 890 IRDA_DEBUG(1, "%s(), ******* Not receive complete ********\n", __func__); 891 892 self->ier = IER_EOM | IER_TIMER; 893 } 894 895 } 896 } 897 /* Timer Interrupt */ 898 else if (eir & IIR_TIMER) 899 { 900 if(OldMessageCount > ((self->LineStatus+1) & 0x07)) 901 { 902 self->rcvFramesOverflow = TRUE; 903 IRDA_DEBUG(1, "%s(), ******* 
self->rcvFramesOverflow = TRUE *******\n", __func__); 904 } 905 /* Disable Timer */ 906 switch_bank(iobase, BANK1); 907 tmp = inb(iobase+FIR_CR); 908 outb( tmp& ~CR_TIMER_EN, iobase+FIR_CR); 909 910 /* Check if this is a Tx timer interrupt */ 911 if (self->io.direction == IO_XMIT) 912 { 913 ali_ircc_dma_xmit(self); 914 915 /* Interrupt on EOM */ 916 self->ier = IER_EOM; 917 918 } 919 else /* Rx */ 920 { 921 if(ali_ircc_dma_receive_complete(self)) 922 { 923 self->ier = IER_EOM; 924 } 925 else 926 { 927 self->ier = IER_EOM | IER_TIMER; 928 } 929 } 930 } 931 932 /* Restore Interrupt */ 933 SetCOMInterrupts(self, TRUE); 934 935 IRDA_DEBUG(1, "%s(), ----------------- End ---------------\n", __func__); 936 return IRQ_RETVAL(eir); 937 } 938 939 /* 940 * Function ali_ircc_sir_interrupt (irq, self, eir) 941 * 942 * Handle SIR interrupt 943 * 944 */ 945 static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self) 946 { 947 int iobase; 948 int iir, lsr; 949 950 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); 951 952 iobase = self->io.sir_base; 953 954 iir = inb(iobase+UART_IIR) & UART_IIR_ID; 955 if (iir) { 956 /* Clear interrupt */ 957 lsr = inb(iobase+UART_LSR); 958 959 IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n", __func__, 960 iir, lsr, iobase); 961 962 switch (iir) 963 { 964 case UART_IIR_RLSI: 965 IRDA_DEBUG(2, "%s(), RLSI\n", __func__); 966 break; 967 case UART_IIR_RDI: 968 /* Receive interrupt */ 969 ali_ircc_sir_receive(self); 970 break; 971 case UART_IIR_THRI: 972 if (lsr & UART_LSR_THRE) 973 { 974 /* Transmitter ready for data */ 975 ali_ircc_sir_write_wakeup(self); 976 } 977 break; 978 default: 979 IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n", __func__, iir); 980 break; 981 } 982 983 } 984 985 986 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__); 987 988 return IRQ_RETVAL(iir); 989 } 990 991 992 /* 993 * Function ali_ircc_sir_receive (self) 994 * 995 * Receive one frame from the infrared port 996 
* 997 */ 998 static void ali_ircc_sir_receive(struct ali_ircc_cb *self) 999 { 1000 int boguscount = 0; 1001 int iobase; 1002 1003 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); 1004 IRDA_ASSERT(self != NULL, return;); 1005 1006 iobase = self->io.sir_base; 1007 1008 /* 1009 * Receive all characters in Rx FIFO, unwrap and unstuff them. 1010 * async_unwrap_char will deliver all found frames 1011 */ 1012 do { 1013 async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff, 1014 inb(iobase+UART_RX)); 1015 1016 /* Make sure we don't stay here too long */ 1017 if (boguscount++ > 32) { 1018 IRDA_DEBUG(2,"%s(), breaking!\n", __func__); 1019 break; 1020 } 1021 } while (inb(iobase+UART_LSR) & UART_LSR_DR); 1022 1023 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); 1024 } 1025 1026 /* 1027 * Function ali_ircc_sir_write_wakeup (tty) 1028 * 1029 * Called by the driver when there's room for more data. If we have 1030 * more packets to send, we send them here. 1031 * 1032 */ 1033 static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self) 1034 { 1035 int actual = 0; 1036 int iobase; 1037 1038 IRDA_ASSERT(self != NULL, return;); 1039 1040 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ ); 1041 1042 iobase = self->io.sir_base; 1043 1044 /* Finished with frame? */ 1045 if (self->tx_buff.len > 0) 1046 { 1047 /* Write data left in transmit buffer */ 1048 actual = ali_ircc_sir_write(iobase, self->io.fifo_size, 1049 self->tx_buff.data, self->tx_buff.len); 1050 self->tx_buff.data += actual; 1051 self->tx_buff.len -= actual; 1052 } 1053 else 1054 { 1055 if (self->new_speed) 1056 { 1057 /* We must wait until all data are gone */ 1058 while(!(inb(iobase+UART_LSR) & UART_LSR_TEMT)) 1059 IRDA_DEBUG(1, "%s(), UART_LSR_THRE\n", __func__ ); 1060 1061 IRDA_DEBUG(1, "%s(), Changing speed! 
self->new_speed = %d\n", __func__ , self->new_speed); 1062 ali_ircc_change_speed(self, self->new_speed); 1063 self->new_speed = 0; 1064 1065 // benjamin 2000/11/10 06:32PM 1066 if (self->io.speed > 115200) 1067 { 1068 IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT\n", __func__ ); 1069 1070 self->ier = IER_EOM; 1071 // SetCOMInterrupts(self, TRUE); 1072 return; 1073 } 1074 } 1075 else 1076 { 1077 netif_wake_queue(self->netdev); 1078 } 1079 1080 self->netdev->stats.tx_packets++; 1081 1082 /* Turn on receive interrupts */ 1083 outb(UART_IER_RDI, iobase+UART_IER); 1084 } 1085 1086 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); 1087 } 1088 1089 static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud) 1090 { 1091 struct net_device *dev = self->netdev; 1092 int iobase; 1093 1094 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ ); 1095 1096 IRDA_DEBUG(2, "%s(), setting speed = %d\n", __func__ , baud); 1097 1098 /* This function *must* be called with irq off and spin-lock. 
1099 * - Jean II */ 1100 1101 iobase = self->io.fir_base; 1102 1103 SetCOMInterrupts(self, FALSE); // 2000/11/24 11:43AM 1104 1105 /* Go to MIR, FIR Speed */ 1106 if (baud > 115200) 1107 { 1108 1109 1110 ali_ircc_fir_change_speed(self, baud); 1111 1112 /* Install FIR xmit handler*/ 1113 dev->netdev_ops = &ali_ircc_fir_ops; 1114 1115 /* Enable Interuupt */ 1116 self->ier = IER_EOM; // benjamin 2000/11/20 07:24PM 1117 1118 /* Be ready for incoming frames */ 1119 ali_ircc_dma_receive(self); // benajmin 2000/11/8 07:46PM not complete 1120 } 1121 /* Go to SIR Speed */ 1122 else 1123 { 1124 ali_ircc_sir_change_speed(self, baud); 1125 1126 /* Install SIR xmit handler*/ 1127 dev->netdev_ops = &ali_ircc_sir_ops; 1128 } 1129 1130 1131 SetCOMInterrupts(self, TRUE); // 2000/11/24 11:43AM 1132 1133 netif_wake_queue(self->netdev); 1134 1135 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); 1136 } 1137 1138 static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud) 1139 { 1140 1141 int iobase; 1142 struct ali_ircc_cb *self = priv; 1143 struct net_device *dev; 1144 1145 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ ); 1146 1147 IRDA_ASSERT(self != NULL, return;); 1148 1149 dev = self->netdev; 1150 iobase = self->io.fir_base; 1151 1152 IRDA_DEBUG(1, "%s(), self->io.speed = %d, change to speed = %d\n", __func__ ,self->io.speed,baud); 1153 1154 /* Come from SIR speed */ 1155 if(self->io.speed <=115200) 1156 { 1157 SIR2FIR(iobase); 1158 } 1159 1160 /* Update accounting for new speed */ 1161 self->io.speed = baud; 1162 1163 // Set Dongle Speed mode 1164 ali_ircc_change_dongle_speed(self, baud); 1165 1166 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); 1167 } 1168 1169 /* 1170 * Function ali_sir_change_speed (self, speed) 1171 * 1172 * Set speed of IrDA port to specified baudrate 1173 * 1174 */ 1175 static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed) 1176 { 
1177 struct ali_ircc_cb *self = priv; 1178 unsigned long flags; 1179 int iobase; 1180 int fcr; /* FIFO control reg */ 1181 int lcr; /* Line control reg */ 1182 int divisor; 1183 1184 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ ); 1185 1186 IRDA_DEBUG(1, "%s(), Setting speed to: %d\n", __func__ , speed); 1187 1188 IRDA_ASSERT(self != NULL, return;); 1189 1190 iobase = self->io.sir_base; 1191 1192 /* Come from MIR or FIR speed */ 1193 if(self->io.speed >115200) 1194 { 1195 // Set Dongle Speed mode first 1196 ali_ircc_change_dongle_speed(self, speed); 1197 1198 FIR2SIR(iobase); 1199 } 1200 1201 // Clear Line and Auxiluary status registers 2000/11/24 11:47AM 1202 1203 inb(iobase+UART_LSR); 1204 inb(iobase+UART_SCR); 1205 1206 /* Update accounting for new speed */ 1207 self->io.speed = speed; 1208 1209 spin_lock_irqsave(&self->lock, flags); 1210 1211 divisor = 115200/speed; 1212 1213 fcr = UART_FCR_ENABLE_FIFO; 1214 1215 /* 1216 * Use trigger level 1 to avoid 3 ms. timeout delay at 9600 bps, and 1217 * almost 1,7 ms at 19200 bps. At speeds above that we can just forget 1218 * about this timeout since it will always be fast enough. 
1219 */ 1220 if (self->io.speed < 38400) 1221 fcr |= UART_FCR_TRIGGER_1; 1222 else 1223 fcr |= UART_FCR_TRIGGER_14; 1224 1225 /* IrDA ports use 8N1 */ 1226 lcr = UART_LCR_WLEN8; 1227 1228 outb(UART_LCR_DLAB | lcr, iobase+UART_LCR); /* Set DLAB */ 1229 outb(divisor & 0xff, iobase+UART_DLL); /* Set speed */ 1230 outb(divisor >> 8, iobase+UART_DLM); 1231 outb(lcr, iobase+UART_LCR); /* Set 8N1 */ 1232 outb(fcr, iobase+UART_FCR); /* Enable FIFO's */ 1233 1234 /* without this, the connection will be broken after come back from FIR speed, 1235 but with this, the SIR connection is harder to established */ 1236 outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase+UART_MCR); 1237 1238 spin_unlock_irqrestore(&self->lock, flags); 1239 1240 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); 1241 } 1242 1243 static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed) 1244 { 1245 1246 struct ali_ircc_cb *self = priv; 1247 int iobase,dongle_id; 1248 int tmp = 0; 1249 1250 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ ); 1251 1252 iobase = self->io.fir_base; /* or iobase = self->io.sir_base; */ 1253 dongle_id = self->io.dongle_id; 1254 1255 /* We are already locked, no need to do it again */ 1256 1257 IRDA_DEBUG(1, "%s(), Set Speed for %s , Speed = %d\n", __func__ , dongle_types[dongle_id], speed); 1258 1259 switch_bank(iobase, BANK2); 1260 tmp = inb(iobase+FIR_IRDA_CR); 1261 1262 /* IBM type dongle */ 1263 if(dongle_id == 0) 1264 { 1265 if(speed == 4000000) 1266 { 1267 // __ __ 1268 // SD/MODE __| |__ __ 1269 // __ __ 1270 // IRTX __ __| |__ 1271 // T1 T2 T3 T4 T5 1272 1273 tmp &= ~IRDA_CR_HDLC; // HDLC=0 1274 tmp |= IRDA_CR_CRC; // CRC=1 1275 1276 switch_bank(iobase, BANK2); 1277 outb(tmp, iobase+FIR_IRDA_CR); 1278 1279 // T1 -> SD/MODE:0 IRTX:0 1280 tmp &= ~0x09; 1281 tmp |= 0x02; 1282 outb(tmp, iobase+FIR_IRDA_CR); 1283 udelay(2); 1284 1285 // T2 -> SD/MODE:1 IRTX:0 1286 tmp &= ~0x01; 1287 tmp |= 
0x0a; 1288 outb(tmp, iobase+FIR_IRDA_CR); 1289 udelay(2); 1290 1291 // T3 -> SD/MODE:1 IRTX:1 1292 tmp |= 0x0b; 1293 outb(tmp, iobase+FIR_IRDA_CR); 1294 udelay(2); 1295 1296 // T4 -> SD/MODE:0 IRTX:1 1297 tmp &= ~0x08; 1298 tmp |= 0x03; 1299 outb(tmp, iobase+FIR_IRDA_CR); 1300 udelay(2); 1301 1302 // T5 -> SD/MODE:0 IRTX:0 1303 tmp &= ~0x09; 1304 tmp |= 0x02; 1305 outb(tmp, iobase+FIR_IRDA_CR); 1306 udelay(2); 1307 1308 // reset -> Normal TX output Signal 1309 outb(tmp & ~0x02, iobase+FIR_IRDA_CR); 1310 } 1311 else /* speed <=1152000 */ 1312 { 1313 // __ 1314 // SD/MODE __| |__ 1315 // 1316 // IRTX ________ 1317 // T1 T2 T3 1318 1319 /* MIR 115200, 57600 */ 1320 if (speed==1152000) 1321 { 1322 tmp |= 0xA0; //HDLC=1, 1.152Mbps=1 1323 } 1324 else 1325 { 1326 tmp &=~0x80; //HDLC 0.576Mbps 1327 tmp |= 0x20; //HDLC=1, 1328 } 1329 1330 tmp |= IRDA_CR_CRC; // CRC=1 1331 1332 switch_bank(iobase, BANK2); 1333 outb(tmp, iobase+FIR_IRDA_CR); 1334 1335 /* MIR 115200, 57600 */ 1336 1337 //switch_bank(iobase, BANK2); 1338 // T1 -> SD/MODE:0 IRTX:0 1339 tmp &= ~0x09; 1340 tmp |= 0x02; 1341 outb(tmp, iobase+FIR_IRDA_CR); 1342 udelay(2); 1343 1344 // T2 -> SD/MODE:1 IRTX:0 1345 tmp &= ~0x01; 1346 tmp |= 0x0a; 1347 outb(tmp, iobase+FIR_IRDA_CR); 1348 1349 // T3 -> SD/MODE:0 IRTX:0 1350 tmp &= ~0x09; 1351 tmp |= 0x02; 1352 outb(tmp, iobase+FIR_IRDA_CR); 1353 udelay(2); 1354 1355 // reset -> Normal TX output Signal 1356 outb(tmp & ~0x02, iobase+FIR_IRDA_CR); 1357 } 1358 } 1359 else if (dongle_id == 1) /* HP HDSL-3600 */ 1360 { 1361 switch(speed) 1362 { 1363 case 4000000: 1364 tmp &= ~IRDA_CR_HDLC; // HDLC=0 1365 break; 1366 1367 case 1152000: 1368 tmp |= 0xA0; // HDLC=1, 1.152Mbps=1 1369 break; 1370 1371 case 576000: 1372 tmp &=~0x80; // HDLC 0.576Mbps 1373 tmp |= 0x20; // HDLC=1, 1374 break; 1375 } 1376 1377 tmp |= IRDA_CR_CRC; // CRC=1 1378 1379 switch_bank(iobase, BANK2); 1380 outb(tmp, iobase+FIR_IRDA_CR); 1381 } 1382 else /* HP HDSL-1100 */ 1383 { 1384 if(speed <= 115200) /* SIR 
*/ 1385 { 1386 1387 tmp &= ~IRDA_CR_FIR_SIN; // HP sin select = 0 1388 1389 switch_bank(iobase, BANK2); 1390 outb(tmp, iobase+FIR_IRDA_CR); 1391 } 1392 else /* MIR FIR */ 1393 { 1394 1395 switch(speed) 1396 { 1397 case 4000000: 1398 tmp &= ~IRDA_CR_HDLC; // HDLC=0 1399 break; 1400 1401 case 1152000: 1402 tmp |= 0xA0; // HDLC=1, 1.152Mbps=1 1403 break; 1404 1405 case 576000: 1406 tmp &=~0x80; // HDLC 0.576Mbps 1407 tmp |= 0x20; // HDLC=1, 1408 break; 1409 } 1410 1411 tmp |= IRDA_CR_CRC; // CRC=1 1412 tmp |= IRDA_CR_FIR_SIN; // HP sin select = 1 1413 1414 switch_bank(iobase, BANK2); 1415 outb(tmp, iobase+FIR_IRDA_CR); 1416 } 1417 } 1418 1419 switch_bank(iobase, BANK0); 1420 1421 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); 1422 } 1423 1424 /* 1425 * Function ali_ircc_sir_write (driver) 1426 * 1427 * Fill Tx FIFO with transmit data 1428 * 1429 */ 1430 static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len) 1431 { 1432 int actual = 0; 1433 1434 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ ); 1435 1436 /* Tx FIFO should be empty! 
*/ 1437 if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) { 1438 IRDA_DEBUG(0, "%s(), failed, fifo not empty!\n", __func__ ); 1439 return 0; 1440 } 1441 1442 /* Fill FIFO with current frame */ 1443 while ((fifo_size-- > 0) && (actual < len)) { 1444 /* Transmit next byte */ 1445 outb(buf[actual], iobase+UART_TX); 1446 1447 actual++; 1448 } 1449 1450 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); 1451 return actual; 1452 } 1453 1454 /* 1455 * Function ali_ircc_net_open (dev) 1456 * 1457 * Start the device 1458 * 1459 */ 1460 static int ali_ircc_net_open(struct net_device *dev) 1461 { 1462 struct ali_ircc_cb *self; 1463 int iobase; 1464 char hwname[32]; 1465 1466 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ ); 1467 1468 IRDA_ASSERT(dev != NULL, return -1;); 1469 1470 self = netdev_priv(dev); 1471 1472 IRDA_ASSERT(self != NULL, return 0;); 1473 1474 iobase = self->io.fir_base; 1475 1476 /* Request IRQ and install Interrupt Handler */ 1477 if (request_irq(self->io.irq, ali_ircc_interrupt, 0, dev->name, dev)) 1478 { 1479 IRDA_WARNING("%s, unable to allocate irq=%d\n", 1480 ALI_IRCC_DRIVER_NAME, 1481 self->io.irq); 1482 return -EAGAIN; 1483 } 1484 1485 /* 1486 * Always allocate the DMA channel after the IRQ, and clean up on 1487 * failure. 1488 */ 1489 if (request_dma(self->io.dma, dev->name)) { 1490 IRDA_WARNING("%s, unable to allocate dma=%d\n", 1491 ALI_IRCC_DRIVER_NAME, 1492 self->io.dma); 1493 free_irq(self->io.irq, dev); 1494 return -EAGAIN; 1495 } 1496 1497 /* Turn on interrups */ 1498 outb(UART_IER_RDI , iobase+UART_IER); 1499 1500 /* Ready to play! 
*/ 1501 netif_start_queue(dev); //benjamin by irport 1502 1503 /* Give self a hardware name */ 1504 sprintf(hwname, "ALI-FIR @ 0x%03x", self->io.fir_base); 1505 1506 /* 1507 * Open new IrLAP layer instance, now that everything should be 1508 * initialized properly 1509 */ 1510 self->irlap = irlap_open(dev, &self->qos, hwname); 1511 1512 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); 1513 1514 return 0; 1515 } 1516 1517 /* 1518 * Function ali_ircc_net_close (dev) 1519 * 1520 * Stop the device 1521 * 1522 */ 1523 static int ali_ircc_net_close(struct net_device *dev) 1524 { 1525 1526 struct ali_ircc_cb *self; 1527 //int iobase; 1528 1529 IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __func__ ); 1530 1531 IRDA_ASSERT(dev != NULL, return -1;); 1532 1533 self = netdev_priv(dev); 1534 IRDA_ASSERT(self != NULL, return 0;); 1535 1536 /* Stop device */ 1537 netif_stop_queue(dev); 1538 1539 /* Stop and remove instance of IrLAP */ 1540 if (self->irlap) 1541 irlap_close(self->irlap); 1542 self->irlap = NULL; 1543 1544 disable_dma(self->io.dma); 1545 1546 /* Disable interrupts */ 1547 SetCOMInterrupts(self, FALSE); 1548 1549 free_irq(self->io.irq, dev); 1550 free_dma(self->io.dma); 1551 1552 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); 1553 1554 return 0; 1555 } 1556 1557 /* 1558 * Function ali_ircc_fir_hard_xmit (skb, dev) 1559 * 1560 * Transmit the frame 1561 * 1562 */ 1563 static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb, 1564 struct net_device *dev) 1565 { 1566 struct ali_ircc_cb *self; 1567 unsigned long flags; 1568 int iobase; 1569 __u32 speed; 1570 int mtt, diff; 1571 1572 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ ); 1573 1574 self = netdev_priv(dev); 1575 iobase = self->io.fir_base; 1576 1577 netif_stop_queue(dev); 1578 1579 /* Make sure tests *& speed change are atomic */ 1580 spin_lock_irqsave(&self->lock, flags); 1581 1582 /* Note : you 
should make sure that speed changes are not going 1583 * to corrupt any outgoing frame. Look at nsc-ircc for the gory 1584 * details - Jean II */ 1585 1586 /* Check if we need to change the speed */ 1587 speed = irda_get_next_speed(skb); 1588 if ((speed != self->io.speed) && (speed != -1)) { 1589 /* Check for empty frame */ 1590 if (!skb->len) { 1591 ali_ircc_change_speed(self, speed); 1592 dev->trans_start = jiffies; 1593 spin_unlock_irqrestore(&self->lock, flags); 1594 dev_kfree_skb(skb); 1595 return NETDEV_TX_OK; 1596 } else 1597 self->new_speed = speed; 1598 } 1599 1600 /* Register and copy this frame to DMA memory */ 1601 self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail; 1602 self->tx_fifo.queue[self->tx_fifo.free].len = skb->len; 1603 self->tx_fifo.tail += skb->len; 1604 1605 dev->stats.tx_bytes += skb->len; 1606 1607 skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start, 1608 skb->len); 1609 self->tx_fifo.len++; 1610 self->tx_fifo.free++; 1611 1612 /* Start transmit only if there is currently no transmit going on */ 1613 if (self->tx_fifo.len == 1) 1614 { 1615 /* Check if we must wait the min turn time or not */ 1616 mtt = irda_get_mtt(skb); 1617 1618 if (mtt) 1619 { 1620 /* Check how much time we have used already */ 1621 do_gettimeofday(&self->now); 1622 1623 diff = self->now.tv_usec - self->stamp.tv_usec; 1624 /* self->stamp is set from ali_ircc_dma_receive_complete() */ 1625 1626 IRDA_DEBUG(1, "%s(), ******* diff = %d *******\n", __func__ , diff); 1627 1628 if (diff < 0) 1629 diff += 1000000; 1630 1631 /* Check if the mtt is larger than the time we have 1632 * already used by all the protocol processing 1633 */ 1634 if (mtt > diff) 1635 { 1636 mtt -= diff; 1637 1638 /* 1639 * Use timer if delay larger than 1000 us, and 1640 * use udelay for smaller values which should 1641 * be acceptable 1642 */ 1643 if (mtt > 500) 1644 { 1645 /* Adjust for timer resolution */ 1646 mtt = (mtt+250) / 500; /* 4 discard, 5 get 
advanced, Let's round off */ 1647 1648 IRDA_DEBUG(1, "%s(), ************** mtt = %d ***********\n", __func__ , mtt); 1649 1650 /* Setup timer */ 1651 if (mtt == 1) /* 500 us */ 1652 { 1653 switch_bank(iobase, BANK1); 1654 outb(TIMER_IIR_500, iobase+FIR_TIMER_IIR); 1655 } 1656 else if (mtt == 2) /* 1 ms */ 1657 { 1658 switch_bank(iobase, BANK1); 1659 outb(TIMER_IIR_1ms, iobase+FIR_TIMER_IIR); 1660 } 1661 else /* > 2ms -> 4ms */ 1662 { 1663 switch_bank(iobase, BANK1); 1664 outb(TIMER_IIR_2ms, iobase+FIR_TIMER_IIR); 1665 } 1666 1667 1668 /* Start timer */ 1669 outb(inb(iobase+FIR_CR) | CR_TIMER_EN, iobase+FIR_CR); 1670 self->io.direction = IO_XMIT; 1671 1672 /* Enable timer interrupt */ 1673 self->ier = IER_TIMER; 1674 SetCOMInterrupts(self, TRUE); 1675 1676 /* Timer will take care of the rest */ 1677 goto out; 1678 } 1679 else 1680 udelay(mtt); 1681 } // if (if (mtt > diff) 1682 }// if (mtt) 1683 1684 /* Enable EOM interrupt */ 1685 self->ier = IER_EOM; 1686 SetCOMInterrupts(self, TRUE); 1687 1688 /* Transmit frame */ 1689 ali_ircc_dma_xmit(self); 1690 } // if (self->tx_fifo.len == 1) 1691 1692 out: 1693 1694 /* Not busy transmitting anymore if window is not full */ 1695 if (self->tx_fifo.free < MAX_TX_WINDOW) 1696 netif_wake_queue(self->netdev); 1697 1698 /* Restore bank register */ 1699 switch_bank(iobase, BANK0); 1700 1701 dev->trans_start = jiffies; 1702 spin_unlock_irqrestore(&self->lock, flags); 1703 dev_kfree_skb(skb); 1704 1705 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); 1706 return NETDEV_TX_OK; 1707 } 1708 1709 1710 static void ali_ircc_dma_xmit(struct ali_ircc_cb *self) 1711 { 1712 int iobase, tmp; 1713 unsigned char FIFO_OPTI, Hi, Lo; 1714 1715 1716 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ ); 1717 1718 iobase = self->io.fir_base; 1719 1720 /* FIFO threshold , this method comes from NDIS5 code */ 1721 1722 if(self->tx_fifo.queue[self->tx_fifo.ptr].len < TX_FIFO_Threshold) 1723 FIFO_OPTI = 
self->tx_fifo.queue[self->tx_fifo.ptr].len-1; 1724 else 1725 FIFO_OPTI = TX_FIFO_Threshold; 1726 1727 /* Disable DMA */ 1728 switch_bank(iobase, BANK1); 1729 outb(inb(iobase+FIR_CR) & ~CR_DMA_EN, iobase+FIR_CR); 1730 1731 self->io.direction = IO_XMIT; 1732 1733 irda_setup_dma(self->io.dma, 1734 ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start - 1735 self->tx_buff.head) + self->tx_buff_dma, 1736 self->tx_fifo.queue[self->tx_fifo.ptr].len, 1737 DMA_TX_MODE); 1738 1739 /* Reset Tx FIFO */ 1740 switch_bank(iobase, BANK0); 1741 outb(LCR_A_FIFO_RESET, iobase+FIR_LCR_A); 1742 1743 /* Set Tx FIFO threshold */ 1744 if (self->fifo_opti_buf!=FIFO_OPTI) 1745 { 1746 switch_bank(iobase, BANK1); 1747 outb(FIFO_OPTI, iobase+FIR_FIFO_TR) ; 1748 self->fifo_opti_buf=FIFO_OPTI; 1749 } 1750 1751 /* Set Tx DMA threshold */ 1752 switch_bank(iobase, BANK1); 1753 outb(TX_DMA_Threshold, iobase+FIR_DMA_TR); 1754 1755 /* Set max Tx frame size */ 1756 Hi = (self->tx_fifo.queue[self->tx_fifo.ptr].len >> 8) & 0x0f; 1757 Lo = self->tx_fifo.queue[self->tx_fifo.ptr].len & 0xff; 1758 switch_bank(iobase, BANK2); 1759 outb(Hi, iobase+FIR_TX_DSR_HI); 1760 outb(Lo, iobase+FIR_TX_DSR_LO); 1761 1762 /* Disable SIP , Disable Brick Wall (we don't support in TX mode), Change to TX mode */ 1763 switch_bank(iobase, BANK0); 1764 tmp = inb(iobase+FIR_LCR_B); 1765 tmp &= ~0x20; // Disable SIP 1766 outb(((unsigned char)(tmp & 0x3f) | LCR_B_TX_MODE) & ~LCR_B_BW, iobase+FIR_LCR_B); 1767 IRDA_DEBUG(1, "%s(), *** Change to TX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B)); 1768 1769 outb(0, iobase+FIR_LSR); 1770 1771 /* Enable DMA and Burst Mode */ 1772 switch_bank(iobase, BANK1); 1773 outb(inb(iobase+FIR_CR) | CR_DMA_EN | CR_DMA_BURST, iobase+FIR_CR); 1774 1775 switch_bank(iobase, BANK0); 1776 1777 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); 1778 } 1779 1780 static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self) 1781 { 1782 int iobase; 1783 int ret = 
TRUE; 1784 1785 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ ); 1786 1787 iobase = self->io.fir_base; 1788 1789 /* Disable DMA */ 1790 switch_bank(iobase, BANK1); 1791 outb(inb(iobase+FIR_CR) & ~CR_DMA_EN, iobase+FIR_CR); 1792 1793 /* Check for underrun! */ 1794 switch_bank(iobase, BANK0); 1795 if((inb(iobase+FIR_LSR) & LSR_FRAME_ABORT) == LSR_FRAME_ABORT) 1796 1797 { 1798 IRDA_ERROR("%s(), ********* LSR_FRAME_ABORT *********\n", __func__); 1799 self->netdev->stats.tx_errors++; 1800 self->netdev->stats.tx_fifo_errors++; 1801 } 1802 else 1803 { 1804 self->netdev->stats.tx_packets++; 1805 } 1806 1807 /* Check if we need to change the speed */ 1808 if (self->new_speed) 1809 { 1810 ali_ircc_change_speed(self, self->new_speed); 1811 self->new_speed = 0; 1812 } 1813 1814 /* Finished with this frame, so prepare for next */ 1815 self->tx_fifo.ptr++; 1816 self->tx_fifo.len--; 1817 1818 /* Any frames to be sent back-to-back? */ 1819 if (self->tx_fifo.len) 1820 { 1821 ali_ircc_dma_xmit(self); 1822 1823 /* Not finished yet! */ 1824 ret = FALSE; 1825 } 1826 else 1827 { /* Reset Tx FIFO info */ 1828 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0; 1829 self->tx_fifo.tail = self->tx_buff.head; 1830 } 1831 1832 /* Make sure we have room for more frames */ 1833 if (self->tx_fifo.free < MAX_TX_WINDOW) { 1834 /* Not busy transmitting anymore */ 1835 /* Tell the network layer, that we can accept more frames */ 1836 netif_wake_queue(self->netdev); 1837 } 1838 1839 switch_bank(iobase, BANK0); 1840 1841 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); 1842 return ret; 1843 } 1844 1845 /* 1846 * Function ali_ircc_dma_receive (self) 1847 * 1848 * Get ready for receiving a frame. The device will initiate a DMA 1849 * if it starts to receive a frame. 
1850 * 1851 */ 1852 static int ali_ircc_dma_receive(struct ali_ircc_cb *self) 1853 { 1854 int iobase, tmp; 1855 1856 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ ); 1857 1858 iobase = self->io.fir_base; 1859 1860 /* Reset Tx FIFO info */ 1861 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0; 1862 self->tx_fifo.tail = self->tx_buff.head; 1863 1864 /* Disable DMA */ 1865 switch_bank(iobase, BANK1); 1866 outb(inb(iobase+FIR_CR) & ~CR_DMA_EN, iobase+FIR_CR); 1867 1868 /* Reset Message Count */ 1869 switch_bank(iobase, BANK0); 1870 outb(0x07, iobase+FIR_LSR); 1871 1872 self->rcvFramesOverflow = FALSE; 1873 1874 self->LineStatus = inb(iobase+FIR_LSR) ; 1875 1876 /* Reset Rx FIFO info */ 1877 self->io.direction = IO_RECV; 1878 self->rx_buff.data = self->rx_buff.head; 1879 1880 /* Reset Rx FIFO */ 1881 // switch_bank(iobase, BANK0); 1882 outb(LCR_A_FIFO_RESET, iobase+FIR_LCR_A); 1883 1884 self->st_fifo.len = self->st_fifo.pending_bytes = 0; 1885 self->st_fifo.tail = self->st_fifo.head = 0; 1886 1887 irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize, 1888 DMA_RX_MODE); 1889 1890 /* Set Receive Mode,Brick Wall */ 1891 //switch_bank(iobase, BANK0); 1892 tmp = inb(iobase+FIR_LCR_B); 1893 outb((unsigned char)(tmp &0x3f) | LCR_B_RX_MODE | LCR_B_BW , iobase + FIR_LCR_B); // 2000/12/1 05:16PM 1894 IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B)); 1895 1896 /* Set Rx Threshold */ 1897 switch_bank(iobase, BANK1); 1898 outb(RX_FIFO_Threshold, iobase+FIR_FIFO_TR); 1899 outb(RX_DMA_Threshold, iobase+FIR_DMA_TR); 1900 1901 /* Enable DMA and Burst Mode */ 1902 // switch_bank(iobase, BANK1); 1903 outb(CR_DMA_EN | CR_DMA_BURST, iobase+FIR_CR); 1904 1905 switch_bank(iobase, BANK0); 1906 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); 1907 return 0; 1908 } 1909 1910 static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self) 1911 { 1912 
struct st_fifo *st_fifo; 1913 struct sk_buff *skb; 1914 __u8 status, MessageCount; 1915 int len, i, iobase, val; 1916 1917 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ ); 1918 1919 st_fifo = &self->st_fifo; 1920 iobase = self->io.fir_base; 1921 1922 switch_bank(iobase, BANK0); 1923 MessageCount = inb(iobase+ FIR_LSR)&0x07; 1924 1925 if (MessageCount > 0) 1926 IRDA_DEBUG(0, "%s(), Message count = %d,\n", __func__ , MessageCount); 1927 1928 for (i=0; i<=MessageCount; i++) 1929 { 1930 /* Bank 0 */ 1931 switch_bank(iobase, BANK0); 1932 status = inb(iobase+FIR_LSR); 1933 1934 switch_bank(iobase, BANK2); 1935 len = inb(iobase+FIR_RX_DSR_HI) & 0x0f; 1936 len = len << 8; 1937 len |= inb(iobase+FIR_RX_DSR_LO); 1938 1939 IRDA_DEBUG(1, "%s(), RX Length = 0x%.2x,\n", __func__ , len); 1940 IRDA_DEBUG(1, "%s(), RX Status = 0x%.2x,\n", __func__ , status); 1941 1942 if (st_fifo->tail >= MAX_RX_WINDOW) { 1943 IRDA_DEBUG(0, "%s(), window is full!\n", __func__ ); 1944 continue; 1945 } 1946 1947 st_fifo->entries[st_fifo->tail].status = status; 1948 st_fifo->entries[st_fifo->tail].len = len; 1949 st_fifo->pending_bytes += len; 1950 st_fifo->tail++; 1951 st_fifo->len++; 1952 } 1953 1954 for (i=0; i<=MessageCount; i++) 1955 { 1956 /* Get first entry */ 1957 status = st_fifo->entries[st_fifo->head].status; 1958 len = st_fifo->entries[st_fifo->head].len; 1959 st_fifo->pending_bytes -= len; 1960 st_fifo->head++; 1961 st_fifo->len--; 1962 1963 /* Check for errors */ 1964 if ((status & 0xd8) || self->rcvFramesOverflow || (len==0)) 1965 { 1966 IRDA_DEBUG(0,"%s(), ************* RX Errors ************\n", __func__ ); 1967 1968 /* Skip frame */ 1969 self->netdev->stats.rx_errors++; 1970 1971 self->rx_buff.data += len; 1972 1973 if (status & LSR_FIFO_UR) 1974 { 1975 self->netdev->stats.rx_frame_errors++; 1976 IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************\n", __func__ ); 1977 } 1978 if (status & LSR_FRAME_ERROR) 1979 { 1980 
self->netdev->stats.rx_frame_errors++; 1981 IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************\n", __func__ ); 1982 } 1983 1984 if (status & LSR_CRC_ERROR) 1985 { 1986 self->netdev->stats.rx_crc_errors++; 1987 IRDA_DEBUG(0,"%s(), ************* CRC Errors ************\n", __func__ ); 1988 } 1989 1990 if(self->rcvFramesOverflow) 1991 { 1992 self->netdev->stats.rx_frame_errors++; 1993 IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************\n", __func__ ); 1994 } 1995 if(len == 0) 1996 { 1997 self->netdev->stats.rx_frame_errors++; 1998 IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 *********\n", __func__ ); 1999 } 2000 } 2001 else 2002 { 2003 2004 if (st_fifo->pending_bytes < 32) 2005 { 2006 switch_bank(iobase, BANK0); 2007 val = inb(iobase+FIR_BSR); 2008 if ((val& BSR_FIFO_NOT_EMPTY)== 0x80) 2009 { 2010 IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************\n", __func__ ); 2011 2012 /* Put this entry back in fifo */ 2013 st_fifo->head--; 2014 st_fifo->len++; 2015 st_fifo->pending_bytes += len; 2016 st_fifo->entries[st_fifo->head].status = status; 2017 st_fifo->entries[st_fifo->head].len = len; 2018 2019 /* 2020 * DMA not finished yet, so try again 2021 * later, set timer value, resolution 2022 * 500 us 2023 */ 2024 2025 switch_bank(iobase, BANK1); 2026 outb(TIMER_IIR_500, iobase+FIR_TIMER_IIR); // 2001/1/2 05:07PM 2027 2028 /* Enable Timer */ 2029 outb(inb(iobase+FIR_CR) | CR_TIMER_EN, iobase+FIR_CR); 2030 2031 return FALSE; /* I'll be back! 
*/ 2032 } 2033 } 2034 2035 /* 2036 * Remember the time we received this frame, so we can 2037 * reduce the min turn time a bit since we will know 2038 * how much time we have used for protocol processing 2039 */ 2040 do_gettimeofday(&self->stamp); 2041 2042 skb = dev_alloc_skb(len+1); 2043 if (skb == NULL) 2044 { 2045 IRDA_WARNING("%s(), memory squeeze, " 2046 "dropping frame.\n", 2047 __func__); 2048 self->netdev->stats.rx_dropped++; 2049 2050 return FALSE; 2051 } 2052 2053 /* Make sure IP header gets aligned */ 2054 skb_reserve(skb, 1); 2055 2056 /* Copy frame without CRC, CRC is removed by hardware*/ 2057 skb_put(skb, len); 2058 skb_copy_to_linear_data(skb, self->rx_buff.data, len); 2059 2060 /* Move to next frame */ 2061 self->rx_buff.data += len; 2062 self->netdev->stats.rx_bytes += len; 2063 self->netdev->stats.rx_packets++; 2064 2065 skb->dev = self->netdev; 2066 skb_reset_mac_header(skb); 2067 skb->protocol = htons(ETH_P_IRDA); 2068 netif_rx(skb); 2069 } 2070 } 2071 2072 switch_bank(iobase, BANK0); 2073 2074 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); 2075 return TRUE; 2076 } 2077 2078 2079 2080 /* 2081 * Function ali_ircc_sir_hard_xmit (skb, dev) 2082 * 2083 * Transmit the frame! 2084 * 2085 */ 2086 static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb, 2087 struct net_device *dev) 2088 { 2089 struct ali_ircc_cb *self; 2090 unsigned long flags; 2091 int iobase; 2092 __u32 speed; 2093 2094 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ ); 2095 2096 IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;); 2097 2098 self = netdev_priv(dev); 2099 IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;); 2100 2101 iobase = self->io.sir_base; 2102 2103 netif_stop_queue(dev); 2104 2105 /* Make sure tests *& speed change are atomic */ 2106 spin_lock_irqsave(&self->lock, flags); 2107 2108 /* Note : you should make sure that speed changes are not going 2109 * to corrupt any outgoing frame. 
Look at nsc-ircc for the gory 2110 * details - Jean II */ 2111 2112 /* Check if we need to change the speed */ 2113 speed = irda_get_next_speed(skb); 2114 if ((speed != self->io.speed) && (speed != -1)) { 2115 /* Check for empty frame */ 2116 if (!skb->len) { 2117 ali_ircc_change_speed(self, speed); 2118 dev->trans_start = jiffies; 2119 spin_unlock_irqrestore(&self->lock, flags); 2120 dev_kfree_skb(skb); 2121 return NETDEV_TX_OK; 2122 } else 2123 self->new_speed = speed; 2124 } 2125 2126 /* Init tx buffer */ 2127 self->tx_buff.data = self->tx_buff.head; 2128 2129 /* Copy skb to tx_buff while wrapping, stuffing and making CRC */ 2130 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, 2131 self->tx_buff.truesize); 2132 2133 self->netdev->stats.tx_bytes += self->tx_buff.len; 2134 2135 /* Turn on transmit finished interrupt. Will fire immediately! */ 2136 outb(UART_IER_THRI, iobase+UART_IER); 2137 2138 dev->trans_start = jiffies; 2139 spin_unlock_irqrestore(&self->lock, flags); 2140 2141 dev_kfree_skb(skb); 2142 2143 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); 2144 2145 return NETDEV_TX_OK; 2146 } 2147 2148 2149 /* 2150 * Function ali_ircc_net_ioctl (dev, rq, cmd) 2151 * 2152 * Process IOCTL commands for this device 2153 * 2154 */ 2155 static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2156 { 2157 struct if_irda_req *irq = (struct if_irda_req *) rq; 2158 struct ali_ircc_cb *self; 2159 unsigned long flags; 2160 int ret = 0; 2161 2162 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ ); 2163 2164 IRDA_ASSERT(dev != NULL, return -1;); 2165 2166 self = netdev_priv(dev); 2167 2168 IRDA_ASSERT(self != NULL, return -1;); 2169 2170 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd); 2171 2172 switch (cmd) { 2173 case SIOCSBANDWIDTH: /* Set bandwidth */ 2174 IRDA_DEBUG(1, "%s(), SIOCSBANDWIDTH\n", __func__ ); 2175 /* 2176 * This function will also be used by IrLAP to 
change the 2177 * speed, so we still must allow for speed change within 2178 * interrupt context. 2179 */ 2180 if (!in_interrupt() && !capable(CAP_NET_ADMIN)) 2181 return -EPERM; 2182 2183 spin_lock_irqsave(&self->lock, flags); 2184 ali_ircc_change_speed(self, irq->ifr_baudrate); 2185 spin_unlock_irqrestore(&self->lock, flags); 2186 break; 2187 case SIOCSMEDIABUSY: /* Set media busy */ 2188 IRDA_DEBUG(1, "%s(), SIOCSMEDIABUSY\n", __func__ ); 2189 if (!capable(CAP_NET_ADMIN)) 2190 return -EPERM; 2191 irda_device_set_media_busy(self->netdev, TRUE); 2192 break; 2193 case SIOCGRECEIVING: /* Check if we are receiving right now */ 2194 IRDA_DEBUG(2, "%s(), SIOCGRECEIVING\n", __func__ ); 2195 /* This is protected */ 2196 irq->ifr_receiving = ali_ircc_is_receiving(self); 2197 break; 2198 default: 2199 ret = -EOPNOTSUPP; 2200 } 2201 2202 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); 2203 2204 return ret; 2205 } 2206 2207 /* 2208 * Function ali_ircc_is_receiving (self) 2209 * 2210 * Return TRUE is we are currently receiving a frame 2211 * 2212 */ 2213 static int ali_ircc_is_receiving(struct ali_ircc_cb *self) 2214 { 2215 unsigned long flags; 2216 int status = FALSE; 2217 int iobase; 2218 2219 IRDA_DEBUG(2, "%s(), ---------------- Start -----------------\n", __func__ ); 2220 2221 IRDA_ASSERT(self != NULL, return FALSE;); 2222 2223 spin_lock_irqsave(&self->lock, flags); 2224 2225 if (self->io.speed > 115200) 2226 { 2227 iobase = self->io.fir_base; 2228 2229 switch_bank(iobase, BANK1); 2230 if((inb(iobase+FIR_FIFO_FR) & 0x3f) != 0) 2231 { 2232 /* We are receiving something */ 2233 IRDA_DEBUG(1, "%s(), We are receiving something\n", __func__ ); 2234 status = TRUE; 2235 } 2236 switch_bank(iobase, BANK0); 2237 } 2238 else 2239 { 2240 status = (self->rx_buff.state != OUTSIDE_FRAME); 2241 } 2242 2243 spin_unlock_irqrestore(&self->lock, flags); 2244 2245 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); 2246 2247 return 
status; 2248 } 2249 2250 static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state) 2251 { 2252 struct ali_ircc_cb *self = platform_get_drvdata(dev); 2253 2254 IRDA_MESSAGE("%s, Suspending\n", ALI_IRCC_DRIVER_NAME); 2255 2256 if (self->io.suspended) 2257 return 0; 2258 2259 ali_ircc_net_close(self->netdev); 2260 2261 self->io.suspended = 1; 2262 2263 return 0; 2264 } 2265 2266 static int ali_ircc_resume(struct platform_device *dev) 2267 { 2268 struct ali_ircc_cb *self = platform_get_drvdata(dev); 2269 2270 if (!self->io.suspended) 2271 return 0; 2272 2273 ali_ircc_net_open(self->netdev); 2274 2275 IRDA_MESSAGE("%s, Waking up\n", ALI_IRCC_DRIVER_NAME); 2276 2277 self->io.suspended = 0; 2278 2279 return 0; 2280 } 2281 2282 /* ALi Chip Function */ 2283 2284 static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable) 2285 { 2286 2287 unsigned char newMask; 2288 2289 int iobase = self->io.fir_base; /* or sir_base */ 2290 2291 IRDA_DEBUG(2, "%s(), -------- Start -------- ( Enable = %d )\n", __func__ , enable); 2292 2293 /* Enable the interrupt which we wish to */ 2294 if (enable){ 2295 if (self->io.direction == IO_XMIT) 2296 { 2297 if (self->io.speed > 115200) /* FIR, MIR */ 2298 { 2299 newMask = self->ier; 2300 } 2301 else /* SIR */ 2302 { 2303 newMask = UART_IER_THRI | UART_IER_RDI; 2304 } 2305 } 2306 else { 2307 if (self->io.speed > 115200) /* FIR, MIR */ 2308 { 2309 newMask = self->ier; 2310 } 2311 else /* SIR */ 2312 { 2313 newMask = UART_IER_RDI; 2314 } 2315 } 2316 } 2317 else /* Disable all the interrupts */ 2318 { 2319 newMask = 0x00; 2320 2321 } 2322 2323 //SIR and FIR has different registers 2324 if (self->io.speed > 115200) 2325 { 2326 switch_bank(iobase, BANK0); 2327 outb(newMask, iobase+FIR_IER); 2328 } 2329 else 2330 outb(newMask, iobase+UART_IER); 2331 2332 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); 2333 } 2334 2335 static void SIR2FIR(int iobase) 2336 { 2337 //unsigned char tmp; 2338 
2339 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ ); 2340 2341 /* Already protected (change_speed() or setup()), no need to lock. 2342 * Jean II */ 2343 2344 outb(0x28, iobase+UART_MCR); 2345 outb(0x68, iobase+UART_MCR); 2346 outb(0x88, iobase+UART_MCR); 2347 2348 outb(0x60, iobase+FIR_MCR); /* Master Reset */ 2349 outb(0x20, iobase+FIR_MCR); /* Master Interrupt Enable */ 2350 2351 //tmp = inb(iobase+FIR_LCR_B); /* SIP enable */ 2352 //tmp |= 0x20; 2353 //outb(tmp, iobase+FIR_LCR_B); 2354 2355 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); 2356 } 2357 2358 static void FIR2SIR(int iobase) 2359 { 2360 unsigned char val; 2361 2362 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ ); 2363 2364 /* Already protected (change_speed() or setup()), no need to lock. 2365 * Jean II */ 2366 2367 outb(0x20, iobase+FIR_MCR); /* IRQ to low */ 2368 outb(0x00, iobase+UART_IER); 2369 2370 outb(0xA0, iobase+FIR_MCR); /* Don't set master reset */ 2371 outb(0x00, iobase+UART_FCR); 2372 outb(0x07, iobase+UART_FCR); 2373 2374 val = inb(iobase+UART_RX); 2375 val = inb(iobase+UART_LSR); 2376 val = inb(iobase+UART_MSR); 2377 2378 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); 2379 } 2380 2381 MODULE_AUTHOR("Benjamin Kong <benjamin_kong@ali.com.tw>"); 2382 MODULE_DESCRIPTION("ALi FIR Controller Driver"); 2383 MODULE_LICENSE("GPL"); 2384 MODULE_ALIAS("platform:" ALI_IRCC_DRIVER_NAME); 2385 2386 2387 module_param_array(io, int, NULL, 0); 2388 MODULE_PARM_DESC(io, "Base I/O addresses"); 2389 module_param_array(irq, int, NULL, 0); 2390 MODULE_PARM_DESC(irq, "IRQ lines"); 2391 module_param_array(dma, int, NULL, 0); 2392 MODULE_PARM_DESC(dma, "DMA channels"); 2393 2394 module_init(ali_ircc_init); 2395 module_exit(ali_ircc_cleanup); 2396 2397 2398 2399 2400 2401 /* LDV_COMMENT_BEGIN_MAIN */ 2402 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful 2403 2404 
/*###########################################################################*/ 2405 2406 /*############## Driver Environment Generator 0.2 output ####################*/ 2407 2408 /*###########################################################################*/ 2409 2410 2411 2412 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */ 2413 void ldv_check_final_state(void); 2414 2415 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */ 2416 void ldv_check_return_value(int res); 2417 2418 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */ 2419 void ldv_check_return_value_probe(int res); 2420 2421 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */ 2422 void ldv_initialize(void); 2423 2424 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */ 2425 void ldv_handler_precall(void); 2426 2427 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */ 2428 int nondet_int(void); 2429 2430 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */ 2431 int LDV_IN_INTERRUPT; 2432 2433 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. 
*/ 2434 void ldv_main0_sequence_infinite_withcheck_stateful(void) { 2435 2436 2437 2438 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */ 2439 /*============================= VARIABLE DECLARATION PART =============================*/ 2440 /** STRUCT: struct type: platform_driver, struct name: ali_ircc_driver **/ 2441 /* content: static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state)*/ 2442 /* LDV_COMMENT_BEGIN_PREP */ 2443 #define CHIP_IO_EXTENT 8 2444 #define BROKEN_DONGLE_ID 2445 #define ALI_IRCC_DRIVER_NAME "ali-ircc" 2446 /* LDV_COMMENT_END_PREP */ 2447 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "ali_ircc_suspend" */ 2448 struct platform_device * var_group1; 2449 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "ali_ircc_suspend" */ 2450 pm_message_t var_ali_ircc_suspend_29_p1; 2451 /* content: static int ali_ircc_resume(struct platform_device *dev)*/ 2452 /* LDV_COMMENT_BEGIN_PREP */ 2453 #define CHIP_IO_EXTENT 8 2454 #define BROKEN_DONGLE_ID 2455 #define ALI_IRCC_DRIVER_NAME "ali-ircc" 2456 /* LDV_COMMENT_END_PREP */ 2457 2458 /** STRUCT: struct type: net_device_ops, struct name: ali_ircc_sir_ops **/ 2459 /* content: static int ali_ircc_net_open(struct net_device *dev)*/ 2460 /* LDV_COMMENT_BEGIN_PREP */ 2461 #define CHIP_IO_EXTENT 8 2462 #define BROKEN_DONGLE_ID 2463 #define ALI_IRCC_DRIVER_NAME "ali-ircc" 2464 /* LDV_COMMENT_END_PREP */ 2465 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "ali_ircc_net_open" */ 2466 struct net_device * var_group2; 2467 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "ali_ircc_net_open" */ 2468 static int res_ali_ircc_net_open_19; 2469 /* content: static int ali_ircc_net_close(struct net_device *dev)*/ 2470 /* LDV_COMMENT_BEGIN_PREP */ 2471 #define CHIP_IO_EXTENT 8 2472 #define BROKEN_DONGLE_ID 2473 #define ALI_IRCC_DRIVER_NAME "ali-ircc" 2474 /* LDV_COMMENT_END_PREP */ 2475 /* LDV_COMMENT_VAR_DECLARE Variable 
declaration for test return result from function call "ali_ircc_net_close" */ 2476 static int res_ali_ircc_net_close_20; 2477 /* content: static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)*/ 2478 /* LDV_COMMENT_BEGIN_PREP */ 2479 #define CHIP_IO_EXTENT 8 2480 #define BROKEN_DONGLE_ID 2481 #define ALI_IRCC_DRIVER_NAME "ali-ircc" 2482 /* LDV_COMMENT_END_PREP */ 2483 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "ali_ircc_sir_hard_xmit" */ 2484 struct sk_buff * var_group3; 2485 /* content: static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)*/ 2486 /* LDV_COMMENT_BEGIN_PREP */ 2487 #define CHIP_IO_EXTENT 8 2488 #define BROKEN_DONGLE_ID 2489 #define ALI_IRCC_DRIVER_NAME "ali-ircc" 2490 /* LDV_COMMENT_END_PREP */ 2491 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "ali_ircc_net_ioctl" */ 2492 struct ifreq * var_group4; 2493 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "ali_ircc_net_ioctl" */ 2494 int var_ali_ircc_net_ioctl_27_p2; 2495 2496 /** STRUCT: struct type: net_device_ops, struct name: ali_ircc_fir_ops **/ 2497 /* content: static int ali_ircc_net_open(struct net_device *dev)*/ 2498 /* LDV_COMMENT_BEGIN_PREP */ 2499 #define CHIP_IO_EXTENT 8 2500 #define BROKEN_DONGLE_ID 2501 #define ALI_IRCC_DRIVER_NAME "ali-ircc" 2502 /* LDV_COMMENT_END_PREP */ 2503 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "ali_ircc_net_open" */ 2504 2505 /* content: static int ali_ircc_net_close(struct net_device *dev)*/ 2506 /* LDV_COMMENT_BEGIN_PREP */ 2507 #define CHIP_IO_EXTENT 8 2508 #define BROKEN_DONGLE_ID 2509 #define ALI_IRCC_DRIVER_NAME "ali-ircc" 2510 /* LDV_COMMENT_END_PREP */ 2511 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "ali_ircc_net_close" */ 2512 2513 /* content: static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev)*/ 2514 /* 
LDV_COMMENT_BEGIN_PREP */ 2515 #define CHIP_IO_EXTENT 8 2516 #define BROKEN_DONGLE_ID 2517 #define ALI_IRCC_DRIVER_NAME "ali-ircc" 2518 /* LDV_COMMENT_END_PREP */ 2519 /* content: static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)*/ 2520 /* LDV_COMMENT_BEGIN_PREP */ 2521 #define CHIP_IO_EXTENT 8 2522 #define BROKEN_DONGLE_ID 2523 #define ALI_IRCC_DRIVER_NAME "ali-ircc" 2524 /* LDV_COMMENT_END_PREP */ 2525 2526 /** CALLBACK SECTION request_irq **/ 2527 /* content: static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id)*/ 2528 /* LDV_COMMENT_BEGIN_PREP */ 2529 #define CHIP_IO_EXTENT 8 2530 #define BROKEN_DONGLE_ID 2531 #define ALI_IRCC_DRIVER_NAME "ali-ircc" 2532 /* LDV_COMMENT_END_PREP */ 2533 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "ali_ircc_interrupt" */ 2534 int var_ali_ircc_interrupt_9_p0; 2535 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "ali_ircc_interrupt" */ 2536 void * var_ali_ircc_interrupt_9_p1; 2537 2538 2539 2540 2541 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */ 2542 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */ 2543 /*============================= VARIABLE INITIALIZING PART =============================*/ 2544 LDV_IN_INTERRUPT=1; 2545 2546 2547 2548 2549 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */ 2550 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */ 2551 /*============================= FUNCTION CALL SECTION =============================*/ 2552 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */ 2553 ldv_initialize(); 2554 2555 /** INIT: init_type: ST_MODULE_INIT **/ 2556 /* content: static int __init ali_ircc_init(void)*/ 2557 /* LDV_COMMENT_BEGIN_PREP */ 2558 #define CHIP_IO_EXTENT 8 2559 #define BROKEN_DONGLE_ID 2560 #define ALI_IRCC_DRIVER_NAME "ali-ircc" 2561 /* LDV_COMMENT_END_PREP */ 2562 /* LDV_COMMENT_FUNCTION_CALL Kernel calls driver init function after driver loading to kernel. This function declared as "MODULE_INIT(function name)". 
*/ 2563 ldv_handler_precall(); 2564 if(ali_ircc_init()) 2565 goto ldv_final; 2566 int ldv_s_ali_ircc_driver_platform_driver = 0; 2567 2568 int ldv_s_ali_ircc_sir_ops_net_device_ops = 0; 2569 2570 2571 int ldv_s_ali_ircc_fir_ops_net_device_ops = 0; 2572 2573 2574 2575 2576 2577 while( nondet_int() 2578 || !(ldv_s_ali_ircc_driver_platform_driver == 0) 2579 || !(ldv_s_ali_ircc_sir_ops_net_device_ops == 0) 2580 || !(ldv_s_ali_ircc_fir_ops_net_device_ops == 0) 2581 ) { 2582 2583 switch(nondet_int()) { 2584 2585 case 0: { 2586 2587 /** STRUCT: struct type: platform_driver, struct name: ali_ircc_driver **/ 2588 if(ldv_s_ali_ircc_driver_platform_driver==0) { 2589 2590 /* content: static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state)*/ 2591 /* LDV_COMMENT_BEGIN_PREP */ 2592 #define CHIP_IO_EXTENT 8 2593 #define BROKEN_DONGLE_ID 2594 #define ALI_IRCC_DRIVER_NAME "ali-ircc" 2595 /* LDV_COMMENT_END_PREP */ 2596 /* LDV_COMMENT_FUNCTION_CALL Function from field "suspend" from driver structure with callbacks "ali_ircc_driver" */ 2597 ldv_handler_precall(); 2598 ali_ircc_suspend( var_group1, var_ali_ircc_suspend_29_p1); 2599 ldv_s_ali_ircc_driver_platform_driver++; 2600 2601 } 2602 2603 } 2604 2605 break; 2606 case 1: { 2607 2608 /** STRUCT: struct type: platform_driver, struct name: ali_ircc_driver **/ 2609 if(ldv_s_ali_ircc_driver_platform_driver==1) { 2610 2611 /* content: static int ali_ircc_resume(struct platform_device *dev)*/ 2612 /* LDV_COMMENT_BEGIN_PREP */ 2613 #define CHIP_IO_EXTENT 8 2614 #define BROKEN_DONGLE_ID 2615 #define ALI_IRCC_DRIVER_NAME "ali-ircc" 2616 /* LDV_COMMENT_END_PREP */ 2617 /* LDV_COMMENT_FUNCTION_CALL Function from field "resume" from driver structure with callbacks "ali_ircc_driver" */ 2618 ldv_handler_precall(); 2619 ali_ircc_resume( var_group1); 2620 ldv_s_ali_ircc_driver_platform_driver=0; 2621 2622 } 2623 2624 } 2625 2626 break; 2627 case 2: { 2628 2629 /** STRUCT: struct type: net_device_ops, struct name: 
ali_ircc_sir_ops **/ 2630 if(ldv_s_ali_ircc_sir_ops_net_device_ops==0) { 2631 2632 /* content: static int ali_ircc_net_open(struct net_device *dev)*/ 2633 /* LDV_COMMENT_BEGIN_PREP */ 2634 #define CHIP_IO_EXTENT 8 2635 #define BROKEN_DONGLE_ID 2636 #define ALI_IRCC_DRIVER_NAME "ali-ircc" 2637 /* LDV_COMMENT_END_PREP */ 2638 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_open" from driver structure with callbacks "ali_ircc_sir_ops". Standart function test for correct return result. */ 2639 ldv_handler_precall(); 2640 res_ali_ircc_net_open_19 = ali_ircc_net_open( var_group2); 2641 ldv_check_return_value(res_ali_ircc_net_open_19); 2642 if(res_ali_ircc_net_open_19 < 0) 2643 goto ldv_module_exit; 2644 ldv_s_ali_ircc_sir_ops_net_device_ops++; 2645 2646 } 2647 2648 } 2649 2650 break; 2651 case 3: { 2652 2653 /** STRUCT: struct type: net_device_ops, struct name: ali_ircc_sir_ops **/ 2654 if(ldv_s_ali_ircc_sir_ops_net_device_ops==1) { 2655 2656 /* content: static int ali_ircc_net_close(struct net_device *dev)*/ 2657 /* LDV_COMMENT_BEGIN_PREP */ 2658 #define CHIP_IO_EXTENT 8 2659 #define BROKEN_DONGLE_ID 2660 #define ALI_IRCC_DRIVER_NAME "ali-ircc" 2661 /* LDV_COMMENT_END_PREP */ 2662 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_stop" from driver structure with callbacks "ali_ircc_sir_ops". Standart function test for correct return result. 
*/ 2663 ldv_handler_precall(); 2664 res_ali_ircc_net_close_20 = ali_ircc_net_close( var_group2); 2665 ldv_check_return_value(res_ali_ircc_net_close_20); 2666 if(res_ali_ircc_net_close_20) 2667 goto ldv_module_exit; 2668 ldv_s_ali_ircc_sir_ops_net_device_ops=0; 2669 2670 } 2671 2672 } 2673 2674 break; 2675 case 4: { 2676 2677 /** STRUCT: struct type: net_device_ops, struct name: ali_ircc_sir_ops **/ 2678 2679 2680 /* content: static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)*/ 2681 /* LDV_COMMENT_BEGIN_PREP */ 2682 #define CHIP_IO_EXTENT 8 2683 #define BROKEN_DONGLE_ID 2684 #define ALI_IRCC_DRIVER_NAME "ali-ircc" 2685 /* LDV_COMMENT_END_PREP */ 2686 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_start_xmit" from driver structure with callbacks "ali_ircc_sir_ops" */ 2687 ldv_handler_precall(); 2688 ali_ircc_sir_hard_xmit( var_group3, var_group2); 2689 2690 2691 2692 2693 } 2694 2695 break; 2696 case 5: { 2697 2698 /** STRUCT: struct type: net_device_ops, struct name: ali_ircc_sir_ops **/ 2699 2700 2701 /* content: static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)*/ 2702 /* LDV_COMMENT_BEGIN_PREP */ 2703 #define CHIP_IO_EXTENT 8 2704 #define BROKEN_DONGLE_ID 2705 #define ALI_IRCC_DRIVER_NAME "ali-ircc" 2706 /* LDV_COMMENT_END_PREP */ 2707 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_do_ioctl" from driver structure with callbacks "ali_ircc_sir_ops" */ 2708 ldv_handler_precall(); 2709 ali_ircc_net_ioctl( var_group2, var_group4, var_ali_ircc_net_ioctl_27_p2); 2710 2711 2712 2713 2714 } 2715 2716 break; 2717 case 6: { 2718 2719 /** STRUCT: struct type: net_device_ops, struct name: ali_ircc_fir_ops **/ 2720 if(ldv_s_ali_ircc_fir_ops_net_device_ops==0) { 2721 2722 /* content: static int ali_ircc_net_open(struct net_device *dev)*/ 2723 /* LDV_COMMENT_BEGIN_PREP */ 2724 #define CHIP_IO_EXTENT 8 2725 #define BROKEN_DONGLE_ID 2726 #define ALI_IRCC_DRIVER_NAME "ali-ircc" 2727 /* 
LDV_COMMENT_END_PREP */ 2728 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_open" from driver structure with callbacks "ali_ircc_fir_ops". Standart function test for correct return result. */ 2729 ldv_handler_precall(); 2730 res_ali_ircc_net_open_19 = ali_ircc_net_open( var_group2); 2731 ldv_check_return_value(res_ali_ircc_net_open_19); 2732 if(res_ali_ircc_net_open_19 < 0) 2733 goto ldv_module_exit; 2734 ldv_s_ali_ircc_fir_ops_net_device_ops++; 2735 2736 } 2737 2738 } 2739 2740 break; 2741 case 7: { 2742 2743 /** STRUCT: struct type: net_device_ops, struct name: ali_ircc_fir_ops **/ 2744 if(ldv_s_ali_ircc_fir_ops_net_device_ops==1) { 2745 2746 /* content: static int ali_ircc_net_close(struct net_device *dev)*/ 2747 /* LDV_COMMENT_BEGIN_PREP */ 2748 #define CHIP_IO_EXTENT 8 2749 #define BROKEN_DONGLE_ID 2750 #define ALI_IRCC_DRIVER_NAME "ali-ircc" 2751 /* LDV_COMMENT_END_PREP */ 2752 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_stop" from driver structure with callbacks "ali_ircc_fir_ops". Standart function test for correct return result. 
*/ 2753 ldv_handler_precall(); 2754 res_ali_ircc_net_close_20 = ali_ircc_net_close( var_group2); 2755 ldv_check_return_value(res_ali_ircc_net_close_20); 2756 if(res_ali_ircc_net_close_20) 2757 goto ldv_module_exit; 2758 ldv_s_ali_ircc_fir_ops_net_device_ops=0; 2759 2760 } 2761 2762 } 2763 2764 break; 2765 case 8: { 2766 2767 /** STRUCT: struct type: net_device_ops, struct name: ali_ircc_fir_ops **/ 2768 2769 2770 /* content: static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev)*/ 2771 /* LDV_COMMENT_BEGIN_PREP */ 2772 #define CHIP_IO_EXTENT 8 2773 #define BROKEN_DONGLE_ID 2774 #define ALI_IRCC_DRIVER_NAME "ali-ircc" 2775 /* LDV_COMMENT_END_PREP */ 2776 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_start_xmit" from driver structure with callbacks "ali_ircc_fir_ops" */ 2777 ldv_handler_precall(); 2778 ali_ircc_fir_hard_xmit( var_group3, var_group2); 2779 2780 2781 2782 2783 } 2784 2785 break; 2786 case 9: { 2787 2788 /** STRUCT: struct type: net_device_ops, struct name: ali_ircc_fir_ops **/ 2789 2790 2791 /* content: static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)*/ 2792 /* LDV_COMMENT_BEGIN_PREP */ 2793 #define CHIP_IO_EXTENT 8 2794 #define BROKEN_DONGLE_ID 2795 #define ALI_IRCC_DRIVER_NAME "ali-ircc" 2796 /* LDV_COMMENT_END_PREP */ 2797 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_do_ioctl" from driver structure with callbacks "ali_ircc_fir_ops" */ 2798 ldv_handler_precall(); 2799 ali_ircc_net_ioctl( var_group2, var_group4, var_ali_ircc_net_ioctl_27_p2); 2800 2801 2802 2803 2804 } 2805 2806 break; 2807 case 10: { 2808 2809 /** CALLBACK SECTION request_irq **/ 2810 LDV_IN_INTERRUPT=2; 2811 2812 /* content: static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id)*/ 2813 /* LDV_COMMENT_BEGIN_PREP */ 2814 #define CHIP_IO_EXTENT 8 2815 #define BROKEN_DONGLE_ID 2816 #define ALI_IRCC_DRIVER_NAME "ali-ircc" 2817 /* LDV_COMMENT_END_PREP */ 2818 /* LDV_COMMENT_FUNCTION_CALL */ 2819 
ldv_handler_precall(); 2820 ali_ircc_interrupt( var_ali_ircc_interrupt_9_p0, var_ali_ircc_interrupt_9_p1); 2821 LDV_IN_INTERRUPT=1; 2822 2823 2824 2825 } 2826 2827 break; 2828 default: break; 2829 2830 } 2831 2832 } 2833 2834 ldv_module_exit: 2835 2836 /** INIT: init_type: ST_MODULE_EXIT **/ 2837 /* content: static void __exit ali_ircc_cleanup(void)*/ 2838 /* LDV_COMMENT_BEGIN_PREP */ 2839 #define CHIP_IO_EXTENT 8 2840 #define BROKEN_DONGLE_ID 2841 #define ALI_IRCC_DRIVER_NAME "ali-ircc" 2842 /* LDV_COMMENT_END_PREP */ 2843 /* LDV_COMMENT_FUNCTION_CALL Kernel calls driver release function before driver will be uploaded from kernel. This function declared as "MODULE_EXIT(function name)". */ 2844 ldv_handler_precall(); 2845 ali_ircc_cleanup(); 2846 2847 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */ 2848 ldv_final: ldv_check_final_state(); 2849 2850 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */ 2851 return; 2852 2853 } 2854 #endif 2855 2856 /* LDV_COMMENT_END_MAIN */
1 2 3 #include <linux/kernel.h> 4 #include <linux/spinlock.h> 5 6 #include <verifier/rcv.h> 7 8 static int ldv_spin_NOT_ARG_SIGN; 9 10 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_NOT_ARG_SIGN') Check that spin 'NOT_ARG_SIGN' was not locked and lock it */ 11 void ldv_spin_lock_NOT_ARG_SIGN(void) 12 { 13 /* LDV_COMMENT_ASSERT Spin 'NOT_ARG_SIGN' must be unlocked */ 14 ldv_assert(ldv_spin_NOT_ARG_SIGN == 1); 15 /* LDV_COMMENT_CHANGE_STATE Lock spin 'NOT_ARG_SIGN' */ 16 ldv_spin_NOT_ARG_SIGN = 2; 17 } 18 19 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_NOT_ARG_SIGN') Check that spin 'NOT_ARG_SIGN' was locked and unlock it */ 20 void ldv_spin_unlock_NOT_ARG_SIGN(void) 21 { 22 /* LDV_COMMENT_ASSERT Spin 'NOT_ARG_SIGN' must be locked */ 23 ldv_assert(ldv_spin_NOT_ARG_SIGN == 2); 24 /* LDV_COMMENT_CHANGE_STATE Unlock spin 'NOT_ARG_SIGN' */ 25 ldv_spin_NOT_ARG_SIGN = 1; 26 } 27 28 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_NOT_ARG_SIGN') Check that spin 'NOT_ARG_SIGN' was not locked and nondeterministically lock it. Return 0 on fails */ 29 int ldv_spin_trylock_NOT_ARG_SIGN(void) 30 { 31 int is_spin_held_by_another_thread; 32 33 /* LDV_COMMENT_ASSERT It may be an error if spin 'NOT_ARG_SIGN' is locked at this point */ 34 ldv_assert(ldv_spin_NOT_ARG_SIGN == 1); 35 36 /* LDV_COMMENT_OTHER Construct nondetermined result */ 37 is_spin_held_by_another_thread = ldv_undef_int(); 38 39 /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'NOT_ARG_SIGN' */ 40 if (is_spin_held_by_another_thread) 41 { 42 /* LDV_COMMENT_RETURN Spin 'NOT_ARG_SIGN' was not locked. 
Finish with fail */ 43 return 0; 44 } 45 else 46 { 47 /* LDV_COMMENT_CHANGE_STATE Lock spin 'NOT_ARG_SIGN' */ 48 ldv_spin_NOT_ARG_SIGN = 2; 49 /* LDV_COMMENT_RETURN Finish with success */ 50 return 1; 51 } 52 } 53 54 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_NOT_ARG_SIGN') The same process can not both lock spin 'NOT_ARG_SIGN' and wait until it will be unlocked */ 55 void ldv_spin_unlock_wait_NOT_ARG_SIGN(void) 56 { 57 /* LDV_COMMENT_ASSERT Spin 'NOT_ARG_SIGN' must not be locked by a current process */ 58 ldv_assert(ldv_spin_NOT_ARG_SIGN == 1); 59 } 60 61 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_NOT_ARG_SIGN') Check whether spin 'NOT_ARG_SIGN' was locked */ 62 int ldv_spin_is_locked_NOT_ARG_SIGN(void) 63 { 64 int is_spin_held_by_another_thread; 65 66 /* LDV_COMMENT_OTHER Construct nondetermined result */ 67 is_spin_held_by_another_thread = ldv_undef_int(); 68 69 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'NOT_ARG_SIGN' was locked */ 70 if(ldv_spin_NOT_ARG_SIGN == 1 && !is_spin_held_by_another_thread) 71 { 72 /* LDV_COMMENT_RETURN Spin 'NOT_ARG_SIGN' was unlocked */ 73 return 0; 74 } 75 else 76 { 77 /* LDV_COMMENT_RETURN Spin 'NOT_ARG_SIGN' was locked */ 78 return 1; 79 } 80 } 81 82 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_NOT_ARG_SIGN') Check whether spin 'NOT_ARG_SIGN' was locked */ 83 int ldv_spin_can_lock_NOT_ARG_SIGN(void) 84 { 85 /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */ 86 return !ldv_spin_is_locked_NOT_ARG_SIGN(); 87 } 88 89 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_NOT_ARG_SIGN') Check whether spin 'NOT_ARG_SIGN' is contended */ 90 int ldv_spin_is_contended_NOT_ARG_SIGN(void) 91 { 92 int is_spin_contended; 93 94 /* LDV_COMMENT_OTHER Construct nondetermined result */ 95 is_spin_contended = ldv_undef_int(); 96 97 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'NOT_ARG_SIGN' is contended */ 98 
if(is_spin_contended) 99 { 100 /* LDV_COMMENT_RETURN Spin 'NOT_ARG_SIGN' is contended */ 101 return 0; 102 } 103 else 104 { 105 /* LDV_COMMENT_RETURN Spin 'NOT_ARG_SIGN' isn't contended */ 106 return 1; 107 } 108 } 109 110 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_NOT_ARG_SIGN') Lock spin 'NOT_ARG_SIGN' if atomic decrement result is zero */ 111 int ldv_atomic_dec_and_lock_NOT_ARG_SIGN(void) 112 { 113 int atomic_value_after_dec; 114 115 /* LDV_COMMENT_ASSERT Spin 'NOT_ARG_SIGN' must be unlocked (since we may lock it in this function) */ 116 ldv_assert(ldv_spin_NOT_ARG_SIGN == 1); 117 118 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */ 119 atomic_value_after_dec = ldv_undef_int(); 120 121 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */ 122 if (atomic_value_after_dec == 0) 123 { 124 /* LDV_COMMENT_CHANGE_STATE Lock spin 'NOT_ARG_SIGN', as atomic has decremented to zero */ 125 ldv_spin_NOT_ARG_SIGN = 2; 126 /* LDV_COMMENT_RETURN Return 1 with locked spin 'NOT_ARG_SIGN' */ 127 return 1; 128 } 129 130 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'NOT_ARG_SIGN' */ 131 return 0; 132 } 133 static int ldv_spin__xmit_lock_of_netdev_queue; 134 135 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock__xmit_lock_of_netdev_queue') Check that spin '_xmit_lock_of_netdev_queue' was not locked and lock it */ 136 void ldv_spin_lock__xmit_lock_of_netdev_queue(void) 137 { 138 /* LDV_COMMENT_ASSERT Spin '_xmit_lock_of_netdev_queue' must be unlocked */ 139 ldv_assert(ldv_spin__xmit_lock_of_netdev_queue == 1); 140 /* LDV_COMMENT_CHANGE_STATE Lock spin '_xmit_lock_of_netdev_queue' */ 141 ldv_spin__xmit_lock_of_netdev_queue = 2; 142 } 143 144 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock__xmit_lock_of_netdev_queue') Check that spin '_xmit_lock_of_netdev_queue' was locked and unlock it */ 145 void ldv_spin_unlock__xmit_lock_of_netdev_queue(void) 146 { 147 /* 
LDV_COMMENT_ASSERT Spin '_xmit_lock_of_netdev_queue' must be locked */ 148 ldv_assert(ldv_spin__xmit_lock_of_netdev_queue == 2); 149 /* LDV_COMMENT_CHANGE_STATE Unlock spin '_xmit_lock_of_netdev_queue' */ 150 ldv_spin__xmit_lock_of_netdev_queue = 1; 151 } 152 153 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock__xmit_lock_of_netdev_queue') Check that spin '_xmit_lock_of_netdev_queue' was not locked and nondeterministically lock it. Return 0 on fails */ 154 int ldv_spin_trylock__xmit_lock_of_netdev_queue(void) 155 { 156 int is_spin_held_by_another_thread; 157 158 /* LDV_COMMENT_ASSERT It may be an error if spin '_xmit_lock_of_netdev_queue' is locked at this point */ 159 ldv_assert(ldv_spin__xmit_lock_of_netdev_queue == 1); 160 161 /* LDV_COMMENT_OTHER Construct nondetermined result */ 162 is_spin_held_by_another_thread = ldv_undef_int(); 163 164 /* LDV_COMMENT_ASSERT Nondeterministically lock spin '_xmit_lock_of_netdev_queue' */ 165 if (is_spin_held_by_another_thread) 166 { 167 /* LDV_COMMENT_RETURN Spin '_xmit_lock_of_netdev_queue' was not locked. 
Finish with fail */ 168 return 0; 169 } 170 else 171 { 172 /* LDV_COMMENT_CHANGE_STATE Lock spin '_xmit_lock_of_netdev_queue' */ 173 ldv_spin__xmit_lock_of_netdev_queue = 2; 174 /* LDV_COMMENT_RETURN Finish with success */ 175 return 1; 176 } 177 } 178 179 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait__xmit_lock_of_netdev_queue') The same process can not both lock spin '_xmit_lock_of_netdev_queue' and wait until it will be unlocked */ 180 void ldv_spin_unlock_wait__xmit_lock_of_netdev_queue(void) 181 { 182 /* LDV_COMMENT_ASSERT Spin '_xmit_lock_of_netdev_queue' must not be locked by a current process */ 183 ldv_assert(ldv_spin__xmit_lock_of_netdev_queue == 1); 184 } 185 186 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked__xmit_lock_of_netdev_queue') Check whether spin '_xmit_lock_of_netdev_queue' was locked */ 187 int ldv_spin_is_locked__xmit_lock_of_netdev_queue(void) 188 { 189 int is_spin_held_by_another_thread; 190 191 /* LDV_COMMENT_OTHER Construct nondetermined result */ 192 is_spin_held_by_another_thread = ldv_undef_int(); 193 194 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin '_xmit_lock_of_netdev_queue' was locked */ 195 if(ldv_spin__xmit_lock_of_netdev_queue == 1 && !is_spin_held_by_another_thread) 196 { 197 /* LDV_COMMENT_RETURN Spin '_xmit_lock_of_netdev_queue' was unlocked */ 198 return 0; 199 } 200 else 201 { 202 /* LDV_COMMENT_RETURN Spin '_xmit_lock_of_netdev_queue' was locked */ 203 return 1; 204 } 205 } 206 207 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock__xmit_lock_of_netdev_queue') Check whether spin '_xmit_lock_of_netdev_queue' was locked */ 208 int ldv_spin_can_lock__xmit_lock_of_netdev_queue(void) 209 { 210 /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */ 211 return !ldv_spin_is_locked__xmit_lock_of_netdev_queue(); 212 } 213 214 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended__xmit_lock_of_netdev_queue') Check whether spin 
'_xmit_lock_of_netdev_queue' is contended */ 215 int ldv_spin_is_contended__xmit_lock_of_netdev_queue(void) 216 { 217 int is_spin_contended; 218 219 /* LDV_COMMENT_OTHER Construct nondetermined result */ 220 is_spin_contended = ldv_undef_int(); 221 222 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin '_xmit_lock_of_netdev_queue' is contended */ 223 if(is_spin_contended) 224 { 225 /* LDV_COMMENT_RETURN Spin '_xmit_lock_of_netdev_queue' is contended */ 226 return 0; 227 } 228 else 229 { 230 /* LDV_COMMENT_RETURN Spin '_xmit_lock_of_netdev_queue' isn't contended */ 231 return 1; 232 } 233 } 234 235 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock__xmit_lock_of_netdev_queue') Lock spin '_xmit_lock_of_netdev_queue' if atomic decrement result is zero */ 236 int ldv_atomic_dec_and_lock__xmit_lock_of_netdev_queue(void) 237 { 238 int atomic_value_after_dec; 239 240 /* LDV_COMMENT_ASSERT Spin '_xmit_lock_of_netdev_queue' must be unlocked (since we may lock it in this function) */ 241 ldv_assert(ldv_spin__xmit_lock_of_netdev_queue == 1); 242 243 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */ 244 atomic_value_after_dec = ldv_undef_int(); 245 246 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */ 247 if (atomic_value_after_dec == 0) 248 { 249 /* LDV_COMMENT_CHANGE_STATE Lock spin '_xmit_lock_of_netdev_queue', as atomic has decremented to zero */ 250 ldv_spin__xmit_lock_of_netdev_queue = 2; 251 /* LDV_COMMENT_RETURN Return 1 with locked spin '_xmit_lock_of_netdev_queue' */ 252 return 1; 253 } 254 255 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin '_xmit_lock_of_netdev_queue' */ 256 return 0; 257 } 258 static int ldv_spin_addr_list_lock_of_net_device; 259 260 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_addr_list_lock_of_net_device') Check that spin 'addr_list_lock_of_net_device' was not locked and lock it */ 261 void 
ldv_spin_lock_addr_list_lock_of_net_device(void) 262 { 263 /* LDV_COMMENT_ASSERT Spin 'addr_list_lock_of_net_device' must be unlocked */ 264 ldv_assert(ldv_spin_addr_list_lock_of_net_device == 1); 265 /* LDV_COMMENT_CHANGE_STATE Lock spin 'addr_list_lock_of_net_device' */ 266 ldv_spin_addr_list_lock_of_net_device = 2; 267 } 268 269 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_addr_list_lock_of_net_device') Check that spin 'addr_list_lock_of_net_device' was locked and unlock it */ 270 void ldv_spin_unlock_addr_list_lock_of_net_device(void) 271 { 272 /* LDV_COMMENT_ASSERT Spin 'addr_list_lock_of_net_device' must be locked */ 273 ldv_assert(ldv_spin_addr_list_lock_of_net_device == 2); 274 /* LDV_COMMENT_CHANGE_STATE Unlock spin 'addr_list_lock_of_net_device' */ 275 ldv_spin_addr_list_lock_of_net_device = 1; 276 } 277 278 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_addr_list_lock_of_net_device') Check that spin 'addr_list_lock_of_net_device' was not locked and nondeterministically lock it. Return 0 on fails */ 279 int ldv_spin_trylock_addr_list_lock_of_net_device(void) 280 { 281 int is_spin_held_by_another_thread; 282 283 /* LDV_COMMENT_ASSERT It may be an error if spin 'addr_list_lock_of_net_device' is locked at this point */ 284 ldv_assert(ldv_spin_addr_list_lock_of_net_device == 1); 285 286 /* LDV_COMMENT_OTHER Construct nondetermined result */ 287 is_spin_held_by_another_thread = ldv_undef_int(); 288 289 /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'addr_list_lock_of_net_device' */ 290 if (is_spin_held_by_another_thread) 291 { 292 /* LDV_COMMENT_RETURN Spin 'addr_list_lock_of_net_device' was not locked. 
Finish with fail */ 293 return 0; 294 } 295 else 296 { 297 /* LDV_COMMENT_CHANGE_STATE Lock spin 'addr_list_lock_of_net_device' */ 298 ldv_spin_addr_list_lock_of_net_device = 2; 299 /* LDV_COMMENT_RETURN Finish with success */ 300 return 1; 301 } 302 } 303 304 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_addr_list_lock_of_net_device') The same process can not both lock spin 'addr_list_lock_of_net_device' and wait until it will be unlocked */ 305 void ldv_spin_unlock_wait_addr_list_lock_of_net_device(void) 306 { 307 /* LDV_COMMENT_ASSERT Spin 'addr_list_lock_of_net_device' must not be locked by a current process */ 308 ldv_assert(ldv_spin_addr_list_lock_of_net_device == 1); 309 } 310 311 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_addr_list_lock_of_net_device') Check whether spin 'addr_list_lock_of_net_device' was locked */ 312 int ldv_spin_is_locked_addr_list_lock_of_net_device(void) 313 { 314 int is_spin_held_by_another_thread; 315 316 /* LDV_COMMENT_OTHER Construct nondetermined result */ 317 is_spin_held_by_another_thread = ldv_undef_int(); 318 319 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'addr_list_lock_of_net_device' was locked */ 320 if(ldv_spin_addr_list_lock_of_net_device == 1 && !is_spin_held_by_another_thread) 321 { 322 /* LDV_COMMENT_RETURN Spin 'addr_list_lock_of_net_device' was unlocked */ 323 return 0; 324 } 325 else 326 { 327 /* LDV_COMMENT_RETURN Spin 'addr_list_lock_of_net_device' was locked */ 328 return 1; 329 } 330 } 331 332 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_addr_list_lock_of_net_device') Check whether spin 'addr_list_lock_of_net_device' was locked */ 333 int ldv_spin_can_lock_addr_list_lock_of_net_device(void) 334 { 335 /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */ 336 return !ldv_spin_is_locked_addr_list_lock_of_net_device(); 337 } 338 339 /* 
LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_addr_list_lock_of_net_device') Check whether spin 'addr_list_lock_of_net_device' is contended */ 340 int ldv_spin_is_contended_addr_list_lock_of_net_device(void) 341 { 342 int is_spin_contended; 343 344 /* LDV_COMMENT_OTHER Construct nondetermined result */ 345 is_spin_contended = ldv_undef_int(); 346 347 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'addr_list_lock_of_net_device' is contended */ 348 if(is_spin_contended) 349 { 350 /* LDV_COMMENT_RETURN Spin 'addr_list_lock_of_net_device' is contended */ 351 return 0; 352 } 353 else 354 { 355 /* LDV_COMMENT_RETURN Spin 'addr_list_lock_of_net_device' isn't contended */ 356 return 1; 357 } 358 } 359 360 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_addr_list_lock_of_net_device') Lock spin 'addr_list_lock_of_net_device' if atomic decrement result is zero */ 361 int ldv_atomic_dec_and_lock_addr_list_lock_of_net_device(void) 362 { 363 int atomic_value_after_dec; 364 365 /* LDV_COMMENT_ASSERT Spin 'addr_list_lock_of_net_device' must be unlocked (since we may lock it in this function) */ 366 ldv_assert(ldv_spin_addr_list_lock_of_net_device == 1); 367 368 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */ 369 atomic_value_after_dec = ldv_undef_int(); 370 371 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */ 372 if (atomic_value_after_dec == 0) 373 { 374 /* LDV_COMMENT_CHANGE_STATE Lock spin 'addr_list_lock_of_net_device', as atomic has decremented to zero */ 375 ldv_spin_addr_list_lock_of_net_device = 2; 376 /* LDV_COMMENT_RETURN Return 1 with locked spin 'addr_list_lock_of_net_device' */ 377 return 1; 378 } 379 380 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'addr_list_lock_of_net_device' */ 381 return 0; 382 } 383 static int ldv_spin_alloc_lock_of_task_struct; 384 385 /* 
LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_alloc_lock_of_task_struct') Check that spin 'alloc_lock_of_task_struct' was not locked and lock it */ 386 void ldv_spin_lock_alloc_lock_of_task_struct(void) 387 { 388 /* LDV_COMMENT_ASSERT Spin 'alloc_lock_of_task_struct' must be unlocked */ 389 ldv_assert(ldv_spin_alloc_lock_of_task_struct == 1); 390 /* LDV_COMMENT_CHANGE_STATE Lock spin 'alloc_lock_of_task_struct' */ 391 ldv_spin_alloc_lock_of_task_struct = 2; 392 } 393 394 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_alloc_lock_of_task_struct') Check that spin 'alloc_lock_of_task_struct' was locked and unlock it */ 395 void ldv_spin_unlock_alloc_lock_of_task_struct(void) 396 { 397 /* LDV_COMMENT_ASSERT Spin 'alloc_lock_of_task_struct' must be locked */ 398 ldv_assert(ldv_spin_alloc_lock_of_task_struct == 2); 399 /* LDV_COMMENT_CHANGE_STATE Unlock spin 'alloc_lock_of_task_struct' */ 400 ldv_spin_alloc_lock_of_task_struct = 1; 401 } 402 403 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_alloc_lock_of_task_struct') Check that spin 'alloc_lock_of_task_struct' was not locked and nondeterministically lock it. Return 0 on fails */ 404 int ldv_spin_trylock_alloc_lock_of_task_struct(void) 405 { 406 int is_spin_held_by_another_thread; 407 408 /* LDV_COMMENT_ASSERT It may be an error if spin 'alloc_lock_of_task_struct' is locked at this point */ 409 ldv_assert(ldv_spin_alloc_lock_of_task_struct == 1); 410 411 /* LDV_COMMENT_OTHER Construct nondetermined result */ 412 is_spin_held_by_another_thread = ldv_undef_int(); 413 414 /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'alloc_lock_of_task_struct' */ 415 if (is_spin_held_by_another_thread) 416 { 417 /* LDV_COMMENT_RETURN Spin 'alloc_lock_of_task_struct' was not locked. 
Finish with fail */ 418 return 0; 419 } 420 else 421 { 422 /* LDV_COMMENT_CHANGE_STATE Lock spin 'alloc_lock_of_task_struct' */ 423 ldv_spin_alloc_lock_of_task_struct = 2; 424 /* LDV_COMMENT_RETURN Finish with success */ 425 return 1; 426 } 427 } 428 429 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_alloc_lock_of_task_struct') The same process can not both lock spin 'alloc_lock_of_task_struct' and wait until it will be unlocked */ 430 void ldv_spin_unlock_wait_alloc_lock_of_task_struct(void) 431 { 432 /* LDV_COMMENT_ASSERT Spin 'alloc_lock_of_task_struct' must not be locked by a current process */ 433 ldv_assert(ldv_spin_alloc_lock_of_task_struct == 1); 434 } 435 436 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_alloc_lock_of_task_struct') Check whether spin 'alloc_lock_of_task_struct' was locked */ 437 int ldv_spin_is_locked_alloc_lock_of_task_struct(void) 438 { 439 int is_spin_held_by_another_thread; 440 441 /* LDV_COMMENT_OTHER Construct nondetermined result */ 442 is_spin_held_by_another_thread = ldv_undef_int(); 443 444 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'alloc_lock_of_task_struct' was locked */ 445 if(ldv_spin_alloc_lock_of_task_struct == 1 && !is_spin_held_by_another_thread) 446 { 447 /* LDV_COMMENT_RETURN Spin 'alloc_lock_of_task_struct' was unlocked */ 448 return 0; 449 } 450 else 451 { 452 /* LDV_COMMENT_RETURN Spin 'alloc_lock_of_task_struct' was locked */ 453 return 1; 454 } 455 } 456 457 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_alloc_lock_of_task_struct') Check whether spin 'alloc_lock_of_task_struct' was locked */ 458 int ldv_spin_can_lock_alloc_lock_of_task_struct(void) 459 { 460 /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */ 461 return !ldv_spin_is_locked_alloc_lock_of_task_struct(); 462 } 463 464 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_alloc_lock_of_task_struct') Check whether spin 
'alloc_lock_of_task_struct' is contended */ 465 int ldv_spin_is_contended_alloc_lock_of_task_struct(void) 466 { 467 int is_spin_contended; 468 469 /* LDV_COMMENT_OTHER Construct nondetermined result */ 470 is_spin_contended = ldv_undef_int(); 471 472 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'alloc_lock_of_task_struct' is contended */ 473 if(is_spin_contended) 474 { 475 /* LDV_COMMENT_RETURN Spin 'alloc_lock_of_task_struct' is contended */ 476 return 0; 477 } 478 else 479 { 480 /* LDV_COMMENT_RETURN Spin 'alloc_lock_of_task_struct' isn't contended */ 481 return 1; 482 } 483 } 484 485 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_alloc_lock_of_task_struct') Lock spin 'alloc_lock_of_task_struct' if atomic decrement result is zero */ 486 int ldv_atomic_dec_and_lock_alloc_lock_of_task_struct(void) 487 { 488 int atomic_value_after_dec; 489 490 /* LDV_COMMENT_ASSERT Spin 'alloc_lock_of_task_struct' must be unlocked (since we may lock it in this function) */ 491 ldv_assert(ldv_spin_alloc_lock_of_task_struct == 1); 492 493 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */ 494 atomic_value_after_dec = ldv_undef_int(); 495 496 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */ 497 if (atomic_value_after_dec == 0) 498 { 499 /* LDV_COMMENT_CHANGE_STATE Lock spin 'alloc_lock_of_task_struct', as atomic has decremented to zero */ 500 ldv_spin_alloc_lock_of_task_struct = 2; 501 /* LDV_COMMENT_RETURN Return 1 with locked spin 'alloc_lock_of_task_struct' */ 502 return 1; 503 } 504 505 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'alloc_lock_of_task_struct' */ 506 return 0; 507 } 508 static int ldv_spin_dma_spin_lock; 509 510 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_dma_spin_lock') Check that spin 'dma_spin_lock' was not locked and lock it */ 511 void ldv_spin_lock_dma_spin_lock(void) 512 { 513 /* LDV_COMMENT_ASSERT Spin 'dma_spin_lock' must be 
unlocked */ 514 ldv_assert(ldv_spin_dma_spin_lock == 1); 515 /* LDV_COMMENT_CHANGE_STATE Lock spin 'dma_spin_lock' */ 516 ldv_spin_dma_spin_lock = 2; 517 } 518 519 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_dma_spin_lock') Check that spin 'dma_spin_lock' was locked and unlock it */