Error Trace

Bug # 176
Trace element categories (selectable in the viewer):

  • Error trace
  • Function bodies
  • Blocks
  • Function bodies without model function calls
  • Initialization function calls
  • Initialization function bodies
  • Entry point
  • Entry point body
  • Function calls
  • Skipped function calls
  • Formal parameter names
  • Declarations
  • Assumes
  • Assume conditions
  • Returns
  • Return values
  • DEG initialization
  • DEG function calls
  • Model function calls
  • Model function bodies
  • Model asserts
  • Model state changes
  • Model function function calls
  • Model function function bodies
  • Model returns
  • Model others
  • Indentation
  • Line numbers
  • Expand signs
__CPAchecker_initialize()
{
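/* Editor's note: __CPAchecker_initialize() is the synthetic
 * initialization function in which CPAchecker gathers the global
 * declarations of the analyzed program. The number at the start of each
 * line below is the line in the original kernel header or source file
 * the declaration was taken from, which is why the numbering restarts
 * repeatedly. */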
19 typedef signed char __s8;
20 typedef unsigned char __u8;
23 typedef unsigned short __u16;
25 typedef int __s32;
26 typedef unsigned int __u32;
29 typedef long long __s64;
30 typedef unsigned long long __u64;
15 typedef signed char s8;
16 typedef unsigned char u8;
18 typedef short s16;
19 typedef unsigned short u16;
21 typedef int s32;
22 typedef unsigned int u32;
24 typedef long long s64;
25 typedef unsigned long long u64;
14 typedef long __kernel_long_t;
15 typedef unsigned long __kernel_ulong_t;
27 typedef int __kernel_pid_t;
48 typedef unsigned int __kernel_uid32_t;
49 typedef unsigned int __kernel_gid32_t;
71 typedef __kernel_ulong_t __kernel_size_t;
72 typedef __kernel_long_t __kernel_ssize_t;
87 typedef long long __kernel_loff_t;
88 typedef __kernel_long_t __kernel_time_t;
89 typedef __kernel_long_t __kernel_clock_t;
90 typedef int __kernel_timer_t;
91 typedef int __kernel_clockid_t;
28 typedef __u16 __le16;
29 typedef __u16 __be16;
30 typedef __u32 __le32;
31 typedef __u32 __be32;
36 typedef __u32 __wsum;
291 struct kernel_symbol { unsigned long value; const char *name; } ;
34 struct module ;
12 typedef __u32 __kernel_dev_t;
15 typedef __kernel_dev_t dev_t;
18 typedef unsigned short umode_t;
21 typedef __kernel_pid_t pid_t;
26 typedef __kernel_clockid_t clockid_t;
29 typedef _Bool bool;
31 typedef __kernel_uid32_t uid_t;
32 typedef __kernel_gid32_t gid_t;
45 typedef __kernel_loff_t loff_t;
54 typedef __kernel_size_t size_t;
59 typedef __kernel_ssize_t ssize_t;
69 typedef __kernel_time_t time_t;
102 typedef __s32 int32_t;
106 typedef __u8 uint8_t;
108 typedef __u32 uint32_t;
111 typedef __u64 uint64_t;
133 typedef unsigned long sector_t;
134 typedef unsigned long blkcnt_t;
152 typedef u64 dma_addr_t;
157 typedef unsigned int gfp_t;
158 typedef unsigned int fmode_t;
161 typedef u64 phys_addr_t;
166 typedef phys_addr_t resource_size_t;
176 struct __anonstruct_atomic_t_6 { int counter; } ;
176 typedef struct __anonstruct_atomic_t_6 atomic_t;
181 struct __anonstruct_atomic64_t_7 { long counter; } ;
181 typedef struct __anonstruct_atomic64_t_7 atomic64_t;
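/* Editor's note: identifiers such as __anonstruct_atomic_t_6,
 * __anonunion____missing_field_name_26 and __annonCompField4 are
 * front-end-generated names for structs, unions and members that are
 * anonymous in the original source; in the kernel headers atomic_t is
 * simply "typedef struct { int counter; } atomic_t;". */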
182 struct list_head { struct list_head *next; struct list_head *prev; } ;
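/* Editor's note: list_head is the kernel's intrusive doubly linked
 * list; the node is embedded in the containing object. A minimal sketch
 * of the usual traversal idiom, with a hypothetical struct item and the
 * standard container_of() macro:
 *
 *     struct item { int val; struct list_head node; };
 *     struct list_head *p;
 *     for (p = head.next; p != &head; p = p->next) {
 *         struct item *it = container_of(p, struct item, node);
 *         // it->val is now usable
 *     }
 */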
187 struct hlist_node ;
187 struct hlist_head { struct hlist_node *first; } ;
191 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ;
202 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ;
115 typedef void (*ctor_fn_t)();
83 struct ctl_table ;
283 struct _ddebug { const char *modname; const char *function; const char *filename; const char *format; unsigned int lineno; unsigned char flags; } ;
58 struct device ;
64 struct net_device ;
474 struct file_operations ;
486 struct completion ;
487 struct pt_regs ;
546 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ;
131 struct timespec ;
132 struct compat_timespec ;
133 struct pollfd ;
134 struct __anonstruct_futex_27 { u32 *uaddr; u32 val; u32 flags; u32 bitset; u64 time; u32 *uaddr2; } ;
134 struct __anonstruct_nanosleep_28 { clockid_t clockid; struct timespec *rmtp; struct compat_timespec *compat_rmtp; u64 expires; } ;
134 struct __anonstruct_poll_29 { struct pollfd *ufds; int nfds; int has_timeout; unsigned long tv_sec; unsigned long tv_nsec; } ;
134 union __anonunion____missing_field_name_26 { struct __anonstruct_futex_27 futex; struct __anonstruct_nanosleep_28 nanosleep; struct __anonstruct_poll_29 poll; } ;
134 struct restart_block { long int (*fn)(struct restart_block *); union __anonunion____missing_field_name_26 __annonCompField4; } ;
50 struct task_struct ;
39 struct page ;
26 struct mm_struct ;
288 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ;
66 struct __anonstruct____missing_field_name_32 { unsigned int a; unsigned int b; } ;
66 struct __anonstruct____missing_field_name_33 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ;
66 union __anonunion____missing_field_name_31 { struct __anonstruct____missing_field_name_32 __annonCompField5; struct __anonstruct____missing_field_name_33 __annonCompField6; } ;
66 struct desc_struct { union __anonunion____missing_field_name_31 __annonCompField7; } ;
13 typedef unsigned long pteval_t;
14 typedef unsigned long pmdval_t;
15 typedef unsigned long pudval_t;
16 typedef unsigned long pgdval_t;
17 typedef unsigned long pgprotval_t;
19 struct __anonstruct_pte_t_34 { pteval_t pte; } ;
19 typedef struct __anonstruct_pte_t_34 pte_t;
21 struct pgprot { pgprotval_t pgprot; } ;
256 typedef struct pgprot pgprot_t;
258 struct __anonstruct_pgd_t_35 { pgdval_t pgd; } ;
258 typedef struct __anonstruct_pgd_t_35 pgd_t;
276 struct __anonstruct_pud_t_36 { pudval_t pud; } ;
276 typedef struct __anonstruct_pud_t_36 pud_t;
297 struct __anonstruct_pmd_t_37 { pmdval_t pmd; } ;
297 typedef struct __anonstruct_pmd_t_37 pmd_t;
423 typedef struct page *pgtable_t;
434 struct file ;
445 struct seq_file ;
481 struct thread_struct ;
483 struct cpumask ;
20 struct qspinlock { atomic_t val; } ;
33 typedef struct qspinlock arch_spinlock_t;
34 struct qrwlock { atomic_t cnts; arch_spinlock_t wait_lock; } ;
14 typedef struct qrwlock arch_rwlock_t;
247 struct math_emu_info { long ___orig_eip; struct pt_regs *regs; } ;
83 struct static_key { atomic_t enabled; } ;
26 union __anonunion___u_42 { int __val; char __c[1U]; } ;
38 union __anonunion___u_44 { int __val; char __c[1U]; } ;
23 typedef atomic64_t atomic_long_t;
359 struct cpumask { unsigned long bits[128U]; } ;
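/* Editor's note: bits[128U] of 64-bit unsigned long gives
 * 128 * 64 = 8192 bits per cpumask, i.e. a kernel configured with
 * NR_CPUS = 8192 (the x86_64 CONFIG_MAXSMP value). */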
15 typedef struct cpumask cpumask_t;
657 typedef struct cpumask *cpumask_var_t;
22 struct tracepoint_func { void *func; void *data; int prio; } ;
28 struct tracepoint { const char *name; struct static_key key; int (*regfunc)(); void (*unregfunc)(); struct tracepoint_func *funcs; } ;
233 struct fregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ;
26 struct __anonstruct____missing_field_name_61 { u64 rip; u64 rdp; } ;
26 struct __anonstruct____missing_field_name_62 { u32 fip; u32 fcs; u32 foo; u32 fos; } ;
26 union __anonunion____missing_field_name_60 { struct __anonstruct____missing_field_name_61 __annonCompField13; struct __anonstruct____missing_field_name_62 __annonCompField14; } ;
26 union __anonunion____missing_field_name_63 { u32 padding1[12U]; u32 sw_reserved[12U]; } ;
26 struct fxregs_state { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion____missing_field_name_60 __annonCompField15; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion____missing_field_name_63 __annonCompField16; } ;
66 struct swregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ;
227 struct xstate_header { u64 xfeatures; u64 xcomp_bv; u64 reserved[6U]; } ;
233 struct xregs_state { struct fxregs_state i387; struct xstate_header header; u8 extended_state_area[0U]; } ;
254 union fpregs_state { struct fregs_state fsave; struct fxregs_state fxsave; struct swregs_state soft; struct xregs_state xsave; u8 __padding[4096U]; } ;
271 struct fpu { unsigned int last_cpu; unsigned char fpstate_active; unsigned char fpregs_active; union fpregs_state state; } ;
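/* Editor's note: __padding[4096U] pads union fpregs_state to a full
 * page; the variably sized XSAVE area (xregs_state, ending in the
 * zero-length extended_state_area[0U]) must fit inside it. */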
181 struct seq_operations ;
415 struct perf_event ;
420 struct __anonstruct_mm_segment_t_75 { unsigned long seg; } ;
420 typedef struct __anonstruct_mm_segment_t_75 mm_segment_t;
421 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; u32 status; unsigned long fsbase; unsigned long gsbase; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; mm_segment_t addr_limit; unsigned char sig_on_uaccess_err; unsigned char uaccess_err; struct fpu fpu; } ;
48 struct thread_info { unsigned long flags; } ;
33 struct lockdep_map ;
55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ;
28 struct lockdep_subclass_key { char __one_byte; } ;
53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ;
59 struct lock_class { struct hlist_node hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ;
144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ;
207 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; unsigned int pin_count; } ;
593 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
32 typedef struct raw_spinlock raw_spinlock_t;
33 struct __anonstruct____missing_field_name_77 { u8 __padding[24U]; struct lockdep_map dep_map; } ;
33 union __anonunion____missing_field_name_76 { struct raw_spinlock rlock; struct __anonstruct____missing_field_name_77 __annonCompField19; } ;
33 struct spinlock { union __anonunion____missing_field_name_76 __annonCompField20; } ;
76 typedef struct spinlock spinlock_t;
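/* Editor's note: the embedded dep_map (struct lockdep_map) and the
 * magic/owner_cpu/owner fields in raw_spinlock, rwlock, mutex and
 * friends show this kernel was built with lock debugging enabled
 * (CONFIG_DEBUG_SPINLOCK, CONFIG_DEBUG_LOCK_ALLOC/lockdep). */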
23 struct __anonstruct_rwlock_t_78 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
23 typedef struct __anonstruct_rwlock_t_78 rwlock_t;
408 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ;
52 typedef struct seqcount seqcount_t;
407 struct __anonstruct_seqlock_t_93 { struct seqcount seqcount; spinlock_t lock; } ;
407 typedef struct __anonstruct_seqlock_t_93 seqlock_t;
601 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ;
7 typedef __s64 time64_t;
83 struct user_namespace ;
22 struct __anonstruct_kuid_t_94 { uid_t val; } ;
22 typedef struct __anonstruct_kuid_t_94 kuid_t;
27 struct __anonstruct_kgid_t_95 { gid_t val; } ;
27 typedef struct __anonstruct_kgid_t_95 kgid_t;
139 struct kstat { u32 result_mask; umode_t mode; unsigned int nlink; uint32_t blksize; u64 attributes; u64 ino; dev_t dev; dev_t rdev; kuid_t uid; kgid_t gid; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; struct timespec btime; u64 blocks; } ;
48 struct vm_area_struct ;
39 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ;
44 typedef struct __wait_queue_head wait_queue_head_t;
97 struct __anonstruct_nodemask_t_96 { unsigned long bits[16U]; } ;
97 typedef struct __anonstruct_nodemask_t_96 nodemask_t;
247 typedef unsigned int isolate_mode_t;
13 struct optimistic_spin_queue { atomic_t tail; } ;
39 struct ww_acquire_ctx ;
40 struct mutex { atomic_long_t owner; spinlock_t wait_lock; struct optimistic_spin_queue osq; struct list_head wait_list; void *magic; struct lockdep_map dep_map; } ;
72 struct mutex_waiter { struct list_head list; struct task_struct *task; struct ww_acquire_ctx *ww_ctx; void *magic; } ;
229 struct rw_semaphore ;
230 struct rw_semaphore { atomic_long_t count; struct list_head wait_list; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct task_struct *owner; struct lockdep_map dep_map; } ;
28 typedef s64 ktime_t;
1109 struct timer_list { struct hlist_node entry; unsigned long expires; void (*function)(unsigned long); unsigned long data; u32 flags; struct lockdep_map lockdep_map; } ;
211 struct hrtimer ;
212 enum hrtimer_restart ;
235 struct workqueue_struct ;
236 struct work_struct ;
54 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ;
107 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ;
268 struct notifier_block ;
53 struct notifier_block { int (*notifier_call)(struct notifier_block *, unsigned long, void *); struct notifier_block *next; int priority; } ;
70 struct raw_notifier_head { struct notifier_block *head; } ;
217 struct resource ;
68 struct resource { resource_size_t start; resource_size_t end; const char *name; unsigned long flags; unsigned long desc; struct resource *parent; struct resource *sibling; struct resource *child; } ;
236 struct pci_dev ;
144 struct pci_bus ;
38 struct ldt_struct ;
38 struct vdso_image ;
38 struct __anonstruct_mm_context_t_161 { struct ldt_struct *ldt; unsigned short ia32_compat; struct mutex lock; void *vdso; const struct vdso_image *vdso_image; atomic_t perf_rdpmc_allowed; u16 pkey_allocation_map; s16 execute_only_pkey; void *bd_addr; } ;
38 typedef struct __anonstruct_mm_context_t_161 mm_context_t;
34 struct bio_vec ;
1266 struct llist_node ;
69 struct llist_node { struct llist_node *next; } ;
551 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ;
41 struct rb_root { struct rb_node *rb_node; } ;
835 struct nsproxy ;
836 struct ctl_table_root ;
837 struct ctl_table_header ;
838 struct ctl_dir ;
39 typedef int proc_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
61 struct ctl_table_poll { atomic_t event; wait_queue_head_t wait; } ;
100 struct ctl_table { const char *procname; void *data; int maxlen; umode_t mode; struct ctl_table *child; proc_handler *proc_handler; struct ctl_table_poll *poll; void *extra1; void *extra2; } ;
121 struct ctl_node { struct rb_node node; struct ctl_table_header *header; } ;
126 struct __anonstruct____missing_field_name_208 { struct ctl_table *ctl_table; int used; int count; int nreg; } ;
126 union __anonunion____missing_field_name_207 { struct __anonstruct____missing_field_name_208 __annonCompField31; struct callback_head rcu; } ;
126 struct ctl_table_set ;
126 struct ctl_table_header { union __anonunion____missing_field_name_207 __annonCompField32; struct completion *unregistering; struct ctl_table *ctl_table_arg; struct ctl_table_root *root; struct ctl_table_set *set; struct ctl_dir *parent; struct ctl_node *node; struct list_head inodes; } ;
148 struct ctl_dir { struct ctl_table_header header; struct rb_root root; } ;
154 struct ctl_table_set { int (*is_seen)(struct ctl_table_set *); struct ctl_dir dir; } ;
159 struct ctl_table_root { struct ctl_table_set default_set; struct ctl_table_set * (*lookup)(struct ctl_table_root *); void (*set_ownership)(struct ctl_table_header *, struct ctl_table *, kuid_t *, kgid_t *); int (*permissions)(struct ctl_table_header *, struct ctl_table *); } ;
37 struct cred ;
19 struct vmacache { u32 seqnum; struct vm_area_struct *vmas[4U]; } ;
41 struct task_rss_stat { int events; int count[4U]; } ;
49 struct mm_rss_stat { atomic_long_t count[4U]; } ;
54 struct page_frag { struct page *page; __u32 offset; __u32 size; } ;
61 struct tlbflush_unmap_batch { struct cpumask cpumask; bool flush_required; bool writable; } ;
85 struct completion { unsigned int done; wait_queue_head_t wait; } ;
108 struct inode ;
58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ;
66 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ;
73 struct __anonstruct____missing_field_name_215 { struct arch_uprobe_task autask; unsigned long vaddr; } ;
73 struct __anonstruct____missing_field_name_216 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ;
73 union __anonunion____missing_field_name_214 { struct __anonstruct____missing_field_name_215 __annonCompField35; struct __anonstruct____missing_field_name_216 __annonCompField36; } ;
73 struct uprobe ;
73 struct return_instance ;
73 struct uprobe_task { enum uprobe_task_state state; union __anonunion____missing_field_name_214 __annonCompField37; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ;
95 struct return_instance { struct uprobe *uprobe; unsigned long func; unsigned long stack; unsigned long orig_ret_vaddr; bool chained; struct return_instance *next; } ;
111 struct xol_area ;
112 struct uprobes_state { struct xol_area *xol_area; } ;
151 struct address_space ;
152 struct mem_cgroup ;
153 union __anonunion____missing_field_name_217 { struct address_space *mapping; void *s_mem; atomic_t compound_mapcount; } ;
153 union __anonunion____missing_field_name_218 { unsigned long index; void *freelist; } ;
153 struct __anonstruct____missing_field_name_222 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ;
153 union __anonunion____missing_field_name_221 { atomic_t _mapcount; unsigned int active; struct __anonstruct____missing_field_name_222 __annonCompField40; int units; } ;
153 struct __anonstruct____missing_field_name_220 { union __anonunion____missing_field_name_221 __annonCompField41; atomic_t _refcount; } ;
153 union __anonunion____missing_field_name_219 { unsigned long counters; struct __anonstruct____missing_field_name_220 __annonCompField42; } ;
153 struct dev_pagemap ;
153 struct __anonstruct____missing_field_name_224 { struct page *next; int pages; int pobjects; } ;
153 struct __anonstruct____missing_field_name_225 { unsigned long compound_head; unsigned int compound_dtor; unsigned int compound_order; } ;
153 struct __anonstruct____missing_field_name_226 { unsigned long __pad; pgtable_t pmd_huge_pte; } ;
153 union __anonunion____missing_field_name_223 { struct list_head lru; struct dev_pagemap *pgmap; struct __anonstruct____missing_field_name_224 __annonCompField44; struct callback_head callback_head; struct __anonstruct____missing_field_name_225 __annonCompField45; struct __anonstruct____missing_field_name_226 __annonCompField46; } ;
153 struct kmem_cache ;
153 union __anonunion____missing_field_name_227 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; } ;
153 struct page { unsigned long flags; union __anonunion____missing_field_name_217 __annonCompField38; union __anonunion____missing_field_name_218 __annonCompField39; union __anonunion____missing_field_name_219 __annonCompField43; union __anonunion____missing_field_name_223 __annonCompField47; union __anonunion____missing_field_name_227 __annonCompField48; struct mem_cgroup *mem_cgroup; } ;
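/* Editor's note: struct page overlays several mutually exclusive uses
 * (page cache mapping/index, slab freelist, compound-page metadata,
 * page-table bookkeeping) through the unions above, keeping the
 * per-page descriptor small. */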
266 struct userfaultfd_ctx ;
266 struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; } ;
273 struct __anonstruct_shared_228 { struct rb_node rb; unsigned long rb_subtree_last; } ;
273 struct anon_vma ;
273 struct vm_operations_struct ;
273 struct mempolicy ;
273 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; struct __anonstruct_shared_228 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ;
346 struct core_thread { struct task_struct *task; struct core_thread *next; } ;
351 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ;
357 struct kioctx_table ;
358 struct linux_binfmt ;
358 struct mmu_notifier_mm ;
358 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; atomic_long_t nr_pmds; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long data_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct user_namespace *user_ns; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; atomic_long_t hugetlb_usage; struct work_struct async_put_work; } ;
544 struct vm_fault ;
598 struct vdso_image { void *data; unsigned long size; unsigned long alt; unsigned long alt_len; long sym_vvar_start; long sym_vvar_page; long sym_hpet_page; long sym_pvclock_page; long sym_VDSO32_NOTE_MASK; long sym___kernel_sigreturn; long sym___kernel_rt_sigreturn; long sym___kernel_vsyscall; long sym_int80_landing_pad; } ;
15 typedef __u64 Elf64_Addr;
16 typedef __u16 Elf64_Half;
18 typedef __u64 Elf64_Off;
20 typedef __u32 Elf64_Word;
21 typedef __u64 Elf64_Xword;
190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ;
198 typedef struct elf64_sym Elf64_Sym;
219 struct elf64_hdr { unsigned char e_ident[16U]; Elf64_Half e_type; Elf64_Half e_machine; Elf64_Word e_version; Elf64_Addr e_entry; Elf64_Off e_phoff; Elf64_Off e_shoff; Elf64_Word e_flags; Elf64_Half e_ehsize; Elf64_Half e_phentsize; Elf64_Half e_phnum; Elf64_Half e_shentsize; Elf64_Half e_shnum; Elf64_Half e_shstrndx; } ;
235 typedef struct elf64_hdr Elf64_Ehdr;
314 struct elf64_shdr { Elf64_Word sh_name; Elf64_Word sh_type; Elf64_Xword sh_flags; Elf64_Addr sh_addr; Elf64_Off sh_offset; Elf64_Xword sh_size; Elf64_Word sh_link; Elf64_Word sh_info; Elf64_Xword sh_addralign; Elf64_Xword sh_entsize; } ;
326 typedef struct elf64_shdr Elf64_Shdr;
65 struct radix_tree_root ;
65 union __anonunion____missing_field_name_233 { struct list_head private_list; struct callback_head callback_head; } ;
65 struct radix_tree_node { unsigned char shift; unsigned char offset; unsigned char count; unsigned char exceptional; struct radix_tree_node *parent; struct radix_tree_root *root; union __anonunion____missing_field_name_233 __annonCompField49; void *slots[64U]; unsigned long tags[3U][1U]; } ;
107 struct radix_tree_root { gfp_t gfp_mask; struct radix_tree_node *rnode; } ;
566 struct idr { struct radix_tree_root idr_rt; unsigned int idr_next; } ;
176 struct ida { struct radix_tree_root ida_rt; } ;
216 struct dentry ;
217 struct iattr ;
218 struct super_block ;
219 struct file_system_type ;
220 struct kernfs_open_node ;
221 struct kernfs_iattrs ;
245 struct kernfs_root ;
245 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ;
86 struct kernfs_node ;
86 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ;
90 struct kernfs_ops ;
90 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; struct kernfs_node *notify_next; } ;
97 union __anonunion____missing_field_name_242 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ;
97 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion____missing_field_name_242 __annonCompField50; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ;
139 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); } ;
158 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ;
174 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; struct seq_file *seq_file; void *priv; struct mutex mutex; struct mutex prealloc_mutex; int event; struct list_head list; char *prealloc_buf; size_t atomic_write_len; bool mmapped; bool released; const struct vm_operations_struct *vm_ops; } ;
194 struct kernfs_ops { int (*open)(struct kernfs_open_file *); void (*release)(struct kernfs_open_file *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; bool prealloc; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ;
521 struct sock ;
522 struct kobject ;
523 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ;
529 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ;
59 struct bin_attribute ;
60 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ;
37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ;
92 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ;
165 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ;
530 struct refcount_struct { atomic_t refs; } ;
11 typedef struct refcount_struct refcount_t;
41 struct kref { refcount_t refcount; } ;
52 struct kset ;
52 struct kobj_type ;
52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ;
115 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ;
123 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ;
131 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ;
148 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ;
223 struct kernel_param ;
228 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ;
62 struct kparam_string ;
62 struct kparam_array ;
62 union __anonunion____missing_field_name_245 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ;
62 struct kernel_param { const char *name; struct module *mod; const struct kernel_param_ops *ops; const u16 perm; s8 level; u8 flags; union __anonunion____missing_field_name_245 __annonCompField51; } ;
83 struct kparam_string { unsigned int maxlen; char *string; } ;
89 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ;
470 struct latch_tree_node { struct rb_node node[2U]; } ;
211 struct mod_arch_specific { } ;
38 struct exception_table_entry ;
39 struct module_param_attrs ;
39 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ;
49 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ;
276 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ;
283 struct mod_tree_node { struct module *mod; struct latch_tree_node node; } ;
288 struct module_layout { void *base; unsigned int size; unsigned int text_size; unsigned int ro_size; unsigned int ro_after_init_size; struct mod_tree_node mtn; } ;
304 struct mod_kallsyms { Elf64_Sym *symtab; unsigned int num_symtab; char *strtab; } ;
318 struct klp_modinfo { Elf64_Ehdr hdr; Elf64_Shdr *sechdrs; char *secstrings; unsigned int symndx; } ;
326 struct module_sect_attrs ;
326 struct module_notes_attrs ;
326 struct trace_event_call ;
326 struct trace_enum_map ;
326 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const s32 *crcs; unsigned int num_syms; struct mutex param_lock; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const s32 *gpl_crcs; const struct kernel_symbol *unused_syms; const s32 *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const s32 *unused_gpl_crcs; bool sig_ok; bool async_probe_requested; const struct kernel_symbol *gpl_future_syms; const s32 *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); struct module_layout core_layout; struct module_layout init_layout; struct mod_arch_specific arch; unsigned long taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; struct mod_kallsyms *kallsyms; struct mod_kallsyms core_kallsyms; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct trace_event_call **trace_events; unsigned int num_trace_events; struct trace_enum_map **trace_enums; unsigned int num_trace_enums; bool klp; bool klp_alive; struct klp_modinfo *klp_info; struct list_head source_list; struct list_head target_list; void (*exit)(); atomic_t refcnt; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ;
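/* Editor's note: char name[56U] corresponds to MODULE_NAME_LEN =
 * 64 - sizeof(unsigned long) = 56 on a 64-bit target, consistent with
 * the x86_64 register set and Elf64 types seen elsewhere in this
 * trace. */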
33 enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 } ;
16 typedef enum irqreturn irqreturn_t;
64 struct irq_domain ;
440 struct proc_dir_entry ;
133 struct exception_table_entry { int insn; int fixup; int handler; } ;
61 struct timerqueue_node { struct rb_node node; ktime_t expires; } ;
12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ;
50 struct hrtimer_clock_base ;
51 struct hrtimer_cpu_base ;
60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ;
65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; u8 state; u8 is_rel; } ;
113 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t (*get_time)(); ktime_t offset; } ;
146 struct hrtimer_cpu_base { raw_spinlock_t lock; seqcount_t seq; struct hrtimer *running; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; bool migration_enabled; bool nohz_active; unsigned char in_hrtirq; unsigned char hres_active; unsigned char hang_detected; ktime_t expires_next; struct hrtimer *next_timer; unsigned int nr_events; unsigned int nr_retries; unsigned int nr_hangs; unsigned int max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ;
506 struct tasklet_struct { struct tasklet_struct *next; unsigned long state; atomic_t count; void (*func)(unsigned long); unsigned long data; } ;
13 typedef unsigned long kernel_ulong_t;
14 struct pci_device_id { __u32 vendor; __u32 device; __u32 subvendor; __u32 subdevice; __u32 class; __u32 class_mask; kernel_ulong_t driver_data; } ;
187 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; __u32 cls; __u32 cls_msk; } ;
230 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ;
676 struct klist_node ;
37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ;
93 struct hlist_bl_node ;
93 struct hlist_bl_head { struct hlist_bl_node *first; } ;
36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ;
114 struct __anonstruct____missing_field_name_311 { spinlock_t lock; int count; } ;
114 union __anonunion____missing_field_name_310 { struct __anonstruct____missing_field_name_311 __annonCompField60; } ;
114 struct lockref { union __anonunion____missing_field_name_310 __annonCompField61; } ;
77 struct path ;
78 struct vfsmount ;
79 struct __anonstruct____missing_field_name_313 { u32 hash; u32 len; } ;
79 union __anonunion____missing_field_name_312 { struct __anonstruct____missing_field_name_313 __annonCompField62; u64 hash_len; } ;
79 struct qstr { union __anonunion____missing_field_name_312 __annonCompField63; const unsigned char *name; } ;
66 struct dentry_operations ;
66 union __anonunion____missing_field_name_314 { struct list_head d_lru; wait_queue_head_t *d_wait; } ;
66 union __anonunion_d_u_315 { struct hlist_node d_alias; struct hlist_bl_node d_in_lookup_hash; struct callback_head d_rcu; } ;
66 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; union __anonunion____missing_field_name_314 __annonCompField64; struct list_head d_child; struct list_head d_subdirs; union __anonunion_d_u_315 d_u; } ;
122 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); int (*d_init)(struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(const struct path *, bool ); struct dentry * (*d_real)(struct dentry *, const struct inode *, unsigned int); } ;
593 struct path { struct vfsmount *mnt; struct dentry *dentry; } ;
19 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; int nid; struct mem_cgroup *memcg; } ;
27 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ;
80 struct list_lru_one { struct list_head list; long nr_items; } ;
32 struct list_lru_memcg { struct list_lru_one *lru[0U]; } ;
37 struct list_lru_node { spinlock_t lock; struct list_lru_one lru; struct list_lru_memcg *memcg_lrus; } ;
47 struct list_lru { struct list_lru_node *node; struct list_head list; } ;
189 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ;
196 struct pid_namespace ;
196 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ;
56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ;
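/* Editor's note: numbers[1U] is a variable-length trailing array; a pid
 * living in a nested pid namespace is allocated with level + 1 upid
 * entries, one per namespace in which it is visible. */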
68 struct pid_link { struct hlist_node node; struct pid *pid; } ;
22 struct kernel_cap_struct { __u32 cap[2U]; } ;
25 typedef struct kernel_cap_struct kernel_cap_t;
45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ;
38 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ;
44 struct rcuwait { struct task_struct *task; } ;
32 enum rcu_sync_type { RCU_SYNC = 0, RCU_SCHED_SYNC = 1, RCU_BH_SYNC = 2 } ;
38 struct rcu_sync { int gp_state; int gp_count; wait_queue_head_t gp_wait; int cb_state; struct callback_head cb_head; enum rcu_sync_type gp_type; } ;
66 struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int *read_count; struct rw_semaphore rw_sem; struct rcuwait writer; int readers_block; } ;
144 struct delayed_call { void (*fn)(void *); void *arg; } ;
283 struct backing_dev_info ;
284 struct bdi_writeback ;
286 struct export_operations ;
288 struct iovec ;
289 struct kiocb ;
290 struct pipe_inode_info ;
291 struct poll_table_struct ;
292 struct kstatfs ;
293 struct swap_info_struct ;
294 struct iov_iter ;
295 struct fscrypt_info ;
296 struct fscrypt_operations ;
76 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ;
210 struct percpu_counter { raw_spinlock_t lock; s64 count; struct list_head list; s32 *counters; } ;
213 struct dquot ;
214 struct kqid ;
19 typedef __kernel_uid32_t projid_t;
23 struct __anonstruct_kprojid_t_317 { projid_t val; } ;
23 typedef struct __anonstruct_kprojid_t_317 kprojid_t;
181 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ;
66 typedef long long qsize_t;
67 union __anonunion____missing_field_name_318 { kuid_t uid; kgid_t gid; kprojid_t projid; } ;
67 struct kqid { union __anonunion____missing_field_name_318 __annonCompField65; enum quota_type type; } ;
194 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time64_t dqb_btime; time64_t dqb_itime; } ;
216 struct quota_format_type ;
217 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_max_spc_limit; qsize_t dqi_max_ino_limit; void *dqi_priv; } ;
282 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ;
309 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); int (*get_next_id)(struct super_block *, struct kqid *); } ;
321 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); int (*get_projid)(struct inode *, kprojid_t *); int (*get_next_id)(struct super_block *, struct kqid *); } ;
338 struct qc_dqblk { int d_fieldmask; u64 d_spc_hardlimit; u64 d_spc_softlimit; u64 d_ino_hardlimit; u64 d_ino_softlimit; u64 d_space; u64 d_ino_count; s64 d_ino_timer; s64 d_spc_timer; int d_ino_warns; int d_spc_warns; u64 d_rt_spc_hardlimit; u64 d_rt_spc_softlimit; u64 d_rt_space; s64 d_rt_spc_timer; int d_rt_spc_warns; } ;
361 struct qc_type_state { unsigned int flags; unsigned int spc_timelimit; unsigned int ino_timelimit; unsigned int rt_spc_timelimit; unsigned int spc_warnlimit; unsigned int ino_warnlimit; unsigned int rt_spc_warnlimit; unsigned long long ino; blkcnt_t blocks; blkcnt_t nextents; } ;
407 struct qc_state { unsigned int s_incoredqs; struct qc_type_state s_state[3U]; } ;
418 struct qc_info { int i_fieldmask; unsigned int i_flags; unsigned int i_spc_timelimit; unsigned int i_ino_timelimit; unsigned int i_rt_spc_timelimit; unsigned int i_spc_warnlimit; unsigned int i_ino_warnlimit; unsigned int i_rt_spc_warnlimit; } ;
431 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, const struct path *); int (*quota_off)(struct super_block *, int); int (*quota_enable)(struct super_block *, unsigned int); int (*quota_disable)(struct super_block *, unsigned int); int (*quota_sync)(struct super_block *, int); int (*set_info)(struct super_block *, int, struct qc_info *); int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); } ;
447 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ;
511 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct inode *files[3U]; struct mem_dqinfo info[3U]; const struct quota_format_ops *ops[3U]; } ;
540 struct writeback_control ;
541 struct kiocb { struct file *ki_filp; loff_t ki_pos; void (*ki_complete)(struct kiocb *, long, long); void *private; int ki_flags; } ;
317 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); bool (*isolate_page)(struct page *, isolate_mode_t ); void (*putback_page)(struct page *); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ;
376 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; atomic_t i_mmap_writable; struct rb_root i_mmap; struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; unsigned long nrexceptional; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; spinlock_t private_lock; gfp_t gfp_mask; struct list_head private_list; void *private_data; } ;
398 struct request_queue ;
399 struct hd_struct ;
399 struct gendisk ;
399 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct backing_dev_info *bd_bdi; struct list_head bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; } ;
515 struct posix_acl ;
542 struct inode_operations ;
542 union __anonunion____missing_field_name_323 { const unsigned int i_nlink; unsigned int __i_nlink; } ;
542 union __anonunion____missing_field_name_324 { struct hlist_head i_dentry; struct callback_head i_rcu; } ;
542 struct file_lock_context ;
542 struct cdev ;
542 union __anonunion____missing_field_name_325 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; unsigned int i_dir_seq; } ;
542 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion____missing_field_name_323 __annonCompField66; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct rw_semaphore i_rwsem; unsigned long dirtied_when; unsigned long dirtied_time_when; struct hlist_node i_hash; struct list_head i_io_list; struct bdi_writeback *i_wb; int i_wb_frn_winner; u16 i_wb_frn_avg_time; u16 i_wb_frn_history; struct list_head i_lru; struct list_head i_sb_list; struct list_head i_wb_list; union __anonunion____missing_field_name_324 __annonCompField67; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; const struct file_operations *i_fop; struct file_lock_context *i_flctx; struct address_space i_data; struct list_head i_devices; union __anonunion____missing_field_name_325 __annonCompField68; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; struct fscrypt_info *i_crypt_info; void *i_private; } ;
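/* Editor's note: the i_nlink/__i_nlink union exposes the link count as
 * const for readers while letting the VFS helpers (set_nlink() and
 * friends) write it through the non-const alias. */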
803 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ;
811 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ;
834 union __anonunion_f_u_326 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ;
834 struct file { union __anonunion_f_u_326 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; } ;
919 typedef void *fl_owner_t;
920 struct file_lock ;
921 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ;
927 struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); fl_owner_t (*lm_get_owner)(fl_owner_t ); void (*lm_put_owner)(fl_owner_t ); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, int); bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); } ;
948 struct net ;
954 struct nlm_lockowner ;
955 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ;
14 struct nfs4_lock_state ;
15 struct nfs4_lock_info { struct nfs4_lock_state *owner; } ;
19 struct fasync_struct ;
19 struct __anonstruct_afs_328 { struct list_head link; int state; } ;
19 union __anonunion_fl_u_327 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_328 afs; } ;
19 struct file_lock { struct file_lock *fl_next; struct list_head fl_list; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_327 fl_u; } ;
1007 struct file_lock_context { spinlock_t flc_lock; struct list_head flc_flock; struct list_head flc_posix; struct list_head flc_lease; } ;
1074 struct files_struct ;
1227 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ;
1262 struct sb_writers { int frozen; wait_queue_head_t wait_unfrozen; struct percpu_rw_semaphore rw_sem[3U]; } ;
1292 struct super_operations ;
1292 struct xattr_handler ;
1292 struct mtd_info ;
1292 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_iflags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; const struct fscrypt_operations *s_cop; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; unsigned int s_quota_types; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; struct user_namespace *s_user_ns; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; struct work_struct destroy_work; struct mutex s_sync_lock; int s_stack_depth; spinlock_t s_inode_list_lock; struct list_head s_inodes; spinlock_t s_inode_wblist_lock; struct list_head s_inodes_wb; } ;
1579 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ;
1592 struct dir_context ;
1617 struct dir_context { int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ;
1624 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iterate)(struct file *, struct dir_context *); int (*iterate_shared)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **, void **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); void (*show_fdinfo)(struct seq_file *, struct file *); ssize_t (*copy_file_range)(struct file *, loff_t , struct file *, loff_t , size_t , unsigned int); int (*clone_file_range)(struct file *, loff_t , struct file *, loff_t , u64 ); ssize_t (*dedupe_file_range)(struct file *, u64 , u64 , struct file *, u64 ); } ;
1692 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); int (*permission)(struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(const struct path *, struct kstat *, u32 , unsigned int); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ;
1771 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_super)(struct super_block *); int (*freeze_fs)(struct super_block *); int (*thaw_super)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); struct dquot ** (*get_dquots)(struct inode *); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ;
2014 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ;
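A file_system_type like the one above is what register_filesystem() consumes. A minimal, purely illustrative sketch (demo_* names are hypothetical):

#include <linux/fs.h>
#include <linux/module.h>

static int demo_fill_super(struct super_block *sb, void *data, int silent)
{
        return -ENOMEM;  /* stub: a real fs would allocate sb->s_root here */
}

static struct dentry *demo_mount(struct file_system_type *fs_type, int flags,
                                 const char *dev_name, void *data)
{
        /* mount_nodev() builds a superblock with no backing block device */
        return mount_nodev(fs_type, flags, data, demo_fill_super);
}

static struct file_system_type demo_fs_type = {
        .owner   = THIS_MODULE,
        .name    = "demofs",
        .mount   = demo_mount,
        .kill_sb = kill_anon_super,
};
/* registered via register_filesystem(&demo_fs_type) in module init */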
3219 struct assoc_array_ptr ;
3219 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ;
31 typedef int32_t key_serial_t;
34 typedef uint32_t key_perm_t;
35 struct key ;
36 struct user_struct ;
37 struct signal_struct ;
38 struct key_type ;
42 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ;
91 union key_payload { void *rcu_data0; void *data[4U]; } ;
128 union __anonunion____missing_field_name_329 { struct list_head graveyard_link; struct rb_node serial_node; } ;
128 struct key_user ;
128 union __anonunion____missing_field_name_330 { time_t expiry; time_t revoked_at; } ;
128 struct __anonstruct____missing_field_name_332 { struct key_type *type; char *description; } ;
128 union __anonunion____missing_field_name_331 { struct keyring_index_key index_key; struct __anonstruct____missing_field_name_332 __annonCompField71; } ;
128 struct __anonstruct____missing_field_name_334 { struct list_head name_link; struct assoc_array keys; } ;
128 union __anonunion____missing_field_name_333 { union key_payload payload; struct __anonstruct____missing_field_name_334 __annonCompField73; int reject_error; } ;
128 struct key { atomic_t usage; key_serial_t serial; union __anonunion____missing_field_name_329 __annonCompField69; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion____missing_field_name_330 __annonCompField70; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion____missing_field_name_331 __annonCompField72; union __anonunion____missing_field_name_333 __annonCompField74; int (*restrict_link)(struct key *, const struct key_type *, const union key_payload *); } ;
380 struct audit_context ;
26 struct sem_undo_list ;
26 struct sysv_sem { struct sem_undo_list *undo_list; } ;
26 struct sysv_shm { struct list_head shm_clist; } ;
12 enum kcov_mode { KCOV_MODE_DISABLED = 0, KCOV_MODE_TRACE = 1 } ;
84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ;
43 struct seccomp_filter ;
44 struct seccomp { int mode; struct seccomp_filter *filter; } ;
11 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ;
24 struct __anonstruct_sigset_t_335 { unsigned long sig[1U]; } ;
24 typedef struct __anonstruct_sigset_t_335 sigset_t;
25 struct siginfo ;
38 union sigval { int sival_int; void *sival_ptr; } ;
10 typedef union sigval sigval_t;
11 struct __anonstruct__kill_337 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ;
11 struct __anonstruct__timer_338 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ;
11 struct __anonstruct__rt_339 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ;
11 struct __anonstruct__sigchld_340 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ;
11 struct __anonstruct__addr_bnd_343 { void *_lower; void *_upper; } ;
11 union __anonunion____missing_field_name_342 { struct __anonstruct__addr_bnd_343 _addr_bnd; __u32 _pkey; } ;
11 struct __anonstruct__sigfault_341 { void *_addr; short _addr_lsb; union __anonunion____missing_field_name_342 __annonCompField75; } ;
11 struct __anonstruct__sigpoll_344 { long _band; int _fd; } ;
11 struct __anonstruct__sigsys_345 { void *_call_addr; int _syscall; unsigned int _arch; } ;
11 union __anonunion__sifields_336 { int _pad[28U]; struct __anonstruct__kill_337 _kill; struct __anonstruct__timer_338 _timer; struct __anonstruct__rt_339 _rt; struct __anonstruct__sigchld_340 _sigchld; struct __anonstruct__sigfault_341 _sigfault; struct __anonstruct__sigpoll_344 _sigpoll; struct __anonstruct__sigsys_345 _sigsys; } ;
11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_336 _sifields; } ;
118 typedef struct siginfo siginfo_t;
21 struct sigpending { struct list_head list; sigset_t signal; } ;
65 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ;
45 struct bio_list ;
46 struct blk_plug ;
47 struct cfs_rq ;
48 struct fs_struct ;
49 struct futex_pi_state ;
50 struct io_context ;
51 struct nameidata ;
52 struct perf_event_context ;
54 struct reclaim_state ;
55 struct robust_list_head ;
58 struct sighand_struct ;
59 struct task_delay_info ;
60 struct task_group ;
187 struct prev_cputime { u64 utime; u64 stime; raw_spinlock_t lock; } ;
203 struct task_cputime { u64 utime; u64 stime; unsigned long long sum_exec_runtime; } ;
220 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ;
244 struct load_weight { unsigned long weight; u32 inv_weight; } ;
261 struct sched_avg { u64 last_update_time; u64 load_sum; u32 util_sum; u32 period_contrib; unsigned long load_avg; unsigned long util_avg; } ;
322 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ;
357 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ;
393 struct rt_rq ;
393 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; unsigned short on_rq; unsigned short on_list; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ;
411 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ;
478 struct wake_q_node { struct wake_q_node *next; } ;
482 struct sched_class ;
482 struct rt_mutex_waiter ;
482 struct css_set ;
482 struct compat_robust_list_head ;
482 struct numa_group ;
482 struct kcov ;
482 struct task_struct { struct thread_info thread_info; volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; unsigned int cpu; unsigned int wakee_flips; unsigned long wakee_flip_decay_ts; struct task_struct *last_wakee; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; unsigned long rcu_tasks_nvcsw; bool rcu_tasks_holdout; struct list_head rcu_tasks_holdout_list; int rcu_tasks_idle_cpu; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; struct vmacache vmacache; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned long jobctl; unsigned int personality; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; unsigned char sched_migrated; unsigned char sched_remote_wakeup; unsigned char; unsigned char in_execve; unsigned char in_iowait; unsigned char restore_sigmask; unsigned char memcg_may_oom; unsigned char memcg_kmem_skip_account; unsigned char brk_randomized; unsigned long atomic_flags; struct restart_block restart_block; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; u64 utime; u64 stime; u64 gtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; u64 start_time; u64 real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *ptracer_cred; const struct cred *real_cred; const struct cred *cred; char comm[16U]; struct nameidata *nameidata; struct sysv_sem sysvsem; struct sysv_shm sysvshm; unsigned long last_switch_count; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; unsigned int sas_ss_flags; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct wake_q_node wake_q; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; unsigned int in_ubsan; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; u64 acct_timexpd; nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; int closid; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults; unsigned long total_numa_faults; unsigned long numa_faults_locality[3U]; unsigned long numa_pages_migrated; struct tlbflush_unmap_batch tlb_ubc; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; u64 timer_slack_ns; u64 default_timer_slack_ns; unsigned int kasan_depth; unsigned long trace; unsigned long trace_recursion; enum kcov_mode kcov_mode; unsigned int kcov_size; void *kcov_area; struct kcov *kcov; struct mem_cgroup *memcg_in_oom; gfp_t memcg_oom_gfp_mask; int memcg_oom_order; unsigned int memcg_nr_pages_over_high; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; unsigned long task_state_change; int pagefault_disabled; struct task_struct *oom_reaper_list; atomic_t stack_refcount; struct thread_struct thread; } ;
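As a point of reference (not part of the trace), tasks are normally reached through iteration helpers rather than by touching task_struct fields directly. A sketch, assuming a kernel of roughly this vintage where for_each_process() lives in <linux/sched/signal.h>:

#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>

static void demo_dump_tasks(void)
{
        struct task_struct *p;

        /* The task list may be walked under rcu_read_lock() */
        rcu_read_lock();
        for_each_process(p)
                pr_info("pid=%d comm=%s\n", p->pid, p->comm);
        rcu_read_unlock();
}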
1562 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; unsigned long unix_inflight; atomic_long_t pipe_bufs; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ;
60 struct group_info { atomic_t usage; int ngroups; kgid_t gid[0U]; } ;
86 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; kernel_cap_t cap_ambient; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ;
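The cred structure above is normally read through accessors rather than dereferenced directly. A hedged sketch (demo_is_root is hypothetical):

#include <linux/cred.h>
#include <linux/uidgid.h>

static bool demo_is_root(void)
{
        /* current_euid() reads the effective UID from current->cred */
        return uid_eq(current_euid(), GLOBAL_ROOT_UID);
}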
369 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; const struct file *file; void *private; } ;
30 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ;
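The seq_file/seq_operations pair above is most often used through the single_open() shorthand, which supplies the start/next/stop callbacks internally. An illustrative sketch (demo_* names hypothetical):

#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/module.h>

static int demo_show(struct seq_file *m, void *v)
{
        seq_printf(m, "value=%d\n", 42);
        return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
        return single_open(file, demo_show, NULL);
}

static const struct file_operations demo_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = demo_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};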
222 struct pinctrl ;
223 struct pinctrl_state ;
200 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *init_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ;
58 struct pm_message { int event; } ;
64 typedef struct pm_message pm_message_t;
65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ;
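Drivers rarely fill all of dev_pm_ops by hand; the SIMPLE_DEV_PM_OPS() macro populates the system-sleep slots. A sketch with hypothetical callbacks:

#include <linux/pm.h>
#include <linux/device.h>

static int demo_suspend(struct device *dev)
{
        return 0;  /* quiesce hardware here */
}

static int demo_resume(struct device *dev)
{
        return 0;  /* re-initialize hardware here */
}

/* Expands to: static const struct dev_pm_ops demo_pm_ops = { ... } */
static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);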
315 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ;
322 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ;
330 struct wakeup_source ;
331 struct wake_irq ;
332 struct pm_domain_data ;
333 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; struct pm_domain_data *domain_data; } ;
551 struct dev_pm_qos ;
551 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool in_dpm_list; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; bool no_pm_callbacks; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; bool ignore_children; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; unsigned int links_count; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ;
613 struct dev_pm_domain { struct dev_pm_ops ops; void (*detach)(struct device *, bool ); int (*activate)(struct device *); void (*sync)(struct device *); void (*dismiss)(struct device *); } ;
76 struct dev_archdata { void *iommu; } ;
8 struct dma_map_ops ;
21 struct device_private ;
22 struct device_driver ;
23 struct driver_private ;
24 struct class ;
25 struct subsys_private ;
26 struct bus_type ;
27 struct device_node ;
28 struct fwnode_handle ;
29 struct iommu_ops ;
30 struct iommu_group ;
31 struct iommu_fwspec ;
62 struct device_attribute ;
62 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); int (*num_vf)(struct device *); const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ;
147 struct device_type ;
206 enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ;
212 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; enum probe_type probe_type; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ;
362 struct class_attribute ;
362 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **class_groups; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ;
457 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ;
527 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ;
555 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ;
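device_attribute entries like the one above back sysfs files. The DEVICE_ATTR_RO() helper wires a name to its name##_show() callback; all demo names below are hypothetical:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(demo);
/* exposed with device_create_file(dev, &dev_attr_demo) */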
727 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ;
790 enum dl_dev_state { DL_DEV_NO_DRIVER = 0, DL_DEV_PROBING = 1, DL_DEV_DRIVER_BOUND = 2, DL_DEV_UNBINDING = 3 } ;
797 struct dev_links_info { struct list_head suppliers; struct list_head consumers; enum dl_dev_state status; } ;
817 struct dma_coherent_mem ;
817 struct cma ;
817 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_links_info links; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct irq_domain *msi_domain; struct dev_pin_info *pins; struct list_head msi_list; int numa_node; const struct dma_map_ops *dma_ops; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct fwnode_handle *fwnode; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; struct iommu_fwspec *iommu_fwspec; bool offline_disabled; bool offline; } ;
976 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ;
70 struct hotplug_slot ;
70 struct pci_slot { struct pci_bus *bus; struct list_head list; struct hotplug_slot *hotplug; unsigned char number; struct kobject kobj; } ;
108 typedef int pci_power_t;
135 typedef unsigned int pci_channel_state_t;
136 enum pci_channel_state { pci_channel_io_normal = 1, pci_channel_io_frozen = 2, pci_channel_io_perm_failure = 3 } ;
161 typedef unsigned short pci_dev_flags_t;
188 typedef unsigned short pci_bus_flags_t;
246 struct pcie_link_state ;
247 struct pci_vpd ;
248 struct pci_sriov ;
250 struct pci_driver ;
250 union __anonunion____missing_field_name_361 { struct pci_sriov *sriov; struct pci_dev *physfn; } ;
250 struct pci_dev { struct list_head bus_list; struct pci_bus *bus; struct pci_bus *subordinate; void *sysdata; struct proc_dir_entry *procent; struct pci_slot *slot; unsigned int devfn; unsigned short vendor; unsigned short device; unsigned short subsystem_vendor; unsigned short subsystem_device; unsigned int class; u8 revision; u8 hdr_type; u16 aer_cap; u8 pcie_cap; u8 msi_cap; u8 msix_cap; unsigned char pcie_mpss; u8 rom_base_reg; u8 pin; u16 pcie_flags_reg; unsigned long *dma_alias_mask; struct pci_driver *driver; u64 dma_mask; struct device_dma_parameters dma_parms; pci_power_t current_state; u8 pm_cap; unsigned char pme_support; unsigned char pme_interrupt; unsigned char pme_poll; unsigned char d1_support; unsigned char d2_support; unsigned char no_d1d2; unsigned char no_d3cold; unsigned char bridge_d3; unsigned char d3cold_allowed; unsigned char mmio_always_on; unsigned char wakeup_prepared; unsigned char runtime_d3cold; unsigned char ignore_hotplug; unsigned char hotplug_user_indicators; unsigned int d3_delay; unsigned int d3cold_delay; struct pcie_link_state *link_state; pci_channel_state_t error_state; struct device dev; int cfg_size; unsigned int irq; struct resource resource[17U]; bool match_driver; unsigned char transparent; unsigned char multifunction; unsigned char is_added; unsigned char is_busmaster; unsigned char no_msi; unsigned char no_64bit_msi; unsigned char block_cfg_access; unsigned char broken_parity_status; unsigned char irq_reroute_variant; unsigned char msi_enabled; unsigned char msix_enabled; unsigned char ari_enabled; unsigned char ats_enabled; unsigned char is_managed; unsigned char needs_freset; unsigned char state_saved; unsigned char is_physfn; unsigned char is_virtfn; unsigned char reset_fn; unsigned char is_hotplug_bridge; unsigned char __aer_firmware_first_valid; unsigned char __aer_firmware_first; unsigned char broken_intx_masking; unsigned char io_window_1k; unsigned char irq_managed; unsigned char has_secondary_link; unsigned char non_compliant_bars; pci_dev_flags_t dev_flags; atomic_t enable_cnt; u32 saved_config_space[16U]; struct hlist_head saved_cap_space; struct bin_attribute *rom_attr; int rom_attr_enabled; struct bin_attribute *res_attr[17U]; struct bin_attribute *res_attr_wc[17U]; unsigned char ptm_root; unsigned char ptm_enabled; u8 ptm_granularity; const struct attribute_group **msi_irq_groups; struct pci_vpd *vpd; union __anonunion____missing_field_name_361 __annonCompField80; u16 ats_cap; u8 ats_stu; atomic_t ats_ref_cnt; phys_addr_t rom; size_t romlen; char *driver_override; } ;
419 struct pci_ops ;
419 struct msi_controller ;
482 struct pci_bus { struct list_head node; struct pci_bus *parent; struct list_head children; struct list_head devices; struct pci_dev *self; struct list_head slots; struct resource *resource[4U]; struct list_head resources; struct resource busn_res; struct pci_ops *ops; struct msi_controller *msi; void *sysdata; struct proc_dir_entry *procdir; unsigned char number; unsigned char primary; unsigned char max_bus_speed; unsigned char cur_bus_speed; char name[48U]; unsigned short bridge_ctl; pci_bus_flags_t bus_flags; struct device *bridge; struct device dev; struct bin_attribute *legacy_io; struct bin_attribute *legacy_mem; unsigned char is_added; } ;
606 struct pci_ops { int (*add_bus)(struct pci_bus *); void (*remove_bus)(struct pci_bus *); void * (*map_bus)(struct pci_bus *, unsigned int, int); int (*read)(struct pci_bus *, unsigned int, int, int, u32 *); int (*write)(struct pci_bus *, unsigned int, int, int, u32 ); } ;
636 struct pci_dynids { spinlock_t lock; struct list_head list; } ;
650 typedef unsigned int pci_ers_result_t;
660 struct pci_error_handlers { pci_ers_result_t (*error_detected)(struct pci_dev *, enum pci_channel_state ); pci_ers_result_t (*mmio_enabled)(struct pci_dev *); pci_ers_result_t (*slot_reset)(struct pci_dev *); void (*reset_notify)(struct pci_dev *, bool ); void (*resume)(struct pci_dev *); } ;
690 struct pci_driver { struct list_head node; const char *name; const struct pci_device_id *id_table; int (*probe)(struct pci_dev *, const struct pci_device_id *); void (*remove)(struct pci_dev *); int (*suspend)(struct pci_dev *, pm_message_t ); int (*suspend_late)(struct pci_dev *, pm_message_t ); int (*resume_early)(struct pci_dev *); int (*resume)(struct pci_dev *); void (*shutdown)(struct pci_dev *); int (*sriov_configure)(struct pci_dev *, int); const struct pci_error_handlers *err_handler; struct device_driver driver; struct pci_dynids dynids; } ;
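For reference, the pci_driver above is the registration object of an ordinary PCI driver. A minimal, illustrative skeleton (vendor/device IDs and demo_* names are made up):

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id demo_ids[] = {
        { PCI_DEVICE(0x1234, 0x5678) },  /* hypothetical IDs */
        { 0 }
};
MODULE_DEVICE_TABLE(pci, demo_ids);

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        int err = pci_enable_device(pdev);

        if (err)
                return err;
        pci_set_master(pdev);  /* allow the device to perform DMA */
        return 0;
}

static void demo_remove(struct pci_dev *pdev)
{
        pci_disable_device(pdev);
}

static struct pci_driver demo_driver = {
        .name     = "demo_pci",
        .id_table = demo_ids,
        .probe    = demo_probe,
        .remove   = demo_remove,
};
module_pci_driver(demo_driver);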
1270 struct percpu_ref ;
55 typedef void percpu_ref_func_t(struct percpu_ref *);
68 struct percpu_ref { atomic_long_t count; unsigned long percpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic; struct callback_head rcu; } ;
277 struct vm_fault { struct vm_area_struct *vma; unsigned int flags; gfp_t gfp_mask; unsigned long pgoff; unsigned long address; pmd_t *pmd; pud_t *pud; pte_t orig_pte; struct page *cow_page; struct mem_cgroup *memcg; struct page *page; pte_t *pte; spinlock_t *ptl; pgtable_t prealloc_pte; } ;
340 enum page_entry_size { PE_SIZE_PTE = 0, PE_SIZE_PMD = 1, PE_SIZE_PUD = 2 } ;
346 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*mremap)(struct vm_area_struct *); int (*fault)(struct vm_fault *); int (*huge_fault)(struct vm_fault *, enum page_entry_size ); void (*map_pages)(struct vm_fault *, unsigned long, unsigned long); int (*page_mkwrite)(struct vm_fault *); int (*pfn_mkwrite)(struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ;
1357 struct kvec ;
2513 struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; } ;
21 struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; } ;
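A short sketch of how an sg_table is typically built and released (demo_build_sg is hypothetical):

#include <linux/scatterlist.h>
#include <linux/mm.h>

static int demo_build_sg(struct page **pages, unsigned int n)
{
        struct sg_table table;
        int err;

        /* Coalesces physically contiguous pages into fewer entries */
        err = sg_alloc_table_from_pages(&table, pages, n, 0,
                                        (unsigned long)n * PAGE_SIZE,
                                        GFP_KERNEL);
        if (err)
                return err;
        /* table.sgl / table.nents now describe the buffer */
        sg_free_table(&table);
        return 0;
}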
96 enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ;
158 struct dma_map_ops { void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , unsigned long); void (*free)(struct device *, size_t , void *, dma_addr_t , unsigned long); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , unsigned long); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , unsigned long); dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , unsigned long); void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); dma_addr_t (*map_resource)(struct device *, phys_addr_t , size_t , enum dma_data_direction , unsigned long); void (*unmap_resource)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long); void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction ); int (*mapping_error)(struct device *, dma_addr_t ); int (*dma_supported)(struct device *, u64 ); int (*set_dma_mask)(struct device *, u64 ); int is_phys; } ;
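Drivers do not call these dma_map_ops slots directly; they go through the dma_map_*() wrappers, which dispatch here. A hedged streaming-DMA sketch (demo_dma_tx is hypothetical):

#include <linux/dma-mapping.h>

static int demo_dma_tx(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, handle))
                return -ENOMEM;
        /* ... program the device with 'handle', wait for completion ... */
        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        return 0;
}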
56 struct iovec { void *iov_base; __kernel_size_t iov_len; } ;
21 struct kvec { void *iov_base; size_t iov_len; } ;
29 union __anonunion____missing_field_name_374 { const struct iovec *iov; const struct kvec *kvec; const struct bio_vec *bvec; struct pipe_inode_info *pipe; } ;
29 union __anonunion____missing_field_name_375 { unsigned long nr_segs; int idx; } ;
29 struct iov_iter { int type; size_t iov_offset; size_t count; union __anonunion____missing_field_name_374 __annonCompField81; union __anonunion____missing_field_name_375 __annonCompField82; } ;
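The iov_iter above abstracts over user iovecs, kvecs, bvecs and pipes; consumers drain it with copy_{to,from}_iter(). A small sketch (demo_drain is hypothetical):

#include <linux/uio.h>

static ssize_t demo_drain(struct iov_iter *from, void *kbuf, size_t len)
{
        /* copy_from_iter() advances the iterator and returns bytes copied */
        size_t copied = copy_from_iter(kbuf, len, from);

        return copied ? (ssize_t)copied : -EFAULT;
}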
1437 struct dql { unsigned int num_queued; unsigned int adj_limit; unsigned int last_obj_cnt; unsigned int limit; unsigned int num_completed; unsigned int prev_ovlimit; unsigned int prev_num_queued; unsigned int prev_last_obj_cnt; unsigned int lowest_slack; unsigned long slack_start_time; unsigned int max_limit; unsigned int min_limit; unsigned int slack_hold_time; } ;
11 typedef unsigned short __kernel_sa_family_t;
23 typedef __kernel_sa_family_t sa_family_t;
24 struct sockaddr { sa_family_t sa_family; char sa_data[14U]; } ;
43 struct __anonstruct_sync_serial_settings_377 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; } ;
43 typedef struct __anonstruct_sync_serial_settings_377 sync_serial_settings;
50 struct __anonstruct_te1_settings_378 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; unsigned int slot_map; } ;
50 typedef struct __anonstruct_te1_settings_378 te1_settings;
55 struct __anonstruct_raw_hdlc_proto_379 { unsigned short encoding; unsigned short parity; } ;
55 typedef struct __anonstruct_raw_hdlc_proto_379 raw_hdlc_proto;
65 struct __anonstruct_fr_proto_380 { unsigned int t391; unsigned int t392; unsigned int n391; unsigned int n392; unsigned int n393; unsigned short lmi; unsigned short dce; } ;
65 typedef struct __anonstruct_fr_proto_380 fr_proto;
69 struct __anonstruct_fr_proto_pvc_381 { unsigned int dlci; } ;
69 typedef struct __anonstruct_fr_proto_pvc_381 fr_proto_pvc;
74 struct __anonstruct_fr_proto_pvc_info_382 { unsigned int dlci; char master[16U]; } ;
74 typedef struct __anonstruct_fr_proto_pvc_info_382 fr_proto_pvc_info;
79 struct __anonstruct_cisco_proto_383 { unsigned int interval; unsigned int timeout; } ;
79 typedef struct __anonstruct_cisco_proto_383 cisco_proto;
117 struct ifmap { unsigned long mem_start; unsigned long mem_end; unsigned short base_addr; unsigned char irq; unsigned char dma; unsigned char port; } ;
201 union __anonunion_ifs_ifsu_384 { raw_hdlc_proto *raw_hdlc; cisco_proto *cisco; fr_proto *fr; fr_proto_pvc *fr_pvc; fr_proto_pvc_info *fr_pvc_info; sync_serial_settings *sync; te1_settings *te1; } ;
201 struct if_settings { unsigned int type; unsigned int size; union __anonunion_ifs_ifsu_384 ifs_ifsu; } ;
220 union __anonunion_ifr_ifrn_385 { char ifrn_name[16U]; } ;
220 union __anonunion_ifr_ifru_386 { struct sockaddr ifru_addr; struct sockaddr ifru_dstaddr; struct sockaddr ifru_broadaddr; struct sockaddr ifru_netmask; struct sockaddr ifru_hwaddr; short ifru_flags; int ifru_ivalue; int ifru_mtu; struct ifmap ifru_map; char ifru_slave[16U]; char ifru_newname[16U]; void *ifru_data; struct if_settings ifru_settings; } ;
220 struct ifreq { union __anonunion_ifr_ifrn_385 ifr_ifrn; union __anonunion_ifr_ifru_386 ifr_ifru; } ;
18 typedef s32 compat_time_t;
39 typedef s32 compat_long_t;
45 typedef u32 compat_uptr_t;
46 struct compat_timespec { compat_time_t tv_sec; s32 tv_nsec; } ;
278 struct compat_robust_list { compat_uptr_t next; } ;
282 struct compat_robust_list_head { struct compat_robust_list list; compat_long_t futex_offset; compat_uptr_t list_op_pending; } ;
126 struct sk_buff ;
161 struct in6_addr ;
15 typedef u64 netdev_features_t;
99 union __anonunion_in6_u_412 { __u8 u6_addr8[16U]; __be16 u6_addr16[8U]; __be32 u6_addr32[4U]; } ;
99 struct in6_addr { union __anonunion_in6_u_412 in6_u; } ;
46 struct ethhdr { unsigned char h_dest[6U]; unsigned char h_source[6U]; __be16 h_proto; } ;
246 struct pipe_buf_operations ;
246 struct pipe_buffer { struct page *page; unsigned int offset; unsigned int len; const struct pipe_buf_operations *ops; unsigned int flags; unsigned long private; } ;
27 struct pipe_inode_info { struct mutex mutex; wait_queue_head_t wait; unsigned int nrbufs; unsigned int curbuf; unsigned int buffers; unsigned int readers; unsigned int writers; unsigned int files; unsigned int waiting_writers; unsigned int r_counter; unsigned int w_counter; struct page *tmp_page; struct fasync_struct *fasync_readers; struct fasync_struct *fasync_writers; struct pipe_buffer *bufs; struct user_struct *user; } ;
63 struct pipe_buf_operations { int can_merge; int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); void (*release)(struct pipe_inode_info *, struct pipe_buffer *); int (*steal)(struct pipe_inode_info *, struct pipe_buffer *); void (*get)(struct pipe_inode_info *, struct pipe_buffer *); } ;
255 union __anonunion____missing_field_name_426 { __be32 ipv4_daddr; struct in6_addr ipv6_daddr; char neigh_header[8U]; } ;
255 struct nf_bridge_info { atomic_t use; unsigned char orig_proto; unsigned char pkt_otherhost; unsigned char in_prerouting; unsigned char bridged_dnat; __u16 frag_max_size; struct net_device *physindev; struct net_device *physoutdev; union __anonunion____missing_field_name_426 __annonCompField91; } ;
279 struct sk_buff_head { struct sk_buff *next; struct sk_buff *prev; __u32 qlen; spinlock_t lock; } ;
501 typedef unsigned int sk_buff_data_t;
502 struct __anonstruct____missing_field_name_429 { u32 stamp_us; u32 stamp_jiffies; } ;
502 union __anonunion____missing_field_name_428 { u64 v64; struct __anonstruct____missing_field_name_429 __annonCompField92; } ;
502 struct skb_mstamp { union __anonunion____missing_field_name_428 __annonCompField93; } ;
565 union __anonunion____missing_field_name_432 { ktime_t tstamp; struct skb_mstamp skb_mstamp; } ;
565 struct __anonstruct____missing_field_name_431 { struct sk_buff *next; struct sk_buff *prev; union __anonunion____missing_field_name_432 __annonCompField94; } ;
565 union __anonunion____missing_field_name_430 { struct __anonstruct____missing_field_name_431 __annonCompField95; struct rb_node rbnode; } ;
565 union __anonunion____missing_field_name_433 { struct net_device *dev; unsigned long dev_scratch; } ;
565 struct sec_path ;
565 struct __anonstruct____missing_field_name_435 { __u16 csum_start; __u16 csum_offset; } ;
565 union __anonunion____missing_field_name_434 { __wsum csum; struct __anonstruct____missing_field_name_435 __annonCompField98; } ;
565 union __anonunion____missing_field_name_436 { unsigned int napi_id; unsigned int sender_cpu; } ;
565 union __anonunion____missing_field_name_437 { __u32 mark; __u32 reserved_tailroom; } ;
565 union __anonunion____missing_field_name_438 { __be16 inner_protocol; __u8 inner_ipproto; } ;
565 struct sk_buff { union __anonunion____missing_field_name_430 __annonCompField96; struct sock *sk; union __anonunion____missing_field_name_433 __annonCompField97; char cb[48U]; unsigned long _skb_refdst; void (*destructor)(struct sk_buff *); struct sec_path *sp; unsigned long _nfct; struct nf_bridge_info *nf_bridge; unsigned int len; unsigned int data_len; __u16 mac_len; __u16 hdr_len; __u16 queue_mapping; __u8 __cloned_offset[0U]; unsigned char cloned; unsigned char nohdr; unsigned char fclone; unsigned char peeked; unsigned char head_frag; unsigned char xmit_more; unsigned char __unused; __u32 headers_start[0U]; __u8 __pkt_type_offset[0U]; unsigned char pkt_type; unsigned char pfmemalloc; unsigned char ignore_df; unsigned char nf_trace; unsigned char ip_summed; unsigned char ooo_okay; unsigned char l4_hash; unsigned char sw_hash; unsigned char wifi_acked_valid; unsigned char wifi_acked; unsigned char no_fcs; unsigned char encapsulation; unsigned char encap_hdr_csum; unsigned char csum_valid; unsigned char csum_complete_sw; unsigned char csum_level; unsigned char csum_bad; unsigned char dst_pending_confirm; unsigned char ndisc_nodetype; unsigned char ipvs_property; unsigned char inner_protocol_type; unsigned char remcsum_offload; unsigned char offload_fwd_mark; unsigned char tc_skip_classify; unsigned char tc_at_ingress; unsigned char tc_redirected; unsigned char tc_from_ingress; __u16 tc_index; union __anonunion____missing_field_name_434 __annonCompField99; __u32 priority; int skb_iif; __u32 hash; __be16 vlan_proto; __u16 vlan_tci; union __anonunion____missing_field_name_436 __annonCompField100; __u32 secmark; union __anonunion____missing_field_name_437 __annonCompField101; union __anonunion____missing_field_name_438 __annonCompField102; __u16 inner_transport_header; __u16 inner_network_header; __u16 inner_mac_header; __be16 protocol; __u16 transport_header; __u16 network_header; __u16 mac_header; __u32 headers_end[0U]; sk_buff_data_t tail; sk_buff_data_t end; unsigned char *head; unsigned char *data; unsigned int truesize; atomic_t users; } ;
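A brief sketch of the usual receive-path handling of an sk_buff (demo_* names hypothetical; assumes 'frame' holds a complete Ethernet frame):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>

static struct sk_buff *demo_rx_skb(struct net_device *dev,
                                   const void *frame, unsigned int len)
{
        struct sk_buff *skb = netdev_alloc_skb(dev, len);

        if (!skb)
                return NULL;
        memcpy(skb_put(skb, len), frame, len);    /* skb_put() grows the data area */
        skb->protocol = eth_type_trans(skb, dev); /* pulls the Ethernet header */
        return skb;                               /* then handed to netif_rx() */
}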
852 struct dst_entry ;
39 struct ethtool_cmd { __u32 cmd; __u32 supported; __u32 advertising; __u16 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 transceiver; __u8 autoneg; __u8 mdio_support; __u32 maxtxpkt; __u32 maxrxpkt; __u16 speed_hi; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __u32 lp_advertising; __u32 reserved[2U]; } ;
130 struct ethtool_drvinfo { __u32 cmd; char driver[32U]; char version[32U]; char fw_version[32U]; char bus_info[32U]; char erom_version[32U]; char reserved2[12U]; __u32 n_priv_flags; __u32 n_stats; __u32 testinfo_len; __u32 eedump_len; __u32 regdump_len; } ;
194 struct ethtool_wolinfo { __u32 cmd; __u32 supported; __u32 wolopts; __u8 sopass[6U]; } ;
238 struct ethtool_tunable { __u32 cmd; __u32 id; __u32 type_id; __u32 len; void *data[0U]; } ;
256 struct ethtool_regs { __u32 cmd; __u32 version; __u32 len; __u8 data[0U]; } ;
285 struct ethtool_eeprom { __u32 cmd; __u32 magic; __u32 offset; __u32 len; __u8 data[0U]; } ;
311 struct ethtool_eee { __u32 cmd; __u32 supported; __u32 advertised; __u32 lp_advertised; __u32 eee_active; __u32 eee_enabled; __u32 tx_lpi_enabled; __u32 tx_lpi_timer; __u32 reserved[2U]; } ;
340 struct ethtool_modinfo { __u32 cmd; __u32 type; __u32 eeprom_len; __u32 reserved[8U]; } ;
357 struct ethtool_coalesce { __u32 cmd; __u32 rx_coalesce_usecs; __u32 rx_max_coalesced_frames; __u32 rx_coalesce_usecs_irq; __u32 rx_max_coalesced_frames_irq; __u32 tx_coalesce_usecs; __u32 tx_max_coalesced_frames; __u32 tx_coalesce_usecs_irq; __u32 tx_max_coalesced_frames_irq; __u32 stats_block_coalesce_usecs; __u32 use_adaptive_rx_coalesce; __u32 use_adaptive_tx_coalesce; __u32 pkt_rate_low; __u32 rx_coalesce_usecs_low; __u32 rx_max_coalesced_frames_low; __u32 tx_coalesce_usecs_low; __u32 tx_max_coalesced_frames_low; __u32 pkt_rate_high; __u32 rx_coalesce_usecs_high; __u32 rx_max_coalesced_frames_high; __u32 tx_coalesce_usecs_high; __u32 tx_max_coalesced_frames_high; __u32 rate_sample_interval; } ;
456 struct ethtool_ringparam { __u32 cmd; __u32 rx_max_pending; __u32 rx_mini_max_pending; __u32 rx_jumbo_max_pending; __u32 tx_max_pending; __u32 rx_pending; __u32 rx_mini_pending; __u32 rx_jumbo_pending; __u32 tx_pending; } ;
493 struct ethtool_channels { __u32 cmd; __u32 max_rx; __u32 max_tx; __u32 max_other; __u32 max_combined; __u32 rx_count; __u32 tx_count; __u32 other_count; __u32 combined_count; } ;
521 struct ethtool_pauseparam { __u32 cmd; __u32 autoneg; __u32 rx_pause; __u32 tx_pause; } ;
627 struct ethtool_test { __u32 cmd; __u32 flags; __u32 reserved; __u32 len; __u64 data[0U]; } ;
659 struct ethtool_stats { __u32 cmd; __u32 n_stats; __u64 data[0U]; } ;
701 struct ethtool_tcpip4_spec { __be32 ip4src; __be32 ip4dst; __be16 psrc; __be16 pdst; __u8 tos; } ;
734 struct ethtool_ah_espip4_spec { __be32 ip4src; __be32 ip4dst; __be32 spi; __u8 tos; } ;
750 struct ethtool_usrip4_spec { __be32 ip4src; __be32 ip4dst; __be32 l4_4_bytes; __u8 tos; __u8 ip_ver; __u8 proto; } ;
770 struct ethtool_tcpip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be16 psrc; __be16 pdst; __u8 tclass; } ;
788 struct ethtool_ah_espip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be32 spi; __u8 tclass; } ;
804 struct ethtool_usrip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be32 l4_4_bytes; __u8 tclass; __u8 l4_proto; } ;
820 union ethtool_flow_union { struct ethtool_tcpip4_spec tcp_ip4_spec; struct ethtool_tcpip4_spec udp_ip4_spec; struct ethtool_tcpip4_spec sctp_ip4_spec; struct ethtool_ah_espip4_spec ah_ip4_spec; struct ethtool_ah_espip4_spec esp_ip4_spec; struct ethtool_usrip4_spec usr_ip4_spec; struct ethtool_tcpip6_spec tcp_ip6_spec; struct ethtool_tcpip6_spec udp_ip6_spec; struct ethtool_tcpip6_spec sctp_ip6_spec; struct ethtool_ah_espip6_spec ah_ip6_spec; struct ethtool_ah_espip6_spec esp_ip6_spec; struct ethtool_usrip6_spec usr_ip6_spec; struct ethhdr ether_spec; __u8 hdata[52U]; } ;
837 struct ethtool_flow_ext { __u8 padding[2U]; unsigned char h_dest[6U]; __be16 vlan_etype; __be16 vlan_tci; __be32 data[2U]; } ;
856 struct ethtool_rx_flow_spec { __u32 flow_type; union ethtool_flow_union h_u; struct ethtool_flow_ext h_ext; union ethtool_flow_union m_u; struct ethtool_flow_ext m_ext; __u64 ring_cookie; __u32 location; } ;
906 struct ethtool_rxnfc { __u32 cmd; __u32 flow_type; __u64 data; struct ethtool_rx_flow_spec fs; __u32 rule_cnt; __u32 rule_locs[0U]; } ;
1077 struct ethtool_flash { __u32 cmd; __u32 region; char data[128U]; } ;
1085 struct ethtool_dump { __u32 cmd; __u32 version; __u32 flag; __u32 len; __u8 data[0U]; } ;
1161 struct ethtool_ts_info { __u32 cmd; __u32 so_timestamping; __s32 phc_index; __u32 tx_types; __u32 tx_reserved[3U]; __u32 rx_filters; __u32 rx_reserved[3U]; } ;
1539 struct ethtool_link_settings { __u32 cmd; __u32 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 autoneg; __u8 mdio_support; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __s8 link_mode_masks_nwords; __u32 reserved[8U]; __u32 link_mode_masks[0U]; } ;
39 enum ethtool_phys_id_state { ETHTOOL_ID_INACTIVE = 0, ETHTOOL_ID_ACTIVE = 1, ETHTOOL_ID_ON = 2, ETHTOOL_ID_OFF = 3 } ;
97 struct __anonstruct_link_modes_442 { unsigned long supported[1U]; unsigned long advertising[1U]; unsigned long lp_advertising[1U]; } ;
97 struct ethtool_link_ksettings { struct ethtool_link_settings base; struct __anonstruct_link_modes_442 link_modes; } ;
158 struct ethtool_ops { int (*get_settings)(struct net_device *, struct ethtool_cmd *); int (*set_settings)(struct net_device *, struct ethtool_cmd *); void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); int (*get_regs_len)(struct net_device *); void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); u32 (*get_msglevel)(struct net_device *); void (*set_msglevel)(struct net_device *, u32 ); int (*nway_reset)(struct net_device *); u32 (*get_link)(struct net_device *); int (*get_eeprom_len)(struct net_device *); int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *); int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *); void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); void (*get_strings)(struct net_device *, u32 , u8 *); int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state ); void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); int (*begin)(struct net_device *); void (*complete)(struct net_device *); u32 (*get_priv_flags)(struct net_device *); int (*set_priv_flags)(struct net_device *, u32 ); int (*get_sset_count)(struct net_device *, int); int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *); int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); int (*flash_device)(struct net_device *, struct ethtool_flash *); int (*reset)(struct net_device *, u32 *); u32 (*get_rxfh_key_size)(struct net_device *); u32 (*get_rxfh_indir_size)(struct net_device *); int (*get_rxfh)(struct net_device *, u32 *, u8 *, u8 *); int (*set_rxfh)(struct net_device *, const u32 *, const u8 *, const u8 ); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); int (*set_dump)(struct net_device *, struct ethtool_dump *); int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *); int (*get_module_info)(struct net_device *, struct ethtool_modinfo *); int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_eee)(struct net_device *, struct ethtool_eee *); int (*set_eee)(struct net_device *, struct ethtool_eee *); int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *); int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); int (*get_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *); int (*set_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *); int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *); int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); } ;
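Only the callbacks a device supports are filled in; the rest stay NULL. A minimal illustrative table (demo_* names hypothetical):

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static void demo_get_drvinfo(struct net_device *dev,
                             struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, "demo", sizeof(info->driver));
        strlcpy(info->version, "1.0", sizeof(info->version));
}

static const struct ethtool_ops demo_ethtool_ops = {
        .get_drvinfo = demo_get_drvinfo,
        .get_link    = ethtool_op_get_link,  /* generic carrier check */
};
/* attached in probe: netdev->ethtool_ops = &demo_ethtool_ops; */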
375 struct prot_inuse ;
376 struct netns_core { struct ctl_table_header *sysctl_hdr; int sysctl_somaxconn; struct prot_inuse *inuse; } ;
38 struct u64_stats_sync { } ;
164 struct ipstats_mib { u64 mibs[36U]; struct u64_stats_sync syncp; } ;
61 struct icmp_mib { unsigned long mibs[28U]; } ;
67 struct icmpmsg_mib { atomic_long_t mibs[512U]; } ;
72 struct icmpv6_mib { unsigned long mibs[6U]; } ;
83 struct icmpv6msg_mib { atomic_long_t mibs[512U]; } ;
93 struct tcp_mib { unsigned long mibs[16U]; } ;
100 struct udp_mib { unsigned long mibs[9U]; } ;
106 struct linux_mib { unsigned long mibs[119U]; } ;
112 struct linux_xfrm_mib { unsigned long mibs[29U]; } ;
118 struct netns_mib { struct tcp_mib *tcp_statistics; struct ipstats_mib *ip_statistics; struct linux_mib *net_statistics; struct udp_mib *udp_statistics; struct udp_mib *udplite_statistics; struct icmp_mib *icmp_statistics; struct icmpmsg_mib *icmpmsg_statistics; struct proc_dir_entry *proc_net_devsnmp6; struct udp_mib *udp_stats_in6; struct udp_mib *udplite_stats_in6; struct ipstats_mib *ipv6_statistics; struct icmpv6_mib *icmpv6_statistics; struct icmpv6msg_mib *icmpv6msg_statistics; struct linux_xfrm_mib *xfrm_statistics; } ;
26 struct netns_unix { int sysctl_max_dgram_qlen; struct ctl_table_header *ctl; } ;
12 struct netns_packet { struct mutex sklist_lock; struct hlist_head sklist; } ;
14 struct netns_frags { struct percpu_counter mem; int timeout; int high_thresh; int low_thresh; int max_dist; } ;
181 struct ipv4_devconf ;
182 struct fib_rules_ops ;
183 struct fib_table ;
184 struct local_ports { seqlock_t lock; int range[2U]; bool warned; } ;
24 struct ping_group_range { seqlock_t lock; kgid_t range[2U]; } ;
29 struct inet_hashinfo ;
30 struct inet_timewait_death_row { atomic_t tw_count; struct inet_hashinfo *hashinfo; int sysctl_tw_recycle; int sysctl_max_tw_buckets; } ;
39 struct inet_peer_base ;
39 struct xt_table ;
39 struct netns_ipv4 { struct ctl_table_header *forw_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *ipv4_hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *xfrm4_hdr; struct ipv4_devconf *devconf_all; struct ipv4_devconf *devconf_dflt; struct fib_rules_ops *rules_ops; bool fib_has_custom_rules; struct fib_table *fib_main; struct fib_table *fib_default; int fib_num_tclassid_users; struct hlist_head *fib_table_hash; bool fib_offload_disabled; struct sock *fibnl; struct sock **icmp_sk; struct sock *mc_autojoin_sk; struct inet_peer_base *peers; struct sock **tcp_sk; struct netns_frags frags; struct xt_table *iptable_filter; struct xt_table *iptable_mangle; struct xt_table *iptable_raw; struct xt_table *arptable_filter; struct xt_table *iptable_security; struct xt_table *nat_table; int sysctl_icmp_echo_ignore_all; int sysctl_icmp_echo_ignore_broadcasts; int sysctl_icmp_ignore_bogus_error_responses; int sysctl_icmp_ratelimit; int sysctl_icmp_ratemask; int sysctl_icmp_errors_use_inbound_ifaddr; struct local_ports ip_local_ports; int sysctl_tcp_ecn; int sysctl_tcp_ecn_fallback; int sysctl_ip_default_ttl; int sysctl_ip_no_pmtu_disc; int sysctl_ip_fwd_use_pmtu; int sysctl_ip_nonlocal_bind; int sysctl_ip_dynaddr; int sysctl_ip_early_demux; int sysctl_fwmark_reflect; int sysctl_tcp_fwmark_accept; int sysctl_tcp_l3mdev_accept; int sysctl_tcp_mtu_probing; int sysctl_tcp_base_mss; int sysctl_tcp_probe_threshold; u32 sysctl_tcp_probe_interval; int sysctl_tcp_keepalive_time; int sysctl_tcp_keepalive_probes; int sysctl_tcp_keepalive_intvl; int sysctl_tcp_syn_retries; int sysctl_tcp_synack_retries; int sysctl_tcp_syncookies; int sysctl_tcp_reordering; int sysctl_tcp_retries1; int sysctl_tcp_retries2; int sysctl_tcp_orphan_retries; int sysctl_tcp_fin_timeout; unsigned int sysctl_tcp_notsent_lowat; int sysctl_tcp_tw_reuse; struct inet_timewait_death_row tcp_death_row; int sysctl_max_syn_backlog; int sysctl_udp_l3mdev_accept; int sysctl_igmp_max_memberships; int sysctl_igmp_max_msf; int sysctl_igmp_llm_reports; int sysctl_igmp_qrv; struct ping_group_range ping_group_range; atomic_t dev_addr_genid; unsigned long *sysctl_local_reserved_ports; int sysctl_ip_prot_sock; struct list_head mr_tables; struct fib_rules_ops *mr_rules_ops; int sysctl_fib_multipath_use_neigh; unsigned int fib_seq; atomic_t rt_genid; } ;
162 struct neighbour ;
162 struct dst_ops { unsigned short family; unsigned int gc_thresh; int (*gc)(struct dst_ops *); struct dst_entry * (*check)(struct dst_entry *, __u32 ); unsigned int (*default_advmss)(const struct dst_entry *); unsigned int (*mtu)(const struct dst_entry *); u32 * (*cow_metrics)(struct dst_entry *, unsigned long); void (*destroy)(struct dst_entry *); void (*ifdown)(struct dst_entry *, struct net_device *, int); struct dst_entry * (*negative_advice)(struct dst_entry *); void (*link_failure)(struct sk_buff *); void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32 ); void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *); int (*local_out)(struct net *, struct sock *, struct sk_buff *); struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *); void (*confirm_neigh)(const struct dst_entry *, const void *); struct kmem_cache *kmem_cachep; struct percpu_counter pcpuc_entries; } ;
68 struct netns_sysctl_ipv6 { struct ctl_table_header *hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *icmp_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *xfrm6_hdr; int bindv6only; int flush_delay; int ip6_rt_max_size; int ip6_rt_gc_min_interval; int ip6_rt_gc_timeout; int ip6_rt_gc_interval; int ip6_rt_gc_elasticity; int ip6_rt_mtu_expires; int ip6_rt_min_advmss; int flowlabel_consistency; int auto_flowlabels; int icmpv6_time; int anycast_src_echo_reply; int ip_nonlocal_bind; int fwmark_reflect; int idgen_retries; int idgen_delay; int flowlabel_state_ranges; } ;
40 struct ipv6_devconf ;
40 struct rt6_info ;
40 struct rt6_statistics ;
40 struct fib6_table ;
40 struct seg6_pernet_data ;
40 struct netns_ipv6 { struct netns_sysctl_ipv6 sysctl; struct ipv6_devconf *devconf_all; struct ipv6_devconf *devconf_dflt; struct inet_peer_base *peers; struct netns_frags frags; struct xt_table *ip6table_filter; struct xt_table *ip6table_mangle; struct xt_table *ip6table_raw; struct xt_table *ip6table_security; struct xt_table *ip6table_nat; struct rt6_info *ip6_null_entry; struct rt6_statistics *rt6_stats; struct timer_list ip6_fib_timer; struct hlist_head *fib_table_hash; struct fib6_table *fib6_main_tbl; struct list_head fib6_walkers; struct dst_ops ip6_dst_ops; rwlock_t fib6_walker_lock; spinlock_t fib6_gc_lock; unsigned int ip6_rt_gc_expire; unsigned long ip6_rt_last_gc; struct rt6_info *ip6_prohibit_entry; struct rt6_info *ip6_blk_hole_entry; struct fib6_table *fib6_local_tbl; struct fib_rules_ops *fib6_rules_ops; struct sock **icmp_sk; struct sock *ndisc_sk; struct sock *tcp_sk; struct sock *igmp_sk; struct sock *mc_autojoin_sk; struct list_head mr6_tables; struct fib_rules_ops *mr6_rules_ops; atomic_t dev_addr_genid; atomic_t fib6_sernum; struct seg6_pernet_data *seg6_data; } ;
90 struct netns_nf_frag { struct netns_sysctl_ipv6 sysctl; struct netns_frags frags; } ;
96 struct netns_sysctl_lowpan { struct ctl_table_header *frags_hdr; } ;
14 struct netns_ieee802154_lowpan { struct netns_sysctl_lowpan sysctl; struct netns_frags frags; } ;
20 struct sctp_mib ;
21 struct netns_sctp { struct sctp_mib *sctp_statistics; struct proc_dir_entry *proc_net_sctp; struct ctl_table_header *sysctl_header; struct sock *ctl_sock; struct list_head local_addr_list; struct list_head addr_waitq; struct timer_list addr_wq_timer; struct list_head auto_asconf_splist; spinlock_t addr_wq_lock; spinlock_t local_addr_lock; unsigned int rto_initial; unsigned int rto_min; unsigned int rto_max; int rto_alpha; int rto_beta; int max_burst; int cookie_preserve_enable; char *sctp_hmac_alg; unsigned int valid_cookie_life; unsigned int sack_timeout; unsigned int hb_interval; int max_retrans_association; int max_retrans_path; int max_retrans_init; int pf_retrans; int pf_enable; int sndbuf_policy; int rcvbuf_policy; int default_auto_asconf; int addip_enable; int addip_noauth; int prsctp_enable; int reconf_enable; int auth_enable; int scope_policy; int rwnd_upd_shift; unsigned long max_autoclose; } ;
144 struct netns_dccp { struct sock *v4_ctl_sk; struct sock *v6_ctl_sk; } ;
78 struct nf_logger ;
79 struct nf_queue_handler ;
80 struct nf_hook_entry ;
80 struct netns_nf { struct proc_dir_entry *proc_netfilter; const struct nf_queue_handler *queue_handler; const struct nf_logger *nf_loggers[13U]; struct ctl_table_header *nf_log_dir_header; struct nf_hook_entry *hooks[13U][8U]; bool defrag_ipv4; bool defrag_ipv6; } ;
26 struct ebt_table ;
27 struct netns_xt { struct list_head tables[13U]; bool notrack_deprecated_warning; bool clusterip_deprecated_warning; struct ebt_table *broute_table; struct ebt_table *frame_filter; struct ebt_table *frame_nat; } ;
19 struct hlist_nulls_node ;
19 struct hlist_nulls_head { struct hlist_nulls_node *first; } ;
23 struct hlist_nulls_node { struct hlist_nulls_node *next; struct hlist_nulls_node **pprev; } ;
115 struct ip_conntrack_stat { unsigned int found; unsigned int invalid; unsigned int ignore; unsigned int insert; unsigned int insert_failed; unsigned int drop; unsigned int early_drop; unsigned int error; unsigned int expect_new; unsigned int expect_create; unsigned int expect_delete; unsigned int search_restart; } ;
13 struct nf_proto_net { struct ctl_table_header *ctl_table_header; struct ctl_table *ctl_table; unsigned int users; } ;
27 struct nf_generic_net { struct nf_proto_net pn; unsigned int timeout; } ;
32 struct nf_tcp_net { struct nf_proto_net pn; unsigned int timeouts[14U]; unsigned int tcp_loose; unsigned int tcp_be_liberal; unsigned int tcp_max_retrans; } ;
46 struct nf_udp_net { struct nf_proto_net pn; unsigned int timeouts[2U]; } ;
51 struct nf_icmp_net { struct nf_proto_net pn; unsigned int timeout; } ;
56 struct nf_dccp_net { struct nf_proto_net pn; int dccp_loose; unsigned int dccp_timeout[10U]; } ;
63 struct nf_sctp_net { struct nf_proto_net pn; unsigned int timeouts[10U]; } ;
70 struct nf_ip_net { struct nf_generic_net generic; struct nf_tcp_net tcp; struct nf_udp_net udp; struct nf_icmp_net icmp; struct nf_icmp_net icmpv6; struct nf_dccp_net dccp; struct nf_sctp_net sctp; } ;
84 struct ct_pcpu { spinlock_t lock; struct hlist_nulls_head unconfirmed; struct hlist_nulls_head dying; } ;
91 struct nf_ct_event_notifier ;
91 struct nf_exp_event_notifier ;
91 struct netns_ct { atomic_t count; unsigned int expect_count; struct delayed_work ecache_dwork; bool ecache_dwork_pending; struct ctl_table_header *sysctl_header; struct ctl_table_header *acct_sysctl_header; struct ctl_table_header *tstamp_sysctl_header; struct ctl_table_header *event_sysctl_header; struct ctl_table_header *helper_sysctl_header; unsigned int sysctl_log_invalid; int sysctl_events; int sysctl_acct; int sysctl_auto_assign_helper; bool auto_assign_helper_warned; int sysctl_tstamp; int sysctl_checksum; struct ct_pcpu *pcpu_lists; struct ip_conntrack_stat *stat; struct nf_ct_event_notifier *nf_conntrack_event_cb; struct nf_exp_event_notifier *nf_expect_event_cb; struct nf_ip_net nf_ct_proto; unsigned int labels_used; } ;
122 struct nft_af_info ;
123 struct netns_nftables { struct list_head af_info; struct list_head commit_list; struct nft_af_info *ipv4; struct nft_af_info *ipv6; struct nft_af_info *inet; struct nft_af_info *arp; struct nft_af_info *bridge; struct nft_af_info *netdev; unsigned int base_seq; u8 gencursor; } ;
509 struct flow_cache_percpu { struct hlist_head *hash_table; int hash_count; u32 hash_rnd; int hash_rnd_recalc; struct tasklet_struct flush_tasklet; } ;
16 struct flow_cache { u32 hash_shift; struct flow_cache_percpu *percpu; struct hlist_node node; int low_watermark; int high_watermark; struct timer_list rnd_timer; } ;
25 struct xfrm_policy_hash { struct hlist_head *table; unsigned int hmask; u8 dbits4; u8 sbits4; u8 dbits6; u8 sbits6; } ;
21 struct xfrm_policy_hthresh { struct work_struct work; seqlock_t lock; u8 lbits4; u8 rbits4; u8 lbits6; u8 rbits6; } ;
30 struct netns_xfrm { struct list_head state_all; struct hlist_head *state_bydst; struct hlist_head *state_bysrc; struct hlist_head *state_byspi; unsigned int state_hmask; unsigned int state_num; struct work_struct state_hash_work; struct list_head policy_all; struct hlist_head *policy_byidx; unsigned int policy_idx_hmask; struct hlist_head policy_inexact[3U]; struct xfrm_policy_hash policy_bydst[3U]; unsigned int policy_count[6U]; struct work_struct policy_hash_work; struct xfrm_policy_hthresh policy_hthresh; struct sock *nlsk; struct sock *nlsk_stash; u32 sysctl_aevent_etime; u32 sysctl_aevent_rseqth; int sysctl_larval_drop; u32 sysctl_acq_expires; struct ctl_table_header *sysctl_hdr; struct dst_ops xfrm4_dst_ops; struct dst_ops xfrm6_dst_ops; spinlock_t xfrm_state_lock; spinlock_t xfrm_policy_lock; struct mutex xfrm_cfg_mutex; struct flow_cache flow_cache_global; atomic_t flow_cache_genid; struct list_head flow_cache_gc_list; atomic_t flow_cache_gc_count; spinlock_t flow_cache_gc_lock; struct work_struct flow_cache_gc_work; struct work_struct flow_cache_flush_work; struct mutex flow_flush_sem; } ;
87 struct mpls_route ;
88 struct netns_mpls { size_t platform_labels; struct mpls_route **platform_label; struct ctl_table_header *ctl; } ;
16 struct proc_ns_operations ;
17 struct ns_common { atomic_long_t stashed; const struct proc_ns_operations *ops; unsigned int inum; } ;
11 struct net_generic ;
12 struct netns_ipvs ;
13 struct ucounts ;
13 struct net { atomic_t passive; atomic_t count; spinlock_t rules_mod_lock; atomic64_t cookie_gen; struct list_head list; struct list_head cleanup_list; struct list_head exit_list; struct user_namespace *user_ns; struct ucounts *ucounts; spinlock_t nsid_lock; struct idr netns_ids; struct ns_common ns; struct proc_dir_entry *proc_net; struct proc_dir_entry *proc_net_stat; struct ctl_table_set sysctls; struct sock *rtnl; struct sock *genl_sock; struct list_head dev_base_head; struct hlist_head *dev_name_head; struct hlist_head *dev_index_head; unsigned int dev_base_seq; int ifindex; unsigned int dev_unreg_count; struct list_head rules_ops; struct net_device *loopback_dev; struct netns_core core; struct netns_mib mib; struct netns_packet packet; struct netns_unix unx; struct netns_ipv4 ipv4; struct netns_ipv6 ipv6; struct netns_ieee802154_lowpan ieee802154_lowpan; struct netns_sctp sctp; struct netns_dccp dccp; struct netns_nf nf; struct netns_xt xt; struct netns_ct ct; struct netns_nftables nft; struct netns_nf_frag nf_frag; struct sock *nfnl; struct sock *nfnl_stash; struct list_head nfnl_acct_list; struct list_head nfct_timeout_list; struct sk_buff_head wext_nlevents; struct net_generic *gen; struct netns_xfrm xfrm; struct netns_ipvs *ipvs; struct netns_mpls mpls; struct sock *diag_nlsk; atomic_t fnhe_genid; } ;
248 struct __anonstruct_possible_net_t_454 { struct net *net; } ;
248 typedef struct __anonstruct_possible_net_t_454 possible_net_t;
383 enum fwnode_type { FWNODE_INVALID = 0, FWNODE_OF = 1, FWNODE_ACPI = 2, FWNODE_ACPI_DATA = 3, FWNODE_ACPI_STATIC = 4, FWNODE_PDATA = 5, FWNODE_IRQCHIP = 6 } ;
393 struct fwnode_handle { enum fwnode_type type; struct fwnode_handle *secondary; } ;
32 typedef u32 phandle;
34 struct property { char *name; int length; void *value; struct property *next; unsigned long _flags; unsigned int unique_id; struct bin_attribute attr; } ;
44 struct device_node { const char *name; const char *type; phandle phandle; const char *full_name; struct fwnode_handle fwnode; struct property *properties; struct property *deadprops; struct device_node *parent; struct device_node *child; struct device_node *sibling; struct kobject kobj; unsigned long _flags; void *data; } ;
1292 struct phy_device ;
1293 struct fixed_phy_status ;
1294 enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = 0, DSA_TAG_PROTO_DSA = 1, DSA_TAG_PROTO_TRAILER = 2, DSA_TAG_PROTO_EDSA = 3, DSA_TAG_PROTO_BRCM = 4, DSA_TAG_PROTO_QCA = 5, DSA_TAG_LAST = 6 } ;
1304 struct dsa_chip_data { struct device *host_dev; int sw_addr; struct device *netdev[12U]; int eeprom_len; struct device_node *of_node; char *port_names[12U]; struct device_node *port_dn[12U]; s8 rtable[4U]; } ;
80 struct dsa_platform_data { struct device *netdev; struct net_device *of_netdev; int nr_chips; struct dsa_chip_data *chip; } ;
96 struct packet_type ;
97 struct dsa_switch ;
97 struct dsa_device_ops ;
97 struct dsa_switch_tree { struct list_head list; struct raw_notifier_head nh; u32 tree; struct kref refcount; bool applied; struct dsa_platform_data *pd; struct net_device *master_netdev; int (*rcv)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); struct ethtool_ops master_ethtool_ops; const struct ethtool_ops *master_orig_ethtool_ops; struct dsa_switch *cpu_switch; s8 cpu_port; struct dsa_switch *ds[4U]; const struct dsa_device_ops *tag_ops; } ;
157 struct dsa_mall_mirror_tc_entry { u8 to_local_port; bool ingress; } ;
174 struct dsa_port { struct dsa_switch *ds; unsigned int index; const char *name; struct net_device *netdev; struct device_node *dn; unsigned int ageing_time; u8 stp_state; struct net_device *bridge_dev; } ;
186 struct dsa_switch_ops ;
186 struct mii_bus ;
186 struct dsa_switch { struct device *dev; struct dsa_switch_tree *dst; int index; struct notifier_block nb; void *priv; struct dsa_chip_data *cd; const struct dsa_switch_ops *ops; s8 rtable[4U]; struct net_device *master_netdev; u32 dsa_port_mask; u32 cpu_port_mask; u32 enabled_port_mask; u32 phys_mii_mask; struct mii_bus *slave_mii_bus; size_t num_ports; struct dsa_port ports[]; } ;
271 struct switchdev_trans ;
272 struct switchdev_obj ;
273 struct switchdev_obj_port_fdb ;
274 struct switchdev_obj_port_mdb ;
275 struct switchdev_obj_port_vlan ;
287 struct dsa_switch_ops { const char * (*probe)(struct device *, struct device *, int, void **); enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *); int (*setup)(struct dsa_switch *); int (*set_addr)(struct dsa_switch *, u8 *); u32 (*get_phy_flags)(struct dsa_switch *, int); int (*phy_read)(struct dsa_switch *, int, int); int (*phy_write)(struct dsa_switch *, int, int, u16 ); void (*adjust_link)(struct dsa_switch *, int, struct phy_device *); void (*fixed_link_update)(struct dsa_switch *, int, struct fixed_phy_status *); void (*get_strings)(struct dsa_switch *, int, uint8_t *); void (*get_ethtool_stats)(struct dsa_switch *, int, uint64_t *); int (*get_sset_count)(struct dsa_switch *); void (*get_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *); int (*set_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *); int (*suspend)(struct dsa_switch *); int (*resume)(struct dsa_switch *); int (*port_enable)(struct dsa_switch *, int, struct phy_device *); void (*port_disable)(struct dsa_switch *, int, struct phy_device *); int (*set_eee)(struct dsa_switch *, int, struct phy_device *, struct ethtool_eee *); int (*get_eee)(struct dsa_switch *, int, struct ethtool_eee *); int (*get_eeprom_len)(struct dsa_switch *); int (*get_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *); int (*get_regs_len)(struct dsa_switch *, int); void (*get_regs)(struct dsa_switch *, int, struct ethtool_regs *, void *); int (*set_ageing_time)(struct dsa_switch *, unsigned int); int (*port_bridge_join)(struct dsa_switch *, int, struct net_device *); void (*port_bridge_leave)(struct dsa_switch *, int, struct net_device *); void (*port_stp_state_set)(struct dsa_switch *, int, u8 ); void (*port_fast_age)(struct dsa_switch *, int); int (*port_vlan_filtering)(struct dsa_switch *, int, bool ); int (*port_vlan_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *); void (*port_vlan_add)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *); int (*port_vlan_del)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *); int (*port_vlan_dump)(struct dsa_switch *, int, struct switchdev_obj_port_vlan *, int (*)(struct switchdev_obj *)); int (*port_fdb_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *); void (*port_fdb_add)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *); int (*port_fdb_del)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *); int (*port_fdb_dump)(struct dsa_switch *, int, struct switchdev_obj_port_fdb *, int (*)(struct switchdev_obj *)); int (*port_mdb_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *, struct switchdev_trans *); void (*port_mdb_add)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *, struct switchdev_trans *); int (*port_mdb_del)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *); int (*port_mdb_dump)(struct dsa_switch *, int, struct switchdev_obj_port_mdb *, int (*)(struct switchdev_obj *)); int (*get_rxnfc)(struct dsa_switch *, int, struct ethtool_rxnfc *, u32 *); int (*set_rxnfc)(struct dsa_switch *, int, struct ethtool_rxnfc *); int (*port_mirror_add)(struct dsa_switch *, int, struct dsa_mall_mirror_tc_entry *, bool ); void (*port_mirror_del)(struct dsa_switch *, int, struct dsa_mall_mirror_tc_entry *); } ;
468 struct ieee_ets { __u8 willing; __u8 ets_cap; __u8 cbs; __u8 tc_tx_bw[8U]; __u8 tc_rx_bw[8U]; __u8 tc_tsa[8U]; __u8 prio_tc[8U]; __u8 tc_reco_bw[8U]; __u8 tc_reco_tsa[8U]; __u8 reco_prio_tc[8U]; } ;
69 struct ieee_maxrate { __u64 tc_maxrate[8U]; } ;
87 struct ieee_qcn { __u8 rpg_enable[8U]; __u32 rppp_max_rps[8U]; __u32 rpg_time_reset[8U]; __u32 rpg_byte_reset[8U]; __u32 rpg_threshold[8U]; __u32 rpg_max_rate[8U]; __u32 rpg_ai_rate[8U]; __u32 rpg_hai_rate[8U]; __u32 rpg_gd[8U]; __u32 rpg_min_dec_fac[8U]; __u32 rpg_min_rate[8U]; __u32 cndd_state_machine[8U]; } ;
132 struct ieee_qcn_stats { __u64 rppp_rp_centiseconds[8U]; __u32 rppp_created_rps[8U]; } ;
144 struct ieee_pfc { __u8 pfc_cap; __u8 pfc_en; __u8 mbc; __u16 delay; __u64 requests[8U]; __u64 indications[8U]; } ;
164 struct cee_pg { __u8 willing; __u8 error; __u8 pg_en; __u8 tcs_supported; __u8 pg_bw[8U]; __u8 prio_pg[8U]; } ;
187 struct cee_pfc { __u8 willing; __u8 error; __u8 pfc_en; __u8 tcs_supported; } ;
202 struct dcb_app { __u8 selector; __u8 priority; __u16 protocol; } ;
236 struct dcb_peer_app_info { __u8 willing; __u8 error; } ;
40 struct dcbnl_rtnl_ops { int (*ieee_getets)(struct net_device *, struct ieee_ets *); int (*ieee_setets)(struct net_device *, struct ieee_ets *); int (*ieee_getmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_setmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_getqcn)(struct net_device *, struct ieee_qcn *); int (*ieee_setqcn)(struct net_device *, struct ieee_qcn *); int (*ieee_getqcnstats)(struct net_device *, struct ieee_qcn_stats *); int (*ieee_getpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_setpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_getapp)(struct net_device *, struct dcb_app *); int (*ieee_setapp)(struct net_device *, struct dcb_app *); int (*ieee_delapp)(struct net_device *, struct dcb_app *); int (*ieee_peer_getets)(struct net_device *, struct ieee_ets *); int (*ieee_peer_getpfc)(struct net_device *, struct ieee_pfc *); u8 (*getstate)(struct net_device *); u8 (*setstate)(struct net_device *, u8 ); void (*getpermhwaddr)(struct net_device *, u8 *); void (*setpgtccfgtx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgtx)(struct net_device *, int, u8 ); void (*setpgtccfgrx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgrx)(struct net_device *, int, u8 ); void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgtx)(struct net_device *, int, u8 *); void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgrx)(struct net_device *, int, u8 *); void (*setpfccfg)(struct net_device *, int, u8 ); void (*getpfccfg)(struct net_device *, int, u8 *); u8 (*setall)(struct net_device *); u8 (*getcap)(struct net_device *, int, u8 *); int (*getnumtcs)(struct net_device *, int, u8 *); int (*setnumtcs)(struct net_device *, int, u8 ); u8 (*getpfcstate)(struct net_device *); void (*setpfcstate)(struct net_device *, u8 ); void (*getbcncfg)(struct net_device *, int, u32 *); void (*setbcncfg)(struct net_device *, int, u32 ); void (*getbcnrp)(struct net_device *, int, u8 *); void (*setbcnrp)(struct net_device *, int, u8 ); int (*setapp)(struct net_device *, u8 , u16 , u8 ); int (*getapp)(struct net_device *, u8 , u16 ); u8 (*getfeatcfg)(struct net_device *, int, u8 *); u8 (*setfeatcfg)(struct net_device *, int, u8 ); u8 (*getdcbx)(struct net_device *); u8 (*setdcbx)(struct net_device *, u8 ); int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, u16 *); int (*peer_getapptable)(struct net_device *, struct dcb_app *); int (*cee_peer_getpg)(struct net_device *, struct cee_pg *); int (*cee_peer_getpfc)(struct net_device *, struct cee_pfc *); } ;
58 struct mnt_namespace ;
59 struct uts_namespace ;
60 struct ipc_namespace ;
61 struct cgroup_namespace ;
62 struct nsproxy { atomic_t count; struct uts_namespace *uts_ns; struct ipc_namespace *ipc_ns; struct mnt_namespace *mnt_ns; struct pid_namespace *pid_ns_for_children; struct net *net_ns; struct cgroup_namespace *cgroup_ns; } ;
86 struct uid_gid_extent { u32 first; u32 lower_first; u32 count; } ;
22 struct uid_gid_map { u32 nr_extents; struct uid_gid_extent extent[5U]; } ;
36 struct user_namespace { struct uid_gid_map uid_map; struct uid_gid_map gid_map; struct uid_gid_map projid_map; atomic_t count; struct user_namespace *parent; int level; kuid_t owner; kgid_t group; struct ns_common ns; unsigned long flags; struct key *persistent_keyring_register; struct rw_semaphore persistent_keyring_register_sem; struct work_struct work; struct ctl_table_set set; struct ctl_table_header *sysctls; struct ucounts *ucounts; int ucount_max[9U]; } ;
70 struct ucounts { struct hlist_node node; struct user_namespace *ns; kuid_t uid; atomic_t count; atomic_t ucount[9U]; } ;
635 struct cgroup ;
14 struct bpf_prog ;
14 struct cgroup_bpf { struct bpf_prog *prog[3U]; struct bpf_prog *effective[3U]; bool disallow_override[3U]; } ;
43 struct cgroup_root ;
44 struct cgroup_subsys ;
45 struct cgroup_taskset ;
90 struct cgroup_file { struct kernfs_node *kn; } ;
91 struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; struct percpu_ref refcnt; struct cgroup_subsys_state *parent; struct list_head sibling; struct list_head children; int id; unsigned int flags; u64 serial_nr; atomic_t online_cnt; struct callback_head callback_head; struct work_struct destroy_work; } ;
142 struct css_set { struct cgroup_subsys_state *subsys[14U]; atomic_t refcount; struct cgroup *dfl_cgrp; struct list_head tasks; struct list_head mg_tasks; struct list_head task_iters; struct list_head e_cset_node[14U]; struct hlist_node hlist; struct list_head cgrp_links; struct list_head mg_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct cgroup *mg_dst_cgrp; struct css_set *mg_dst_cset; bool dead; struct callback_head callback_head; } ;
222 struct cgroup { struct cgroup_subsys_state self; unsigned long flags; int id; int level; int populated_cnt; struct kernfs_node *kn; struct cgroup_file procs_file; struct cgroup_file events_file; u16 subtree_control; u16 subtree_ss_mask; u16 old_subtree_control; u16 old_subtree_ss_mask; struct cgroup_subsys_state *subsys[14U]; struct cgroup_root *root; struct list_head cset_links; struct list_head e_csets[14U]; struct list_head pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; struct work_struct release_agent_work; struct cgroup_bpf bpf; int ancestor_ids[]; } ;
310 struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int hierarchy_id; struct cgroup cgrp; int cgrp_ancestor_id_storage; atomic_t nr_cgrps; struct list_head root_list; unsigned int flags; struct idr cgroup_idr; char release_agent_path[4096U]; char name[64U]; } ;
349 struct cftype { char name[64U]; unsigned long private; size_t max_write_len; unsigned int flags; unsigned int file_offset; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; int (*open)(struct kernfs_open_file *); void (*release)(struct kernfs_open_file *); u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 ); int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 ); ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); struct lock_class_key lockdep_key; } ;
437 struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int (*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_released)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); void (*css_reset)(struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_taskset *); void (*attach)(struct cgroup_taskset *); void (*post_attach)(); int (*can_fork)(struct task_struct *); void (*cancel_fork)(struct task_struct *); void (*fork)(struct task_struct *); void (*exit)(struct task_struct *); void (*free)(struct task_struct *); void (*bind)(struct cgroup_subsys_state *); bool early_init; bool implicit_on_dfl; bool broken_hierarchy; bool warned_broken_hierarchy; int id; const char *name; const char *legacy_name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *dfl_cftypes; struct cftype *legacy_cftypes; unsigned int depends_on; } ;
631 struct cgroup_namespace { atomic_t count; struct ns_common ns; struct user_namespace *user_ns; struct ucounts *ucounts; struct css_set *root_cset; } ;
686 struct netprio_map { struct callback_head rcu; u32 priomap_len; u32 priomap[]; } ;
42 struct nlmsghdr { __u32 nlmsg_len; __u16 nlmsg_type; __u16 nlmsg_flags; __u32 nlmsg_seq; __u32 nlmsg_pid; } ;
144 struct nlattr { __u16 nla_len; __u16 nla_type; } ;
105 struct netlink_callback { struct sk_buff *skb; const struct nlmsghdr *nlh; int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); void *data; struct module *module; u16 family; u16 min_dump_alloc; unsigned int prev_seq; unsigned int seq; long args[6U]; } ;
183 struct ndmsg { __u8 ndm_family; __u8 ndm_pad1; __u16 ndm_pad2; __s32 ndm_ifindex; __u16 ndm_state; __u8 ndm_flags; __u8 ndm_type; } ;
41 struct rtnl_link_stats64 { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 rx_errors; __u64 tx_errors; __u64 rx_dropped; __u64 tx_dropped; __u64 multicast; __u64 collisions; __u64 rx_length_errors; __u64 rx_over_errors; __u64 rx_crc_errors; __u64 rx_frame_errors; __u64 rx_fifo_errors; __u64 rx_missed_errors; __u64 tx_aborted_errors; __u64 tx_carrier_errors; __u64 tx_fifo_errors; __u64 tx_heartbeat_errors; __u64 tx_window_errors; __u64 rx_compressed; __u64 tx_compressed; __u64 rx_nohandler; } ;
872 struct ifla_vf_stats { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 broadcast; __u64 multicast; } ;
16 struct ifla_vf_info { __u32 vf; __u8 mac[32U]; __u32 vlan; __u32 qos; __u32 spoofchk; __u32 linkstate; __u32 min_tx_rate; __u32 max_tx_rate; __u32 rss_query_en; __u32 trusted; __be16 vlan_proto; } ;
118 struct tc_stats { __u64 bytes; __u32 packets; __u32 drops; __u32 overlimits; __u32 bps; __u32 pps; __u32 qlen; __u32 backlog; } ;
96 struct tc_sizespec { unsigned char cell_log; unsigned char size_log; short cell_align; int overhead; unsigned int linklayer; unsigned int mpu; unsigned int mtu; unsigned int tsize; } ;
117 struct netpoll_info ;
118 struct wireless_dev ;
119 struct wpan_dev ;
120 struct mpls_dev ;
121 struct udp_tunnel_info ;
70 enum netdev_tx { __NETDEV_TX_MIN = -2147483648, NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 16 } ;
113 typedef enum netdev_tx netdev_tx_t;
132 struct net_device_stats { unsigned long rx_packets; unsigned long tx_packets; unsigned long rx_bytes; unsigned long tx_bytes; unsigned long rx_errors; unsigned long tx_errors; unsigned long rx_dropped; unsigned long tx_dropped; unsigned long multicast; unsigned long collisions; unsigned long rx_length_errors; unsigned long rx_over_errors; unsigned long rx_crc_errors; unsigned long rx_frame_errors; unsigned long rx_fifo_errors; unsigned long rx_missed_errors; unsigned long tx_aborted_errors; unsigned long tx_carrier_errors; unsigned long tx_fifo_errors; unsigned long tx_heartbeat_errors; unsigned long tx_window_errors; unsigned long rx_compressed; unsigned long tx_compressed; } ;
196 struct neigh_parms ;
217 struct netdev_hw_addr_list { struct list_head list; int count; } ;
222 struct hh_cache { u16 hh_len; u16 __pad; seqlock_t hh_lock; unsigned long hh_data[16U]; } ;
251 struct header_ops { int (*create)(struct sk_buff *, struct net_device *, unsigned short, const void *, const void *, unsigned int); int (*parse)(const struct sk_buff *, unsigned char *); int (*cache)(const struct neighbour *, struct hh_cache *, __be16 ); void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *); bool (*validate)(const char *, unsigned int); } ;
360 enum rx_handler_result { RX_HANDLER_CONSUMED = 0, RX_HANDLER_ANOTHER = 1, RX_HANDLER_EXACT = 2, RX_HANDLER_PASS = 3 } ;
408 typedef enum rx_handler_result rx_handler_result_t;
409 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **);
530 struct Qdisc ;
530 struct netdev_queue { struct net_device *dev; struct Qdisc *qdisc; struct Qdisc *qdisc_sleeping; struct kobject kobj; int numa_node; unsigned long tx_maxrate; unsigned long trans_timeout; spinlock_t _xmit_lock; int xmit_lock_owner; unsigned long trans_start; unsigned long state; struct dql dql; } ;
601 struct rps_map { unsigned int len; struct callback_head rcu; u16 cpus[0U]; } ;
613 struct rps_dev_flow { u16 cpu; u16 filter; unsigned int last_qtail; } ;
625 struct rps_dev_flow_table { unsigned int mask; struct callback_head rcu; struct rps_dev_flow flows[0U]; } ;
677 struct netdev_rx_queue { struct rps_map *rps_map; struct rps_dev_flow_table *rps_flow_table; struct kobject kobj; struct net_device *dev; } ;
700 struct xps_map { unsigned int len; unsigned int alloc_len; struct callback_head rcu; u16 queues[0U]; } ;
713 struct xps_dev_maps { struct callback_head rcu; struct xps_map *cpu_map[0U]; } ;
724 struct netdev_tc_txq { u16 count; u16 offset; } ;
735 struct netdev_fcoe_hbainfo { char manufacturer[64U]; char serial_number[64U]; char hardware_version[64U]; char driver_version[64U]; char optionrom_version[64U]; char firmware_version[64U]; char model[256U]; char model_description[256U]; } ;
751 struct netdev_phys_item_id { unsigned char id[32U]; unsigned char id_len; } ;
779 struct tc_cls_u32_offload ;
780 struct tc_cls_flower_offload ;
780 struct tc_cls_matchall_offload ;
780 struct tc_cls_bpf_offload ;
780 union __anonunion____missing_field_name_485 { u8 tc; struct tc_cls_u32_offload *cls_u32; struct tc_cls_flower_offload *cls_flower; struct tc_cls_matchall_offload *cls_mall; struct tc_cls_bpf_offload *cls_bpf; } ;
780 struct tc_to_netdev { unsigned int type; union __anonunion____missing_field_name_485 __annonCompField116; bool egress_dev; } ;
797 enum xdp_netdev_command { XDP_SETUP_PROG = 0, XDP_QUERY_PROG = 1 } ;
802 union __anonunion____missing_field_name_486 { struct bpf_prog *prog; bool prog_attached; } ;
802 struct netdev_xdp { enum xdp_netdev_command command; union __anonunion____missing_field_name_486 __annonCompField117; } ;
825 struct net_device_ops { int (*ndo_init)(struct net_device *); void (*ndo_uninit)(struct net_device *); int (*ndo_open)(struct net_device *); int (*ndo_stop)(struct net_device *); netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *); netdev_features_t (*ndo_features_check)(struct sk_buff *, struct net_device *, netdev_features_t ); u16 (*ndo_select_queue)(struct net_device *, struct sk_buff *, void *, u16 (*)(struct net_device *, struct sk_buff *)); void (*ndo_change_rx_flags)(struct net_device *, int); void (*ndo_set_rx_mode)(struct net_device *); int (*ndo_set_mac_address)(struct net_device *, void *); int (*ndo_validate_addr)(struct net_device *); int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int); int (*ndo_set_config)(struct net_device *, struct ifmap *); int (*ndo_change_mtu)(struct net_device *, int); int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *); void (*ndo_tx_timeout)(struct net_device *); void (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *); bool (*ndo_has_offload_stats)(const struct net_device *, int); int (*ndo_get_offload_stats)(int, const struct net_device *, void *); struct net_device_stats * (*ndo_get_stats)(struct net_device *); int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16 , u16 ); int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16 , u16 ); void (*ndo_poll_controller)(struct net_device *); int (*ndo_netpoll_setup)(struct net_device *, struct netpoll_info *); void (*ndo_netpoll_cleanup)(struct net_device *); int (*ndo_set_vf_mac)(struct net_device *, int, u8 *); int (*ndo_set_vf_vlan)(struct net_device *, int, u16 , u8 , __be16 ); int (*ndo_set_vf_rate)(struct net_device *, int, int, int); int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool ); int (*ndo_set_vf_trust)(struct net_device *, int, bool ); int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *); int (*ndo_set_vf_link_state)(struct net_device *, int, int); int (*ndo_get_vf_stats)(struct net_device *, int, struct ifla_vf_stats *); int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **); int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *); int (*ndo_set_vf_guid)(struct net_device *, int, u64 , int); int (*ndo_set_vf_rss_query_en)(struct net_device *, int, bool ); int (*ndo_setup_tc)(struct net_device *, u32 , __be16 , struct tc_to_netdev *); int (*ndo_fcoe_enable)(struct net_device *); int (*ndo_fcoe_disable)(struct net_device *); int (*ndo_fcoe_ddp_setup)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_ddp_done)(struct net_device *, u16 ); int (*ndo_fcoe_ddp_target)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_get_hbainfo)(struct net_device *, struct netdev_fcoe_hbainfo *); int (*ndo_fcoe_get_wwn)(struct net_device *, u64 *, int); int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16 , u32 ); int (*ndo_add_slave)(struct net_device *, struct net_device *); int (*ndo_del_slave)(struct net_device *, struct net_device *); netdev_features_t (*ndo_fix_features)(struct net_device *, netdev_features_t ); int (*ndo_set_features)(struct net_device *, netdev_features_t ); int (*ndo_neigh_construct)(struct net_device *, struct neighbour *); void (*ndo_neigh_destroy)(struct net_device *, struct neighbour *); int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 , u16 ); int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 ); int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, struct net_device *, int *); int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *, u16 ); int (*ndo_bridge_getlink)(struct sk_buff *, u32 , u32 , struct net_device *, u32 , int); int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *, u16 ); int (*ndo_change_carrier)(struct net_device *, bool ); int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_item_id *); int (*ndo_get_phys_port_name)(struct net_device *, char *, size_t ); void (*ndo_udp_tunnel_add)(struct net_device *, struct udp_tunnel_info *); void (*ndo_udp_tunnel_del)(struct net_device *, struct udp_tunnel_info *); void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *); void (*ndo_dfwd_del_station)(struct net_device *, void *); netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *, struct net_device *, void *); int (*ndo_get_lock_subclass)(struct net_device *); int (*ndo_set_tx_maxrate)(struct net_device *, int, u32 ); int (*ndo_get_iflink)(const struct net_device *); int (*ndo_change_proto_down)(struct net_device *, bool ); int (*ndo_fill_metadata_dst)(struct net_device *, struct sk_buff *); void (*ndo_set_rx_headroom)(struct net_device *, int); int (*ndo_xdp)(struct net_device *, struct netdev_xdp *); } ;
1360 struct __anonstruct_adj_list_487 { struct list_head upper; struct list_head lower; } ;
1360 struct iw_handler_def ;
1360 struct iw_public_data ;
1360 struct switchdev_ops ;
1360 struct l3mdev_ops ;
1360 struct ndisc_ops ;
1360 struct vlan_info ;
1360 struct tipc_bearer ;
1360 struct in_device ;
1360 struct dn_dev ;
1360 struct inet6_dev ;
1360 struct tcf_proto ;
1360 struct cpu_rmap ;
1360 struct pcpu_lstats ;
1360 struct pcpu_sw_netstats ;
1360 struct pcpu_dstats ;
1360 struct pcpu_vstats ;
1360 union __anonunion____missing_field_name_488 { void *ml_priv; struct pcpu_lstats *lstats; struct pcpu_sw_netstats *tstats; struct pcpu_dstats *dstats; struct pcpu_vstats *vstats; } ;
1360 struct garp_port ;
1360 struct mrp_port ;
1360 struct rtnl_link_ops ;
1360 struct net_device { char name[16U]; struct hlist_node name_hlist; char *ifalias; unsigned long mem_end; unsigned long mem_start; unsigned long base_addr; int irq; atomic_t carrier_changes; unsigned long state; struct list_head dev_list; struct list_head napi_list; struct list_head unreg_list; struct list_head close_list; struct list_head ptype_all; struct list_head ptype_specific; struct __anonstruct_adj_list_487 adj_list; netdev_features_t features; netdev_features_t hw_features; netdev_features_t wanted_features; netdev_features_t vlan_features; netdev_features_t hw_enc_features; netdev_features_t mpls_features; netdev_features_t gso_partial_features; int ifindex; int group; struct net_device_stats stats; atomic_long_t rx_dropped; atomic_long_t tx_dropped; atomic_long_t rx_nohandler; const struct iw_handler_def *wireless_handlers; struct iw_public_data *wireless_data; const struct net_device_ops *netdev_ops; const struct ethtool_ops *ethtool_ops; const struct switchdev_ops *switchdev_ops; const struct l3mdev_ops *l3mdev_ops; const struct ndisc_ops *ndisc_ops; const struct header_ops *header_ops; unsigned int flags; unsigned int priv_flags; unsigned short gflags; unsigned short padded; unsigned char operstate; unsigned char link_mode; unsigned char if_port; unsigned char dma; unsigned int mtu; unsigned int min_mtu; unsigned int max_mtu; unsigned short type; unsigned short hard_header_len; unsigned short min_header_len; unsigned short needed_headroom; unsigned short needed_tailroom; unsigned char perm_addr[32U]; unsigned char addr_assign_type; unsigned char addr_len; unsigned short neigh_priv_len; unsigned short dev_id; unsigned short dev_port; spinlock_t addr_list_lock; unsigned char name_assign_type; bool uc_promisc; struct netdev_hw_addr_list uc; struct netdev_hw_addr_list mc; struct netdev_hw_addr_list dev_addrs; struct kset *queues_kset; unsigned int promiscuity; unsigned int allmulti; struct vlan_info *vlan_info; struct dsa_switch_tree *dsa_ptr; struct tipc_bearer *tipc_ptr; void *atalk_ptr; struct in_device *ip_ptr; struct dn_dev *dn_ptr; struct inet6_dev *ip6_ptr; void *ax25_ptr; struct wireless_dev *ieee80211_ptr; struct wpan_dev *ieee802154_ptr; struct mpls_dev *mpls_ptr; unsigned char *dev_addr; struct netdev_rx_queue *_rx; unsigned int num_rx_queues; unsigned int real_num_rx_queues; unsigned long gro_flush_timeout; rx_handler_func_t *rx_handler; void *rx_handler_data; struct tcf_proto *ingress_cl_list; struct netdev_queue *ingress_queue; struct nf_hook_entry *nf_hooks_ingress; unsigned char broadcast[32U]; struct cpu_rmap *rx_cpu_rmap; struct hlist_node index_hlist; struct netdev_queue *_tx; unsigned int num_tx_queues; unsigned int real_num_tx_queues; struct Qdisc *qdisc; struct hlist_head qdisc_hash[16U]; unsigned long tx_queue_len; spinlock_t tx_global_lock; int watchdog_timeo; struct xps_dev_maps *xps_maps; struct tcf_proto *egress_cl_list; struct timer_list watchdog_timer; int *pcpu_refcnt; struct list_head todo_list; struct list_head link_watch_list; unsigned char reg_state; bool dismantle; unsigned short rtnl_link_state; void (*destructor)(struct net_device *); struct netpoll_info *npinfo; possible_net_t nd_net; union __anonunion____missing_field_name_488 __annonCompField118; struct garp_port *garp_port; struct mrp_port *mrp_port; struct device dev; const struct attribute_group *sysfs_groups[4U]; const struct attribute_group *sysfs_rx_queue_group; const struct rtnl_link_ops *rtnl_link_ops; unsigned int gso_max_size; u16 gso_max_segs; const struct dcbnl_rtnl_ops *dcbnl_ops; u8 num_tc; struct netdev_tc_txq tc_to_txq[16U]; u8 prio_tc_map[16U]; unsigned int fcoe_ddp_xid; struct netprio_map *priomap; struct phy_device *phydev; struct lock_class_key *qdisc_tx_busylock; struct lock_class_key *qdisc_running_key; bool proto_down; } ;
2185 struct packet_type { __be16 type; struct net_device *dev; int (*func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); bool (*id_match)(struct packet_type *, struct sock *); void *af_packet_priv; struct list_head list; } ;
2213 struct pcpu_sw_netstats { u64 rx_packets; u64 rx_bytes; u64 tx_packets; u64 tx_bytes; struct u64_stats_sync syncp; } ;
3191 enum skb_free_reason { SKB_REASON_CONSUMED = 0, SKB_REASON_DROPPED = 1 } ;
162 struct if_irda_qos { unsigned long baudrate; unsigned short data_size; unsigned short window_size; unsigned short min_turn_time; unsigned short max_turn_time; unsigned char add_bofs; unsigned char link_disc; } ;
188 struct if_irda_line { __u8 dtr; __u8 rts; } ;
194 union __anonunion_ifr_ifrn_496 { char ifrn_name[16U]; } ;
194 union __anonunion_ifr_ifru_497 { struct if_irda_line ifru_line; struct if_irda_qos ifru_qos; unsigned short ifru_flags; unsigned int ifru_receiving; unsigned int ifru_mode; unsigned int ifru_dongle; } ;
194 struct if_irda_req { union __anonunion_ifr_ifrn_496 ifr_ifrn; union __anonunion_ifr_ifru_497 ifr_ifru; } ;
34 typedef __u32 magic_t;
16 struct cdev { struct kobject kobj; struct module *owner; const struct file_operations *ops; struct list_head list; dev_t dev; unsigned int count; } ;
521 struct tcmsg { unsigned char tcm_family; unsigned char tcm__pad1; unsigned short tcm__pad2; int tcm_ifindex; __u32 tcm_handle; __u32 tcm_parent; __u32 tcm_info; } ;
27 struct gnet_stats_basic_packed { __u64 bytes; __u32 packets; } ;
51 struct gnet_stats_queue { __u32 qlen; __u32 backlog; __u32 drops; __u32 requeues; __u32 overlimits; } ;
77 struct gnet_stats_basic_cpu { struct gnet_stats_basic_packed bstats; struct u64_stats_sync syncp; } ;
13 struct net_rate_estimator ;
14 struct gnet_dump { spinlock_t *lock; struct sk_buff *skb; struct nlattr *tail; int compat_tc_stats; int compat_xstats; int padattr; void *xstats; int xstats_len; struct tc_stats tc_stats; } ;
88 struct nla_policy { u16 type; u16 len; } ;
25 struct rtnl_link_ops { struct list_head list; const char *kind; size_t priv_size; void (*setup)(struct net_device *); int maxtype; const struct nla_policy *policy; int (*validate)(struct nlattr **, struct nlattr **); int (*newlink)(struct net *, struct net_device *, struct nlattr **, struct nlattr **); int (*changelink)(struct net_device *, struct nlattr **, struct nlattr **); void (*dellink)(struct net_device *, struct list_head *); size_t (*get_size)(const struct net_device *); int (*fill_info)(struct sk_buff *, const struct net_device *); size_t (*get_xstats_size)(const struct net_device *); int (*fill_xstats)(struct sk_buff *, const struct net_device *); unsigned int (*get_num_tx_queues)(); unsigned int (*get_num_rx_queues)(); int slave_maxtype; const struct nla_policy *slave_policy; int (*slave_validate)(struct nlattr **, struct nlattr **); int (*slave_changelink)(struct net_device *, struct net_device *, struct nlattr **, struct nlattr **); size_t (*get_slave_size)(const struct net_device *, const struct net_device *); int (*fill_slave_info)(struct sk_buff *, const struct net_device *, const struct net_device *); struct net * (*get_link_net)(const struct net_device *); size_t (*get_linkxstats_size)(const struct net_device *, int); int (*fill_linkxstats)(struct sk_buff *, const struct net_device *, int *, int); } ;
162 struct Qdisc_ops ;
163 struct qdisc_walker ;
164 struct tcf_walker ;
30 struct qdisc_size_table { struct callback_head rcu; struct list_head list; struct tc_sizespec szopts; int refcnt; u16 data[]; } ;
38 struct qdisc_skb_head { struct sk_buff *head; struct sk_buff *tail; __u32 qlen; spinlock_t lock; } ;
46 struct Qdisc { int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); struct sk_buff * (*dequeue)(struct Qdisc *); unsigned int flags; u32 limit; const struct Qdisc_ops *ops; struct qdisc_size_table *stab; struct hlist_node hash; u32 handle; u32 parent; void *u32_node; struct netdev_queue *dev_queue; struct net_rate_estimator *rate_est; struct gnet_stats_basic_cpu *cpu_bstats; struct gnet_stats_queue *cpu_qstats; struct sk_buff *gso_skb; struct qdisc_skb_head q; struct gnet_stats_basic_packed bstats; seqcount_t running; struct gnet_stats_queue qstats; unsigned long state; struct Qdisc *next_sched; struct sk_buff *skb_bad_txq; struct callback_head callback_head; int padded; atomic_t refcnt; spinlock_t busylock; } ;
134 struct Qdisc_class_ops { struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); int (*graft)(struct Qdisc *, unsigned long, struct Qdisc *, struct Qdisc **); struct Qdisc * (*leaf)(struct Qdisc *, unsigned long); void (*qlen_notify)(struct Qdisc *, unsigned long); unsigned long int (*get)(struct Qdisc *, u32 ); void (*put)(struct Qdisc *, unsigned long); int (*change)(struct Qdisc *, u32 , u32 , struct nlattr **, unsigned long *); int (*delete)(struct Qdisc *, unsigned long); void (*walk)(struct Qdisc *, struct qdisc_walker *); struct tcf_proto ** (*tcf_chain)(struct Qdisc *, unsigned long); bool (*tcf_cl_offload)(u32 ); unsigned long int (*bind_tcf)(struct Qdisc *, unsigned long, u32 ); void (*unbind_tcf)(struct Qdisc *, unsigned long); int (*dump)(struct Qdisc *, unsigned long, struct sk_buff *, struct tcmsg *); int (*dump_stats)(struct Qdisc *, unsigned long, struct gnet_dump *); } ;
166 struct Qdisc_ops { struct Qdisc_ops *next; const struct Qdisc_class_ops *cl_ops; char id[16U]; int priv_size; int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); struct sk_buff * (*dequeue)(struct Qdisc *); struct sk_buff * (*peek)(struct Qdisc *); int (*init)(struct Qdisc *, struct nlattr *); void (*reset)(struct Qdisc *); void (*destroy)(struct Qdisc *); int (*change)(struct Qdisc *, struct nlattr *); void (*attach)(struct Qdisc *); int (*dump)(struct Qdisc *, struct sk_buff *); int (*dump_stats)(struct Qdisc *, struct gnet_dump *); struct module *owner; } ;
191 struct tcf_result { unsigned long class; u32 classid; } ;
197 struct tcf_proto_ops { struct list_head head; char kind[16U]; int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); int (*init)(struct tcf_proto *); bool (*destroy)(struct tcf_proto *, bool ); unsigned long int (*get)(struct tcf_proto *, u32 ); int (*change)(struct net *, struct sk_buff *, struct tcf_proto *, unsigned long, u32 , struct nlattr **, unsigned long *, bool ); int (*delete)(struct tcf_proto *, unsigned long); void (*walk)(struct tcf_proto *, struct tcf_walker *); int (*dump)(struct net *, struct tcf_proto *, unsigned long, struct sk_buff *, struct tcmsg *); struct module *owner; } ;
222 struct tcf_proto { struct tcf_proto *next; void *root; int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); __be16 protocol; u32 prio; u32 classid; struct Qdisc *q; void *data; const struct tcf_proto_ops *ops; struct callback_head rcu; } ;
862 struct qdisc_walker { int stop; int skip; int count; int (*fn)(struct Qdisc *, unsigned long, struct qdisc_walker *); } ;
64 struct __anonstruct_qos_value_t_522 { __u32 value; __u16 bits; } ;
64 typedef struct __anonstruct_qos_value_t_522 qos_value_t;
65 struct qos_info { magic_t magic; qos_value_t baud_rate; qos_value_t max_turn_time; qos_value_t data_size; qos_value_t window_size; qos_value_t additional_bofs; qos_value_t min_turn_time; qos_value_t link_disc_time; qos_value_t power; } ;
93 struct irlap_cb ;
133 struct irda_skb_cb { unsigned int default_qdisc_pad; magic_t magic; __u32 next_speed; __u16 mtt; __u16 xbofs; __u16 next_xbofs; void *context; void (*destructor)(struct sk_buff *); __u16 xbofs_delay; __u8 line; } ;
438 struct __anonstruct_rd_s_527 { u8 addr_res[3U]; volatile u8 status; } ;
438 union __anonunion_rd_u_526 { __le32 addr; struct __anonstruct_rd_s_527 rd_s; } ;
438 struct ring_descr_hw { volatile __le16 rd_count; __le16 reserved; union __anonunion_rd_u_526 rd_u; } ;
548 struct ring_descr { struct ring_descr_hw *hw; struct sk_buff *skb; void *buf; } ;
654 struct vlsi_ring { struct pci_dev *pdev; int dir; unsigned int len; unsigned int size; unsigned int mask; atomic_t head; atomic_t tail; struct ring_descr *rd; } ;
707 struct vlsi_irda_dev { struct pci_dev *pdev; struct irlap_cb *irlap; struct qos_info qos; unsigned int mode; int baud; int new_baud; dma_addr_t busaddr; void *virtaddr; struct vlsi_ring *tx_ring; struct vlsi_ring *rx_ring; ktime_t last_rx; spinlock_t lock; struct mutex mtx; u8 resume_ok; struct proc_dir_entry *proc_entry; } ;
734 typedef struct vlsi_irda_dev vlsi_irda_dev_t;
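The private state above is conventionally carved out of the net_device allocation made by alloc_irdadev() and recovered via netdev_priv(); a minimal sketch of that pattern, assuming the usual allocation convention (the helper name demo_get_idev is illustrative, not from the trace):

/* Sketch (assumption): private area follows the net_device allocation. */
static vlsi_irda_dev_t *demo_get_idev(void)
{
	struct net_device *ndev = alloc_irdadev(sizeof(vlsi_irda_dev_t));
	return ndev ? netdev_priv(ndev) : (vlsi_irda_dev_t *)0;
}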
1 void * __builtin_memcpy(void *, const void *, unsigned long);
1 long int __builtin_expect(long, long);
252 void __read_once_size(const volatile void *p, void *res, int size);
277 void __write_once_size(volatile void *p, void *res, int size);
34 extern struct module __this_module;
72 void set_bit(long nr, volatile unsigned long *addr);
110 void clear_bit(long nr, volatile unsigned long *addr);
321 bool constant_test_bit(long nr, const volatile unsigned long *addr);
172 int printk(const char *, ...);
282 void dump_stack();
55 void __dynamic_pr_debug(struct _ddebug *, const char *, ...);
415 int sprintf(char *, const char *, ...);
8 void ldv_dma_map_page();
87 void __bad_percpu_size();
71 void warn_slowpath_null(const char *, const int);
9 extern unsigned long vmemmap_base;
23 unsigned long int __phys_addr(unsigned long);
24 int atomic_read(const atomic_t *v);
36 void atomic_set(atomic_t *v, int i);
89 void atomic_inc(atomic_t *v);
32 void * __memcpy(void *, const void *, size_t );
57 void * __memset(void *, int, size_t );
27 s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
8 extern int __preempt_count;
20 int preempt_count();
93 void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *);
32 unsigned long int _raw_spin_lock_irqsave(raw_spinlock_t *);
43 void _raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long);
286 raw_spinlock_t * spinlock_check(spinlock_t *lock);
352 void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);
133 void __mutex_init(struct mutex *, const char *, struct lock_class_key *);
155 void mutex_lock_nested(struct mutex *, unsigned int);
195 void mutex_unlock(struct mutex *);
162 s64 ktime_divns(const ktime_t kt, s64 div);
173 s64 ktime_to_us(const ktime_t kt);
183 s64 ktime_us_delta(const ktime_t later, const ktime_t earlier);
188 ktime_t ktime_get();
340 void outb(unsigned char value, int port);
340 unsigned char inb(int port);
341 void outw(unsigned short value, int port);
341 unsigned short int inw(int port);
87 const char * kobject_name(const struct kobject *kobj);
139 int request_threaded_irq(unsigned int, irqreturn_t (*)(int, void *), irqreturn_t (*)(int, void *), unsigned long, const char *, void *);
144 int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *), unsigned long flags, const char *name, void *dev);
158 void free_irq(unsigned int, void *);
208 bool capable(int);
109 ssize_t seq_read(struct file *, char *, size_t , loff_t *);
110 loff_t seq_lseek(struct file *, loff_t , int);
117 void seq_printf(struct seq_file *, const char *, ...);
135 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
137 int single_release(struct inode *, struct file *);
979 const char * dev_name(const struct device *dev);
1026 void * dev_get_drvdata(const struct device *dev);
1031 void dev_set_drvdata(struct device *dev, void *data);
154 void kfree(const void *);
330 void * __kmalloc(size_t , gfp_t );
478 void * kmalloc(size_t size, gfp_t flags);
919 int pci_bus_read_config_byte(struct pci_bus *, unsigned int, int, u8 *);
925 int pci_bus_write_config_byte(struct pci_bus *, unsigned int, int, u8 );
943 int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val);
956 int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
1011 int pci_enable_device(struct pci_dev *);
1028 void pci_disable_device(struct pci_dev *);
1031 void pci_set_master(struct pci_dev *);
1084 int pci_save_state(struct pci_dev *);
1085 void pci_restore_state(struct pci_dev *);
1098 int pci_set_power_state(struct pci_dev *, pci_power_t );
1099 pci_power_t pci_choose_state(struct pci_dev *, pm_message_t );
1157 int pci_request_regions(struct pci_dev *, const char *);
1159 void pci_release_regions(struct pci_dev *);
1212 int __pci_register_driver(struct pci_driver *, struct module *, const char *);
1221 void pci_unregister_driver(struct pci_driver *);
1650 void * pci_get_drvdata(struct pci_dev *pdev);
1655 void pci_set_drvdata(struct pci_dev *pdev, void *data);
1663 const char * pci_name(const struct pci_dev *pdev);
37 void debug_dma_map_page(struct device *, struct page *, size_t , size_t , int, dma_addr_t , bool );
44 void debug_dma_unmap_page(struct device *, dma_addr_t , size_t , int, bool );
53 void debug_dma_alloc_coherent(struct device *, size_t , dma_addr_t , void *);
66 void debug_dma_sync_single_for_cpu(struct device *, dma_addr_t , size_t , int);
70 void debug_dma_sync_single_for_device(struct device *, dma_addr_t , size_t , int);
131 void kmemcheck_mark_initialized(void *address, unsigned int n);
144 int valid_dma_direction(int dma_direction);
28 extern const struct dma_map_ops *dma_ops;
30 const struct dma_map_ops * get_arch_dma_ops(struct bus_type *bus);
35 bool arch_dma_alloc_attrs(struct device **, gfp_t *);
39 int dma_supported(struct device *, u64 );
175 const struct dma_map_ops * get_dma_ops(struct device *dev);
200 dma_addr_t ldv_dma_map_single_attrs_5(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs);
200 dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs);
223 void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs);
335 void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir);
347 void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir);
476 void * dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs);
517 void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag);
523 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle);
575 int dma_set_mask(struct device *dev, u64 mask);
680 void * dma_zalloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag);
23 void * pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);
31 void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
38 dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction);
44 void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);
79 void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
86 void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
113 int pci_set_dma_mask(struct pci_dev *dev, u64 mask);
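The legacy pci_* DMA wrappers above suggest the usual map/sync/unmap lifecycle for a streaming buffer; a hedged sketch of a receive-side use, assuming PCI_DMA_FROMDEVICE and with demo_rx_dma, buf, and len purely illustrative:

/* Sketch (assumed usage, not the traced driver's code): map a receive
 * buffer, sync it for CPU access before reading, then unmap. */
static void demo_rx_dma(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t busaddr = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
	pci_dma_sync_single_for_cpu(pdev, busaddr, len, PCI_DMA_FROMDEVICE);
	/* ... consume the received bytes from buf ... */
	pci_unmap_single(pdev, busaddr, len, PCI_DMA_FROMDEVICE);
}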
8 void __udelay(unsigned long);
10 void __const_udelay(unsigned long);
233 int net_ratelimit();
1927 unsigned char * skb_put(struct sk_buff *, unsigned int);
2030 void skb_reserve(struct sk_buff *skb, int len);
2198 void skb_reset_mac_header(struct sk_buff *skb);
2441 struct sk_buff * __netdev_alloc_skb(struct net_device *, unsigned int, gfp_t );
2457 struct sk_buff * netdev_alloc_skb(struct net_device *dev, unsigned int length);
2471 struct sk_buff * dev_alloc_skb(unsigned int length);
3173 void skb_copy_from_linear_data(const struct sk_buff *skb, void *to, const unsigned int len);
1927 struct netdev_queue * netdev_get_tx_queue(const struct net_device *dev, unsigned int index);
2022 void * netdev_priv(const struct net_device *dev);
2425 void free_netdev(struct net_device *);
2801 void netif_tx_start_queue(struct netdev_queue *dev_queue);
2812 void netif_start_queue(struct net_device *dev);
2827 void netif_tx_wake_queue(struct netdev_queue *);
2836 void netif_wake_queue(struct net_device *dev);
2851 void netif_tx_stop_queue(struct netdev_queue *dev_queue);
2863 void netif_stop_queue(struct net_device *dev);
2870 bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue);
2881 bool netif_queue_stopped(const struct net_device *dev);
3055 bool netif_running(const struct net_device *dev);
3198 void __dev_kfree_skb_any(struct sk_buff *, enum skb_free_reason );
3229 void dev_kfree_skb_any(struct sk_buff *skb);
3239 int netif_rx(struct sk_buff *);
3240 int netif_rx_ni(struct sk_buff *);
3353 bool netif_carrier_ok(const struct net_device *dev);
3426 bool netif_device_present(struct net_device *dev);
3431 void netif_device_detach(struct net_device *);
3433 void netif_device_attach(struct net_device *);
3690 int register_netdev(struct net_device *);
3691 void unregister_netdev(struct net_device *);
19 struct proc_dir_entry * proc_mkdir(const char *, struct proc_dir_entry *);
26 struct proc_dir_entry * proc_create_data(const char *, umode_t , struct proc_dir_entry *, const struct file_operations *, void *);
38 void proc_set_size(struct proc_dir_entry *, loff_t );
40 void * PDE_DATA(const struct inode *);
43 void remove_proc_entry(const char *, struct proc_dir_entry *);
83 void irda_init_max_qos_capabilies(struct qos_info *);
88 void irda_qos_bits_to_value(struct qos_info *);
214 struct irlap_cb * irlap_open(struct net_device *, struct qos_info *, const char *);
216 void irlap_close(struct irlap_cb *);
219 void irda_device_set_media_busy(struct net_device *, int);
229 struct net_device * alloc_irdadev(int);
239 __u16 irda_get_mtt(const struct sk_buff *skb);
252 __u32 irda_get_next_speed(const struct sk_buff *skb);
54 int async_wrap_skb(struct sk_buff *, __u8 *, int);
8 u16 crc_ccitt(u16 , const u8 *, size_t );
425 unsigned int calc_width_bits(unsigned int baudrate, unsigned int widthselect, unsigned int clockselect);
594 int rd_is_active(struct ring_descr *rd);
599 void rd_activate(struct ring_descr *rd);
604 void rd_set_status(struct ring_descr *rd, u8 s);
609 void rd_set_addr_status(struct ring_descr *rd, dma_addr_t a, u8 s);
632 void rd_set_count(struct ring_descr *rd, u16 c);
637 u8 rd_get_status(struct ring_descr *rd);
642 dma_addr_t rd_get_addr(struct ring_descr *rd);
650 u16 rd_get_count(struct ring_descr *rd);
680 struct ring_descr * ring_last(struct vlsi_ring *r);
688 struct ring_descr * ring_put(struct vlsi_ring *r);
694 struct ring_descr * ring_first(struct vlsi_ring *r);
702 struct ring_descr * ring_get(struct vlsi_ring *r);
61 char drivername[8U] = { 'v', 'l', 's', 'i', '_', 'i', 'r', '\x0' };
63 const struct pci_device_id vlsi_irda_table[2U] = { { 4100U, 261U, 4294967295U, 4294967295U, 851968U, 16776960U, 0UL } };
75 const struct pci_device_id __mod_pci__vlsi_irda_table_device_table[2U] = { };
86 int clksrc = 0;
98 int ringsize[2U] = { 8, 8 };
111 int sirpulse = 1;
122 int qos_mtt_bits = 7;
128 void vlsi_reg_debug(unsigned int iobase, const char *s);
138 void vlsi_ring_debug(struct vlsi_ring *r);
160 struct proc_dir_entry *vlsi_proc_root = (struct proc_dir_entry *)0;
164 void vlsi_proc_pdev(struct seq_file *seq, struct pci_dev *pdev);
180 void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev);
297 void vlsi_proc_ring(struct seq_file *seq, struct vlsi_ring *r);
334 int vlsi_seq_show(struct seq_file *seq, void *v);
371 int vlsi_seq_open(struct inode *inode, struct file *file);
376 const struct file_operations vlsi_proc_fops = { &__this_module, &seq_lseek, &seq_read, 0, 0, 0, 0, 0, 0, 0, 0, 0, &vlsi_seq_open, 0, &single_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
392 struct vlsi_ring * vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr_hw *hwmap, unsigned int size, unsigned int len, int dir);
449 int vlsi_free_ring(struct vlsi_ring *r);
469 int vlsi_create_hwif(vlsi_irda_dev_t *idev);
507 int vlsi_destroy_hwif(vlsi_irda_dev_t *idev);
524 int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd);
597 void vlsi_fill_rx(struct vlsi_ring *r);
623 void vlsi_rx_interrupt(struct net_device *ndev);
672 void vlsi_unarm_rx(vlsi_irda_dev_t *idev);
721 int vlsi_process_tx(struct vlsi_ring *r, struct ring_descr *rd);
750 int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned int iobase);
844 netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev);
1040 void vlsi_tx_interrupt(struct net_device *ndev);
1100 void vlsi_unarm_tx(vlsi_irda_dev_t *idev);
1142 int vlsi_start_clock(struct pci_dev *pdev);
1205 void vlsi_stop_clock(struct pci_dev *pdev);
1230 void vlsi_clear_regs(unsigned int iobase);
1239 int vlsi_init_chip(struct pci_dev *pdev);
1292 int vlsi_start_hw(vlsi_irda_dev_t *idev);
1324 int vlsi_stop_hw(vlsi_irda_dev_t *idev);
1354 void vlsi_tx_timeout(struct net_device *ndev);
1379 int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
1428 irqreturn_t vlsi_interrupt(int irq, void *dev_instance);
1470 int vlsi_open(struct net_device *ndev);
1530 int vlsi_close(struct net_device *ndev);
1553 const struct net_device_ops vlsi_netdev_ops = { 0, 0, &vlsi_open, &vlsi_close, &vlsi_hard_start_xmit, 0, 0, 0, 0, 0, 0, &vlsi_ioctl, 0, 0, 0, &vlsi_tx_timeout, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1561 int vlsi_irda_init(struct net_device *ndev);
1608 int vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id);
1679 void vlsi_irda_remove(struct pci_dev *pdev);
1714 int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state);
1755 int vlsi_irda_resume(struct pci_dev *pdev);
1808 struct pci_driver vlsi_irda_driver = { { 0, 0 }, (const char *)(&drivername), (const struct pci_device_id *)(&vlsi_irda_table), &vlsi_irda_probe, &vlsi_irda_remove, &vlsi_irda_suspend, 0, 0, &vlsi_irda_resume, 0, 0, 0, { 0, 0, 0, 0, (_Bool)0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { { { { { { 0 } }, 0U, 0U, 0, { 0, { 0, 0 }, 0, 0, 0UL } } } }, { 0, 0 } } };
1821 int vlsi_mod_init();
1865 void vlsi_mod_exit();
1891 void ldv_check_final_state();
1894 void ldv_check_return_value(int);
1897 void ldv_check_return_value_probe(int);
1900 void ldv_initialize();
1903 void ldv_handler_precall();
1906 int nondet_int();
1909 int LDV_IN_INTERRUPT = 0;
1912 void ldv_main0_sequence_infinite_withcheck_stateful();
10 void ldv_error();
7 bool ldv_is_err(const void *ptr);
14 void * ldv_err_ptr(long error);
21 long int ldv_ptr_err(const void *ptr);
28 bool ldv_is_err_or_null(const void *ptr);
5 int LDV_DMA_MAP_CALLS = 0;
16 void ldv_dma_mapping_error();
return ;
}
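Before the entry point runs, it is worth spelling out what the LDV model hooks declared above (LDV_DMA_MAP_CALLS, ldv_dma_map_page, ldv_dma_mapping_error) track. The following is a minimal sketch reconstructed from this trace, not the verbatim model source: every streaming DMA mapping must be checked with dma_mapping_error() before another mapping is created, and the counter enforces that ordering.

        /* Sketch of the LDV DMA-mapping model (reconstructed from the
         * trace below; the decrement in the error-check hook is an
         * assumption). */
        int LDV_DMA_MAP_CALLS = 0;

        void ldv_dma_map_page(void)
        {
                if (LDV_DMA_MAP_CALLS != 0)     /* previous mapping still unchecked */
                        ldv_error();            /* the state reached at the end of this trace */
                LDV_DMA_MAP_CALLS++;
        }

        void ldv_dma_mapping_error(void)
        {
                if (LDV_DMA_MAP_CALLS == 0)     /* check without a preceding mapping */
                        ldv_error();
                LDV_DMA_MAP_CALLS--;            /* assumption: one check consumes one mapping */
        }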
-entry_point
{
1914 struct inode *var_group1;
1915 struct file *var_group2;
1916 int res_vlsi_seq_open_6;
1917 struct net_device *var_group3;
1918 int res_vlsi_open_29;
1919 int res_vlsi_close_30;
1920 struct sk_buff *var_group4;
1921 struct ifreq *var_group5;
1922 int var_vlsi_ioctl_27_p2;
1923 struct pci_dev *var_group6;
1924 const struct pci_device_id *var_vlsi_irda_probe_32_p1;
1925 int res_vlsi_irda_probe_32;
1926 struct pm_message var_vlsi_irda_suspend_34_p1;
1927 int var_vlsi_interrupt_28_p0;
1928 void *var_vlsi_interrupt_28_p1;
1929 int ldv_s_vlsi_proc_fops_file_operations;
1930 int ldv_s_vlsi_netdev_ops_net_device_ops;
1931 int ldv_s_vlsi_irda_driver_pci_driver;
1932 int tmp;
1933 int tmp___0;
1934 int tmp___1;
2204 ldv_s_vlsi_proc_fops_file_operations = 0;
2206 ldv_s_vlsi_netdev_ops_net_device_ops = 0;
2209 ldv_s_vlsi_irda_driver_pci_driver = 0;
2171 LDV_IN_INTERRUPT = 1;
2180 ldv_initialize() { /* Function call is skipped due to function is undefined */}
2201 ldv_handler_precall() { /* Function call is skipped due to function is undefined */}
2202 -vlsi_mod_init()
{
1823 int i;
1824 int ret;
1825 int tmp;
1826 int tmp___0;
1825 assume(!(clksrc < 0));
1825 assume(!(clksrc > 3));
1831 i = 0;
1831 goto ldv_53813;
1831 assume(i <= 1);
1833 goto ldv_53812;
1832 ldv_53812:;
1832 switch (ringsize[i]);
1833 assume(!((ringsize[i]) == 4));
1834 assume((ringsize[i]) == 8);
1835 fall through
1836 fall through
1837 fall through
1838 goto ldv_53810;
1847 ldv_53810:;
1831 i = i + 1;
1832 ldv_53813:;
1831 assume(i <= 1);
1833 goto ldv_53812;
1832 ldv_53812:;
1832 switch (ringsize[i]);
1833 assume(!((ringsize[i]) == 4));
1834 assume(!((ringsize[i]) == 8));
1835 assume(!((ringsize[i]) == 16));
1836 assume(!((ringsize[i]) == 32));
1837 assume(!((ringsize[i]) == 64));
default
1840 tmp___0 = net_ratelimit() { /* Function call is skipped due to function is undefined */}
1840 assume(tmp___0 != 0);
1840 char *__CPAchecker_TMP_0;
1841 assume(i != 0);
1841 __CPAchecker_TMP_0 = (char *)"rx";
1840 printk("\f%s: invalid %s ringsize %d, using default=8\n", (char *)(&drivername), __CPAchecker_TMP_0, ringsize[i]) { /* Function call is skipped due to function is undefined */}
1844 ringsize[i] = 8;
1845 goto ldv_53810;
1847 ldv_53810:;
1831 i = i + 1;
1832 ldv_53813:;
1831 assume(!(i <= 1));
1849 sirpulse = sirpulse != 0;
1855 vlsi_proc_root = proc_mkdir("driver/vlsi_ir", (struct proc_dir_entry *)0) { /* Function call is skipped due to function is undefined */}
1857 ret = __pci_register_driver(&vlsi_irda_driver, &__this_module, "vlsi_ir") { /* Function call is skipped due to function is undefined */}
1859 assume(!(ret != 0));
1861 return ret;;
}
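The two loop iterations just traced are the module-parameter sanitation in vlsi_mod_init(). Condensed into ordinary C (the warning text is taken verbatim from the trace; the exact printk level is an assumption):

        /* ringsize[0] is the tx ring, ringsize[1] the rx ring; any value
         * other than a supported power of two is reset to the default 8. */
        for (i = 0; i < 2; i++) {
                switch (ringsize[i]) {
                case 4: case 8: case 16: case 32: case 64:
                        break;
                default:
                        if (net_ratelimit())
                                printk(KERN_WARNING "%s: invalid %s ringsize %d, using default=8\n",
                                       drivername, i ? "rx" : "tx", ringsize[i]);
                        ringsize[i] = 8;
                }
        }

Note that the trace takes the default branch for the rx entry even though the initializer is { 8, 8 }: module parameters can be overridden at load time, so the verifier explores both outcomes.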
2202 assume(!(tmp != 0));
2215 goto ldv_53878;
2215 tmp___1 = nondet_int() { /* Function call is skipped due to function is undefined */}
2215 assume(tmp___1 != 0);
2220 goto ldv_53877;
2216 ldv_53877:;
2221 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}
2221 switch (tmp___0);
2222 assume(!(tmp___0 == 0));
2260 assume(tmp___0 == 1);
2263 assume(ldv_s_vlsi_netdev_ops_net_device_ops == 0);
2278 ldv_handler_precall() { /* Function call is skipped due to function is undefined */}
2279 -vlsi_open(var_group3)
{
1472 vlsi_irda_dev_t *idev;
1473 void *tmp;
1474 int err;
1475 char hwname[32U];
1476 int tmp___0;
1477 int tmp___1;
1478 int tmp___2;
1479 int tmp___3;
1480 int tmp___4;
1472 -netdev_priv((const struct net_device *)ndev)
{
2024 return ((void *)dev) + 3136U;;
}
1472 idev = (vlsi_irda_dev_t *)tmp;
1473 err = -11;
1476 tmp___1 = pci_request_regions(idev->pdev, (const char *)(&drivername)) { /* Function call is skipped due to function is undefined */}
1476 assume(!(tmp___1 != 0));
1480 ndev->base_addr = (unsigned long)(((idev->pdev->resource)[0]).start);
1481 int __CPAchecker_TMP_0 = (int)(idev->pdev->irq);
1481 ndev->irq = __CPAchecker_TMP_0;
1487 int __CPAchecker_TMP_1 = (int)(ndev->base_addr);
1487 -outb(84, __CPAchecker_TMP_1)
{
340 Ignored inline assembler code
341 return ;;
}
1489 unsigned int __CPAchecker_TMP_2 = (unsigned int)(ndev->irq);
1489 -request_irq(__CPAchecker_TMP_2, &vlsi_interrupt, 128UL, (const char *)(&drivername), (void *)ndev)
{
147 int tmp;
147 tmp = request_threaded_irq(irq, handler, (irqreturn_t (*)(int, void *))0, flags, name, dev) { /* Function call is skipped due to function is undefined */}
147 return tmp;;
}
1489 assume(!(tmp___3 != 0));
1496 -vlsi_create_hwif(idev)
{
471 char *ringarea;
472 struct ring_descr_hw *hwmap;
473 void *tmp;
474 struct vlsi_ring *tmp___0;
474 idev->virtaddr = (void *)0;
475 idev->busaddr = 0ULL;
477 -pci_zalloc_consistent(idev->pdev, 1024UL, &(idev->busaddr))
{
26 void *tmp;
26 struct device *__CPAchecker_TMP_0;
26 assume(((unsigned long)hwdev) != ((unsigned long)((struct pci_dev *)0)));
26 __CPAchecker_TMP_0 = &(hwdev->dev);
26 -dma_zalloc_coherent(__CPAchecker_TMP_0, size, dma_handle, 17301536U)
{
683 void *ret;
684 void *tmp;
683 -dma_alloc_coherent(dev, size, dma_handle, flag | 32768U)
{
520 void *tmp;
520 -dma_alloc_attrs(dev, size, dma_handle, flag, 0UL)
{
479 const struct dma_map_ops *ops;
480 const struct dma_map_ops *tmp;
481 void *cpu_addr;
482 long tmp___0;
483 _Bool tmp___1;
484 int tmp___2;
480 -get_dma_ops(dev)
{
177 const struct dma_map_ops *tmp;
177 assume(!(((unsigned long)dev) != ((unsigned long)((struct device *)0))));
179 struct bus_type *__CPAchecker_TMP_1;
179 assume(!(((unsigned long)dev) != ((unsigned long)((struct device *)0))));
179 __CPAchecker_TMP_1 = (struct bus_type *)0;
179 -get_arch_dma_ops(__CPAchecker_TMP_1)
{
32 return dma_ops;;
}
179 return tmp;;
}
480 ops = tmp;
483 tmp___0 = __builtin_expect(((unsigned long)ops) == ((unsigned long)((const struct dma_map_ops *)0)), 0L) { /* Function call is skipped due to function is undefined */}
483 assume(!(tmp___0 != 0L));
488 tmp___1 = arch_dma_alloc_attrs(&dev, &flag) { /* Function call is skipped due to function is undefined */}
488 assume(!(tmp___1 == 0));
488 tmp___2 = 0;
488 assume(tmp___2 == 0);
490 unsigned long __CPAchecker_TMP_0 = (unsigned long)(ops->alloc);
490 assume(!(__CPAchecker_TMP_0 == ((unsigned long)((void * (*)(struct device *, size_t , dma_addr_t *, gfp_t , unsigned long))0))));
493 cpu_addr = (*(ops->alloc))(dev, size, dma_handle, flag, attrs);
494 debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr) { /* Function call is skipped due to function is undefined */}
495 return cpu_addr;;
}
520 return tmp;;
}
683 ret = tmp;
685 return ret;;
}
26 return tmp;;
}
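The pci_zalloc_consistent() chain above is a thin wrapper stack; the 32768U OR-ed into the flags at the dma_zalloc_coherent() step is __GFP_ZERO (0x8000 in this kernel generation), which is what makes the "z" variants return zeroed memory. A sketch of the compat wrapper, per the 4.x-era pci-dma-compat layer (the GFP_ATOMIC value 17301536U seen in the trace is ___GFP_HIGH | ___GFP_ATOMIC | ___GFP_KSWAPD_RECLAIM):

        static inline void *pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
                                                  dma_addr_t *dma_handle)
        {
                /* NULL hwdev falls back to the generic DMA device */
                return dma_zalloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
                                           size, dma_handle, GFP_ATOMIC);
        }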
477 ringarea = (char *)tmp;
479 assume(!(((unsigned long)ringarea) == ((unsigned long)((char *)0))));
482 hwmap = (struct ring_descr_hw *)ringarea;
483 -vlsi_alloc_ring(idev->pdev, hwmap, (unsigned int)(ringsize[1]), 4095U, 2)
{
395 struct vlsi_ring *r;
396 struct ring_descr *rd;
397 unsigned int i;
398 unsigned int j;
399 unsigned long long busaddr;
400 void *tmp;
401 int tmp___0;
400 assume(!(size == 0U));
400 assume(!(((size - 1U) & size) != 0U));
403 -kmalloc((((unsigned long)size) * 24UL) + 40UL, 20971712U)
{
480 void *tmp___2;
495 tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */}
495 return tmp___2;;
}
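The size passed to __kmalloc() above, (size * 24UL) + 40UL, is the constant-folded form of the allocation in the driver source (see the vlsi_ir.c excerpt below): a 40-byte struct vlsi_ring header followed in the same allocation by the array of 24-byte software descriptors that r->rd is pointed at a few steps later.

        r = kmalloc(sizeof(*r) + size * sizeof(struct ring_descr), GFP_KERNEL);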
403 r = (struct vlsi_ring *)tmp;
404 assume(!(((unsigned long)r) == ((unsigned long)((struct vlsi_ring *)0))));
406 __memset((void *)r, 0, 40UL) { /* Function call is skipped due to function is undefined */}
408 r->pdev = pdev;
409 r->dir = dir;
410 r->len = len;
411 r->rd = ((struct ring_descr *)r) + 1U;
412 r->mask = size - 1U;
413 r->size = size;
414 -atomic_set(&(r->head), 0)
{
38 union __anonunion___u_44 __u;
38 __u.__val = i;
38 -__write_once_size((volatile void *)(&(v->counter)), (void *)(&(__u.__c)), 4)
{
279 switch (size);
280 assume(!(size == 1));
281 assume(!(size == 2));
282 assume(size == 4);
282 *((volatile __u32 *)p) = *((__u32 *)res);
282 goto ldv_905;
290 return ;;
}
40 return ;;
}
415 -atomic_set(&(r->tail), 0)
{
38 union __anonunion___u_44 __u;
38 __u.__val = i;
38 -__write_once_size((volatile void *)(&(v->counter)), (void *)(&(__u.__c)), 4)
{
279 switch (size);
280 assume(!(size == 1));
281 assume(!(size == 2));
282 assume(size == 4);
282 *((volatile __u32 *)p) = *((__u32 *)res);
282 goto ldv_905;
290 return ;;
}
40 return ;;
}
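Both atomic_set() calls reduce to the same 4-byte volatile store. A condensed sketch of the __write_once_size() helper being stepped through (the kernel version also carries a fallback path for other sizes; only the switch is shown):

        static __always_inline void __write_once_size(volatile void *p, void *res, int size)
        {
                switch (size) {
                case 1: *(volatile __u8 *)p  = *(__u8 *)res;  break;
                case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
                case 4: *(volatile __u32 *)p = *(__u32 *)res; break; /* taken here: int counter */
                case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
                }
        }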
417 i = 0U;
417 goto ldv_53502;
417 assume(i < size);
419 goto ldv_53501;
418 ldv_53501:;
418 rd = (r->rd) + ((unsigned long)i);
419 __memset((void *)rd, 0, 24UL) { /* Function call is skipped due to function is undefined */}
420 rd->hw = hwmap + ((unsigned long)i);
421 -kmalloc((size_t )len, 20971713U)
{
480 void *tmp___2;
495 tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */}
495 return tmp___2;;
}
422 unsigned long __CPAchecker_TMP_0 = (unsigned long)(rd->buf);
422 assume(!(__CPAchecker_TMP_0 == ((unsigned long)((void *)0))));
422 -pci_map_single(pdev, rd->buf, (size_t )len, dir)
{
41 unsigned long long tmp;
40 struct device *__CPAchecker_TMP_0;
40 assume(((unsigned long)hwdev) != ((unsigned long)((struct pci_dev *)0)));
40 __CPAchecker_TMP_0 = &(hwdev->dev);
40 -dma_map_single_attrs(__CPAchecker_TMP_0, ptr, size, (enum dma_data_direction )direction, 0UL)
{
38 unsigned long long tmp;
38 -ldv_dma_map_page()
{
10 assume(!(LDV_DMA_MAP_CALLS != 0));
12 LDV_DMA_MAP_CALLS = LDV_DMA_MAP_CALLS + 1;
13 return ;;
}
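This is the point where the model counter moves from 0 to 1: the mapping created for descriptor i == 0 is now recorded as unchecked. The driver only compares the returned bus address against zero (the assume(!(busaddr == 0ULL)) step below) and never calls pci_dma_mapping_error(), so nothing resets the counter before the loop reaches descriptor i == 1.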
40 -ldv_dma_map_single_attrs_5(dev, ptr, size, dir, attrs)
{
204 const struct dma_map_ops *ops;
205 const struct dma_map_ops *tmp;
206 unsigned long long addr;
207 int tmp___0;
208 long tmp___1;
209 unsigned long tmp___2;
210 unsigned long tmp___3;
205 -get_dma_ops(dev)
{
177 const struct dma_map_ops *tmp;
177 assume(!(((unsigned long)dev) != ((unsigned long)((struct device *)0))));
179 struct bus_type *__CPAchecker_TMP_1;
179 assume(!(((unsigned long)dev) != ((unsigned long)((struct device *)0))));
179 __CPAchecker_TMP_1 = (struct bus_type *)0;
179 -get_arch_dma_ops(__CPAchecker_TMP_1)
{
32 return dma_ops;;
}
179 return tmp;;
}
205 ops = tmp;
208 -kmemcheck_mark_initialized(ptr, (unsigned int)size)
{
133 return ;;
}
209 -valid_dma_direction((int)dir)
{
146 int __CPAchecker_TMP_0;
146 assume(!(dma_direction == 0));
146 assume(!(dma_direction == 1));
146 assume(dma_direction == 2);
__CPAchecker_TMP_0 = 1;
146 return __CPAchecker_TMP_0;;
}
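The direction value accepted above is the kernel's enum dma_data_direction; dir == 2 is DMA_FROM_DEVICE, matching the PCI_DMA_FROMDEVICE argument used for the rx ring being allocated here:

        enum dma_data_direction {
                DMA_BIDIRECTIONAL = 0,
                DMA_TO_DEVICE     = 1,
                DMA_FROM_DEVICE   = 2,  /* the branch taken in this trace */
                DMA_NONE          = 3,
        };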
209 tmp___1 = __builtin_expect(tmp___0 == 0, 0L) { /* Function call is skipped due to function is undefined */}
209 assume(!(tmp___1 != 0L));
210 tmp___2 = __phys_addr((unsigned long)ptr) { /* Function call is skipped due to function is undefined */}
210 addr = (*(ops->map_page))(dev, (struct page *)((tmp___2 >> 12) + vmemmap_base), ((unsigned long)ptr) & 4095UL, size, dir, attrs);
213 tmp___3 = __phys_addr((unsigned long)ptr) { /* Function call is skipped due to function is undefined */}
213 debug_dma_map_page(dev, (struct page *)((tmp___3 >> 12) + vmemmap_base), ((unsigned long)ptr) & 4095UL, size, (int)dir, addr, 1) { /* Function call is skipped due to function is undefined */}
216 return addr;;
}
40 return tmp;;
}
40 return tmp;;
}
422 assume(!(busaddr == 0ULL));
442 -rd_set_addr_status(rd, busaddr, 0)
{
611 int tmp;
617 assume(!((a >> 24) != 0ULL));
624 a = a & 16777215ULL;
627 rd->hw->rd_u.addr = (unsigned int)a;
628 Ignored inline assembler code
629 -rd_set_status(rd, (int)s)
{
606 rd->hw->rd_u.rd_s.status = s;
607 return ;;
}
630 return ;;
}
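The descriptor helper just traced packs the bus address into a 24-bit hardware field. A sketch of its effect (the write barrier stands in for the "Ignored inline assembler code" step and is an assumption; the real helper lives in vlsi_ir.h, which is not part of this listing):

        static void rd_set_addr_status(struct ring_descr *rd, dma_addr_t a, u8 s)
        {
                BUG_ON(a >> 24);        /* trace: assume(!((a >> 24) != 0ULL)) */
                a &= 0xffffff;          /* 16777215ULL in the trace */
                rd->hw->rd_u.addr = (u32)a;
                wmb();                  /* assumed equivalent of the skipped asm */
                rd_set_status(rd, s);   /* status byte written last */
        }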
444 rd->skb = (struct sk_buff *)0;
417 i = i + 1U;
418 ldv_53502:;
417 assume(i < size);
419 goto ldv_53501;
418 ldv_53501:;
418 rd = (r->rd) + ((unsigned long)i);
419 __memset((void *)rd, 0, 24UL) { /* Function call is skipped due to function is undefined */}
420 rd->hw = hwmap + ((unsigned long)i);
421 -kmalloc((size_t )len, 20971713U)
{
480 void *tmp___2;
495 tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */}
495 return tmp___2;;
}
422 unsigned long __CPAchecker_TMP_0 = (unsigned long)(rd->buf);
422 assume(!(__CPAchecker_TMP_0 == ((unsigned long)((void *)0))));
422 -pci_map_single(pdev, rd->buf, (size_t )len, dir)
{
41 unsigned long long tmp;
40 struct device *__CPAchecker_TMP_0;
40 assume(((unsigned long)hwdev) != ((unsigned long)((struct pci_dev *)0)));
40 __CPAchecker_TMP_0 = &(hwdev->dev);
40 -dma_map_single_attrs(__CPAchecker_TMP_0, ptr, size, (enum dma_data_direction )direction, 0UL)
{
38 unsigned long long tmp;
38 -ldv_dma_map_page()
{
10 assume(LDV_DMA_MAP_CALLS != 0);
10 -ldv_error()
{
15 LDV_ERROR:;
}
}
}
}
}
}
}
}
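This closing ldv_error() is the reported defect: inside vlsi_alloc_ring() (reached from vlsi_open() via vlsi_create_hwif()), the descriptor loop calls pci_map_single() once per descriptor. The first call drove LDV_DMA_MAP_CALLS from 0 to 1; because the driver validates the mapping only by comparing the bus address against zero instead of calling pci_dma_mapping_error(), the counter is still 1 when the second iteration maps its buffer, and the model aborts. A hedged sketch of a mapping step that would satisfy the rule (map_one() is a hypothetical helper, not driver code):

        #include <linux/pci.h>

        /* Hypothetical helper showing the checked-mapping pattern the rule
         * expects; pci_dma_mapping_error() is the call whose absence between
         * the two pci_map_single() invocations produces this report. */
        static int map_one(struct pci_dev *pdev, struct ring_descr *rd,
                           size_t len, int dir)
        {
                dma_addr_t busaddr = pci_map_single(pdev, rd->buf, len, dir);

                if (pci_dma_mapping_error(pdev, busaddr))
                        return -ENOMEM;         /* mapping failed: do not keep busaddr */

                rd_set_addr_status(rd, busaddr, 0);
                return 0;
        }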
Source code
arch/x86/include/asm/atomic.h (excerpt; the remainder of the header defines the other atomic_* helpers — only the one the trace steps through is reproduced):

        29 /**
        30  * atomic_set - set atomic variable
        31  * @v: pointer of type atomic_t
        32  * @i: required value
        33  *
        34  * Atomically sets the value of @v to @i.
        35  */
        36 static __always_inline void atomic_set(atomic_t *v, int i)
        37 {
        38         WRITE_ONCE(v->counter, i);
        39 }
arch/x86/include/asm/dma-mapping.h (excerpt; the DMA-ops lookup used in the trace):

        28 extern const struct dma_map_ops *dma_ops;
        29
        30 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
        31 {
        32         return dma_ops;
        33 }
arch/x86/include/asm/io.h (excerpt; only the out##bwl arm of the BUILDIO macro behind the outb() call in the trace is shown):

        300 #define BUILDIO(bwl, bw, type)                                  \
        301 static inline void out##bwl(unsigned type value, int port)      \
        302 {                                                               \
        303         asm volatile("out" #bwl " %" #bw "0, %w1"               \
        304                      : : "a"(value), "Nd"(port));               \
        305 }

        340 BUILDIO(b, b, char)
        341 BUILDIO(w, w, short)
        342 BUILDIO(l, , int)
1 2 /********************************************************************* 3 * 4 * vlsi_ir.c: VLSI82C147 PCI IrDA controller driver for Linux 5 * 6 * Copyright (c) 2001-2003 Martin Diehl 7 * 8 * This program is free software; you can redistribute it and/or 9 * modify it under the terms of the GNU General Public License as 10 * published by the Free Software Foundation; either version 2 of 11 * the License, or (at your option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, see <http://www.gnu.org/licenses/>. 20 * 21 ********************************************************************/ 22 23 #include <linux/module.h> 24 25 #define DRIVER_NAME "vlsi_ir" 26 #define DRIVER_VERSION "v0.5" 27 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 28 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>" 29 30 MODULE_DESCRIPTION(DRIVER_DESCRIPTION); 31 MODULE_AUTHOR(DRIVER_AUTHOR); 32 MODULE_LICENSE("GPL"); 33 34 /********************************************************/ 35 36 #include <linux/kernel.h> 37 #include <linux/ktime.h> 38 #include <linux/init.h> 39 #include <linux/interrupt.h> 40 #include <linux/pci.h> 41 #include <linux/slab.h> 42 #include <linux/netdevice.h> 43 #include <linux/skbuff.h> 44 #include <linux/delay.h> 45 #include <linux/proc_fs.h> 46 #include <linux/seq_file.h> 47 #include <linux/math64.h> 48 #include <linux/mutex.h> 49 #include <linux/uaccess.h> 50 #include <asm/byteorder.h> 51 52 #include <net/irda/irda.h> 53 #include <net/irda/irda_device.h> 54 #include <net/irda/wrapper.h> 55 #include <net/irda/crc.h> 56 57 #include "vlsi_ir.h" 58 59 /********************************************************/ 60 61 static /* const */ char drivername[] = DRIVER_NAME; 62 63 static const struct pci_device_id vlsi_irda_table[] = { 64 { 65 .class = PCI_CLASS_WIRELESS_IRDA << 8, 66 .class_mask = PCI_CLASS_SUBCLASS_MASK << 8, 67 .vendor = PCI_VENDOR_ID_VLSI, 68 .device = PCI_DEVICE_ID_VLSI_82C147, 69 .subvendor = PCI_ANY_ID, 70 .subdevice = PCI_ANY_ID, 71 }, 72 { /* all zeroes */ } 73 }; 74 75 MODULE_DEVICE_TABLE(pci, vlsi_irda_table); 76 77 /********************************************************/ 78 79 /* clksrc: which clock source to be used 80 * 0: auto - try PLL, fallback to 40MHz XCLK 81 * 1: on-chip 48MHz PLL 82 * 2: external 48MHz XCLK 83 * 3: external 40MHz XCLK (HP OB-800) 84 */ 85 86 static int clksrc = 0; /* default is 0(auto) */ 87 module_param(clksrc, int, 0); 88 MODULE_PARM_DESC(clksrc, "clock input source selection"); 89 90 /* ringsize: size of the tx and rx descriptor rings 91 * independent for tx and rx 92 * specify as ringsize=tx[,rx] 93 * allowed values: 4, 8, 16, 32, 64 94 * Due to the IrDA 1.x max. allowed window size=7, 95 * there should be no gain when using rings larger than 8 96 */ 97 98 static int ringsize[] = {8,8}; /* default is tx=8 / rx=8 */ 99 module_param_array(ringsize, int, NULL, 0); 100 MODULE_PARM_DESC(ringsize, "TX, RX ring descriptor size"); 101 102 /* sirpulse: tuning of the SIR pulse width within IrPHY 1.3 limits 103 * 0: very short, 1.5us (exception: 6us at 2.4 kbaud) 104 * 1: nominal 3/16 bittime width 105 * note: IrDA compliant peer devices should be happy regardless 106 * which one is used. 
Primary goal is to save some power 107 * on the sender's side - at 9.6kbaud for example the short 108 * pulse width saves more than 90% of the transmitted IR power. 109 */ 110 111 static int sirpulse = 1; /* default is 3/16 bittime */ 112 module_param(sirpulse, int, 0); 113 MODULE_PARM_DESC(sirpulse, "SIR pulse width tuning"); 114 115 /* qos_mtt_bits: encoded min-turn-time value we require the peer device 116 * to use before transmitting to us. "Type 1" (per-station) 117 * bitfield according to IrLAP definition (section 6.6.8) 118 * Don't know which transceiver is used by my OB800 - the 119 * pretty common HP HDLS-1100 requires 1 msec - so lets use this. 120 */ 121 122 static int qos_mtt_bits = 0x07; /* default is 1 ms or more */ 123 module_param(qos_mtt_bits, int, 0); 124 MODULE_PARM_DESC(qos_mtt_bits, "IrLAP bitfield representing min-turn-time"); 125 126 /********************************************************/ 127 128 static void vlsi_reg_debug(unsigned iobase, const char *s) 129 { 130 int i; 131 132 printk(KERN_DEBUG "%s: ", s); 133 for (i = 0; i < 0x20; i++) 134 printk("%02x", (unsigned)inb((iobase+i))); 135 printk("\n"); 136 } 137 138 static void vlsi_ring_debug(struct vlsi_ring *r) 139 { 140 struct ring_descr *rd; 141 unsigned i; 142 143 printk(KERN_DEBUG "%s - ring %p / size %u / mask 0x%04x / len %u / dir %d / hw %p\n", 144 __func__, r, r->size, r->mask, r->len, r->dir, r->rd[0].hw); 145 printk(KERN_DEBUG "%s - head = %d / tail = %d\n", __func__, 146 atomic_read(&r->head) & r->mask, atomic_read(&r->tail) & r->mask); 147 for (i = 0; i < r->size; i++) { 148 rd = &r->rd[i]; 149 printk(KERN_DEBUG "%s - ring descr %u: ", __func__, i); 150 printk("skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw); 151 printk(KERN_DEBUG "%s - hw: status=%02x count=%u addr=0x%08x\n", 152 __func__, (unsigned) rd_get_status(rd), 153 (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd)); 154 } 155 } 156 157 /********************************************************/ 158 159 /* needed regardless of CONFIG_PROC_FS */ 160 static struct proc_dir_entry *vlsi_proc_root = NULL; 161 162 #ifdef CONFIG_PROC_FS 163 164 static void vlsi_proc_pdev(struct seq_file *seq, struct pci_dev *pdev) 165 { 166 unsigned iobase = pci_resource_start(pdev, 0); 167 unsigned i; 168 169 seq_printf(seq, "\n%s (vid/did: [%04x:%04x])\n", 170 pci_name(pdev), (int)pdev->vendor, (int)pdev->device); 171 seq_printf(seq, "pci-power-state: %u\n", (unsigned) pdev->current_state); 172 seq_printf(seq, "resources: irq=%u / io=0x%04x / dma_mask=0x%016Lx\n", 173 pdev->irq, (unsigned)pci_resource_start(pdev, 0), (unsigned long long)pdev->dma_mask); 174 seq_printf(seq, "hw registers: "); 175 for (i = 0; i < 0x20; i++) 176 seq_printf(seq, "%02x", (unsigned)inb((iobase+i))); 177 seq_printf(seq, "\n"); 178 } 179 180 static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev) 181 { 182 vlsi_irda_dev_t *idev = netdev_priv(ndev); 183 u8 byte; 184 u16 word; 185 s32 sec, usec; 186 unsigned iobase = ndev->base_addr; 187 188 seq_printf(seq, "\n%s link state: %s / %s / %s / %s\n", ndev->name, 189 netif_device_present(ndev) ? "attached" : "detached", 190 netif_running(ndev) ? "running" : "not running", 191 netif_carrier_ok(ndev) ? "carrier ok" : "no carrier", 192 netif_queue_stopped(ndev) ? "queue stopped" : "queue running"); 193 194 if (!netif_running(ndev)) 195 return; 196 197 seq_printf(seq, "\nhw-state:\n"); 198 pci_read_config_byte(idev->pdev, VLSI_PCI_IRMISC, &byte); 199 seq_printf(seq, "IRMISC:%s%s%s uart%s", 200 (byte&IRMISC_IRRAIL) ? 
" irrail" : "", 201 (byte&IRMISC_IRPD) ? " irpd" : "", 202 (byte&IRMISC_UARTTST) ? " uarttest" : "", 203 (byte&IRMISC_UARTEN) ? "@" : " disabled\n"); 204 if (byte&IRMISC_UARTEN) { 205 seq_printf(seq, "0x%s\n", 206 (byte&2) ? ((byte&1) ? "3e8" : "2e8") 207 : ((byte&1) ? "3f8" : "2f8")); 208 } 209 pci_read_config_byte(idev->pdev, VLSI_PCI_CLKCTL, &byte); 210 seq_printf(seq, "CLKCTL: PLL %s%s%s / clock %s / wakeup %s\n", 211 (byte&CLKCTL_PD_INV) ? "powered" : "down", 212 (byte&CLKCTL_LOCK) ? " locked" : "", 213 (byte&CLKCTL_EXTCLK) ? ((byte&CLKCTL_XCKSEL)?" / 40 MHz XCLK":" / 48 MHz XCLK") : "", 214 (byte&CLKCTL_CLKSTP) ? "stopped" : "running", 215 (byte&CLKCTL_WAKE) ? "enabled" : "disabled"); 216 pci_read_config_byte(idev->pdev, VLSI_PCI_MSTRPAGE, &byte); 217 seq_printf(seq, "MSTRPAGE: 0x%02x\n", (unsigned)byte); 218 219 byte = inb(iobase+VLSI_PIO_IRINTR); 220 seq_printf(seq, "IRINTR:%s%s%s%s%s%s%s%s\n", 221 (byte&IRINTR_ACTEN) ? " ACTEN" : "", 222 (byte&IRINTR_RPKTEN) ? " RPKTEN" : "", 223 (byte&IRINTR_TPKTEN) ? " TPKTEN" : "", 224 (byte&IRINTR_OE_EN) ? " OE_EN" : "", 225 (byte&IRINTR_ACTIVITY) ? " ACTIVITY" : "", 226 (byte&IRINTR_RPKTINT) ? " RPKTINT" : "", 227 (byte&IRINTR_TPKTINT) ? " TPKTINT" : "", 228 (byte&IRINTR_OE_INT) ? " OE_INT" : ""); 229 word = inw(iobase+VLSI_PIO_RINGPTR); 230 seq_printf(seq, "RINGPTR: rx=%u / tx=%u\n", RINGPTR_GET_RX(word), RINGPTR_GET_TX(word)); 231 word = inw(iobase+VLSI_PIO_RINGBASE); 232 seq_printf(seq, "RINGBASE: busmap=0x%08x\n", 233 ((unsigned)word << 10)|(MSTRPAGE_VALUE<<24)); 234 word = inw(iobase+VLSI_PIO_RINGSIZE); 235 seq_printf(seq, "RINGSIZE: rx=%u / tx=%u\n", RINGSIZE_TO_RXSIZE(word), 236 RINGSIZE_TO_TXSIZE(word)); 237 238 word = inw(iobase+VLSI_PIO_IRCFG); 239 seq_printf(seq, "IRCFG:%s%s%s%s%s%s%s%s%s%s%s%s%s\n", 240 (word&IRCFG_LOOP) ? " LOOP" : "", 241 (word&IRCFG_ENTX) ? " ENTX" : "", 242 (word&IRCFG_ENRX) ? " ENRX" : "", 243 (word&IRCFG_MSTR) ? " MSTR" : "", 244 (word&IRCFG_RXANY) ? " RXANY" : "", 245 (word&IRCFG_CRC16) ? " CRC16" : "", 246 (word&IRCFG_FIR) ? " FIR" : "", 247 (word&IRCFG_MIR) ? " MIR" : "", 248 (word&IRCFG_SIR) ? " SIR" : "", 249 (word&IRCFG_SIRFILT) ? " SIRFILT" : "", 250 (word&IRCFG_SIRTEST) ? " SIRTEST" : "", 251 (word&IRCFG_TXPOL) ? " TXPOL" : "", 252 (word&IRCFG_RXPOL) ? " RXPOL" : ""); 253 word = inw(iobase+VLSI_PIO_IRENABLE); 254 seq_printf(seq, "IRENABLE:%s%s%s%s%s%s%s%s\n", 255 (word&IRENABLE_PHYANDCLOCK) ? " PHYANDCLOCK" : "", 256 (word&IRENABLE_CFGER) ? " CFGERR" : "", 257 (word&IRENABLE_FIR_ON) ? " FIR_ON" : "", 258 (word&IRENABLE_MIR_ON) ? " MIR_ON" : "", 259 (word&IRENABLE_SIR_ON) ? " SIR_ON" : "", 260 (word&IRENABLE_ENTXST) ? " ENTXST" : "", 261 (word&IRENABLE_ENRXST) ? " ENRXST" : "", 262 (word&IRENABLE_CRC16_ON) ? " CRC16_ON" : ""); 263 word = inw(iobase+VLSI_PIO_PHYCTL); 264 seq_printf(seq, "PHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n", 265 (unsigned)PHYCTL_TO_BAUD(word), 266 (unsigned)PHYCTL_TO_PLSWID(word), 267 (unsigned)PHYCTL_TO_PREAMB(word)); 268 word = inw(iobase+VLSI_PIO_NPHYCTL); 269 seq_printf(seq, "NPHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n", 270 (unsigned)PHYCTL_TO_BAUD(word), 271 (unsigned)PHYCTL_TO_PLSWID(word), 272 (unsigned)PHYCTL_TO_PREAMB(word)); 273 word = inw(iobase+VLSI_PIO_MAXPKT); 274 seq_printf(seq, "MAXPKT: max. 
rx packet size = %u\n", word); 275 word = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 276 seq_printf(seq, "RCVBCNT: rx-fifo filling level = %u\n", word); 277 278 seq_printf(seq, "\nsw-state:\n"); 279 seq_printf(seq, "IrPHY setup: %d baud - %s encoding\n", idev->baud, 280 (idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR")); 281 sec = div_s64_rem(ktime_us_delta(ktime_get(), idev->last_rx), 282 USEC_PER_SEC, &usec); 283 seq_printf(seq, "last rx: %ul.%06u sec\n", sec, usec); 284 285 seq_printf(seq, "RX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu", 286 ndev->stats.rx_packets, ndev->stats.rx_bytes, ndev->stats.rx_errors, 287 ndev->stats.rx_dropped); 288 seq_printf(seq, " / overrun=%lu / length=%lu / frame=%lu / crc=%lu\n", 289 ndev->stats.rx_over_errors, ndev->stats.rx_length_errors, 290 ndev->stats.rx_frame_errors, ndev->stats.rx_crc_errors); 291 seq_printf(seq, "TX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu / fifo=%lu\n", 292 ndev->stats.tx_packets, ndev->stats.tx_bytes, ndev->stats.tx_errors, 293 ndev->stats.tx_dropped, ndev->stats.tx_fifo_errors); 294 295 } 296 297 static void vlsi_proc_ring(struct seq_file *seq, struct vlsi_ring *r) 298 { 299 struct ring_descr *rd; 300 unsigned i, j; 301 int h, t; 302 303 seq_printf(seq, "size %u / mask 0x%04x / len %u / dir %d / hw %p\n", 304 r->size, r->mask, r->len, r->dir, r->rd[0].hw); 305 h = atomic_read(&r->head) & r->mask; 306 t = atomic_read(&r->tail) & r->mask; 307 seq_printf(seq, "head = %d / tail = %d ", h, t); 308 if (h == t) 309 seq_printf(seq, "(empty)\n"); 310 else { 311 if (((t+1)&r->mask) == h) 312 seq_printf(seq, "(full)\n"); 313 else 314 seq_printf(seq, "(level = %d)\n", ((unsigned)(t-h) & r->mask)); 315 rd = &r->rd[h]; 316 j = (unsigned) rd_get_count(rd); 317 seq_printf(seq, "current: rd = %d / status = %02x / len = %u\n", 318 h, (unsigned)rd_get_status(rd), j); 319 if (j > 0) { 320 seq_printf(seq, " data: %*ph\n", 321 min_t(unsigned, j, 20), rd->buf); 322 } 323 } 324 for (i = 0; i < r->size; i++) { 325 rd = &r->rd[i]; 326 seq_printf(seq, "> ring descr %u: ", i); 327 seq_printf(seq, "skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw); 328 seq_printf(seq, " hw: status=%02x count=%u busaddr=0x%08x\n", 329 (unsigned) rd_get_status(rd), 330 (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd)); 331 } 332 } 333 334 static int vlsi_seq_show(struct seq_file *seq, void *v) 335 { 336 struct net_device *ndev = seq->private; 337 vlsi_irda_dev_t *idev = netdev_priv(ndev); 338 unsigned long flags; 339 340 seq_printf(seq, "\n%s %s\n\n", DRIVER_NAME, DRIVER_VERSION); 341 seq_printf(seq, "clksrc: %s\n", 342 (clksrc>=2) ? 
((clksrc==3)?"40MHz XCLK":"48MHz XCLK") 343 : ((clksrc==1)?"48MHz PLL":"autodetect")); 344 seq_printf(seq, "ringsize: tx=%d / rx=%d\n", 345 ringsize[0], ringsize[1]); 346 seq_printf(seq, "sirpulse: %s\n", (sirpulse)?"3/16 bittime":"short"); 347 seq_printf(seq, "qos_mtt_bits: 0x%02x\n", (unsigned)qos_mtt_bits); 348 349 spin_lock_irqsave(&idev->lock, flags); 350 if (idev->pdev != NULL) { 351 vlsi_proc_pdev(seq, idev->pdev); 352 353 if (idev->pdev->current_state == 0) 354 vlsi_proc_ndev(seq, ndev); 355 else 356 seq_printf(seq, "\nPCI controller down - resume_ok = %d\n", 357 idev->resume_ok); 358 if (netif_running(ndev) && idev->rx_ring && idev->tx_ring) { 359 seq_printf(seq, "\n--------- RX ring -----------\n\n"); 360 vlsi_proc_ring(seq, idev->rx_ring); 361 seq_printf(seq, "\n--------- TX ring -----------\n\n"); 362 vlsi_proc_ring(seq, idev->tx_ring); 363 } 364 } 365 seq_printf(seq, "\n"); 366 spin_unlock_irqrestore(&idev->lock, flags); 367 368 return 0; 369 } 370 371 static int vlsi_seq_open(struct inode *inode, struct file *file) 372 { 373 return single_open(file, vlsi_seq_show, PDE_DATA(inode)); 374 } 375 376 static const struct file_operations vlsi_proc_fops = { 377 .owner = THIS_MODULE, 378 .open = vlsi_seq_open, 379 .read = seq_read, 380 .llseek = seq_lseek, 381 .release = single_release, 382 }; 383 384 #define VLSI_PROC_FOPS (&vlsi_proc_fops) 385 386 #else /* CONFIG_PROC_FS */ 387 #define VLSI_PROC_FOPS NULL 388 #endif 389 390 /********************************************************/ 391 392 static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr_hw *hwmap, 393 unsigned size, unsigned len, int dir) 394 { 395 struct vlsi_ring *r; 396 struct ring_descr *rd; 397 unsigned i, j; 398 dma_addr_t busaddr; 399 400 if (!size || ((size-1)&size)!=0) /* must be >0 and power of 2 */ 401 return NULL; 402 403 r = kmalloc(sizeof(*r) + size * sizeof(struct ring_descr), GFP_KERNEL); 404 if (!r) 405 return NULL; 406 memset(r, 0, sizeof(*r)); 407 408 r->pdev = pdev; 409 r->dir = dir; 410 r->len = len; 411 r->rd = (struct ring_descr *)(r+1); 412 r->mask = size - 1; 413 r->size = size; 414 atomic_set(&r->head, 0); 415 atomic_set(&r->tail, 0); 416 417 for (i = 0; i < size; i++) { 418 rd = r->rd + i; 419 memset(rd, 0, sizeof(*rd)); 420 rd->hw = hwmap + i; 421 rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA); 422 if (rd->buf == NULL || 423 !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) { 424 if (rd->buf) { 425 net_err_ratelimited("%s: failed to create PCI-MAP for %p\n", 426 __func__, rd->buf); 427 kfree(rd->buf); 428 rd->buf = NULL; 429 } 430 for (j = 0; j < i; j++) { 431 rd = r->rd + j; 432 busaddr = rd_get_addr(rd); 433 rd_set_addr_status(rd, 0, 0); 434 if (busaddr) 435 pci_unmap_single(pdev, busaddr, len, dir); 436 kfree(rd->buf); 437 rd->buf = NULL; 438 } 439 kfree(r); 440 return NULL; 441 } 442 rd_set_addr_status(rd, busaddr, 0); 443 /* initially, the dma buffer is owned by the CPU */ 444 rd->skb = NULL; 445 } 446 return r; 447 } 448 449 static int vlsi_free_ring(struct vlsi_ring *r) 450 { 451 struct ring_descr *rd; 452 unsigned i; 453 dma_addr_t busaddr; 454 455 for (i = 0; i < r->size; i++) { 456 rd = r->rd + i; 457 if (rd->skb) 458 dev_kfree_skb_any(rd->skb); 459 busaddr = rd_get_addr(rd); 460 rd_set_addr_status(rd, 0, 0); 461 if (busaddr) 462 pci_unmap_single(r->pdev, busaddr, r->len, r->dir); 463 kfree(rd->buf); 464 } 465 kfree(r); 466 return 0; 467 } 468 469 static int vlsi_create_hwif(vlsi_irda_dev_t *idev) 470 { 471 char *ringarea; 472 struct ring_descr_hw *hwmap; 473 474 
idev->virtaddr = NULL; 475 idev->busaddr = 0; 476 477 ringarea = pci_zalloc_consistent(idev->pdev, HW_RING_AREA_SIZE, 478 &idev->busaddr); 479 if (!ringarea) 480 goto out; 481 482 hwmap = (struct ring_descr_hw *)ringarea; 483 idev->rx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[1], 484 XFER_BUF_SIZE, PCI_DMA_FROMDEVICE); 485 if (idev->rx_ring == NULL) 486 goto out_unmap; 487 488 hwmap += MAX_RING_DESCR; 489 idev->tx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[0], 490 XFER_BUF_SIZE, PCI_DMA_TODEVICE); 491 if (idev->tx_ring == NULL) 492 goto out_free_rx; 493 494 idev->virtaddr = ringarea; 495 return 0; 496 497 out_free_rx: 498 vlsi_free_ring(idev->rx_ring); 499 out_unmap: 500 idev->rx_ring = idev->tx_ring = NULL; 501 pci_free_consistent(idev->pdev, HW_RING_AREA_SIZE, ringarea, idev->busaddr); 502 idev->busaddr = 0; 503 out: 504 return -ENOMEM; 505 } 506 507 static int vlsi_destroy_hwif(vlsi_irda_dev_t *idev) 508 { 509 vlsi_free_ring(idev->rx_ring); 510 vlsi_free_ring(idev->tx_ring); 511 idev->rx_ring = idev->tx_ring = NULL; 512 513 if (idev->busaddr) 514 pci_free_consistent(idev->pdev,HW_RING_AREA_SIZE,idev->virtaddr,idev->busaddr); 515 516 idev->virtaddr = NULL; 517 idev->busaddr = 0; 518 519 return 0; 520 } 521 522 /********************************************************/ 523 524 static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd) 525 { 526 u16 status; 527 int crclen, len = 0; 528 struct sk_buff *skb; 529 int ret = 0; 530 struct net_device *ndev = pci_get_drvdata(r->pdev); 531 vlsi_irda_dev_t *idev = netdev_priv(ndev); 532 533 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir); 534 /* dma buffer now owned by the CPU */ 535 status = rd_get_status(rd); 536 if (status & RD_RX_ERROR) { 537 if (status & RD_RX_OVER) 538 ret |= VLSI_RX_OVER; 539 if (status & RD_RX_LENGTH) 540 ret |= VLSI_RX_LENGTH; 541 if (status & RD_RX_PHYERR) 542 ret |= VLSI_RX_FRAME; 543 if (status & RD_RX_CRCERR) 544 ret |= VLSI_RX_CRC; 545 goto done; 546 } 547 548 len = rd_get_count(rd); 549 crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16); 550 len -= crclen; /* remove trailing CRC */ 551 if (len <= 0) { 552 pr_debug("%s: strange frame (len=%d)\n", __func__, len); 553 ret |= VLSI_RX_DROP; 554 goto done; 555 } 556 557 if (idev->mode == IFF_SIR) { /* hw checks CRC in MIR, FIR mode */ 558 559 /* rd->buf is a streaming PCI_DMA_FROMDEVICE map. Doing the 560 * endian-adjustment there just in place will dirty a cache line 561 * which belongs to the map and thus we must be sure it will 562 * get flushed before giving the buffer back to hardware. 563 * vlsi_fill_rx() will do this anyway - but here we rely on. 564 */ 565 le16_to_cpus(rd->buf+len); 566 if (irda_calc_crc16(INIT_FCS,rd->buf,len+crclen) != GOOD_FCS) { 567 pr_debug("%s: crc error\n", __func__); 568 ret |= VLSI_RX_CRC; 569 goto done; 570 } 571 } 572 573 if (!rd->skb) { 574 net_warn_ratelimited("%s: rx packet lost\n", __func__); 575 ret |= VLSI_RX_DROP; 576 goto done; 577 } 578 579 skb = rd->skb; 580 rd->skb = NULL; 581 skb->dev = ndev; 582 memcpy(skb_put(skb,len), rd->buf, len); 583 skb_reset_mac_header(skb); 584 if (in_interrupt()) 585 netif_rx(skb); 586 else 587 netif_rx_ni(skb); 588 589 done: 590 rd_set_status(rd, 0); 591 rd_set_count(rd, 0); 592 /* buffer still owned by CPU */ 593 594 return (ret) ? 
-ret : len; 595 } 596 597 static void vlsi_fill_rx(struct vlsi_ring *r) 598 { 599 struct ring_descr *rd; 600 601 for (rd = ring_last(r); rd != NULL; rd = ring_put(r)) { 602 if (rd_is_active(rd)) { 603 net_warn_ratelimited("%s: driver bug: rx descr race with hw\n", 604 __func__); 605 vlsi_ring_debug(r); 606 break; 607 } 608 if (!rd->skb) { 609 rd->skb = dev_alloc_skb(IRLAP_SKB_ALLOCSIZE); 610 if (rd->skb) { 611 skb_reserve(rd->skb,1); 612 rd->skb->protocol = htons(ETH_P_IRDA); 613 } 614 else 615 break; /* probably not worth logging? */ 616 } 617 /* give dma buffer back to busmaster */ 618 pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir); 619 rd_activate(rd); 620 } 621 } 622 623 static void vlsi_rx_interrupt(struct net_device *ndev) 624 { 625 vlsi_irda_dev_t *idev = netdev_priv(ndev); 626 struct vlsi_ring *r = idev->rx_ring; 627 struct ring_descr *rd; 628 int ret; 629 630 for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) { 631 632 if (rd_is_active(rd)) 633 break; 634 635 ret = vlsi_process_rx(r, rd); 636 637 if (ret < 0) { 638 ret = -ret; 639 ndev->stats.rx_errors++; 640 if (ret & VLSI_RX_DROP) 641 ndev->stats.rx_dropped++; 642 if (ret & VLSI_RX_OVER) 643 ndev->stats.rx_over_errors++; 644 if (ret & VLSI_RX_LENGTH) 645 ndev->stats.rx_length_errors++; 646 if (ret & VLSI_RX_FRAME) 647 ndev->stats.rx_frame_errors++; 648 if (ret & VLSI_RX_CRC) 649 ndev->stats.rx_crc_errors++; 650 } 651 else if (ret > 0) { 652 ndev->stats.rx_packets++; 653 ndev->stats.rx_bytes += ret; 654 } 655 } 656 657 idev->last_rx = ktime_get(); /* remember "now" for later mtt delay */ 658 659 vlsi_fill_rx(r); 660 661 if (ring_first(r) == NULL) { 662 /* we are in big trouble, if this should ever happen */ 663 net_err_ratelimited("%s: rx ring exhausted!\n", __func__); 664 vlsi_ring_debug(r); 665 } 666 else 667 outw(0, ndev->base_addr+VLSI_PIO_PROMPT); 668 } 669 670 /* caller must have stopped the controller from busmastering */ 671 672 static void vlsi_unarm_rx(vlsi_irda_dev_t *idev) 673 { 674 struct net_device *ndev = pci_get_drvdata(idev->pdev); 675 struct vlsi_ring *r = idev->rx_ring; 676 struct ring_descr *rd; 677 int ret; 678 679 for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) { 680 681 ret = 0; 682 if (rd_is_active(rd)) { 683 rd_set_status(rd, 0); 684 if (rd_get_count(rd)) { 685 pr_debug("%s - dropping rx packet\n", __func__); 686 ret = -VLSI_RX_DROP; 687 } 688 rd_set_count(rd, 0); 689 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir); 690 if (rd->skb) { 691 dev_kfree_skb_any(rd->skb); 692 rd->skb = NULL; 693 } 694 } 695 else 696 ret = vlsi_process_rx(r, rd); 697 698 if (ret < 0) { 699 ret = -ret; 700 ndev->stats.rx_errors++; 701 if (ret & VLSI_RX_DROP) 702 ndev->stats.rx_dropped++; 703 if (ret & VLSI_RX_OVER) 704 ndev->stats.rx_over_errors++; 705 if (ret & VLSI_RX_LENGTH) 706 ndev->stats.rx_length_errors++; 707 if (ret & VLSI_RX_FRAME) 708 ndev->stats.rx_frame_errors++; 709 if (ret & VLSI_RX_CRC) 710 ndev->stats.rx_crc_errors++; 711 } 712 else if (ret > 0) { 713 ndev->stats.rx_packets++; 714 ndev->stats.rx_bytes += ret; 715 } 716 } 717 } 718 719 /********************************************************/ 720 721 static int vlsi_process_tx(struct vlsi_ring *r, struct ring_descr *rd) 722 { 723 u16 status; 724 int len; 725 int ret; 726 727 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir); 728 /* dma buffer now owned by the CPU */ 729 status = rd_get_status(rd); 730 if (status & RD_TX_UNDRN) 731 ret = VLSI_TX_FIFO; 732 else 733 ret = 0; 734 
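/* recycle the descriptor: clear status and count and release the skb,
 * which was only kept around for these tx-complete statistics
 */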
rd_set_status(rd, 0); 735 736 if (rd->skb) { 737 len = rd->skb->len; 738 dev_kfree_skb_any(rd->skb); 739 rd->skb = NULL; 740 } 741 else /* tx-skb already freed? - should never happen */ 742 len = rd_get_count(rd); /* incorrect for SIR! (due to wrapping) */ 743 744 rd_set_count(rd, 0); 745 /* dma buffer still owned by the CPU */ 746 747 return (ret) ? -ret : len; 748 } 749 750 static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase) 751 { 752 u16 nphyctl; 753 u16 config; 754 unsigned mode; 755 int ret; 756 int baudrate; 757 int fifocnt; 758 759 baudrate = idev->new_baud; 760 pr_debug("%s: %d -> %d\n", __func__, idev->baud, idev->new_baud); 761 if (baudrate == 4000000) { 762 mode = IFF_FIR; 763 config = IRCFG_FIR; 764 nphyctl = PHYCTL_FIR; 765 } 766 else if (baudrate == 1152000) { 767 mode = IFF_MIR; 768 config = IRCFG_MIR | IRCFG_CRC16; 769 nphyctl = PHYCTL_MIR(clksrc==3); 770 } 771 else { 772 mode = IFF_SIR; 773 config = IRCFG_SIR | IRCFG_SIRFILT | IRCFG_RXANY; 774 switch(baudrate) { 775 default: 776 net_warn_ratelimited("%s: undefined baudrate %d - fallback to 9600!\n", 777 __func__, baudrate); 778 baudrate = 9600; 779 /* fallthru */ 780 case 2400: 781 case 9600: 782 case 19200: 783 case 38400: 784 case 57600: 785 case 115200: 786 nphyctl = PHYCTL_SIR(baudrate,sirpulse,clksrc==3); 787 break; 788 } 789 } 790 config |= IRCFG_MSTR | IRCFG_ENRX; 791 792 fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 793 if (fifocnt != 0) { 794 pr_debug("%s: rx fifo not empty(%d)\n", __func__, fifocnt); 795 } 796 797 outw(0, iobase+VLSI_PIO_IRENABLE); 798 outw(config, iobase+VLSI_PIO_IRCFG); 799 outw(nphyctl, iobase+VLSI_PIO_NPHYCTL); 800 wmb(); 801 outw(IRENABLE_PHYANDCLOCK, iobase+VLSI_PIO_IRENABLE); 802 mb(); 803 804 udelay(1); /* chip applies IRCFG on next rising edge of its 8MHz clock */ 805 806 /* read back settings for validation */ 807 808 config = inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_MASK; 809 810 if (mode == IFF_FIR) 811 config ^= IRENABLE_FIR_ON; 812 else if (mode == IFF_MIR) 813 config ^= (IRENABLE_MIR_ON|IRENABLE_CRC16_ON); 814 else 815 config ^= IRENABLE_SIR_ON; 816 817 if (config != (IRENABLE_PHYANDCLOCK|IRENABLE_ENRXST)) { 818 net_warn_ratelimited("%s: failed to set %s mode!\n", 819 __func__, 820 mode == IFF_SIR ? "SIR" : 821 mode == IFF_MIR ? 
"MIR" : "FIR"); 822 ret = -1; 823 } 824 else { 825 if (inw(iobase+VLSI_PIO_PHYCTL) != nphyctl) { 826 net_warn_ratelimited("%s: failed to apply baudrate %d\n", 827 __func__, baudrate); 828 ret = -1; 829 } 830 else { 831 idev->mode = mode; 832 idev->baud = baudrate; 833 idev->new_baud = 0; 834 ret = 0; 835 } 836 } 837 838 if (ret) 839 vlsi_reg_debug(iobase,__func__); 840 841 return ret; 842 } 843 844 static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb, 845 struct net_device *ndev) 846 { 847 vlsi_irda_dev_t *idev = netdev_priv(ndev); 848 struct vlsi_ring *r = idev->tx_ring; 849 struct ring_descr *rd; 850 unsigned long flags; 851 unsigned iobase = ndev->base_addr; 852 u8 status; 853 u16 config; 854 int mtt, diff; 855 int len, speed; 856 char *msg = NULL; 857 858 speed = irda_get_next_speed(skb); 859 spin_lock_irqsave(&idev->lock, flags); 860 if (speed != -1 && speed != idev->baud) { 861 netif_stop_queue(ndev); 862 idev->new_baud = speed; 863 status = RD_TX_CLRENTX; /* stop tx-ring after this frame */ 864 } 865 else 866 status = 0; 867 868 if (skb->len == 0) { 869 /* handle zero packets - should be speed change */ 870 if (status == 0) { 871 msg = "bogus zero-length packet"; 872 goto drop_unlock; 873 } 874 875 /* due to the completely asynch tx operation we might have 876 * IrLAP racing with the hardware here, f.e. if the controller 877 * is just sending the last packet with current speed while 878 * the LAP is already switching the speed using synchronous 879 * len=0 packet. Immediate execution would lead to hw lockup 880 * requiring a powercycle to reset. Good candidate to trigger 881 * this is the final UA:RSP packet after receiving a DISC:CMD 882 * when getting the LAP down. 883 * Note that we are not protected by the queue_stop approach 884 * because the final UA:RSP arrives _without_ request to apply 885 * new-speed-after-this-packet - hence the driver doesn't know 886 * this was the last packet and doesn't stop the queue. So the 887 * forced switch to default speed from LAP gets through as fast 888 * as only some 10 usec later while the UA:RSP is still processed 889 * by the hardware and we would get screwed. 890 */ 891 892 if (ring_first(idev->tx_ring) == NULL) { 893 /* no race - tx-ring already empty */ 894 vlsi_set_baud(idev, iobase); 895 netif_wake_queue(ndev); 896 } 897 else 898 ; 899 /* keep the speed change pending like it would 900 * for any len>0 packet. tx completion interrupt 901 * will apply it when the tx ring becomes empty. 902 */ 903 spin_unlock_irqrestore(&idev->lock, flags); 904 dev_kfree_skb_any(skb); 905 return NETDEV_TX_OK; 906 } 907 908 /* sanity checks - simply drop the packet */ 909 910 rd = ring_last(r); 911 if (!rd) { 912 msg = "ring full, but queue wasn't stopped"; 913 goto drop_unlock; 914 } 915 916 if (rd_is_active(rd)) { 917 msg = "entry still owned by hw"; 918 goto drop_unlock; 919 } 920 921 if (!rd->buf) { 922 msg = "tx ring entry without pci buffer"; 923 goto drop_unlock; 924 } 925 926 if (rd->skb) { 927 msg = "ring entry with old skb still attached"; 928 goto drop_unlock; 929 } 930 931 /* no need for serialization or interrupt disable during mtt */ 932 spin_unlock_irqrestore(&idev->lock, flags); 933 934 if ((mtt = irda_get_mtt(skb)) > 0) { 935 diff = ktime_us_delta(ktime_get(), idev->last_rx); 936 if (mtt > diff) 937 udelay(mtt - diff); 938 /* must not sleep here - called under netif_tx_lock! 
*/ 939 } 940 941 /* tx buffer already owned by CPU due to pci_dma_sync_single_for_cpu() 942 * after subsequent tx-completion 943 */ 944 945 if (idev->mode == IFF_SIR) { 946 status |= RD_TX_DISCRC; /* no hw-crc creation */ 947 len = async_wrap_skb(skb, rd->buf, r->len); 948 949 /* Some rare worst case situation in SIR mode might lead to 950 * potential buffer overflow. The wrapper detects this, returns 951 * with a shortened frame (without FCS/EOF) but doesn't provide 952 * any error indication about the invalid packet which we are 953 * going to transmit. 954 * Therefore we log if the buffer got filled to the point, where the 955 * wrapper would abort, i.e. when there are less than 5 bytes left to 956 * allow appending the FCS/EOF. 957 */ 958 959 if (len >= r->len-5) 960 net_warn_ratelimited("%s: possible buffer overflow with SIR wrapping!\n", 961 __func__); 962 } 963 else { 964 /* hw deals with MIR/FIR mode wrapping */ 965 status |= RD_TX_PULSE; /* send 2 us highspeed indication pulse */ 966 len = skb->len; 967 if (len > r->len) { 968 msg = "frame exceeds tx buffer length"; 969 goto drop; 970 } 971 else 972 skb_copy_from_linear_data(skb, rd->buf, len); 973 } 974 975 rd->skb = skb; /* remember skb for tx-complete stats */ 976 977 rd_set_count(rd, len); 978 rd_set_status(rd, status); /* not yet active! */ 979 980 /* give dma buffer back to busmaster-hw (flush caches to make 981 * CPU-driven changes visible from the pci bus). 982 */ 983 984 pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir); 985 986 /* Switching to TX mode here races with the controller 987 * which may stop TX at any time when fetching an inactive descriptor 988 * or one with CLR_ENTX set. So we switch on TX only, if TX was not running 989 * _after_ the new descriptor was activated on the ring. This ensures 990 * we will either find TX already stopped or we can be sure, there 991 * will be a TX-complete interrupt even if the chip stopped doing 992 * TX just after we found it still running. The ISR will then find 993 * the non-empty ring and restart TX processing. The enclosing 994 * spinlock provides the correct serialization to prevent race with isr. 995 */ 996 997 spin_lock_irqsave(&idev->lock,flags); 998 999 rd_activate(rd); 1000 1001 if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) { 1002 int fifocnt; 1003 1004 fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 1005 if (fifocnt != 0) { 1006 pr_debug("%s: rx fifo not empty(%d)\n", 1007 __func__, fifocnt); 1008 } 1009 1010 config = inw(iobase+VLSI_PIO_IRCFG); 1011 mb(); 1012 outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG); 1013 wmb(); 1014 outw(0, iobase+VLSI_PIO_PROMPT); 1015 } 1016 1017 if (ring_put(r) == NULL) { 1018 netif_stop_queue(ndev); 1019 pr_debug("%s: tx ring full - queue stopped\n", __func__); 1020 } 1021 spin_unlock_irqrestore(&idev->lock, flags); 1022 1023 return NETDEV_TX_OK; 1024 1025 drop_unlock: 1026 spin_unlock_irqrestore(&idev->lock, flags); 1027 drop: 1028 net_warn_ratelimited("%s: dropping packet - %s\n", __func__, msg); 1029 dev_kfree_skb_any(skb); 1030 ndev->stats.tx_errors++; 1031 ndev->stats.tx_dropped++; 1032 /* Don't even think about returning NET_XMIT_DROP (=1) here! 
1033 * In fact any retval!=0 causes the packet scheduler to requeue the 1034 * packet for later retry of transmission - which isn't exactly 1035 * what we want after we've just called dev_kfree_skb_any ;-) 1036 */ 1037 return NETDEV_TX_OK; 1038 } 1039 1040 static void vlsi_tx_interrupt(struct net_device *ndev) 1041 { 1042 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1043 struct vlsi_ring *r = idev->tx_ring; 1044 struct ring_descr *rd; 1045 unsigned iobase; 1046 int ret; 1047 u16 config; 1048 1049 for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) { 1050 1051 if (rd_is_active(rd)) 1052 break; 1053 1054 ret = vlsi_process_tx(r, rd); 1055 1056 if (ret < 0) { 1057 ret = -ret; 1058 ndev->stats.tx_errors++; 1059 if (ret & VLSI_TX_DROP) 1060 ndev->stats.tx_dropped++; 1061 if (ret & VLSI_TX_FIFO) 1062 ndev->stats.tx_fifo_errors++; 1063 } 1064 else if (ret > 0){ 1065 ndev->stats.tx_packets++; 1066 ndev->stats.tx_bytes += ret; 1067 } 1068 } 1069 1070 iobase = ndev->base_addr; 1071 1072 if (idev->new_baud && rd == NULL) /* tx ring empty and speed change pending */ 1073 vlsi_set_baud(idev, iobase); 1074 1075 config = inw(iobase+VLSI_PIO_IRCFG); 1076 if (rd == NULL) /* tx ring empty: re-enable rx */ 1077 outw((config & ~IRCFG_ENTX) | IRCFG_ENRX, iobase+VLSI_PIO_IRCFG); 1078 1079 else if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) { 1080 int fifocnt; 1081 1082 fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 1083 if (fifocnt != 0) { 1084 pr_debug("%s: rx fifo not empty(%d)\n", 1085 __func__, fifocnt); 1086 } 1087 outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG); 1088 } 1089 1090 outw(0, iobase+VLSI_PIO_PROMPT); 1091 1092 if (netif_queue_stopped(ndev) && !idev->new_baud) { 1093 netif_wake_queue(ndev); 1094 pr_debug("%s: queue awoken\n", __func__); 1095 } 1096 } 1097 1098 /* caller must have stopped the controller from busmastering */ 1099 1100 static void vlsi_unarm_tx(vlsi_irda_dev_t *idev) 1101 { 1102 struct net_device *ndev = pci_get_drvdata(idev->pdev); 1103 struct vlsi_ring *r = idev->tx_ring; 1104 struct ring_descr *rd; 1105 int ret; 1106 1107 for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) { 1108 1109 ret = 0; 1110 if (rd_is_active(rd)) { 1111 rd_set_status(rd, 0); 1112 rd_set_count(rd, 0); 1113 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir); 1114 if (rd->skb) { 1115 dev_kfree_skb_any(rd->skb); 1116 rd->skb = NULL; 1117 } 1118 pr_debug("%s - dropping tx packet\n", __func__); 1119 ret = -VLSI_TX_DROP; 1120 } 1121 else 1122 ret = vlsi_process_tx(r, rd); 1123 1124 if (ret < 0) { 1125 ret = -ret; 1126 ndev->stats.tx_errors++; 1127 if (ret & VLSI_TX_DROP) 1128 ndev->stats.tx_dropped++; 1129 if (ret & VLSI_TX_FIFO) 1130 ndev->stats.tx_fifo_errors++; 1131 } 1132 else if (ret > 0){ 1133 ndev->stats.tx_packets++; 1134 ndev->stats.tx_bytes += ret; 1135 } 1136 } 1137 1138 } 1139 1140 /********************************************************/ 1141 1142 static int vlsi_start_clock(struct pci_dev *pdev) 1143 { 1144 u8 clkctl, lock; 1145 int i, count; 1146 1147 if (clksrc < 2) { /* auto or PLL: try PLL */ 1148 clkctl = CLKCTL_PD_INV | CLKCTL_CLKSTP; 1149 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1150 1151 /* procedure to detect PLL lock synchronisation: 1152 * after 0.5 msec initial delay we expect to find 3 PLL lock 1153 * indications within 10 msec for successful PLL detection. 
1154 */ 1155 udelay(500); 1156 count = 0; 1157 for (i = 500; i <= 10000; i += 50) { /* max 10 msec */ 1158 pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &lock); 1159 if (lock&CLKCTL_LOCK) { 1160 if (++count >= 3) 1161 break; 1162 } 1163 udelay(50); 1164 } 1165 if (count < 3) { 1166 if (clksrc == 1) { /* explicitly asked for PLL hence bail out */ 1167 net_err_ratelimited("%s: no PLL or failed to lock!\n", 1168 __func__); 1169 clkctl = CLKCTL_CLKSTP; 1170 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1171 return -1; 1172 } 1173 else /* was: clksrc=0(auto) */ 1174 clksrc = 3; /* fallback to 40MHz XCLK (OB800) */ 1175 1176 pr_debug("%s: PLL not locked, fallback to clksrc=%d\n", 1177 __func__, clksrc); 1178 } 1179 else 1180 clksrc = 1; /* got successful PLL lock */ 1181 } 1182 1183 if (clksrc != 1) { 1184 /* we get here if either no PLL detected in auto-mode or 1185 an external clock source was explicitly specified */ 1186 1187 clkctl = CLKCTL_EXTCLK | CLKCTL_CLKSTP; 1188 if (clksrc == 3) 1189 clkctl |= CLKCTL_XCKSEL; 1190 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1191 1192 /* no way to test for working XCLK */ 1193 } 1194 else 1195 pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl); 1196 1197 /* ok, now going to connect the chip with the clock source */ 1198 1199 clkctl &= ~CLKCTL_CLKSTP; 1200 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1201 1202 return 0; 1203 } 1204 1205 static void vlsi_stop_clock(struct pci_dev *pdev) 1206 { 1207 u8 clkctl; 1208 1209 /* disconnect chip from clock source */ 1210 pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl); 1211 clkctl |= CLKCTL_CLKSTP; 1212 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1213 1214 /* disable all clock sources */ 1215 clkctl &= ~(CLKCTL_EXTCLK | CLKCTL_PD_INV); 1216 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1217 } 1218 1219 /********************************************************/ 1220 1221 /* writing all-zero to the VLSI PCI IO register area seems to prevent 1222 * some occasional situations where the hardware fails (symptoms are 1223 * what appears as stalled tx/rx state machines, i.e. everything ok for 1224 * receive or transmit but hw makes no progress or is unable to access 1225 * the bus memory locations). 1226 * Best place to call this is immediately after/before the internal clock 1227 * gets started/stopped. 
1228 */ 1229 1230 static inline void vlsi_clear_regs(unsigned iobase) 1231 { 1232 unsigned i; 1233 const unsigned chip_io_extent = 32; 1234 1235 for (i = 0; i < chip_io_extent; i += sizeof(u16)) 1236 outw(0, iobase + i); 1237 } 1238 1239 static int vlsi_init_chip(struct pci_dev *pdev) 1240 { 1241 struct net_device *ndev = pci_get_drvdata(pdev); 1242 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1243 unsigned iobase; 1244 u16 ptr; 1245 1246 /* start the clock and clean the registers */ 1247 1248 if (vlsi_start_clock(pdev)) { 1249 net_err_ratelimited("%s: no valid clock source\n", __func__); 1250 return -1; 1251 } 1252 iobase = ndev->base_addr; 1253 vlsi_clear_regs(iobase); 1254 1255 outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* w/c pending IRQ, disable all INT */ 1256 1257 outw(0, iobase+VLSI_PIO_IRENABLE); /* disable IrPHY-interface */ 1258 1259 /* disable everything, particularly IRCFG_MSTR - (also resetting the RING_PTR) */ 1260 1261 outw(0, iobase+VLSI_PIO_IRCFG); 1262 wmb(); 1263 1264 outw(MAX_PACKET_LENGTH, iobase+VLSI_PIO_MAXPKT); /* max possible value=0x0fff */ 1265 1266 outw(BUS_TO_RINGBASE(idev->busaddr), iobase+VLSI_PIO_RINGBASE); 1267 1268 outw(TX_RX_TO_RINGSIZE(idev->tx_ring->size, idev->rx_ring->size), 1269 iobase+VLSI_PIO_RINGSIZE); 1270 1271 ptr = inw(iobase+VLSI_PIO_RINGPTR); 1272 atomic_set(&idev->rx_ring->head, RINGPTR_GET_RX(ptr)); 1273 atomic_set(&idev->rx_ring->tail, RINGPTR_GET_RX(ptr)); 1274 atomic_set(&idev->tx_ring->head, RINGPTR_GET_TX(ptr)); 1275 atomic_set(&idev->tx_ring->tail, RINGPTR_GET_TX(ptr)); 1276 1277 vlsi_set_baud(idev, iobase); /* idev->new_baud used as provided by caller */ 1278 1279 outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* just in case - w/c pending IRQ's */ 1280 wmb(); 1281 1282 /* DO NOT BLINDLY ENABLE IRINTR_ACTEN! 
1283 * basically every received pulse fires an ACTIVITY-INT 1284 * leading to >>1000 INT's per second instead of few 10 1285 */ 1286 1287 outb(IRINTR_RPKTEN|IRINTR_TPKTEN, iobase+VLSI_PIO_IRINTR); 1288 1289 return 0; 1290 } 1291 1292 static int vlsi_start_hw(vlsi_irda_dev_t *idev) 1293 { 1294 struct pci_dev *pdev = idev->pdev; 1295 struct net_device *ndev = pci_get_drvdata(pdev); 1296 unsigned iobase = ndev->base_addr; 1297 u8 byte; 1298 1299 /* we don't use the legacy UART, disable its address decoding */ 1300 1301 pci_read_config_byte(pdev, VLSI_PCI_IRMISC, &byte); 1302 byte &= ~(IRMISC_UARTEN | IRMISC_UARTTST); 1303 pci_write_config_byte(pdev, VLSI_PCI_IRMISC, byte); 1304 1305 /* enable PCI busmaster access to our 16MB page */ 1306 1307 pci_write_config_byte(pdev, VLSI_PCI_MSTRPAGE, MSTRPAGE_VALUE); 1308 pci_set_master(pdev); 1309 1310 if (vlsi_init_chip(pdev) < 0) { 1311 pci_disable_device(pdev); 1312 return -1; 1313 } 1314 1315 vlsi_fill_rx(idev->rx_ring); 1316 1317 idev->last_rx = ktime_get(); /* first mtt may start from now on */ 1318 1319 outw(0, iobase+VLSI_PIO_PROMPT); /* kick hw state machine */ 1320 1321 return 0; 1322 } 1323 1324 static int vlsi_stop_hw(vlsi_irda_dev_t *idev) 1325 { 1326 struct pci_dev *pdev = idev->pdev; 1327 struct net_device *ndev = pci_get_drvdata(pdev); 1328 unsigned iobase = ndev->base_addr; 1329 unsigned long flags; 1330 1331 spin_lock_irqsave(&idev->lock,flags); 1332 outw(0, iobase+VLSI_PIO_IRENABLE); 1333 outw(0, iobase+VLSI_PIO_IRCFG); /* disable everything */ 1334 1335 /* disable and w/c irqs */ 1336 outb(0, iobase+VLSI_PIO_IRINTR); 1337 wmb(); 1338 outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); 1339 spin_unlock_irqrestore(&idev->lock,flags); 1340 1341 vlsi_unarm_tx(idev); 1342 vlsi_unarm_rx(idev); 1343 1344 vlsi_clear_regs(iobase); 1345 vlsi_stop_clock(pdev); 1346 1347 pci_disable_device(pdev); 1348 1349 return 0; 1350 } 1351 1352 /**************************************************************/ 1353 1354 static void vlsi_tx_timeout(struct net_device *ndev) 1355 { 1356 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1357 1358 1359 vlsi_reg_debug(ndev->base_addr, __func__); 1360 vlsi_ring_debug(idev->tx_ring); 1361 1362 if (netif_running(ndev)) 1363 netif_stop_queue(ndev); 1364 1365 vlsi_stop_hw(idev); 1366 1367 /* now simply restart the whole thing */ 1368 1369 if (!idev->new_baud) 1370 idev->new_baud = idev->baud; /* keep current baudrate */ 1371 1372 if (vlsi_start_hw(idev)) 1373 net_err_ratelimited("%s: failed to restart hw - %s(%s) unusable!\n", 1374 __func__, pci_name(idev->pdev), ndev->name); 1375 else 1376 netif_start_queue(ndev); 1377 } 1378 1379 static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) 1380 { 1381 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1382 struct if_irda_req *irq = (struct if_irda_req *) rq; 1383 unsigned long flags; 1384 u16 fifocnt; 1385 int ret = 0; 1386 1387 switch (cmd) { 1388 case SIOCSBANDWIDTH: 1389 if (!capable(CAP_NET_ADMIN)) { 1390 ret = -EPERM; 1391 break; 1392 } 1393 spin_lock_irqsave(&idev->lock, flags); 1394 idev->new_baud = irq->ifr_baudrate; 1395 /* when called from userland there might be a minor race window here 1396 * if the stack tries to change speed concurrently - which would be 1397 * pretty strange anyway with the userland having full control... 
1398 */ 1399 vlsi_set_baud(idev, ndev->base_addr); 1400 spin_unlock_irqrestore(&idev->lock, flags); 1401 break; 1402 case SIOCSMEDIABUSY: 1403 if (!capable(CAP_NET_ADMIN)) { 1404 ret = -EPERM; 1405 break; 1406 } 1407 irda_device_set_media_busy(ndev, TRUE); 1408 break; 1409 case SIOCGRECEIVING: 1410 /* the best we can do: check whether there are any bytes in rx fifo. 1411 * The trustable window (in case some data arrives just afterwards) 1412 * may be as short as 1usec or so at 4Mbps. 1413 */ 1414 fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 1415 irq->ifr_receiving = (fifocnt!=0) ? 1 : 0; 1416 break; 1417 default: 1418 net_warn_ratelimited("%s: notsupp - cmd=%04x\n", 1419 __func__, cmd); 1420 ret = -EOPNOTSUPP; 1421 } 1422 1423 return ret; 1424 } 1425 1426 /********************************************************/ 1427 1428 static irqreturn_t vlsi_interrupt(int irq, void *dev_instance) 1429 { 1430 struct net_device *ndev = dev_instance; 1431 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1432 unsigned iobase; 1433 u8 irintr; 1434 int boguscount = 5; 1435 unsigned long flags; 1436 int handled = 0; 1437 1438 iobase = ndev->base_addr; 1439 spin_lock_irqsave(&idev->lock,flags); 1440 do { 1441 irintr = inb(iobase+VLSI_PIO_IRINTR); 1442 mb(); 1443 outb(irintr, iobase+VLSI_PIO_IRINTR); /* acknowledge asap */ 1444 1445 if (!(irintr&=IRINTR_INT_MASK)) /* not our INT - probably shared */ 1446 break; 1447 1448 handled = 1; 1449 1450 if (unlikely(!(irintr & ~IRINTR_ACTIVITY))) 1451 break; /* nothing todo if only activity */ 1452 1453 if (irintr&IRINTR_RPKTINT) 1454 vlsi_rx_interrupt(ndev); 1455 1456 if (irintr&IRINTR_TPKTINT) 1457 vlsi_tx_interrupt(ndev); 1458 1459 } while (--boguscount > 0); 1460 spin_unlock_irqrestore(&idev->lock,flags); 1461 1462 if (boguscount <= 0) 1463 net_info_ratelimited("%s: too much work in interrupt!\n", 1464 __func__); 1465 return IRQ_RETVAL(handled); 1466 } 1467 1468 /********************************************************/ 1469 1470 static int vlsi_open(struct net_device *ndev) 1471 { 1472 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1473 int err = -EAGAIN; 1474 char hwname[32]; 1475 1476 if (pci_request_regions(idev->pdev, drivername)) { 1477 net_warn_ratelimited("%s: io resource busy\n", __func__); 1478 goto errout; 1479 } 1480 ndev->base_addr = pci_resource_start(idev->pdev,0); 1481 ndev->irq = idev->pdev->irq; 1482 1483 /* under some rare occasions the chip apparently comes up with 1484 * IRQ's pending. 
We better w/c pending IRQ and disable them all 1485 */ 1486 1487 outb(IRINTR_INT_MASK, ndev->base_addr+VLSI_PIO_IRINTR); 1488 1489 if (request_irq(ndev->irq, vlsi_interrupt, IRQF_SHARED, 1490 drivername, ndev)) { 1491 net_warn_ratelimited("%s: couldn't get IRQ: %d\n", 1492 __func__, ndev->irq); 1493 goto errout_io; 1494 } 1495 1496 if ((err = vlsi_create_hwif(idev)) != 0) 1497 goto errout_irq; 1498 1499 sprintf(hwname, "VLSI-FIR @ 0x%04x", (unsigned)ndev->base_addr); 1500 idev->irlap = irlap_open(ndev,&idev->qos,hwname); 1501 if (!idev->irlap) 1502 goto errout_free_ring; 1503 1504 idev->last_rx = ktime_get(); /* first mtt may start from now on */ 1505 1506 idev->new_baud = 9600; /* start with IrPHY using 9600(SIR) mode */ 1507 1508 if ((err = vlsi_start_hw(idev)) != 0) 1509 goto errout_close_irlap; 1510 1511 netif_start_queue(ndev); 1512 1513 net_info_ratelimited("%s: device %s operational\n", 1514 __func__, ndev->name); 1515 1516 return 0; 1517 1518 errout_close_irlap: 1519 irlap_close(idev->irlap); 1520 errout_free_ring: 1521 vlsi_destroy_hwif(idev); 1522 errout_irq: 1523 free_irq(ndev->irq,ndev); 1524 errout_io: 1525 pci_release_regions(idev->pdev); 1526 errout: 1527 return err; 1528 } 1529 1530 static int vlsi_close(struct net_device *ndev) 1531 { 1532 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1533 1534 netif_stop_queue(ndev); 1535 1536 if (idev->irlap) 1537 irlap_close(idev->irlap); 1538 idev->irlap = NULL; 1539 1540 vlsi_stop_hw(idev); 1541 1542 vlsi_destroy_hwif(idev); 1543 1544 free_irq(ndev->irq,ndev); 1545 1546 pci_release_regions(idev->pdev); 1547 1548 net_info_ratelimited("%s: device %s stopped\n", __func__, ndev->name); 1549 1550 return 0; 1551 } 1552 1553 static const struct net_device_ops vlsi_netdev_ops = { 1554 .ndo_open = vlsi_open, 1555 .ndo_stop = vlsi_close, 1556 .ndo_start_xmit = vlsi_hard_start_xmit, 1557 .ndo_do_ioctl = vlsi_ioctl, 1558 .ndo_tx_timeout = vlsi_tx_timeout, 1559 }; 1560 1561 static int vlsi_irda_init(struct net_device *ndev) 1562 { 1563 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1564 struct pci_dev *pdev = idev->pdev; 1565 1566 ndev->irq = pdev->irq; 1567 ndev->base_addr = pci_resource_start(pdev,0); 1568 1569 /* PCI busmastering 1570 * see include file for details why we need these 2 masks, in this order! 1571 */ 1572 1573 if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW) || 1574 pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) { 1575 net_err_ratelimited("%s: aborting due to PCI BM-DMA address limitations\n", 1576 __func__); 1577 return -1; 1578 } 1579 1580 irda_init_max_qos_capabilies(&idev->qos); 1581 1582 /* the VLSI82C147 does not support 576000! */ 1583 1584 idev->qos.baud_rate.bits = IR_2400 | IR_9600 1585 | IR_19200 | IR_38400 | IR_57600 | IR_115200 1586 | IR_1152000 | (IR_4000000 << 8); 1587 1588 idev->qos.min_turn_time.bits = qos_mtt_bits; 1589 1590 irda_qos_bits_to_value(&idev->qos); 1591 1592 /* currently no public media definitions for IrDA */ 1593 1594 ndev->flags |= IFF_PORTSEL | IFF_AUTOMEDIA; 1595 ndev->if_port = IF_PORT_UNKNOWN; 1596 1597 ndev->netdev_ops = &vlsi_netdev_ops; 1598 ndev->watchdog_timeo = 500*HZ/1000; /* max. 
allowed turn time for IrLAP */ 1599 1600 SET_NETDEV_DEV(ndev, &pdev->dev); 1601 1602 return 0; 1603 } 1604 1605 /**************************************************************/ 1606 1607 static int 1608 vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1609 { 1610 struct net_device *ndev; 1611 vlsi_irda_dev_t *idev; 1612 1613 if (pci_enable_device(pdev)) 1614 goto out; 1615 else 1616 pdev->current_state = 0; /* hw must be running now */ 1617 1618 net_info_ratelimited("%s: IrDA PCI controller %s detected\n", 1619 drivername, pci_name(pdev)); 1620 1621 if ( !pci_resource_start(pdev,0) || 1622 !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) { 1623 net_err_ratelimited("%s: bar 0 invalid", __func__); 1624 goto out_disable; 1625 } 1626 1627 ndev = alloc_irdadev(sizeof(*idev)); 1628 if (ndev==NULL) { 1629 net_err_ratelimited("%s: Unable to allocate device memory.\n", 1630 __func__); 1631 goto out_disable; 1632 } 1633 1634 idev = netdev_priv(ndev); 1635 1636 spin_lock_init(&idev->lock); 1637 mutex_init(&idev->mtx); 1638 mutex_lock(&idev->mtx); 1639 idev->pdev = pdev; 1640 1641 if (vlsi_irda_init(ndev) < 0) 1642 goto out_freedev; 1643 1644 if (register_netdev(ndev) < 0) { 1645 net_err_ratelimited("%s: register_netdev failed\n", __func__); 1646 goto out_freedev; 1647 } 1648 1649 if (vlsi_proc_root != NULL) { 1650 struct proc_dir_entry *ent; 1651 1652 ent = proc_create_data(ndev->name, S_IFREG|S_IRUGO, 1653 vlsi_proc_root, VLSI_PROC_FOPS, ndev); 1654 if (!ent) { 1655 net_warn_ratelimited("%s: failed to create proc entry\n", 1656 __func__); 1657 } else { 1658 proc_set_size(ent, 0); 1659 } 1660 idev->proc_entry = ent; 1661 } 1662 net_info_ratelimited("%s: registered device %s\n", 1663 drivername, ndev->name); 1664 1665 pci_set_drvdata(pdev, ndev); 1666 mutex_unlock(&idev->mtx); 1667 1668 return 0; 1669 1670 out_freedev: 1671 mutex_unlock(&idev->mtx); 1672 free_netdev(ndev); 1673 out_disable: 1674 pci_disable_device(pdev); 1675 out: 1676 return -ENODEV; 1677 } 1678 1679 static void vlsi_irda_remove(struct pci_dev *pdev) 1680 { 1681 struct net_device *ndev = pci_get_drvdata(pdev); 1682 vlsi_irda_dev_t *idev; 1683 1684 if (!ndev) { 1685 net_err_ratelimited("%s: lost netdevice?\n", drivername); 1686 return; 1687 } 1688 1689 unregister_netdev(ndev); 1690 1691 idev = netdev_priv(ndev); 1692 mutex_lock(&idev->mtx); 1693 if (idev->proc_entry) { 1694 remove_proc_entry(ndev->name, vlsi_proc_root); 1695 idev->proc_entry = NULL; 1696 } 1697 mutex_unlock(&idev->mtx); 1698 1699 free_netdev(ndev); 1700 1701 net_info_ratelimited("%s: %s removed\n", drivername, pci_name(pdev)); 1702 } 1703 1704 #ifdef CONFIG_PM 1705 1706 /* The Controller doesn't provide PCI PM capabilities as defined by PCI specs. 1707 * Some of the Linux PCI-PM code however depends on this, for example in 1708 * pci_set_power_state(). So we have to take care to perform the required 1709 * operations on our own (particularly reflecting the pdev->current_state) 1710 * otherwise we might get cheated by pci-pm. 
1711 */ 1712 1713 1714 static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state) 1715 { 1716 struct net_device *ndev = pci_get_drvdata(pdev); 1717 vlsi_irda_dev_t *idev; 1718 1719 if (!ndev) { 1720 net_err_ratelimited("%s - %s: no netdevice\n", 1721 __func__, pci_name(pdev)); 1722 return 0; 1723 } 1724 idev = netdev_priv(ndev); 1725 mutex_lock(&idev->mtx); 1726 if (pdev->current_state != 0) { /* already suspended */ 1727 if (state.event > pdev->current_state) { /* simply go deeper */ 1728 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 1729 pdev->current_state = state.event; 1730 } 1731 else 1732 net_err_ratelimited("%s - %s: invalid suspend request %u -> %u\n", 1733 __func__, pci_name(pdev), 1734 pdev->current_state, state.event); 1735 mutex_unlock(&idev->mtx); 1736 return 0; 1737 } 1738 1739 if (netif_running(ndev)) { 1740 netif_device_detach(ndev); 1741 vlsi_stop_hw(idev); 1742 pci_save_state(pdev); 1743 if (!idev->new_baud) 1744 /* remember speed settings to restore on resume */ 1745 idev->new_baud = idev->baud; 1746 } 1747 1748 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 1749 pdev->current_state = state.event; 1750 idev->resume_ok = 1; 1751 mutex_unlock(&idev->mtx); 1752 return 0; 1753 } 1754 1755 static int vlsi_irda_resume(struct pci_dev *pdev) 1756 { 1757 struct net_device *ndev = pci_get_drvdata(pdev); 1758 vlsi_irda_dev_t *idev; 1759 1760 if (!ndev) { 1761 net_err_ratelimited("%s - %s: no netdevice\n", 1762 __func__, pci_name(pdev)); 1763 return 0; 1764 } 1765 idev = netdev_priv(ndev); 1766 mutex_lock(&idev->mtx); 1767 if (pdev->current_state == 0) { 1768 mutex_unlock(&idev->mtx); 1769 net_warn_ratelimited("%s - %s: already resumed\n", 1770 __func__, pci_name(pdev)); 1771 return 0; 1772 } 1773 1774 pci_set_power_state(pdev, PCI_D0); 1775 pdev->current_state = PM_EVENT_ON; 1776 1777 if (!idev->resume_ok) { 1778 /* should be obsolete now - but used to happen due to: 1779 * - pci layer initially setting pdev->current_state = 4 (unknown) 1780 * - pci layer did not walk the save_state-tree (might be APM problem) 1781 * so we could not refuse to suspend from undefined state 1782 * - vlsi_irda_suspend detected invalid state and refused to save 1783 * configuration for resume - but was too late to stop suspending 1784 * - vlsi_irda_resume got screwed when trying to resume from garbage 1785 * 1786 * now we explicitly set pdev->current_state = 0 after enabling the 1787 * device and independently resume_ok should catch any garbage config. 
1788 */ 1789 net_warn_ratelimited("%s - hm, nothing to resume?\n", __func__); 1790 mutex_unlock(&idev->mtx); 1791 return 0; 1792 } 1793 1794 if (netif_running(ndev)) { 1795 pci_restore_state(pdev); 1796 vlsi_start_hw(idev); 1797 netif_device_attach(ndev); 1798 } 1799 idev->resume_ok = 0; 1800 mutex_unlock(&idev->mtx); 1801 return 0; 1802 } 1803 1804 #endif /* CONFIG_PM */ 1805 1806 /*********************************************************/ 1807 1808 static struct pci_driver vlsi_irda_driver = { 1809 .name = drivername, 1810 .id_table = vlsi_irda_table, 1811 .probe = vlsi_irda_probe, 1812 .remove = vlsi_irda_remove, 1813 #ifdef CONFIG_PM 1814 .suspend = vlsi_irda_suspend, 1815 .resume = vlsi_irda_resume, 1816 #endif 1817 }; 1818 1819 #define PROC_DIR ("driver/" DRIVER_NAME) 1820 1821 static int __init vlsi_mod_init(void) 1822 { 1823 int i, ret; 1824 1825 if (clksrc < 0 || clksrc > 3) { 1826 net_err_ratelimited("%s: invalid clksrc=%d\n", 1827 drivername, clksrc); 1828 return -1; 1829 } 1830 1831 for (i = 0; i < 2; i++) { 1832 switch(ringsize[i]) { 1833 case 4: 1834 case 8: 1835 case 16: 1836 case 32: 1837 case 64: 1838 break; 1839 default: 1840 net_warn_ratelimited("%s: invalid %s ringsize %d, using default=8\n", 1841 drivername, 1842 i ? "rx" : "tx", 1843 ringsize[i]); 1844 ringsize[i] = 8; 1845 break; 1846 } 1847 } 1848 1849 sirpulse = !!sirpulse; 1850 1851 /* proc_mkdir returns NULL if !CONFIG_PROC_FS. 1852 * Failure to create the procfs entry is handled like running 1853 * without procfs - it's not required for the driver to work. 1854 */ 1855 vlsi_proc_root = proc_mkdir(PROC_DIR, NULL); 1856 1857 ret = pci_register_driver(&vlsi_irda_driver); 1858 1859 if (ret && vlsi_proc_root) 1860 remove_proc_entry(PROC_DIR, NULL); 1861 return ret; 1862 1863 } 1864 1865 static void __exit vlsi_mod_exit(void) 1866 { 1867 pci_unregister_driver(&vlsi_irda_driver); 1868 if (vlsi_proc_root) 1869 remove_proc_entry(PROC_DIR, NULL); 1870 } 1871 1872 module_init(vlsi_mod_init); 1873 module_exit(vlsi_mod_exit); 1874 1875 1876 1877 1878 1879 /* LDV_COMMENT_BEGIN_MAIN */ 1880 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful 1881 1882 /*###########################################################################*/ 1883 1884 /*############## Driver Environment Generator 0.2 output ####################*/ 1885 1886 /*###########################################################################*/ 1887 1888 1889 1890 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */ 1891 void ldv_check_final_state(void); 1892 1893 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */ 1894 void ldv_check_return_value(int res); 1895 1896 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */ 1897 void ldv_check_return_value_probe(int res); 1898 1899 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */ 1900 void ldv_initialize(void); 1901 1902 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */ 1903 void ldv_handler_precall(void); 1904 1905 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */ 1906 int nondet_int(void); 1907 1908 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. 
*/ 1909 int LDV_IN_INTERRUPT; 1910 1911 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */ 1912 void ldv_main0_sequence_infinite_withcheck_stateful(void) { 1913 1914 1915 1916 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */ 1917 /*============================= VARIABLE DECLARATION PART =============================*/ 1918 /** STRUCT: struct type: file_operations, struct name: vlsi_proc_fops **/ 1919 /* content: static int vlsi_seq_open(struct inode *inode, struct file *file)*/ 1920 /* LDV_COMMENT_BEGIN_PREP */ 1921 #define DRIVER_NAME "vlsi_ir" 1922 #define DRIVER_VERSION "v0.5" 1923 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 1924 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>" 1925 #ifdef CONFIG_PROC_FS 1926 /* LDV_COMMENT_END_PREP */ 1927 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_seq_open" */ 1928 struct inode * var_group1; 1929 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_seq_open" */ 1930 struct file * var_group2; 1931 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "vlsi_seq_open" */ 1932 static int res_vlsi_seq_open_6; 1933 /* LDV_COMMENT_BEGIN_PREP */ 1934 #define VLSI_PROC_FOPS (&vlsi_proc_fops) 1935 #else 1936 #define VLSI_PROC_FOPS NULL 1937 #endif 1938 #ifdef CONFIG_PM 1939 #endif 1940 #ifdef CONFIG_PM 1941 #endif 1942 #define PROC_DIR ("driver/" DRIVER_NAME) 1943 /* LDV_COMMENT_END_PREP */ 1944 1945 /** STRUCT: struct type: net_device_ops, struct name: vlsi_netdev_ops **/ 1946 /* content: static int vlsi_open(struct net_device *ndev)*/ 1947 /* LDV_COMMENT_BEGIN_PREP */ 1948 #define DRIVER_NAME "vlsi_ir" 1949 #define DRIVER_VERSION "v0.5" 1950 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 1951 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>" 1952 #ifdef CONFIG_PROC_FS 1953 #define VLSI_PROC_FOPS (&vlsi_proc_fops) 1954 #else 1955 #define VLSI_PROC_FOPS NULL 1956 #endif 1957 /* LDV_COMMENT_END_PREP */ 1958 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_open" */ 1959 struct net_device * var_group3; 1960 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "vlsi_open" */ 1961 static int res_vlsi_open_29; 1962 /* LDV_COMMENT_BEGIN_PREP */ 1963 #ifdef CONFIG_PM 1964 #endif 1965 #ifdef CONFIG_PM 1966 #endif 1967 #define PROC_DIR ("driver/" DRIVER_NAME) 1968 /* LDV_COMMENT_END_PREP */ 1969 /* content: static int vlsi_close(struct net_device *ndev)*/ 1970 /* LDV_COMMENT_BEGIN_PREP */ 1971 #define DRIVER_NAME "vlsi_ir" 1972 #define DRIVER_VERSION "v0.5" 1973 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 1974 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>" 1975 #ifdef CONFIG_PROC_FS 1976 #define VLSI_PROC_FOPS (&vlsi_proc_fops) 1977 #else 1978 #define VLSI_PROC_FOPS NULL 1979 #endif 1980 /* LDV_COMMENT_END_PREP */ 1981 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "vlsi_close" */ 1982 static int res_vlsi_close_30; 1983 /* LDV_COMMENT_BEGIN_PREP */ 1984 #ifdef CONFIG_PM 1985 #endif 1986 #ifdef CONFIG_PM 1987 #endif 1988 #define PROC_DIR ("driver/" DRIVER_NAME) 1989 /* LDV_COMMENT_END_PREP */ 1990 /* content: static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)*/ 1991 /* LDV_COMMENT_BEGIN_PREP */ 1992 #define DRIVER_NAME "vlsi_ir" 1993 #define DRIVER_VERSION "v0.5" 1994 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 1995 #define DRIVER_AUTHOR 
"Martin Diehl <info@mdiehl.de>" 1996 #ifdef CONFIG_PROC_FS 1997 #define VLSI_PROC_FOPS (&vlsi_proc_fops) 1998 #else 1999 #define VLSI_PROC_FOPS NULL 2000 #endif 2001 /* LDV_COMMENT_END_PREP */ 2002 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_hard_start_xmit" */ 2003 struct sk_buff * var_group4; 2004 /* LDV_COMMENT_BEGIN_PREP */ 2005 #ifdef CONFIG_PM 2006 #endif 2007 #ifdef CONFIG_PM 2008 #endif 2009 #define PROC_DIR ("driver/" DRIVER_NAME) 2010 /* LDV_COMMENT_END_PREP */ 2011 /* content: static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)*/ 2012 /* LDV_COMMENT_BEGIN_PREP */ 2013 #define DRIVER_NAME "vlsi_ir" 2014 #define DRIVER_VERSION "v0.5" 2015 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 2016 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>" 2017 #ifdef CONFIG_PROC_FS 2018 #define VLSI_PROC_FOPS (&vlsi_proc_fops) 2019 #else 2020 #define VLSI_PROC_FOPS NULL 2021 #endif 2022 /* LDV_COMMENT_END_PREP */ 2023 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_ioctl" */ 2024 struct ifreq * var_group5; 2025 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_ioctl" */ 2026 int var_vlsi_ioctl_27_p2; 2027 /* LDV_COMMENT_BEGIN_PREP */ 2028 #ifdef CONFIG_PM 2029 #endif 2030 #ifdef CONFIG_PM 2031 #endif 2032 #define PROC_DIR ("driver/" DRIVER_NAME) 2033 /* LDV_COMMENT_END_PREP */ 2034 /* content: static void vlsi_tx_timeout(struct net_device *ndev)*/ 2035 /* LDV_COMMENT_BEGIN_PREP */ 2036 #define DRIVER_NAME "vlsi_ir" 2037 #define DRIVER_VERSION "v0.5" 2038 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 2039 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>" 2040 #ifdef CONFIG_PROC_FS 2041 #define VLSI_PROC_FOPS (&vlsi_proc_fops) 2042 #else 2043 #define VLSI_PROC_FOPS NULL 2044 #endif 2045 /* LDV_COMMENT_END_PREP */ 2046 /* LDV_COMMENT_BEGIN_PREP */ 2047 #ifdef CONFIG_PM 2048 #endif 2049 #ifdef CONFIG_PM 2050 #endif 2051 #define PROC_DIR ("driver/" DRIVER_NAME) 2052 /* LDV_COMMENT_END_PREP */ 2053 2054 /** STRUCT: struct type: pci_driver, struct name: vlsi_irda_driver **/ 2055 /* content: static int vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)*/ 2056 /* LDV_COMMENT_BEGIN_PREP */ 2057 #define DRIVER_NAME "vlsi_ir" 2058 #define DRIVER_VERSION "v0.5" 2059 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 2060 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>" 2061 #ifdef CONFIG_PROC_FS 2062 #define VLSI_PROC_FOPS (&vlsi_proc_fops) 2063 #else 2064 #define VLSI_PROC_FOPS NULL 2065 #endif 2066 /* LDV_COMMENT_END_PREP */ 2067 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_irda_probe" */ 2068 struct pci_dev * var_group6; 2069 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_irda_probe" */ 2070 const struct pci_device_id * var_vlsi_irda_probe_32_p1; 2071 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "vlsi_irda_probe" */ 2072 static int res_vlsi_irda_probe_32; 2073 /* LDV_COMMENT_BEGIN_PREP */ 2074 #ifdef CONFIG_PM 2075 #endif 2076 #ifdef CONFIG_PM 2077 #endif 2078 #define PROC_DIR ("driver/" DRIVER_NAME) 2079 /* LDV_COMMENT_END_PREP */ 2080 /* content: static void vlsi_irda_remove(struct pci_dev *pdev)*/ 2081 /* LDV_COMMENT_BEGIN_PREP */ 2082 #define DRIVER_NAME "vlsi_ir" 2083 #define DRIVER_VERSION "v0.5" 2084 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 2085 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>" 2086 #ifdef 
CONFIG_PROC_FS 2087 #define VLSI_PROC_FOPS (&vlsi_proc_fops) 2088 #else 2089 #define VLSI_PROC_FOPS NULL 2090 #endif 2091 /* LDV_COMMENT_END_PREP */ 2092 /* LDV_COMMENT_BEGIN_PREP */ 2093 #ifdef CONFIG_PM 2094 #endif 2095 #ifdef CONFIG_PM 2096 #endif 2097 #define PROC_DIR ("driver/" DRIVER_NAME) 2098 /* LDV_COMMENT_END_PREP */ 2099 /* content: static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)*/ 2100 /* LDV_COMMENT_BEGIN_PREP */ 2101 #define DRIVER_NAME "vlsi_ir" 2102 #define DRIVER_VERSION "v0.5" 2103 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 2104 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>" 2105 #ifdef CONFIG_PROC_FS 2106 #define VLSI_PROC_FOPS (&vlsi_proc_fops) 2107 #else 2108 #define VLSI_PROC_FOPS NULL 2109 #endif 2110 #ifdef CONFIG_PM 2111 /* LDV_COMMENT_END_PREP */ 2112 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_irda_suspend" */ 2113 pm_message_t var_vlsi_irda_suspend_34_p1; 2114 /* LDV_COMMENT_BEGIN_PREP */ 2115 #endif 2116 #ifdef CONFIG_PM 2117 #endif 2118 #define PROC_DIR ("driver/" DRIVER_NAME) 2119 /* LDV_COMMENT_END_PREP */ 2120 /* content: static int vlsi_irda_resume(struct pci_dev *pdev)*/ 2121 /* LDV_COMMENT_BEGIN_PREP */ 2122 #define DRIVER_NAME "vlsi_ir" 2123 #define DRIVER_VERSION "v0.5" 2124 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 2125 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>" 2126 #ifdef CONFIG_PROC_FS 2127 #define VLSI_PROC_FOPS (&vlsi_proc_fops) 2128 #else 2129 #define VLSI_PROC_FOPS NULL 2130 #endif 2131 #ifdef CONFIG_PM 2132 /* LDV_COMMENT_END_PREP */ 2133 /* LDV_COMMENT_BEGIN_PREP */ 2134 #endif 2135 #ifdef CONFIG_PM 2136 #endif 2137 #define PROC_DIR ("driver/" DRIVER_NAME) 2138 /* LDV_COMMENT_END_PREP */ 2139 2140 /** CALLBACK SECTION request_irq **/ 2141 /* content: static irqreturn_t vlsi_interrupt(int irq, void *dev_instance)*/ 2142 /* LDV_COMMENT_BEGIN_PREP */ 2143 #define DRIVER_NAME "vlsi_ir" 2144 #define DRIVER_VERSION "v0.5" 2145 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 2146 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>" 2147 #ifdef CONFIG_PROC_FS 2148 #define VLSI_PROC_FOPS (&vlsi_proc_fops) 2149 #else 2150 #define VLSI_PROC_FOPS NULL 2151 #endif 2152 /* LDV_COMMENT_END_PREP */ 2153 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_interrupt" */ 2154 int var_vlsi_interrupt_28_p0; 2155 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_interrupt" */ 2156 void * var_vlsi_interrupt_28_p1; 2157 /* LDV_COMMENT_BEGIN_PREP */ 2158 #ifdef CONFIG_PM 2159 #endif 2160 #ifdef CONFIG_PM 2161 #endif 2162 #define PROC_DIR ("driver/" DRIVER_NAME) 2163 /* LDV_COMMENT_END_PREP */ 2164 2165 2166 2167 2168 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */ 2169 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */ 2170 /*============================= VARIABLE INITIALIZING PART =============================*/ 2171 LDV_IN_INTERRUPT=1; 2172 2173 2174 2175 2176 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */ 2177 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */ 2178 /*============================= FUNCTION CALL SECTION =============================*/ 2179 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. 
*/ 2180 ldv_initialize(); 2181 2182 /** INIT: init_type: ST_MODULE_INIT **/ 2183 /* content: static int __init vlsi_mod_init(void)*/ 2184 /* LDV_COMMENT_BEGIN_PREP */ 2185 #define DRIVER_NAME "vlsi_ir" 2186 #define DRIVER_VERSION "v0.5" 2187 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 2188 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>" 2189 #ifdef CONFIG_PROC_FS 2190 #define VLSI_PROC_FOPS (&vlsi_proc_fops) 2191 #else 2192 #define VLSI_PROC_FOPS NULL 2193 #endif 2194 #ifdef CONFIG_PM 2195 #endif 2196 #ifdef CONFIG_PM 2197 #endif 2198 #define PROC_DIR ("driver/" DRIVER_NAME) 2199 /* LDV_COMMENT_END_PREP */ 2200 /* LDV_COMMENT_FUNCTION_CALL Kernel calls the driver init function after the driver is loaded into the kernel. This function is declared as "MODULE_INIT(function name)". */ 2201 ldv_handler_precall(); 2202 if(vlsi_mod_init()) 2203 goto ldv_final; 2204 int ldv_s_vlsi_proc_fops_file_operations = 0; 2205 2206 int ldv_s_vlsi_netdev_ops_net_device_ops = 0; 2207 2208 2209 int ldv_s_vlsi_irda_driver_pci_driver = 0; 2210 2211 2212 2213 2214 2215 while( nondet_int() 2216 || !(ldv_s_vlsi_proc_fops_file_operations == 0) 2217 || !(ldv_s_vlsi_netdev_ops_net_device_ops == 0) 2218 || !(ldv_s_vlsi_irda_driver_pci_driver == 0) 2219 ) { 2220 2221 switch(nondet_int()) { 2222 2223 case 0: { 2224 2225 /** STRUCT: struct type: file_operations, struct name: vlsi_proc_fops **/ 2226 if(ldv_s_vlsi_proc_fops_file_operations==0) { 2227 2228 /* content: static int vlsi_seq_open(struct inode *inode, struct file *file)*/ 2229 /* LDV_COMMENT_BEGIN_PREP */ 2230 #define DRIVER_NAME "vlsi_ir" 2231 #define DRIVER_VERSION "v0.5" 2232 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 2233 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>" 2234 #ifdef CONFIG_PROC_FS 2235 /* LDV_COMMENT_END_PREP */ 2236 /* LDV_COMMENT_FUNCTION_CALL Function from field "open" from driver structure with callbacks "vlsi_proc_fops". Standard function test for correct return result. */ 2237 ldv_handler_precall(); 2238 res_vlsi_seq_open_6 = vlsi_seq_open( var_group1, var_group2); 2239 ldv_check_return_value(res_vlsi_seq_open_6); 2240 if(res_vlsi_seq_open_6) 2241 goto ldv_module_exit; 2242 /* LDV_COMMENT_BEGIN_PREP */ 2243 #define VLSI_PROC_FOPS (&vlsi_proc_fops) 2244 #else 2245 #define VLSI_PROC_FOPS NULL 2246 #endif 2247 #ifdef CONFIG_PM 2248 #endif 2249 #ifdef CONFIG_PM 2250 #endif 2251 #define PROC_DIR ("driver/" DRIVER_NAME) 2252 /* LDV_COMMENT_END_PREP */ 2253 ldv_s_vlsi_proc_fops_file_operations=0; 2254 2255 } 2256 2257 } 2258 2259 break; 2260 case 1: { 2261 2262 /** STRUCT: struct type: net_device_ops, struct name: vlsi_netdev_ops **/ 2263 if(ldv_s_vlsi_netdev_ops_net_device_ops==0) { 2264 2265 /* content: static int vlsi_open(struct net_device *ndev)*/ 2266 /* LDV_COMMENT_BEGIN_PREP */ 2267 #define DRIVER_NAME "vlsi_ir" 2268 #define DRIVER_VERSION "v0.5" 2269 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 2270 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>" 2271 #ifdef CONFIG_PROC_FS 2272 #define VLSI_PROC_FOPS (&vlsi_proc_fops) 2273 #else 2274 #define VLSI_PROC_FOPS NULL 2275 #endif 2276 /* LDV_COMMENT_END_PREP */ 2277 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_open" from driver structure with callbacks "vlsi_netdev_ops". Standard function test for correct return result.
*/ 2278 ldv_handler_precall(); 2279 res_vlsi_open_29 = vlsi_open( var_group3); 2280 ldv_check_return_value(res_vlsi_open_29); 2281 if(res_vlsi_open_29 < 0) 2282 goto ldv_module_exit; 2283 /* LDV_COMMENT_BEGIN_PREP */ 2284 #ifdef CONFIG_PM 2285 #endif 2286 #ifdef CONFIG_PM 2287 #endif 2288 #define PROC_DIR ("driver/" DRIVER_NAME) 2289 /* LDV_COMMENT_END_PREP */ 2290 ldv_s_vlsi_netdev_ops_net_device_ops++; 2291 2292 } 2293 2294 } 2295 2296 break; 2297 case 2: { 2298 2299 /** STRUCT: struct type: net_device_ops, struct name: vlsi_netdev_ops **/ 2300 if(ldv_s_vlsi_netdev_ops_net_device_ops==1) { 2301 2302 /* content: static int vlsi_close(struct net_device *ndev)*/ 2303 /* LDV_COMMENT_BEGIN_PREP */ 2304 #define DRIVER_NAME "vlsi_ir" 2305 #define DRIVER_VERSION "v0.5" 2306 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 2307 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>" 2308 #ifdef CONFIG_PROC_FS 2309 #define VLSI_PROC_FOPS (&vlsi_proc_fops) 2310 #else 2311 #define VLSI_PROC_FOPS NULL 2312 #endif 2313 /* LDV_COMMENT_END_PREP */ 2314 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_stop" from driver structure with callbacks "vlsi_netdev_ops". Standard function test for correct return result. */ 2315 ldv_handler_precall(); 2316 res_vlsi_close_30 = vlsi_close( var_group3); 2317 ldv_check_return_value(res_vlsi_close_30); 2318 if(res_vlsi_close_30) 2319 goto ldv_module_exit; 2320 /* LDV_COMMENT_BEGIN_PREP */ 2321 #ifdef CONFIG_PM 2322 #endif 2323 #ifdef CONFIG_PM 2324 #endif 2325 #define PROC_DIR ("driver/" DRIVER_NAME) 2326 /* LDV_COMMENT_END_PREP */ 2327 ldv_s_vlsi_netdev_ops_net_device_ops=0; 2328 2329 } 2330 2331 } 2332 2333 break; 2334 case 3: { 2335 2336 /** STRUCT: struct type: net_device_ops, struct name: vlsi_netdev_ops **/ 2337 2338 2339 /* content: static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)*/ 2340 /* LDV_COMMENT_BEGIN_PREP */ 2341 #define DRIVER_NAME "vlsi_ir" 2342 #define DRIVER_VERSION "v0.5" 2343 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 2344 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>" 2345 #ifdef CONFIG_PROC_FS 2346 #define VLSI_PROC_FOPS (&vlsi_proc_fops) 2347 #else 2348 #define VLSI_PROC_FOPS NULL 2349 #endif 2350 /* LDV_COMMENT_END_PREP */ 2351 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_start_xmit" from driver structure with callbacks "vlsi_netdev_ops" */ 2352 ldv_handler_precall(); 2353 vlsi_hard_start_xmit( var_group4, var_group3); 2354 /* LDV_COMMENT_BEGIN_PREP */ 2355 #ifdef CONFIG_PM 2356 #endif 2357 #ifdef CONFIG_PM 2358 #endif 2359 #define PROC_DIR ("driver/" DRIVER_NAME) 2360 /* LDV_COMMENT_END_PREP */ 2361 2362 2363 2364 2365 } 2366 2367 break; 2368 case 4: { 2369 2370 /** STRUCT: struct type: net_device_ops, struct name: vlsi_netdev_ops **/ 2371 2372 2373 /* content: static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)*/ 2374 /* LDV_COMMENT_BEGIN_PREP */ 2375 #define DRIVER_NAME "vlsi_ir" 2376 #define DRIVER_VERSION "v0.5" 2377 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 2378 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>" 2379 #ifdef CONFIG_PROC_FS 2380 #define VLSI_PROC_FOPS (&vlsi_proc_fops) 2381 #else 2382 #define VLSI_PROC_FOPS NULL 2383 #endif 2384 /* LDV_COMMENT_END_PREP */ 2385 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_do_ioctl" from driver structure with callbacks "vlsi_netdev_ops" */ 2386 ldv_handler_precall(); 2387 vlsi_ioctl( var_group3, var_group5, var_vlsi_ioctl_27_p2);
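/* cmd is left unconstrained by the harness, so this call also covers
 * the default -EOPNOTSUPP branch of vlsi_ioctl()
 */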
2388 /* LDV_COMMENT_BEGIN_PREP */ 2389 #ifdef CONFIG_PM 2390 #endif 2391 #ifdef CONFIG_PM 2392 #endif 2393 #define PROC_DIR ("driver/" DRIVER_NAME) 2394 /* LDV_COMMENT_END_PREP */ 2395 2396 2397 2398 2399 } 2400 2401 break; 2402 case 5: { 2403 2404 /** STRUCT: struct type: net_device_ops, struct name: vlsi_netdev_ops **/ 2405 2406 2407 /* content: static void vlsi_tx_timeout(struct net_device *ndev)*/ 2408 /* LDV_COMMENT_BEGIN_PREP */ 2409 #define DRIVER_NAME "vlsi_ir" 2410 #define DRIVER_VERSION "v0.5" 2411 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 2412 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>" 2413 #ifdef CONFIG_PROC_FS 2414 #define VLSI_PROC_FOPS (&vlsi_proc_fops) 2415 #else 2416 #define VLSI_PROC_FOPS NULL 2417 #endif 2418 /* LDV_COMMENT_END_PREP */ 2419 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_tx_timeout" from driver structure with callbacks "vlsi_netdev_ops" */ 2420 ldv_handler_precall(); 2421 vlsi_tx_timeout( var_group3); 2422 /* LDV_COMMENT_BEGIN_PREP */ 2423 #ifdef CONFIG_PM 2424 #endif 2425 #ifdef CONFIG_PM 2426 #endif 2427 #define PROC_DIR ("driver/" DRIVER_NAME) 2428 /* LDV_COMMENT_END_PREP */ 2429 2430 2431 2432 2433 } 2434 2435 break; 2436 case 6: { 2437 2438 /** STRUCT: struct type: pci_driver, struct name: vlsi_irda_driver **/ 2439 if(ldv_s_vlsi_irda_driver_pci_driver==0) { 2440 2441 /* content: static int vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)*/ 2442 /* LDV_COMMENT_BEGIN_PREP */ 2443 #define DRIVER_NAME "vlsi_ir" 2444 #define DRIVER_VERSION "v0.5" 2445 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 2446 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>" 2447 #ifdef CONFIG_PROC_FS 2448 #define VLSI_PROC_FOPS (&vlsi_proc_fops) 2449 #else 2450 #define VLSI_PROC_FOPS NULL 2451 #endif 2452 /* LDV_COMMENT_END_PREP */ 2453 /* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "vlsi_irda_driver". Standard function test for correct return result.
*/
2454 res_vlsi_irda_probe_32 = vlsi_irda_probe( var_group6, var_vlsi_irda_probe_32_p1);
2455 ldv_check_return_value(res_vlsi_irda_probe_32);
2456 ldv_check_return_value_probe(res_vlsi_irda_probe_32);
2457 if(res_vlsi_irda_probe_32)
2458 goto ldv_module_exit;
2459 /* LDV_COMMENT_BEGIN_PREP */
2460 #ifdef CONFIG_PM
2461 #endif
2462 #ifdef CONFIG_PM
2463 #endif
2464 #define PROC_DIR ("driver/" DRIVER_NAME)
2465 /* LDV_COMMENT_END_PREP */
2466 ldv_s_vlsi_irda_driver_pci_driver++;
2467
2468 }
2469
2470 }
2471
2472 break;
2473 case 7: {
2474
2475 /** STRUCT: struct type: pci_driver, struct name: vlsi_irda_driver **/
2476 if(ldv_s_vlsi_irda_driver_pci_driver==1) {
2477
2478 /* content: static void vlsi_irda_remove(struct pci_dev *pdev)*/
2479 /* LDV_COMMENT_BEGIN_PREP */
2480 #define DRIVER_NAME "vlsi_ir"
2481 #define DRIVER_VERSION "v0.5"
2482 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147"
2483 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>"
2484 #ifdef CONFIG_PROC_FS
2485 #define VLSI_PROC_FOPS (&vlsi_proc_fops)
2486 #else
2487 #define VLSI_PROC_FOPS NULL
2488 #endif
2489 /* LDV_COMMENT_END_PREP */
2490 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "vlsi_irda_driver" */
2491 ldv_handler_precall();
2492 vlsi_irda_remove( var_group6);
2493 /* LDV_COMMENT_BEGIN_PREP */
2494 #ifdef CONFIG_PM
2495 #endif
2496 #ifdef CONFIG_PM
2497 #endif
2498 #define PROC_DIR ("driver/" DRIVER_NAME)
2499 /* LDV_COMMENT_END_PREP */
2500 ldv_s_vlsi_irda_driver_pci_driver=0;
2501
2502 }
2503
2504 }
2505
2506 break;
2507 case 8: {
2508
2509 /** STRUCT: struct type: pci_driver, struct name: vlsi_irda_driver **/
2510
2511
2512 /* content: static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)*/
2513 /* LDV_COMMENT_BEGIN_PREP */
2514 #define DRIVER_NAME "vlsi_ir"
2515 #define DRIVER_VERSION "v0.5"
2516 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147"
2517 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>"
2518 #ifdef CONFIG_PROC_FS
2519 #define VLSI_PROC_FOPS (&vlsi_proc_fops)
2520 #else
2521 #define VLSI_PROC_FOPS NULL
2522 #endif
2523 #ifdef CONFIG_PM
2524 /* LDV_COMMENT_END_PREP */
2525 /* LDV_COMMENT_FUNCTION_CALL Function from field "suspend" from driver structure with callbacks "vlsi_irda_driver" */
2526 ldv_handler_precall();
2527 vlsi_irda_suspend( var_group6, var_vlsi_irda_suspend_34_p1);
2528 /* LDV_COMMENT_BEGIN_PREP */
2529 #endif
2530 #ifdef CONFIG_PM
2531 #endif
2532 #define PROC_DIR ("driver/" DRIVER_NAME)
2533 /* LDV_COMMENT_END_PREP */
2534
2535
2536
2537
2538 }
2539
2540 break;
2541 case 9: {
2542
2543 /** STRUCT: struct type: pci_driver, struct name: vlsi_irda_driver **/
2544
2545
2546 /* content: static int vlsi_irda_resume(struct pci_dev *pdev)*/
2547 /* LDV_COMMENT_BEGIN_PREP */
2548 #define DRIVER_NAME "vlsi_ir"
2549 #define DRIVER_VERSION "v0.5"
2550 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147"
2551 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>"
2552 #ifdef CONFIG_PROC_FS
2553 #define VLSI_PROC_FOPS (&vlsi_proc_fops)
2554 #else
2555 #define VLSI_PROC_FOPS NULL
2556 #endif
2557 #ifdef CONFIG_PM
2558 /* LDV_COMMENT_END_PREP */
2559 /* LDV_COMMENT_FUNCTION_CALL Function from field "resume" from driver structure with callbacks "vlsi_irda_driver" */
2560 ldv_handler_precall();
2561 vlsi_irda_resume( var_group6);
2562 /* LDV_COMMENT_BEGIN_PREP */
2563 #endif
2564 #ifdef CONFIG_PM
2565 #endif
2566 #define PROC_DIR ("driver/" DRIVER_NAME)
2567 /* LDV_COMMENT_END_PREP */
2568
2569
2570
2571
2572 }
2573
2574 break;
2575 case 10: {
2576
2577 /** CALLBACK SECTION request_irq **/
2578 LDV_IN_INTERRUPT=2;
2579
2580 /* content: static irqreturn_t vlsi_interrupt(int irq, void *dev_instance)*/
2581 /* LDV_COMMENT_BEGIN_PREP */
2582 #define DRIVER_NAME "vlsi_ir"
2583 #define DRIVER_VERSION "v0.5"
2584 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147"
2585 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>"
2586 #ifdef CONFIG_PROC_FS
2587 #define VLSI_PROC_FOPS (&vlsi_proc_fops)
2588 #else
2589 #define VLSI_PROC_FOPS NULL
2590 #endif
2591 /* LDV_COMMENT_END_PREP */
2592 /* LDV_COMMENT_FUNCTION_CALL */
2593 ldv_handler_precall();
2594 vlsi_interrupt( var_vlsi_interrupt_28_p0, var_vlsi_interrupt_28_p1);
2595 /* LDV_COMMENT_BEGIN_PREP */
2596 #ifdef CONFIG_PM
2597 #endif
2598 #ifdef CONFIG_PM
2599 #endif
2600 #define PROC_DIR ("driver/" DRIVER_NAME)
2601 /* LDV_COMMENT_END_PREP */
2602 LDV_IN_INTERRUPT=1;
2603
2604
2605
2606 }
2607
2608 break;
2609 default: break;
2610
2611 }
2612
2613 }
2614
2615 ldv_module_exit:
2616
2617 /** INIT: init_type: ST_MODULE_EXIT **/
2618 /* content: static void __exit vlsi_mod_exit(void)*/
2619 /* LDV_COMMENT_BEGIN_PREP */
2620 #define DRIVER_NAME "vlsi_ir"
2621 #define DRIVER_VERSION "v0.5"
2622 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147"
2623 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>"
2624 #ifdef CONFIG_PROC_FS
2625 #define VLSI_PROC_FOPS (&vlsi_proc_fops)
2626 #else
2627 #define VLSI_PROC_FOPS NULL
2628 #endif
2629 #ifdef CONFIG_PM
2630 #endif
2631 #ifdef CONFIG_PM
2632 #endif
2633 #define PROC_DIR ("driver/" DRIVER_NAME)
2634 /* LDV_COMMENT_END_PREP */
2635 /* LDV_COMMENT_FUNCTION_CALL Kernel calls the driver release function before the driver is unloaded from the kernel. This function is declared as "MODULE_EXIT(function name)". */
2636 ldv_handler_precall();
2637 vlsi_mod_exit();
2638
2639 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
2640 ldv_final: ldv_check_final_state();
2641
2642 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
2643 return;
2644
2645 }
2646 #endif
2647
2648 /* LDV_COMMENT_END_MAIN */
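
/* Overview of the generated main() above: the Driver Environment Generator
 * harness draws a nondeterministic case on every loop iteration, so the
 * verifier explores arbitrary interleavings of the driver callbacks
 * (ndo_open/ndo_stop, ndo_start_xmit, ndo_do_ioctl, ndo_tx_timeout, PCI
 * probe/remove, suspend/resume, and the vlsi_interrupt handler, the latter
 * flagged by LDV_IN_INTERRUPT=2) between module_init and module_exit. The
 * ldv_s_* state counters admit only legal orderings - close only after a
 * successful open, remove only after a successful probe - and
 * ldv_check_final_state() at ldv_final verifies that all resources are
 * released on every explored path. */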
1 2 #include <linux/kernel.h> 3 bool ldv_is_err(const void *ptr); 4 bool ldv_is_err_or_null(const void *ptr); 5 void* ldv_err_ptr(long error); 6 long ldv_ptr_err(const void *ptr); 7 8 extern void ldv_dma_map_page(void); 9 extern void ldv_dma_mapping_error(void); 10 #line 1 "/home/vitaly/ldv-launches/work/current--X--drivers--X--defaultlinux-4.11-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.11-rc1.tar.xz/csd_deg_dscv/4878/dscv_tempdir/dscv/ri/331_1a/drivers/net/irda/vlsi_ir.c" 11 12 /********************************************************************* 13 * 14 * vlsi_ir.c: VLSI82C147 PCI IrDA controller driver for Linux 15 * 16 * Copyright (c) 2001-2003 Martin Diehl 17 * 18 * This program is free software; you can redistribute it and/or 19 * modify it under the terms of the GNU General Public License as 20 * published by the Free Software Foundation; either version 2 of 21 * the License, or (at your option) any later version. 22 * 23 * This program is distributed in the hope that it will be useful, 24 * but WITHOUT ANY WARRANTY; without even the implied warranty of 25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 26 * GNU General Public License for more details. 27 * 28 * You should have received a copy of the GNU General Public License 29 * along with this program; if not, see <http://www.gnu.org/licenses/>. 30 * 31 ********************************************************************/ 32 33 #include <linux/module.h> 34 35 #define DRIVER_NAME "vlsi_ir" 36 #define DRIVER_VERSION "v0.5" 37 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 38 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>" 39 40 MODULE_DESCRIPTION(DRIVER_DESCRIPTION); 41 MODULE_AUTHOR(DRIVER_AUTHOR); 42 MODULE_LICENSE("GPL"); 43 44 /********************************************************/ 45 46 #include <linux/kernel.h> 47 #include <linux/ktime.h> 48 #include <linux/init.h> 49 #include <linux/interrupt.h> 50 #include <linux/pci.h> 51 #include <linux/slab.h> 52 #include <linux/netdevice.h> 53 #include <linux/skbuff.h> 54 #include <linux/delay.h> 55 #include <linux/proc_fs.h> 56 #include <linux/seq_file.h> 57 #include <linux/math64.h> 58 #include <linux/mutex.h> 59 #include <linux/uaccess.h> 60 #include <asm/byteorder.h> 61 62 #include <net/irda/irda.h> 63 #include <net/irda/irda_device.h> 64 #include <net/irda/wrapper.h> 65 #include <net/irda/crc.h> 66 67 #include "vlsi_ir.h" 68 69 /********************************************************/ 70 71 static /* const */ char drivername[] = DRIVER_NAME; 72 73 static const struct pci_device_id vlsi_irda_table[] = { 74 { 75 .class = PCI_CLASS_WIRELESS_IRDA << 8, 76 .class_mask = PCI_CLASS_SUBCLASS_MASK << 8, 77 .vendor = PCI_VENDOR_ID_VLSI, 78 .device = PCI_DEVICE_ID_VLSI_82C147, 79 .subvendor = PCI_ANY_ID, 80 .subdevice = PCI_ANY_ID, 81 }, 82 { /* all zeroes */ } 83 }; 84 85 MODULE_DEVICE_TABLE(pci, vlsi_irda_table); 86 87 /********************************************************/ 88 89 /* clksrc: which clock source to be used 90 * 0: auto - try PLL, fallback to 40MHz XCLK 91 * 1: on-chip 48MHz PLL 92 * 2: external 48MHz XCLK 93 * 3: external 40MHz XCLK (HP OB-800) 94 */ 95 96 static int clksrc = 0; /* default is 0(auto) */ 97 module_param(clksrc, int, 0); 98 MODULE_PARM_DESC(clksrc, "clock input source selection"); 99 100 /* ringsize: size of the tx and rx descriptor rings 101 * independent for tx and rx 102 * specify as ringsize=tx[,rx] 103 * allowed values: 4, 8, 16, 32, 64 104 * Due to the IrDA 1.x max. 
allowed window size=7, 105 * there should be no gain when using rings larger than 8 106 */ 107 108 static int ringsize[] = {8,8}; /* default is tx=8 / rx=8 */ 109 module_param_array(ringsize, int, NULL, 0); 110 MODULE_PARM_DESC(ringsize, "TX, RX ring descriptor size"); 111 112 /* sirpulse: tuning of the SIR pulse width within IrPHY 1.3 limits 113 * 0: very short, 1.5us (exception: 6us at 2.4 kbaud) 114 * 1: nominal 3/16 bittime width 115 * note: IrDA compliant peer devices should be happy regardless 116 * which one is used. Primary goal is to save some power 117 * on the sender's side - at 9.6kbaud for example the short 118 * pulse width saves more than 90% of the transmitted IR power. 119 */ 120 121 static int sirpulse = 1; /* default is 3/16 bittime */ 122 module_param(sirpulse, int, 0); 123 MODULE_PARM_DESC(sirpulse, "SIR pulse width tuning"); 124 125 /* qos_mtt_bits: encoded min-turn-time value we require the peer device 126 * to use before transmitting to us. "Type 1" (per-station) 127 * bitfield according to IrLAP definition (section 6.6.8) 128 * Don't know which transceiver is used by my OB800 - the 129 * pretty common HP HDLS-1100 requires 1 msec - so lets use this. 130 */ 131 132 static int qos_mtt_bits = 0x07; /* default is 1 ms or more */ 133 module_param(qos_mtt_bits, int, 0); 134 MODULE_PARM_DESC(qos_mtt_bits, "IrLAP bitfield representing min-turn-time"); 135 136 /********************************************************/ 137 138 static void vlsi_reg_debug(unsigned iobase, const char *s) 139 { 140 int i; 141 142 printk(KERN_DEBUG "%s: ", s); 143 for (i = 0; i < 0x20; i++) 144 printk("%02x", (unsigned)inb((iobase+i))); 145 printk("\n"); 146 } 147 148 static void vlsi_ring_debug(struct vlsi_ring *r) 149 { 150 struct ring_descr *rd; 151 unsigned i; 152 153 printk(KERN_DEBUG "%s - ring %p / size %u / mask 0x%04x / len %u / dir %d / hw %p\n", 154 __func__, r, r->size, r->mask, r->len, r->dir, r->rd[0].hw); 155 printk(KERN_DEBUG "%s - head = %d / tail = %d\n", __func__, 156 atomic_read(&r->head) & r->mask, atomic_read(&r->tail) & r->mask); 157 for (i = 0; i < r->size; i++) { 158 rd = &r->rd[i]; 159 printk(KERN_DEBUG "%s - ring descr %u: ", __func__, i); 160 printk("skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw); 161 printk(KERN_DEBUG "%s - hw: status=%02x count=%u addr=0x%08x\n", 162 __func__, (unsigned) rd_get_status(rd), 163 (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd)); 164 } 165 } 166 167 /********************************************************/ 168 169 /* needed regardless of CONFIG_PROC_FS */ 170 static struct proc_dir_entry *vlsi_proc_root = NULL; 171 172 #ifdef CONFIG_PROC_FS 173 174 static void vlsi_proc_pdev(struct seq_file *seq, struct pci_dev *pdev) 175 { 176 unsigned iobase = pci_resource_start(pdev, 0); 177 unsigned i; 178 179 seq_printf(seq, "\n%s (vid/did: [%04x:%04x])\n", 180 pci_name(pdev), (int)pdev->vendor, (int)pdev->device); 181 seq_printf(seq, "pci-power-state: %u\n", (unsigned) pdev->current_state); 182 seq_printf(seq, "resources: irq=%u / io=0x%04x / dma_mask=0x%016Lx\n", 183 pdev->irq, (unsigned)pci_resource_start(pdev, 0), (unsigned long long)pdev->dma_mask); 184 seq_printf(seq, "hw registers: "); 185 for (i = 0; i < 0x20; i++) 186 seq_printf(seq, "%02x", (unsigned)inb((iobase+i))); 187 seq_printf(seq, "\n"); 188 } 189 190 static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev) 191 { 192 vlsi_irda_dev_t *idev = netdev_priv(ndev); 193 u8 byte; 194 u16 word; 195 s32 sec, usec; 196 unsigned iobase = ndev->base_addr; 197 198 
seq_printf(seq, "\n%s link state: %s / %s / %s / %s\n", ndev->name, 199 netif_device_present(ndev) ? "attached" : "detached", 200 netif_running(ndev) ? "running" : "not running", 201 netif_carrier_ok(ndev) ? "carrier ok" : "no carrier", 202 netif_queue_stopped(ndev) ? "queue stopped" : "queue running"); 203 204 if (!netif_running(ndev)) 205 return; 206 207 seq_printf(seq, "\nhw-state:\n"); 208 pci_read_config_byte(idev->pdev, VLSI_PCI_IRMISC, &byte); 209 seq_printf(seq, "IRMISC:%s%s%s uart%s", 210 (byte&IRMISC_IRRAIL) ? " irrail" : "", 211 (byte&IRMISC_IRPD) ? " irpd" : "", 212 (byte&IRMISC_UARTTST) ? " uarttest" : "", 213 (byte&IRMISC_UARTEN) ? "@" : " disabled\n"); 214 if (byte&IRMISC_UARTEN) { 215 seq_printf(seq, "0x%s\n", 216 (byte&2) ? ((byte&1) ? "3e8" : "2e8") 217 : ((byte&1) ? "3f8" : "2f8")); 218 } 219 pci_read_config_byte(idev->pdev, VLSI_PCI_CLKCTL, &byte); 220 seq_printf(seq, "CLKCTL: PLL %s%s%s / clock %s / wakeup %s\n", 221 (byte&CLKCTL_PD_INV) ? "powered" : "down", 222 (byte&CLKCTL_LOCK) ? " locked" : "", 223 (byte&CLKCTL_EXTCLK) ? ((byte&CLKCTL_XCKSEL)?" / 40 MHz XCLK":" / 48 MHz XCLK") : "", 224 (byte&CLKCTL_CLKSTP) ? "stopped" : "running", 225 (byte&CLKCTL_WAKE) ? "enabled" : "disabled"); 226 pci_read_config_byte(idev->pdev, VLSI_PCI_MSTRPAGE, &byte); 227 seq_printf(seq, "MSTRPAGE: 0x%02x\n", (unsigned)byte); 228 229 byte = inb(iobase+VLSI_PIO_IRINTR); 230 seq_printf(seq, "IRINTR:%s%s%s%s%s%s%s%s\n", 231 (byte&IRINTR_ACTEN) ? " ACTEN" : "", 232 (byte&IRINTR_RPKTEN) ? " RPKTEN" : "", 233 (byte&IRINTR_TPKTEN) ? " TPKTEN" : "", 234 (byte&IRINTR_OE_EN) ? " OE_EN" : "", 235 (byte&IRINTR_ACTIVITY) ? " ACTIVITY" : "", 236 (byte&IRINTR_RPKTINT) ? " RPKTINT" : "", 237 (byte&IRINTR_TPKTINT) ? " TPKTINT" : "", 238 (byte&IRINTR_OE_INT) ? " OE_INT" : ""); 239 word = inw(iobase+VLSI_PIO_RINGPTR); 240 seq_printf(seq, "RINGPTR: rx=%u / tx=%u\n", RINGPTR_GET_RX(word), RINGPTR_GET_TX(word)); 241 word = inw(iobase+VLSI_PIO_RINGBASE); 242 seq_printf(seq, "RINGBASE: busmap=0x%08x\n", 243 ((unsigned)word << 10)|(MSTRPAGE_VALUE<<24)); 244 word = inw(iobase+VLSI_PIO_RINGSIZE); 245 seq_printf(seq, "RINGSIZE: rx=%u / tx=%u\n", RINGSIZE_TO_RXSIZE(word), 246 RINGSIZE_TO_TXSIZE(word)); 247 248 word = inw(iobase+VLSI_PIO_IRCFG); 249 seq_printf(seq, "IRCFG:%s%s%s%s%s%s%s%s%s%s%s%s%s\n", 250 (word&IRCFG_LOOP) ? " LOOP" : "", 251 (word&IRCFG_ENTX) ? " ENTX" : "", 252 (word&IRCFG_ENRX) ? " ENRX" : "", 253 (word&IRCFG_MSTR) ? " MSTR" : "", 254 (word&IRCFG_RXANY) ? " RXANY" : "", 255 (word&IRCFG_CRC16) ? " CRC16" : "", 256 (word&IRCFG_FIR) ? " FIR" : "", 257 (word&IRCFG_MIR) ? " MIR" : "", 258 (word&IRCFG_SIR) ? " SIR" : "", 259 (word&IRCFG_SIRFILT) ? " SIRFILT" : "", 260 (word&IRCFG_SIRTEST) ? " SIRTEST" : "", 261 (word&IRCFG_TXPOL) ? " TXPOL" : "", 262 (word&IRCFG_RXPOL) ? " RXPOL" : ""); 263 word = inw(iobase+VLSI_PIO_IRENABLE); 264 seq_printf(seq, "IRENABLE:%s%s%s%s%s%s%s%s\n", 265 (word&IRENABLE_PHYANDCLOCK) ? " PHYANDCLOCK" : "", 266 (word&IRENABLE_CFGER) ? " CFGERR" : "", 267 (word&IRENABLE_FIR_ON) ? " FIR_ON" : "", 268 (word&IRENABLE_MIR_ON) ? " MIR_ON" : "", 269 (word&IRENABLE_SIR_ON) ? " SIR_ON" : "", 270 (word&IRENABLE_ENTXST) ? " ENTXST" : "", 271 (word&IRENABLE_ENRXST) ? " ENRXST" : "", 272 (word&IRENABLE_CRC16_ON) ? 
" CRC16_ON" : ""); 273 word = inw(iobase+VLSI_PIO_PHYCTL); 274 seq_printf(seq, "PHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n", 275 (unsigned)PHYCTL_TO_BAUD(word), 276 (unsigned)PHYCTL_TO_PLSWID(word), 277 (unsigned)PHYCTL_TO_PREAMB(word)); 278 word = inw(iobase+VLSI_PIO_NPHYCTL); 279 seq_printf(seq, "NPHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n", 280 (unsigned)PHYCTL_TO_BAUD(word), 281 (unsigned)PHYCTL_TO_PLSWID(word), 282 (unsigned)PHYCTL_TO_PREAMB(word)); 283 word = inw(iobase+VLSI_PIO_MAXPKT); 284 seq_printf(seq, "MAXPKT: max. rx packet size = %u\n", word); 285 word = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 286 seq_printf(seq, "RCVBCNT: rx-fifo filling level = %u\n", word); 287 288 seq_printf(seq, "\nsw-state:\n"); 289 seq_printf(seq, "IrPHY setup: %d baud - %s encoding\n", idev->baud, 290 (idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR")); 291 sec = div_s64_rem(ktime_us_delta(ktime_get(), idev->last_rx), 292 USEC_PER_SEC, &usec); 293 seq_printf(seq, "last rx: %ul.%06u sec\n", sec, usec); 294 295 seq_printf(seq, "RX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu", 296 ndev->stats.rx_packets, ndev->stats.rx_bytes, ndev->stats.rx_errors, 297 ndev->stats.rx_dropped); 298 seq_printf(seq, " / overrun=%lu / length=%lu / frame=%lu / crc=%lu\n", 299 ndev->stats.rx_over_errors, ndev->stats.rx_length_errors, 300 ndev->stats.rx_frame_errors, ndev->stats.rx_crc_errors); 301 seq_printf(seq, "TX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu / fifo=%lu\n", 302 ndev->stats.tx_packets, ndev->stats.tx_bytes, ndev->stats.tx_errors, 303 ndev->stats.tx_dropped, ndev->stats.tx_fifo_errors); 304 305 } 306 307 static void vlsi_proc_ring(struct seq_file *seq, struct vlsi_ring *r) 308 { 309 struct ring_descr *rd; 310 unsigned i, j; 311 int h, t; 312 313 seq_printf(seq, "size %u / mask 0x%04x / len %u / dir %d / hw %p\n", 314 r->size, r->mask, r->len, r->dir, r->rd[0].hw); 315 h = atomic_read(&r->head) & r->mask; 316 t = atomic_read(&r->tail) & r->mask; 317 seq_printf(seq, "head = %d / tail = %d ", h, t); 318 if (h == t) 319 seq_printf(seq, "(empty)\n"); 320 else { 321 if (((t+1)&r->mask) == h) 322 seq_printf(seq, "(full)\n"); 323 else 324 seq_printf(seq, "(level = %d)\n", ((unsigned)(t-h) & r->mask)); 325 rd = &r->rd[h]; 326 j = (unsigned) rd_get_count(rd); 327 seq_printf(seq, "current: rd = %d / status = %02x / len = %u\n", 328 h, (unsigned)rd_get_status(rd), j); 329 if (j > 0) { 330 seq_printf(seq, " data: %*ph\n", 331 min_t(unsigned, j, 20), rd->buf); 332 } 333 } 334 for (i = 0; i < r->size; i++) { 335 rd = &r->rd[i]; 336 seq_printf(seq, "> ring descr %u: ", i); 337 seq_printf(seq, "skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw); 338 seq_printf(seq, " hw: status=%02x count=%u busaddr=0x%08x\n", 339 (unsigned) rd_get_status(rd), 340 (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd)); 341 } 342 } 343 344 static int vlsi_seq_show(struct seq_file *seq, void *v) 345 { 346 struct net_device *ndev = seq->private; 347 vlsi_irda_dev_t *idev = netdev_priv(ndev); 348 unsigned long flags; 349 350 seq_printf(seq, "\n%s %s\n\n", DRIVER_NAME, DRIVER_VERSION); 351 seq_printf(seq, "clksrc: %s\n", 352 (clksrc>=2) ? 
((clksrc==3)?"40MHz XCLK":"48MHz XCLK") 353 : ((clksrc==1)?"48MHz PLL":"autodetect")); 354 seq_printf(seq, "ringsize: tx=%d / rx=%d\n", 355 ringsize[0], ringsize[1]); 356 seq_printf(seq, "sirpulse: %s\n", (sirpulse)?"3/16 bittime":"short"); 357 seq_printf(seq, "qos_mtt_bits: 0x%02x\n", (unsigned)qos_mtt_bits); 358 359 spin_lock_irqsave(&idev->lock, flags); 360 if (idev->pdev != NULL) { 361 vlsi_proc_pdev(seq, idev->pdev); 362 363 if (idev->pdev->current_state == 0) 364 vlsi_proc_ndev(seq, ndev); 365 else 366 seq_printf(seq, "\nPCI controller down - resume_ok = %d\n", 367 idev->resume_ok); 368 if (netif_running(ndev) && idev->rx_ring && idev->tx_ring) { 369 seq_printf(seq, "\n--------- RX ring -----------\n\n"); 370 vlsi_proc_ring(seq, idev->rx_ring); 371 seq_printf(seq, "\n--------- TX ring -----------\n\n"); 372 vlsi_proc_ring(seq, idev->tx_ring); 373 } 374 } 375 seq_printf(seq, "\n"); 376 spin_unlock_irqrestore(&idev->lock, flags); 377 378 return 0; 379 } 380 381 static int vlsi_seq_open(struct inode *inode, struct file *file) 382 { 383 return single_open(file, vlsi_seq_show, PDE_DATA(inode)); 384 } 385 386 static const struct file_operations vlsi_proc_fops = { 387 .owner = THIS_MODULE, 388 .open = vlsi_seq_open, 389 .read = seq_read, 390 .llseek = seq_lseek, 391 .release = single_release, 392 }; 393 394 #define VLSI_PROC_FOPS (&vlsi_proc_fops) 395 396 #else /* CONFIG_PROC_FS */ 397 #define VLSI_PROC_FOPS NULL 398 #endif 399 400 /********************************************************/ 401 402 static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr_hw *hwmap, 403 unsigned size, unsigned len, int dir) 404 { 405 struct vlsi_ring *r; 406 struct ring_descr *rd; 407 unsigned i, j; 408 dma_addr_t busaddr; 409 410 if (!size || ((size-1)&size)!=0) /* must be >0 and power of 2 */ 411 return NULL; 412 413 r = kmalloc(sizeof(*r) + size * sizeof(struct ring_descr), GFP_KERNEL); 414 if (!r) 415 return NULL; 416 memset(r, 0, sizeof(*r)); 417 418 r->pdev = pdev; 419 r->dir = dir; 420 r->len = len; 421 r->rd = (struct ring_descr *)(r+1); 422 r->mask = size - 1; 423 r->size = size; 424 atomic_set(&r->head, 0); 425 atomic_set(&r->tail, 0); 426 427 for (i = 0; i < size; i++) { 428 rd = r->rd + i; 429 memset(rd, 0, sizeof(*rd)); 430 rd->hw = hwmap + i; 431 rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA); 432 if (rd->buf == NULL || 433 !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) { 434 if (rd->buf) { 435 net_err_ratelimited("%s: failed to create PCI-MAP for %p\n", 436 __func__, rd->buf); 437 kfree(rd->buf); 438 rd->buf = NULL; 439 } 440 for (j = 0; j < i; j++) { 441 rd = r->rd + j; 442 busaddr = rd_get_addr(rd); 443 rd_set_addr_status(rd, 0, 0); 444 if (busaddr) 445 pci_unmap_single(pdev, busaddr, len, dir); 446 kfree(rd->buf); 447 rd->buf = NULL; 448 } 449 kfree(r); 450 return NULL; 451 } 452 rd_set_addr_status(rd, busaddr, 0); 453 /* initially, the dma buffer is owned by the CPU */ 454 rd->skb = NULL; 455 } 456 return r; 457 } 458 459 static int vlsi_free_ring(struct vlsi_ring *r) 460 { 461 struct ring_descr *rd; 462 unsigned i; 463 dma_addr_t busaddr; 464 465 for (i = 0; i < r->size; i++) { 466 rd = r->rd + i; 467 if (rd->skb) 468 dev_kfree_skb_any(rd->skb); 469 busaddr = rd_get_addr(rd); 470 rd_set_addr_status(rd, 0, 0); 471 if (busaddr) 472 pci_unmap_single(r->pdev, busaddr, r->len, r->dir); 473 kfree(rd->buf); 474 } 475 kfree(r); 476 return 0; 477 } 478 479 static int vlsi_create_hwif(vlsi_irda_dev_t *idev) 480 { 481 char *ringarea; 482 struct ring_descr_hw *hwmap; 483 484 
idev->virtaddr = NULL; 485 idev->busaddr = 0; 486 487 ringarea = pci_zalloc_consistent(idev->pdev, HW_RING_AREA_SIZE, 488 &idev->busaddr); 489 if (!ringarea) 490 goto out; 491 492 hwmap = (struct ring_descr_hw *)ringarea; 493 idev->rx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[1], 494 XFER_BUF_SIZE, PCI_DMA_FROMDEVICE); 495 if (idev->rx_ring == NULL) 496 goto out_unmap; 497 498 hwmap += MAX_RING_DESCR; 499 idev->tx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[0], 500 XFER_BUF_SIZE, PCI_DMA_TODEVICE); 501 if (idev->tx_ring == NULL) 502 goto out_free_rx; 503 504 idev->virtaddr = ringarea; 505 return 0; 506 507 out_free_rx: 508 vlsi_free_ring(idev->rx_ring); 509 out_unmap: 510 idev->rx_ring = idev->tx_ring = NULL; 511 pci_free_consistent(idev->pdev, HW_RING_AREA_SIZE, ringarea, idev->busaddr); 512 idev->busaddr = 0; 513 out: 514 return -ENOMEM; 515 } 516 517 static int vlsi_destroy_hwif(vlsi_irda_dev_t *idev) 518 { 519 vlsi_free_ring(idev->rx_ring); 520 vlsi_free_ring(idev->tx_ring); 521 idev->rx_ring = idev->tx_ring = NULL; 522 523 if (idev->busaddr) 524 pci_free_consistent(idev->pdev,HW_RING_AREA_SIZE,idev->virtaddr,idev->busaddr); 525 526 idev->virtaddr = NULL; 527 idev->busaddr = 0; 528 529 return 0; 530 } 531 532 /********************************************************/ 533 534 static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd) 535 { 536 u16 status; 537 int crclen, len = 0; 538 struct sk_buff *skb; 539 int ret = 0; 540 struct net_device *ndev = pci_get_drvdata(r->pdev); 541 vlsi_irda_dev_t *idev = netdev_priv(ndev); 542 543 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir); 544 /* dma buffer now owned by the CPU */ 545 status = rd_get_status(rd); 546 if (status & RD_RX_ERROR) { 547 if (status & RD_RX_OVER) 548 ret |= VLSI_RX_OVER; 549 if (status & RD_RX_LENGTH) 550 ret |= VLSI_RX_LENGTH; 551 if (status & RD_RX_PHYERR) 552 ret |= VLSI_RX_FRAME; 553 if (status & RD_RX_CRCERR) 554 ret |= VLSI_RX_CRC; 555 goto done; 556 } 557 558 len = rd_get_count(rd); 559 crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16); 560 len -= crclen; /* remove trailing CRC */ 561 if (len <= 0) { 562 pr_debug("%s: strange frame (len=%d)\n", __func__, len); 563 ret |= VLSI_RX_DROP; 564 goto done; 565 } 566 567 if (idev->mode == IFF_SIR) { /* hw checks CRC in MIR, FIR mode */ 568 569 /* rd->buf is a streaming PCI_DMA_FROMDEVICE map. Doing the 570 * endian-adjustment there just in place will dirty a cache line 571 * which belongs to the map and thus we must be sure it will 572 * get flushed before giving the buffer back to hardware. 573 * vlsi_fill_rx() will do this anyway - but here we rely on. 574 */ 575 le16_to_cpus(rd->buf+len); 576 if (irda_calc_crc16(INIT_FCS,rd->buf,len+crclen) != GOOD_FCS) { 577 pr_debug("%s: crc error\n", __func__); 578 ret |= VLSI_RX_CRC; 579 goto done; 580 } 581 } 582 583 if (!rd->skb) { 584 net_warn_ratelimited("%s: rx packet lost\n", __func__); 585 ret |= VLSI_RX_DROP; 586 goto done; 587 } 588 589 skb = rd->skb; 590 rd->skb = NULL; 591 skb->dev = ndev; 592 memcpy(skb_put(skb,len), rd->buf, len); 593 skb_reset_mac_header(skb); 594 if (in_interrupt()) 595 netif_rx(skb); 596 else 597 netif_rx_ni(skb); 598 599 done: 600 rd_set_status(rd, 0); 601 rd_set_count(rd, 0); 602 /* buffer still owned by CPU */ 603 604 return (ret) ? 
-ret : len; 605 } 606 607 static void vlsi_fill_rx(struct vlsi_ring *r) 608 { 609 struct ring_descr *rd; 610 611 for (rd = ring_last(r); rd != NULL; rd = ring_put(r)) { 612 if (rd_is_active(rd)) { 613 net_warn_ratelimited("%s: driver bug: rx descr race with hw\n", 614 __func__); 615 vlsi_ring_debug(r); 616 break; 617 } 618 if (!rd->skb) { 619 rd->skb = dev_alloc_skb(IRLAP_SKB_ALLOCSIZE); 620 if (rd->skb) { 621 skb_reserve(rd->skb,1); 622 rd->skb->protocol = htons(ETH_P_IRDA); 623 } 624 else 625 break; /* probably not worth logging? */ 626 } 627 /* give dma buffer back to busmaster */ 628 pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir); 629 rd_activate(rd); 630 } 631 } 632 633 static void vlsi_rx_interrupt(struct net_device *ndev) 634 { 635 vlsi_irda_dev_t *idev = netdev_priv(ndev); 636 struct vlsi_ring *r = idev->rx_ring; 637 struct ring_descr *rd; 638 int ret; 639 640 for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) { 641 642 if (rd_is_active(rd)) 643 break; 644 645 ret = vlsi_process_rx(r, rd); 646 647 if (ret < 0) { 648 ret = -ret; 649 ndev->stats.rx_errors++; 650 if (ret & VLSI_RX_DROP) 651 ndev->stats.rx_dropped++; 652 if (ret & VLSI_RX_OVER) 653 ndev->stats.rx_over_errors++; 654 if (ret & VLSI_RX_LENGTH) 655 ndev->stats.rx_length_errors++; 656 if (ret & VLSI_RX_FRAME) 657 ndev->stats.rx_frame_errors++; 658 if (ret & VLSI_RX_CRC) 659 ndev->stats.rx_crc_errors++; 660 } 661 else if (ret > 0) { 662 ndev->stats.rx_packets++; 663 ndev->stats.rx_bytes += ret; 664 } 665 } 666 667 idev->last_rx = ktime_get(); /* remember "now" for later mtt delay */ 668 669 vlsi_fill_rx(r); 670 671 if (ring_first(r) == NULL) { 672 /* we are in big trouble, if this should ever happen */ 673 net_err_ratelimited("%s: rx ring exhausted!\n", __func__); 674 vlsi_ring_debug(r); 675 } 676 else 677 outw(0, ndev->base_addr+VLSI_PIO_PROMPT); 678 } 679 680 /* caller must have stopped the controller from busmastering */ 681 682 static void vlsi_unarm_rx(vlsi_irda_dev_t *idev) 683 { 684 struct net_device *ndev = pci_get_drvdata(idev->pdev); 685 struct vlsi_ring *r = idev->rx_ring; 686 struct ring_descr *rd; 687 int ret; 688 689 for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) { 690 691 ret = 0; 692 if (rd_is_active(rd)) { 693 rd_set_status(rd, 0); 694 if (rd_get_count(rd)) { 695 pr_debug("%s - dropping rx packet\n", __func__); 696 ret = -VLSI_RX_DROP; 697 } 698 rd_set_count(rd, 0); 699 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir); 700 if (rd->skb) { 701 dev_kfree_skb_any(rd->skb); 702 rd->skb = NULL; 703 } 704 } 705 else 706 ret = vlsi_process_rx(r, rd); 707 708 if (ret < 0) { 709 ret = -ret; 710 ndev->stats.rx_errors++; 711 if (ret & VLSI_RX_DROP) 712 ndev->stats.rx_dropped++; 713 if (ret & VLSI_RX_OVER) 714 ndev->stats.rx_over_errors++; 715 if (ret & VLSI_RX_LENGTH) 716 ndev->stats.rx_length_errors++; 717 if (ret & VLSI_RX_FRAME) 718 ndev->stats.rx_frame_errors++; 719 if (ret & VLSI_RX_CRC) 720 ndev->stats.rx_crc_errors++; 721 } 722 else if (ret > 0) { 723 ndev->stats.rx_packets++; 724 ndev->stats.rx_bytes += ret; 725 } 726 } 727 } 728 729 /********************************************************/ 730 731 static int vlsi_process_tx(struct vlsi_ring *r, struct ring_descr *rd) 732 { 733 u16 status; 734 int len; 735 int ret; 736 737 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir); 738 /* dma buffer now owned by the CPU */ 739 status = rd_get_status(rd); 740 if (status & RD_TX_UNDRN) 741 ret = VLSI_TX_FIFO; 742 else 743 ret = 0; 744 
rd_set_status(rd, 0); 745 746 if (rd->skb) { 747 len = rd->skb->len; 748 dev_kfree_skb_any(rd->skb); 749 rd->skb = NULL; 750 } 751 else /* tx-skb already freed? - should never happen */ 752 len = rd_get_count(rd); /* incorrect for SIR! (due to wrapping) */ 753 754 rd_set_count(rd, 0); 755 /* dma buffer still owned by the CPU */ 756 757 return (ret) ? -ret : len; 758 } 759 760 static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase) 761 { 762 u16 nphyctl; 763 u16 config; 764 unsigned mode; 765 int ret; 766 int baudrate; 767 int fifocnt; 768 769 baudrate = idev->new_baud; 770 pr_debug("%s: %d -> %d\n", __func__, idev->baud, idev->new_baud); 771 if (baudrate == 4000000) { 772 mode = IFF_FIR; 773 config = IRCFG_FIR; 774 nphyctl = PHYCTL_FIR; 775 } 776 else if (baudrate == 1152000) { 777 mode = IFF_MIR; 778 config = IRCFG_MIR | IRCFG_CRC16; 779 nphyctl = PHYCTL_MIR(clksrc==3); 780 } 781 else { 782 mode = IFF_SIR; 783 config = IRCFG_SIR | IRCFG_SIRFILT | IRCFG_RXANY; 784 switch(baudrate) { 785 default: 786 net_warn_ratelimited("%s: undefined baudrate %d - fallback to 9600!\n", 787 __func__, baudrate); 788 baudrate = 9600; 789 /* fallthru */ 790 case 2400: 791 case 9600: 792 case 19200: 793 case 38400: 794 case 57600: 795 case 115200: 796 nphyctl = PHYCTL_SIR(baudrate,sirpulse,clksrc==3); 797 break; 798 } 799 } 800 config |= IRCFG_MSTR | IRCFG_ENRX; 801 802 fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 803 if (fifocnt != 0) { 804 pr_debug("%s: rx fifo not empty(%d)\n", __func__, fifocnt); 805 } 806 807 outw(0, iobase+VLSI_PIO_IRENABLE); 808 outw(config, iobase+VLSI_PIO_IRCFG); 809 outw(nphyctl, iobase+VLSI_PIO_NPHYCTL); 810 wmb(); 811 outw(IRENABLE_PHYANDCLOCK, iobase+VLSI_PIO_IRENABLE); 812 mb(); 813 814 udelay(1); /* chip applies IRCFG on next rising edge of its 8MHz clock */ 815 816 /* read back settings for validation */ 817 818 config = inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_MASK; 819 820 if (mode == IFF_FIR) 821 config ^= IRENABLE_FIR_ON; 822 else if (mode == IFF_MIR) 823 config ^= (IRENABLE_MIR_ON|IRENABLE_CRC16_ON); 824 else 825 config ^= IRENABLE_SIR_ON; 826 827 if (config != (IRENABLE_PHYANDCLOCK|IRENABLE_ENRXST)) { 828 net_warn_ratelimited("%s: failed to set %s mode!\n", 829 __func__, 830 mode == IFF_SIR ? "SIR" : 831 mode == IFF_MIR ? 
"MIR" : "FIR"); 832 ret = -1; 833 } 834 else { 835 if (inw(iobase+VLSI_PIO_PHYCTL) != nphyctl) { 836 net_warn_ratelimited("%s: failed to apply baudrate %d\n", 837 __func__, baudrate); 838 ret = -1; 839 } 840 else { 841 idev->mode = mode; 842 idev->baud = baudrate; 843 idev->new_baud = 0; 844 ret = 0; 845 } 846 } 847 848 if (ret) 849 vlsi_reg_debug(iobase,__func__); 850 851 return ret; 852 } 853 854 static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb, 855 struct net_device *ndev) 856 { 857 vlsi_irda_dev_t *idev = netdev_priv(ndev); 858 struct vlsi_ring *r = idev->tx_ring; 859 struct ring_descr *rd; 860 unsigned long flags; 861 unsigned iobase = ndev->base_addr; 862 u8 status; 863 u16 config; 864 int mtt, diff; 865 int len, speed; 866 char *msg = NULL; 867 868 speed = irda_get_next_speed(skb); 869 spin_lock_irqsave(&idev->lock, flags); 870 if (speed != -1 && speed != idev->baud) { 871 netif_stop_queue(ndev); 872 idev->new_baud = speed; 873 status = RD_TX_CLRENTX; /* stop tx-ring after this frame */ 874 } 875 else 876 status = 0; 877 878 if (skb->len == 0) { 879 /* handle zero packets - should be speed change */ 880 if (status == 0) { 881 msg = "bogus zero-length packet"; 882 goto drop_unlock; 883 } 884 885 /* due to the completely asynch tx operation we might have 886 * IrLAP racing with the hardware here, f.e. if the controller 887 * is just sending the last packet with current speed while 888 * the LAP is already switching the speed using synchronous 889 * len=0 packet. Immediate execution would lead to hw lockup 890 * requiring a powercycle to reset. Good candidate to trigger 891 * this is the final UA:RSP packet after receiving a DISC:CMD 892 * when getting the LAP down. 893 * Note that we are not protected by the queue_stop approach 894 * because the final UA:RSP arrives _without_ request to apply 895 * new-speed-after-this-packet - hence the driver doesn't know 896 * this was the last packet and doesn't stop the queue. So the 897 * forced switch to default speed from LAP gets through as fast 898 * as only some 10 usec later while the UA:RSP is still processed 899 * by the hardware and we would get screwed. 900 */ 901 902 if (ring_first(idev->tx_ring) == NULL) { 903 /* no race - tx-ring already empty */ 904 vlsi_set_baud(idev, iobase); 905 netif_wake_queue(ndev); 906 } 907 else 908 ; 909 /* keep the speed change pending like it would 910 * for any len>0 packet. tx completion interrupt 911 * will apply it when the tx ring becomes empty. 912 */ 913 spin_unlock_irqrestore(&idev->lock, flags); 914 dev_kfree_skb_any(skb); 915 return NETDEV_TX_OK; 916 } 917 918 /* sanity checks - simply drop the packet */ 919 920 rd = ring_last(r); 921 if (!rd) { 922 msg = "ring full, but queue wasn't stopped"; 923 goto drop_unlock; 924 } 925 926 if (rd_is_active(rd)) { 927 msg = "entry still owned by hw"; 928 goto drop_unlock; 929 } 930 931 if (!rd->buf) { 932 msg = "tx ring entry without pci buffer"; 933 goto drop_unlock; 934 } 935 936 if (rd->skb) { 937 msg = "ring entry with old skb still attached"; 938 goto drop_unlock; 939 } 940 941 /* no need for serialization or interrupt disable during mtt */ 942 spin_unlock_irqrestore(&idev->lock, flags); 943 944 if ((mtt = irda_get_mtt(skb)) > 0) { 945 diff = ktime_us_delta(ktime_get(), idev->last_rx); 946 if (mtt > diff) 947 udelay(mtt - diff); 948 /* must not sleep here - called under netif_tx_lock! 
*/ 949 } 950 951 /* tx buffer already owned by CPU due to pci_dma_sync_single_for_cpu() 952 * after subsequent tx-completion 953 */ 954 955 if (idev->mode == IFF_SIR) { 956 status |= RD_TX_DISCRC; /* no hw-crc creation */ 957 len = async_wrap_skb(skb, rd->buf, r->len); 958 959 /* Some rare worst case situation in SIR mode might lead to 960 * potential buffer overflow. The wrapper detects this, returns 961 * with a shortened frame (without FCS/EOF) but doesn't provide 962 * any error indication about the invalid packet which we are 963 * going to transmit. 964 * Therefore we log if the buffer got filled to the point, where the 965 * wrapper would abort, i.e. when there are less than 5 bytes left to 966 * allow appending the FCS/EOF. 967 */ 968 969 if (len >= r->len-5) 970 net_warn_ratelimited("%s: possible buffer overflow with SIR wrapping!\n", 971 __func__); 972 } 973 else { 974 /* hw deals with MIR/FIR mode wrapping */ 975 status |= RD_TX_PULSE; /* send 2 us highspeed indication pulse */ 976 len = skb->len; 977 if (len > r->len) { 978 msg = "frame exceeds tx buffer length"; 979 goto drop; 980 } 981 else 982 skb_copy_from_linear_data(skb, rd->buf, len); 983 } 984 985 rd->skb = skb; /* remember skb for tx-complete stats */ 986 987 rd_set_count(rd, len); 988 rd_set_status(rd, status); /* not yet active! */ 989 990 /* give dma buffer back to busmaster-hw (flush caches to make 991 * CPU-driven changes visible from the pci bus). 992 */ 993 994 pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir); 995 996 /* Switching to TX mode here races with the controller 997 * which may stop TX at any time when fetching an inactive descriptor 998 * or one with CLR_ENTX set. So we switch on TX only, if TX was not running 999 * _after_ the new descriptor was activated on the ring. This ensures 1000 * we will either find TX already stopped or we can be sure, there 1001 * will be a TX-complete interrupt even if the chip stopped doing 1002 * TX just after we found it still running. The ISR will then find 1003 * the non-empty ring and restart TX processing. The enclosing 1004 * spinlock provides the correct serialization to prevent race with isr. 1005 */ 1006 1007 spin_lock_irqsave(&idev->lock,flags); 1008 1009 rd_activate(rd); 1010 1011 if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) { 1012 int fifocnt; 1013 1014 fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 1015 if (fifocnt != 0) { 1016 pr_debug("%s: rx fifo not empty(%d)\n", 1017 __func__, fifocnt); 1018 } 1019 1020 config = inw(iobase+VLSI_PIO_IRCFG); 1021 mb(); 1022 outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG); 1023 wmb(); 1024 outw(0, iobase+VLSI_PIO_PROMPT); 1025 } 1026 1027 if (ring_put(r) == NULL) { 1028 netif_stop_queue(ndev); 1029 pr_debug("%s: tx ring full - queue stopped\n", __func__); 1030 } 1031 spin_unlock_irqrestore(&idev->lock, flags); 1032 1033 return NETDEV_TX_OK; 1034 1035 drop_unlock: 1036 spin_unlock_irqrestore(&idev->lock, flags); 1037 drop: 1038 net_warn_ratelimited("%s: dropping packet - %s\n", __func__, msg); 1039 dev_kfree_skb_any(skb); 1040 ndev->stats.tx_errors++; 1041 ndev->stats.tx_dropped++; 1042 /* Don't even think about returning NET_XMIT_DROP (=1) here! 
1043 * In fact any retval!=0 causes the packet scheduler to requeue the 1044 * packet for later retry of transmission - which isn't exactly 1045 * what we want after we've just called dev_kfree_skb_any ;-) 1046 */ 1047 return NETDEV_TX_OK; 1048 } 1049 1050 static void vlsi_tx_interrupt(struct net_device *ndev) 1051 { 1052 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1053 struct vlsi_ring *r = idev->tx_ring; 1054 struct ring_descr *rd; 1055 unsigned iobase; 1056 int ret; 1057 u16 config; 1058 1059 for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) { 1060 1061 if (rd_is_active(rd)) 1062 break; 1063 1064 ret = vlsi_process_tx(r, rd); 1065 1066 if (ret < 0) { 1067 ret = -ret; 1068 ndev->stats.tx_errors++; 1069 if (ret & VLSI_TX_DROP) 1070 ndev->stats.tx_dropped++; 1071 if (ret & VLSI_TX_FIFO) 1072 ndev->stats.tx_fifo_errors++; 1073 } 1074 else if (ret > 0){ 1075 ndev->stats.tx_packets++; 1076 ndev->stats.tx_bytes += ret; 1077 } 1078 } 1079 1080 iobase = ndev->base_addr; 1081 1082 if (idev->new_baud && rd == NULL) /* tx ring empty and speed change pending */ 1083 vlsi_set_baud(idev, iobase); 1084 1085 config = inw(iobase+VLSI_PIO_IRCFG); 1086 if (rd == NULL) /* tx ring empty: re-enable rx */ 1087 outw((config & ~IRCFG_ENTX) | IRCFG_ENRX, iobase+VLSI_PIO_IRCFG); 1088 1089 else if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) { 1090 int fifocnt; 1091 1092 fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 1093 if (fifocnt != 0) { 1094 pr_debug("%s: rx fifo not empty(%d)\n", 1095 __func__, fifocnt); 1096 } 1097 outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG); 1098 } 1099 1100 outw(0, iobase+VLSI_PIO_PROMPT); 1101 1102 if (netif_queue_stopped(ndev) && !idev->new_baud) { 1103 netif_wake_queue(ndev); 1104 pr_debug("%s: queue awoken\n", __func__); 1105 } 1106 } 1107 1108 /* caller must have stopped the controller from busmastering */ 1109 1110 static void vlsi_unarm_tx(vlsi_irda_dev_t *idev) 1111 { 1112 struct net_device *ndev = pci_get_drvdata(idev->pdev); 1113 struct vlsi_ring *r = idev->tx_ring; 1114 struct ring_descr *rd; 1115 int ret; 1116 1117 for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) { 1118 1119 ret = 0; 1120 if (rd_is_active(rd)) { 1121 rd_set_status(rd, 0); 1122 rd_set_count(rd, 0); 1123 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir); 1124 if (rd->skb) { 1125 dev_kfree_skb_any(rd->skb); 1126 rd->skb = NULL; 1127 } 1128 pr_debug("%s - dropping tx packet\n", __func__); 1129 ret = -VLSI_TX_DROP; 1130 } 1131 else 1132 ret = vlsi_process_tx(r, rd); 1133 1134 if (ret < 0) { 1135 ret = -ret; 1136 ndev->stats.tx_errors++; 1137 if (ret & VLSI_TX_DROP) 1138 ndev->stats.tx_dropped++; 1139 if (ret & VLSI_TX_FIFO) 1140 ndev->stats.tx_fifo_errors++; 1141 } 1142 else if (ret > 0){ 1143 ndev->stats.tx_packets++; 1144 ndev->stats.tx_bytes += ret; 1145 } 1146 } 1147 1148 } 1149 1150 /********************************************************/ 1151 1152 static int vlsi_start_clock(struct pci_dev *pdev) 1153 { 1154 u8 clkctl, lock; 1155 int i, count; 1156 1157 if (clksrc < 2) { /* auto or PLL: try PLL */ 1158 clkctl = CLKCTL_PD_INV | CLKCTL_CLKSTP; 1159 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1160 1161 /* procedure to detect PLL lock synchronisation: 1162 * after 0.5 msec initial delay we expect to find 3 PLL lock 1163 * indications within 10 msec for successful PLL detection. 
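 * (concretely, per the loop below: CLKCTL is read every 50 us and a
 * counter is bumped whenever CLKCTL_LOCK is set; reaching 3 within the
 * 10 msec budget counts as a stable lock, otherwise the code falls back
 * to the 40 MHz XCLK unless the PLL was explicitly requested)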
1164 */ 1165 udelay(500); 1166 count = 0; 1167 for (i = 500; i <= 10000; i += 50) { /* max 10 msec */ 1168 pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &lock); 1169 if (lock&CLKCTL_LOCK) { 1170 if (++count >= 3) 1171 break; 1172 } 1173 udelay(50); 1174 } 1175 if (count < 3) { 1176 if (clksrc == 1) { /* explicitly asked for PLL hence bail out */ 1177 net_err_ratelimited("%s: no PLL or failed to lock!\n", 1178 __func__); 1179 clkctl = CLKCTL_CLKSTP; 1180 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1181 return -1; 1182 } 1183 else /* was: clksrc=0(auto) */ 1184 clksrc = 3; /* fallback to 40MHz XCLK (OB800) */ 1185 1186 pr_debug("%s: PLL not locked, fallback to clksrc=%d\n", 1187 __func__, clksrc); 1188 } 1189 else 1190 clksrc = 1; /* got successful PLL lock */ 1191 } 1192 1193 if (clksrc != 1) { 1194 /* we get here if either no PLL detected in auto-mode or 1195 an external clock source was explicitly specified */ 1196 1197 clkctl = CLKCTL_EXTCLK | CLKCTL_CLKSTP; 1198 if (clksrc == 3) 1199 clkctl |= CLKCTL_XCKSEL; 1200 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1201 1202 /* no way to test for working XCLK */ 1203 } 1204 else 1205 pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl); 1206 1207 /* ok, now going to connect the chip with the clock source */ 1208 1209 clkctl &= ~CLKCTL_CLKSTP; 1210 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1211 1212 return 0; 1213 } 1214 1215 static void vlsi_stop_clock(struct pci_dev *pdev) 1216 { 1217 u8 clkctl; 1218 1219 /* disconnect chip from clock source */ 1220 pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl); 1221 clkctl |= CLKCTL_CLKSTP; 1222 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1223 1224 /* disable all clock sources */ 1225 clkctl &= ~(CLKCTL_EXTCLK | CLKCTL_PD_INV); 1226 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1227 } 1228 1229 /********************************************************/ 1230 1231 /* writing all-zero to the VLSI PCI IO register area seems to prevent 1232 * some occasional situations where the hardware fails (symptoms are 1233 * what appears as stalled tx/rx state machines, i.e. everything ok for 1234 * receive or transmit but hw makes no progress or is unable to access 1235 * the bus memory locations). 1236 * Best place to call this is immediately after/before the internal clock 1237 * gets started/stopped. 
1238 */ 1239 1240 static inline void vlsi_clear_regs(unsigned iobase) 1241 { 1242 unsigned i; 1243 const unsigned chip_io_extent = 32; 1244 1245 for (i = 0; i < chip_io_extent; i += sizeof(u16)) 1246 outw(0, iobase + i); 1247 } 1248 1249 static int vlsi_init_chip(struct pci_dev *pdev) 1250 { 1251 struct net_device *ndev = pci_get_drvdata(pdev); 1252 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1253 unsigned iobase; 1254 u16 ptr; 1255 1256 /* start the clock and clean the registers */ 1257 1258 if (vlsi_start_clock(pdev)) { 1259 net_err_ratelimited("%s: no valid clock source\n", __func__); 1260 return -1; 1261 } 1262 iobase = ndev->base_addr; 1263 vlsi_clear_regs(iobase); 1264 1265 outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* w/c pending IRQ, disable all INT */ 1266 1267 outw(0, iobase+VLSI_PIO_IRENABLE); /* disable IrPHY-interface */ 1268 1269 /* disable everything, particularly IRCFG_MSTR - (also resetting the RING_PTR) */ 1270 1271 outw(0, iobase+VLSI_PIO_IRCFG); 1272 wmb(); 1273 1274 outw(MAX_PACKET_LENGTH, iobase+VLSI_PIO_MAXPKT); /* max possible value=0x0fff */ 1275 1276 outw(BUS_TO_RINGBASE(idev->busaddr), iobase+VLSI_PIO_RINGBASE); 1277 1278 outw(TX_RX_TO_RINGSIZE(idev->tx_ring->size, idev->rx_ring->size), 1279 iobase+VLSI_PIO_RINGSIZE); 1280 1281 ptr = inw(iobase+VLSI_PIO_RINGPTR); 1282 atomic_set(&idev->rx_ring->head, RINGPTR_GET_RX(ptr)); 1283 atomic_set(&idev->rx_ring->tail, RINGPTR_GET_RX(ptr)); 1284 atomic_set(&idev->tx_ring->head, RINGPTR_GET_TX(ptr)); 1285 atomic_set(&idev->tx_ring->tail, RINGPTR_GET_TX(ptr)); 1286 1287 vlsi_set_baud(idev, iobase); /* idev->new_baud used as provided by caller */ 1288 1289 outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* just in case - w/c pending IRQ's */ 1290 wmb(); 1291 1292 /* DO NOT BLINDLY ENABLE IRINTR_ACTEN! 
1293 * basically every received pulse fires an ACTIVITY-INT 1294 * leading to >>1000 INT's per second instead of few 10 1295 */ 1296 1297 outb(IRINTR_RPKTEN|IRINTR_TPKTEN, iobase+VLSI_PIO_IRINTR); 1298 1299 return 0; 1300 } 1301 1302 static int vlsi_start_hw(vlsi_irda_dev_t *idev) 1303 { 1304 struct pci_dev *pdev = idev->pdev; 1305 struct net_device *ndev = pci_get_drvdata(pdev); 1306 unsigned iobase = ndev->base_addr; 1307 u8 byte; 1308 1309 /* we don't use the legacy UART, disable its address decoding */ 1310 1311 pci_read_config_byte(pdev, VLSI_PCI_IRMISC, &byte); 1312 byte &= ~(IRMISC_UARTEN | IRMISC_UARTTST); 1313 pci_write_config_byte(pdev, VLSI_PCI_IRMISC, byte); 1314 1315 /* enable PCI busmaster access to our 16MB page */ 1316 1317 pci_write_config_byte(pdev, VLSI_PCI_MSTRPAGE, MSTRPAGE_VALUE); 1318 pci_set_master(pdev); 1319 1320 if (vlsi_init_chip(pdev) < 0) { 1321 pci_disable_device(pdev); 1322 return -1; 1323 } 1324 1325 vlsi_fill_rx(idev->rx_ring); 1326 1327 idev->last_rx = ktime_get(); /* first mtt may start from now on */ 1328 1329 outw(0, iobase+VLSI_PIO_PROMPT); /* kick hw state machine */ 1330 1331 return 0; 1332 } 1333 1334 static int vlsi_stop_hw(vlsi_irda_dev_t *idev) 1335 { 1336 struct pci_dev *pdev = idev->pdev; 1337 struct net_device *ndev = pci_get_drvdata(pdev); 1338 unsigned iobase = ndev->base_addr; 1339 unsigned long flags; 1340 1341 spin_lock_irqsave(&idev->lock,flags); 1342 outw(0, iobase+VLSI_PIO_IRENABLE); 1343 outw(0, iobase+VLSI_PIO_IRCFG); /* disable everything */ 1344 1345 /* disable and w/c irqs */ 1346 outb(0, iobase+VLSI_PIO_IRINTR); 1347 wmb(); 1348 outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); 1349 spin_unlock_irqrestore(&idev->lock,flags); 1350 1351 vlsi_unarm_tx(idev); 1352 vlsi_unarm_rx(idev); 1353 1354 vlsi_clear_regs(iobase); 1355 vlsi_stop_clock(pdev); 1356 1357 pci_disable_device(pdev); 1358 1359 return 0; 1360 } 1361 1362 /**************************************************************/ 1363 1364 static void vlsi_tx_timeout(struct net_device *ndev) 1365 { 1366 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1367 1368 1369 vlsi_reg_debug(ndev->base_addr, __func__); 1370 vlsi_ring_debug(idev->tx_ring); 1371 1372 if (netif_running(ndev)) 1373 netif_stop_queue(ndev); 1374 1375 vlsi_stop_hw(idev); 1376 1377 /* now simply restart the whole thing */ 1378 1379 if (!idev->new_baud) 1380 idev->new_baud = idev->baud; /* keep current baudrate */ 1381 1382 if (vlsi_start_hw(idev)) 1383 net_err_ratelimited("%s: failed to restart hw - %s(%s) unusable!\n", 1384 __func__, pci_name(idev->pdev), ndev->name); 1385 else 1386 netif_start_queue(ndev); 1387 } 1388 1389 static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) 1390 { 1391 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1392 struct if_irda_req *irq = (struct if_irda_req *) rq; 1393 unsigned long flags; 1394 u16 fifocnt; 1395 int ret = 0; 1396 1397 switch (cmd) { 1398 case SIOCSBANDWIDTH: 1399 if (!capable(CAP_NET_ADMIN)) { 1400 ret = -EPERM; 1401 break; 1402 } 1403 spin_lock_irqsave(&idev->lock, flags); 1404 idev->new_baud = irq->ifr_baudrate; 1405 /* when called from userland there might be a minor race window here 1406 * if the stack tries to change speed concurrently - which would be 1407 * pretty strange anyway with the userland having full control... 
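 * (for example - hypothetical values: a SIOCSBANDWIDTH request carrying
 * ifr_baudrate = 115200 is simply stored in idev->new_baud above and
 * applied by the vlsi_set_baud() call below)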
1408 */ 1409 vlsi_set_baud(idev, ndev->base_addr); 1410 spin_unlock_irqrestore(&idev->lock, flags); 1411 break; 1412 case SIOCSMEDIABUSY: 1413 if (!capable(CAP_NET_ADMIN)) { 1414 ret = -EPERM; 1415 break; 1416 } 1417 irda_device_set_media_busy(ndev, TRUE); 1418 break; 1419 case SIOCGRECEIVING: 1420 /* the best we can do: check whether there are any bytes in rx fifo. 1421 * The trustable window (in case some data arrives just afterwards) 1422 * may be as short as 1usec or so at 4Mbps. 1423 */ 1424 fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 1425 irq->ifr_receiving = (fifocnt!=0) ? 1 : 0; 1426 break; 1427 default: 1428 net_warn_ratelimited("%s: notsupp - cmd=%04x\n", 1429 __func__, cmd); 1430 ret = -EOPNOTSUPP; 1431 } 1432 1433 return ret; 1434 } 1435 1436 /********************************************************/ 1437 1438 static irqreturn_t vlsi_interrupt(int irq, void *dev_instance) 1439 { 1440 struct net_device *ndev = dev_instance; 1441 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1442 unsigned iobase; 1443 u8 irintr; 1444 int boguscount = 5; 1445 unsigned long flags; 1446 int handled = 0; 1447 1448 iobase = ndev->base_addr; 1449 spin_lock_irqsave(&idev->lock,flags); 1450 do { 1451 irintr = inb(iobase+VLSI_PIO_IRINTR); 1452 mb(); 1453 outb(irintr, iobase+VLSI_PIO_IRINTR); /* acknowledge asap */ 1454 1455 if (!(irintr&=IRINTR_INT_MASK)) /* not our INT - probably shared */ 1456 break; 1457 1458 handled = 1; 1459 1460 if (unlikely(!(irintr & ~IRINTR_ACTIVITY))) 1461 break; /* nothing todo if only activity */ 1462 1463 if (irintr&IRINTR_RPKTINT) 1464 vlsi_rx_interrupt(ndev); 1465 1466 if (irintr&IRINTR_TPKTINT) 1467 vlsi_tx_interrupt(ndev); 1468 1469 } while (--boguscount > 0); 1470 spin_unlock_irqrestore(&idev->lock,flags); 1471 1472 if (boguscount <= 0) 1473 net_info_ratelimited("%s: too much work in interrupt!\n", 1474 __func__); 1475 return IRQ_RETVAL(handled); 1476 } 1477 1478 /********************************************************/ 1479 1480 static int vlsi_open(struct net_device *ndev) 1481 { 1482 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1483 int err = -EAGAIN; 1484 char hwname[32]; 1485 1486 if (pci_request_regions(idev->pdev, drivername)) { 1487 net_warn_ratelimited("%s: io resource busy\n", __func__); 1488 goto errout; 1489 } 1490 ndev->base_addr = pci_resource_start(idev->pdev,0); 1491 ndev->irq = idev->pdev->irq; 1492 1493 /* under some rare occasions the chip apparently comes up with 1494 * IRQ's pending. 
We better w/c pending IRQ and disable them all 1495 */ 1496 1497 outb(IRINTR_INT_MASK, ndev->base_addr+VLSI_PIO_IRINTR); 1498 1499 if (request_irq(ndev->irq, vlsi_interrupt, IRQF_SHARED, 1500 drivername, ndev)) { 1501 net_warn_ratelimited("%s: couldn't get IRQ: %d\n", 1502 __func__, ndev->irq); 1503 goto errout_io; 1504 } 1505 1506 if ((err = vlsi_create_hwif(idev)) != 0) 1507 goto errout_irq; 1508 1509 sprintf(hwname, "VLSI-FIR @ 0x%04x", (unsigned)ndev->base_addr); 1510 idev->irlap = irlap_open(ndev,&idev->qos,hwname); 1511 if (!idev->irlap) 1512 goto errout_free_ring; 1513 1514 idev->last_rx = ktime_get(); /* first mtt may start from now on */ 1515 1516 idev->new_baud = 9600; /* start with IrPHY using 9600(SIR) mode */ 1517 1518 if ((err = vlsi_start_hw(idev)) != 0) 1519 goto errout_close_irlap; 1520 1521 netif_start_queue(ndev); 1522 1523 net_info_ratelimited("%s: device %s operational\n", 1524 __func__, ndev->name); 1525 1526 return 0; 1527 1528 errout_close_irlap: 1529 irlap_close(idev->irlap); 1530 errout_free_ring: 1531 vlsi_destroy_hwif(idev); 1532 errout_irq: 1533 free_irq(ndev->irq,ndev); 1534 errout_io: 1535 pci_release_regions(idev->pdev); 1536 errout: 1537 return err; 1538 } 1539 1540 static int vlsi_close(struct net_device *ndev) 1541 { 1542 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1543 1544 netif_stop_queue(ndev); 1545 1546 if (idev->irlap) 1547 irlap_close(idev->irlap); 1548 idev->irlap = NULL; 1549 1550 vlsi_stop_hw(idev); 1551 1552 vlsi_destroy_hwif(idev); 1553 1554 free_irq(ndev->irq,ndev); 1555 1556 pci_release_regions(idev->pdev); 1557 1558 net_info_ratelimited("%s: device %s stopped\n", __func__, ndev->name); 1559 1560 return 0; 1561 } 1562 1563 static const struct net_device_ops vlsi_netdev_ops = { 1564 .ndo_open = vlsi_open, 1565 .ndo_stop = vlsi_close, 1566 .ndo_start_xmit = vlsi_hard_start_xmit, 1567 .ndo_do_ioctl = vlsi_ioctl, 1568 .ndo_tx_timeout = vlsi_tx_timeout, 1569 }; 1570 1571 static int vlsi_irda_init(struct net_device *ndev) 1572 { 1573 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1574 struct pci_dev *pdev = idev->pdev; 1575 1576 ndev->irq = pdev->irq; 1577 ndev->base_addr = pci_resource_start(pdev,0); 1578 1579 /* PCI busmastering 1580 * see include file for details why we need these 2 masks, in this order! 1581 */ 1582 1583 if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW) || 1584 pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) { 1585 net_err_ratelimited("%s: aborting due to PCI BM-DMA address limitations\n", 1586 __func__); 1587 return -1; 1588 } 1589 1590 irda_init_max_qos_capabilies(&idev->qos); 1591 1592 /* the VLSI82C147 does not support 576000! */ 1593 1594 idev->qos.baud_rate.bits = IR_2400 | IR_9600 1595 | IR_19200 | IR_38400 | IR_57600 | IR_115200 1596 | IR_1152000 | (IR_4000000 << 8); 1597 1598 idev->qos.min_turn_time.bits = qos_mtt_bits; 1599 1600 irda_qos_bits_to_value(&idev->qos); 1601 1602 /* currently no public media definitions for IrDA */ 1603 1604 ndev->flags |= IFF_PORTSEL | IFF_AUTOMEDIA; 1605 ndev->if_port = IF_PORT_UNKNOWN; 1606 1607 ndev->netdev_ops = &vlsi_netdev_ops; 1608 ndev->watchdog_timeo = 500*HZ/1000; /* max. 
allowed turn time for IrLAP */ 1609 1610 SET_NETDEV_DEV(ndev, &pdev->dev); 1611 1612 return 0; 1613 } 1614 1615 /**************************************************************/ 1616 1617 static int 1618 vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1619 { 1620 struct net_device *ndev; 1621 vlsi_irda_dev_t *idev; 1622 1623 if (pci_enable_device(pdev)) 1624 goto out; 1625 else 1626 pdev->current_state = 0; /* hw must be running now */ 1627 1628 net_info_ratelimited("%s: IrDA PCI controller %s detected\n", 1629 drivername, pci_name(pdev)); 1630 1631 if ( !pci_resource_start(pdev,0) || 1632 !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) { 1633 net_err_ratelimited("%s: bar 0 invalid", __func__); 1634 goto out_disable; 1635 } 1636 1637 ndev = alloc_irdadev(sizeof(*idev)); 1638 if (ndev==NULL) { 1639 net_err_ratelimited("%s: Unable to allocate device memory.\n", 1640 __func__); 1641 goto out_disable; 1642 } 1643 1644 idev = netdev_priv(ndev); 1645 1646 spin_lock_init(&idev->lock); 1647 mutex_init(&idev->mtx); 1648 mutex_lock(&idev->mtx); 1649 idev->pdev = pdev; 1650 1651 if (vlsi_irda_init(ndev) < 0) 1652 goto out_freedev; 1653 1654 if (register_netdev(ndev) < 0) { 1655 net_err_ratelimited("%s: register_netdev failed\n", __func__); 1656 goto out_freedev; 1657 } 1658 1659 if (vlsi_proc_root != NULL) { 1660 struct proc_dir_entry *ent; 1661 1662 ent = proc_create_data(ndev->name, S_IFREG|S_IRUGO, 1663 vlsi_proc_root, VLSI_PROC_FOPS, ndev); 1664 if (!ent) { 1665 net_warn_ratelimited("%s: failed to create proc entry\n", 1666 __func__); 1667 } else { 1668 proc_set_size(ent, 0); 1669 } 1670 idev->proc_entry = ent; 1671 } 1672 net_info_ratelimited("%s: registered device %s\n", 1673 drivername, ndev->name); 1674 1675 pci_set_drvdata(pdev, ndev); 1676 mutex_unlock(&idev->mtx); 1677 1678 return 0; 1679 1680 out_freedev: 1681 mutex_unlock(&idev->mtx); 1682 free_netdev(ndev); 1683 out_disable: 1684 pci_disable_device(pdev); 1685 out: 1686 return -ENODEV; 1687 } 1688 1689 static void vlsi_irda_remove(struct pci_dev *pdev) 1690 { 1691 struct net_device *ndev = pci_get_drvdata(pdev); 1692 vlsi_irda_dev_t *idev; 1693 1694 if (!ndev) { 1695 net_err_ratelimited("%s: lost netdevice?\n", drivername); 1696 return; 1697 } 1698 1699 unregister_netdev(ndev); 1700 1701 idev = netdev_priv(ndev); 1702 mutex_lock(&idev->mtx); 1703 if (idev->proc_entry) { 1704 remove_proc_entry(ndev->name, vlsi_proc_root); 1705 idev->proc_entry = NULL; 1706 } 1707 mutex_unlock(&idev->mtx); 1708 1709 free_netdev(ndev); 1710 1711 net_info_ratelimited("%s: %s removed\n", drivername, pci_name(pdev)); 1712 } 1713 1714 #ifdef CONFIG_PM 1715 1716 /* The Controller doesn't provide PCI PM capabilities as defined by PCI specs. 1717 * Some of the Linux PCI-PM code however depends on this, for example in 1718 * pci_set_power_state(). So we have to take care to perform the required 1719 * operations on our own (particularly reflecting the pdev->current_state) 1720 * otherwise we might get cheated by pci-pm. 
1724 static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
1725 {
1726 struct net_device *ndev = pci_get_drvdata(pdev);
1727 vlsi_irda_dev_t *idev;

1729 if (!ndev) {
1730 net_err_ratelimited("%s - %s: no netdevice\n",
1731 __func__, pci_name(pdev));
1732 return 0;
1733 }
1734 idev = netdev_priv(ndev);
1735 mutex_lock(&idev->mtx);
1736 if (pdev->current_state != 0) { /* already suspended */
1737 if (state.event > pdev->current_state) { /* simply go deeper */
1738 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1739 pdev->current_state = state.event;
1740 }
1741 else
1742 net_err_ratelimited("%s - %s: invalid suspend request %u -> %u\n",
1743 __func__, pci_name(pdev),
1744 pdev->current_state, state.event);
1745 mutex_unlock(&idev->mtx);
1746 return 0;
1747 }

1749 if (netif_running(ndev)) {
1750 netif_device_detach(ndev);
1751 vlsi_stop_hw(idev);
1752 pci_save_state(pdev);
1753 if (!idev->new_baud)
1754 /* remember speed settings to restore on resume */
1755 idev->new_baud = idev->baud;
1756 }

1758 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1759 pdev->current_state = state.event;
1760 idev->resume_ok = 1;
1761 mutex_unlock(&idev->mtx);
1762 return 0;
1763 }

1765 static int vlsi_irda_resume(struct pci_dev *pdev)
1766 {
1767 struct net_device *ndev = pci_get_drvdata(pdev);
1768 vlsi_irda_dev_t *idev;

1770 if (!ndev) {
1771 net_err_ratelimited("%s - %s: no netdevice\n",
1772 __func__, pci_name(pdev));
1773 return 0;
1774 }
1775 idev = netdev_priv(ndev);
1776 mutex_lock(&idev->mtx);
1777 if (pdev->current_state == 0) {
1778 mutex_unlock(&idev->mtx);
1779 net_warn_ratelimited("%s - %s: already resumed\n",
1780 __func__, pci_name(pdev));
1781 return 0;
1782 }

1784 pci_set_power_state(pdev, PCI_D0);
1785 pdev->current_state = PM_EVENT_ON;

1787 if (!idev->resume_ok) {
1788 /* should be obsolete now - but used to happen due to:
1789 * - pci layer initially setting pdev->current_state = 4 (unknown)
1790 * - pci layer did not walk the save_state-tree (might be APM problem)
1791 * so we could not refuse to suspend from undefined state
1792 * - vlsi_irda_suspend detected invalid state and refused to save
1793 * configuration for resume - but was too late to stop suspending
1794 * - vlsi_irda_resume got screwed when trying to resume from garbage
1795 *
1796 * now we explicitly set pdev->current_state = 0 after enabling the
1797 * device and independently resume_ok should catch any garbage config.
1798 */
1799 net_warn_ratelimited("%s - hm, nothing to resume?\n", __func__);
1800 mutex_unlock(&idev->mtx);
1801 return 0;
1802 }

1804 if (netif_running(ndev)) {
1805 pci_restore_state(pdev);
1806 vlsi_start_hw(idev);
1807 netif_device_attach(ndev);
1808 }
1809 idev->resume_ok = 0;
1810 mutex_unlock(&idev->mtx);
1811 return 0;
1812 }

1814 #endif /* CONFIG_PM */

1816 /*********************************************************/

1818 static struct pci_driver vlsi_irda_driver = {
1819 .name = drivername,
1820 .id_table = vlsi_irda_table,
1821 .probe = vlsi_irda_probe,
1822 .remove = vlsi_irda_remove,
1823 #ifdef CONFIG_PM
1824 .suspend = vlsi_irda_suspend,
1825 .resume = vlsi_irda_resume,
1826 #endif
1827 };

1829 #define PROC_DIR ("driver/" DRIVER_NAME)

1831 static int __init vlsi_mod_init(void)
1832 {
1833 int i, ret;

1835 if (clksrc < 0 || clksrc > 3) {
1836 net_err_ratelimited("%s: invalid clksrc=%d\n",
1837 drivername, clksrc);
1838 return -1;
1839 }

1841 for (i = 0; i < 2; i++) {
1842 switch(ringsize[i]) {
1843 case 4:
1844 case 8:
1845 case 16:
1846 case 32:
1847 case 64:
1848 break;
1849 default:
1850 net_warn_ratelimited("%s: invalid %s ringsize %d, using default=8\n",
1851 drivername,
1852 i ? "rx" : "tx",
1853 ringsize[i]);
1854 ringsize[i] = 8;
1855 break;
1856 }
1857 }

1859 sirpulse = !!sirpulse;

1861 /* proc_mkdir returns NULL if !CONFIG_PROC_FS.
1862 * Failure to create the procfs entry is handled like running
1863 * without procfs - it's not required for the driver to work.
1864 */
1865 vlsi_proc_root = proc_mkdir(PROC_DIR, NULL);

1867 ret = pci_register_driver(&vlsi_irda_driver);

1869 if (ret && vlsi_proc_root)
1870 remove_proc_entry(PROC_DIR, NULL);
1871 return ret;

1873 }

1875 static void __exit vlsi_mod_exit(void)
1876 {
1877 pci_unregister_driver(&vlsi_irda_driver);
1878 if (vlsi_proc_root)
1879 remove_proc_entry(PROC_DIR, NULL);
1880 }

1882 module_init(vlsi_mod_init);
1883 module_exit(vlsi_mod_exit);
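[Editor's note] vlsi_mod_init()/vlsi_mod_exit() above follow the usual acquire-in-order, release-in-reverse shape and treat the procfs directory as optional: a failed pci_register_driver() rolls the proc entry back, and module exit releases both, guarded by the NULL check. A compressed sketch of that shape with hypothetical my_* names:

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>

static struct proc_dir_entry *my_proc_root;	/* hypothetical; may stay NULL */
static struct pci_driver my_pci_driver;		/* hypothetical; fields omitted */

static int __init my_mod_init(void)
{
	int ret;

	/* optional resource first; NULL simply means "no procfs" */
	my_proc_root = proc_mkdir("driver/my_drv", NULL);
	ret = pci_register_driver(&my_pci_driver);
	if (ret && my_proc_root)
		remove_proc_entry("driver/my_drv", NULL); /* roll back on failure */
	return ret;
}

static void __exit my_mod_exit(void)
{
	/* release in reverse order of acquisition */
	pci_unregister_driver(&my_pci_driver);
	if (my_proc_root)
		remove_proc_entry("driver/my_drv", NULL);
}

module_init(my_mod_init);
module_exit(my_mod_exit);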
1889 /* LDV_COMMENT_BEGIN_MAIN */
1890 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful

1892 /*###########################################################################*/

1894 /*############## Driver Environment Generator 0.2 output ####################*/

1896 /*###########################################################################*/

1900 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
1901 void ldv_check_final_state(void);

1903 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
1904 void ldv_check_return_value(int res);

1906 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
1907 void ldv_check_return_value_probe(int res);

1909 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
1910 void ldv_initialize(void);

1912 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
1913 void ldv_handler_precall(void);

1915 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
1916 int nondet_int(void);

1918 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
1919 int LDV_IN_INTERRUPT;

1921 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
1922 void ldv_main0_sequence_infinite_withcheck_stateful(void) {

1926 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
1927 /*============================= VARIABLE DECLARATION PART =============================*/
1928 /** STRUCT: struct type: file_operations, struct name: vlsi_proc_fops **/
1929 /* content: static int vlsi_seq_open(struct inode *inode, struct file *file)*/
1930 /* LDV_COMMENT_BEGIN_PREP */
1931 #define DRIVER_NAME "vlsi_ir"
1932 #define DRIVER_VERSION "v0.5"
1933 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147"
1934 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>"
1935 #ifdef CONFIG_PROC_FS
1936 /* LDV_COMMENT_END_PREP */
1937 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_seq_open" */
1938 struct inode * var_group1;
1939 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_seq_open" */
1940 struct file * var_group2;
1941 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "vlsi_seq_open" */
1942 static int res_vlsi_seq_open_6;
1943 /* LDV_COMMENT_BEGIN_PREP */
1944 #define VLSI_PROC_FOPS (&vlsi_proc_fops)
1945 #else
1946 #define VLSI_PROC_FOPS NULL
1947 #endif
1948 #ifdef CONFIG_PM
1949 #endif
1950 #ifdef CONFIG_PM
1951 #endif
1952 #define PROC_DIR ("driver/" DRIVER_NAME)
1953 /* LDV_COMMENT_END_PREP */

1955 /** STRUCT: struct type: net_device_ops, struct name: vlsi_netdev_ops **/
1956 /* content: static int vlsi_open(struct net_device *ndev)*/
1957 /* LDV_COMMENT_BEGIN_PREP */
1958 #define DRIVER_NAME "vlsi_ir"
1959 #define DRIVER_VERSION "v0.5"
1960 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147"
1961 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>"
1962 #ifdef CONFIG_PROC_FS
1963 #define VLSI_PROC_FOPS (&vlsi_proc_fops)
1964 #else
1965 #define VLSI_PROC_FOPS NULL
1966 #endif
1967 /* LDV_COMMENT_END_PREP */
1968 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_open" */
1969 struct net_device * var_group3;
1970 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "vlsi_open" */
1971 static int res_vlsi_open_29;
1972 /* LDV_COMMENT_BEGIN_PREP */
1973 #ifdef CONFIG_PM
1974 #endif
1975 #ifdef CONFIG_PM
1976 #endif
1977 #define PROC_DIR ("driver/" DRIVER_NAME)
1978 /* LDV_COMMENT_END_PREP */
1979 /* content: static int vlsi_close(struct net_device *ndev)*/
1980 /* LDV_COMMENT_BEGIN_PREP */
1981 #define DRIVER_NAME "vlsi_ir"
1982 #define DRIVER_VERSION "v0.5"
1983 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147"
1984 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>"
1985 #ifdef CONFIG_PROC_FS
1986 #define VLSI_PROC_FOPS (&vlsi_proc_fops)
1987 #else
1988 #define VLSI_PROC_FOPS NULL
1989 #endif
1990 /* LDV_COMMENT_END_PREP */
1991 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "vlsi_close" */
1992 static int res_vlsi_close_30;
1993 /* LDV_COMMENT_BEGIN_PREP */
1994 #ifdef CONFIG_PM
1995 #endif
1996 #ifdef CONFIG_PM
1997 #endif
1998 #define PROC_DIR ("driver/" DRIVER_NAME)
1999 /* LDV_COMMENT_END_PREP */
2000 /* content: static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)*/
2001 /* LDV_COMMENT_BEGIN_PREP */
2002 #define DRIVER_NAME "vlsi_ir"
2003 #define DRIVER_VERSION "v0.5"
2004 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147"
"Martin Diehl <info@mdiehl.de>" 2006 #ifdef CONFIG_PROC_FS 2007 #define VLSI_PROC_FOPS (&vlsi_proc_fops) 2008 #else 2009 #define VLSI_PROC_FOPS NULL 2010 #endif 2011 /* LDV_COMMENT_END_PREP */ 2012 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_hard_start_xmit" */ 2013 struct sk_buff * var_group4; 2014 /* LDV_COMMENT_BEGIN_PREP */ 2015 #ifdef CONFIG_PM 2016 #endif 2017 #ifdef CONFIG_PM 2018 #endif 2019 #define PROC_DIR ("driver/" DRIVER_NAME) 2020 /* LDV_COMMENT_END_PREP */ 2021 /* content: static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)*/ 2022 /* LDV_COMMENT_BEGIN_PREP */ 2023 #define DRIVER_NAME "vlsi_ir" 2024 #define DRIVER_VERSION "v0.5" 2025 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 2026 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>" 2027 #ifdef CONFIG_PROC_FS 2028 #define VLSI_PROC_FOPS (&vlsi_proc_fops) 2029 #else 2030 #define VLSI_PROC_FOPS NULL 2031 #endif 2032 /* LDV_COMMENT_END_PREP */ 2033 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_ioctl" */ 2034 struct ifreq * var_group5; 2035 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_ioctl" */ 2036 int var_vlsi_ioctl_27_p2; 2037 /* LDV_COMMENT_BEGIN_PREP */ 2038 #ifdef CONFIG_PM 2039 #endif 2040 #ifdef CONFIG_PM 2041 #endif 2042 #define PROC_DIR ("driver/" DRIVER_NAME) 2043 /* LDV_COMMENT_END_PREP */ 2044 /* content: static void vlsi_tx_timeout(struct net_device *ndev)*/ 2045 /* LDV_COMMENT_BEGIN_PREP */ 2046 #define DRIVER_NAME "vlsi_ir" 2047 #define DRIVER_VERSION "v0.5" 2048 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 2049 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>" 2050 #ifdef CONFIG_PROC_FS 2051 #define VLSI_PROC_FOPS (&vlsi_proc_fops) 2052 #else 2053 #define VLSI_PROC_FOPS NULL 2054 #endif 2055 /* LDV_COMMENT_END_PREP */ 2056 /* LDV_COMMENT_BEGIN_PREP */ 2057 #ifdef CONFIG_PM 2058 #endif 2059 #ifdef CONFIG_PM 2060 #endif 2061 #define PROC_DIR ("driver/" DRIVER_NAME) 2062 /* LDV_COMMENT_END_PREP */ 2063 2064 /** STRUCT: struct type: pci_driver, struct name: vlsi_irda_driver **/ 2065 /* content: static int vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)*/ 2066 /* LDV_COMMENT_BEGIN_PREP */ 2067 #define DRIVER_NAME "vlsi_ir" 2068 #define DRIVER_VERSION "v0.5" 2069 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 2070 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>" 2071 #ifdef CONFIG_PROC_FS 2072 #define VLSI_PROC_FOPS (&vlsi_proc_fops) 2073 #else 2074 #define VLSI_PROC_FOPS NULL 2075 #endif 2076 /* LDV_COMMENT_END_PREP */ 2077 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_irda_probe" */ 2078 struct pci_dev * var_group6; 2079 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_irda_probe" */ 2080 const struct pci_device_id * var_vlsi_irda_probe_32_p1; 2081 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "vlsi_irda_probe" */ 2082 static int res_vlsi_irda_probe_32; 2083 /* LDV_COMMENT_BEGIN_PREP */ 2084 #ifdef CONFIG_PM 2085 #endif 2086 #ifdef CONFIG_PM 2087 #endif 2088 #define PROC_DIR ("driver/" DRIVER_NAME) 2089 /* LDV_COMMENT_END_PREP */ 2090 /* content: static void vlsi_irda_remove(struct pci_dev *pdev)*/ 2091 /* LDV_COMMENT_BEGIN_PREP */ 2092 #define DRIVER_NAME "vlsi_ir" 2093 #define DRIVER_VERSION "v0.5" 2094 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 2095 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>" 2096 #ifdef 
2096 #ifdef CONFIG_PROC_FS
2097 #define VLSI_PROC_FOPS (&vlsi_proc_fops)
2098 #else
2099 #define VLSI_PROC_FOPS NULL
2100 #endif
2101 /* LDV_COMMENT_END_PREP */
2102 /* LDV_COMMENT_BEGIN_PREP */
2103 #ifdef CONFIG_PM
2104 #endif
2105 #ifdef CONFIG_PM
2106 #endif
2107 #define PROC_DIR ("driver/" DRIVER_NAME)
2108 /* LDV_COMMENT_END_PREP */
2109 /* content: static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)*/
2110 /* LDV_COMMENT_BEGIN_PREP */
2111 #define DRIVER_NAME "vlsi_ir"
2112 #define DRIVER_VERSION "v0.5"
2113 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147"
2114 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>"
2115 #ifdef CONFIG_PROC_FS
2116 #define VLSI_PROC_FOPS (&vlsi_proc_fops)
2117 #else
2118 #define VLSI_PROC_FOPS NULL
2119 #endif
2120 #ifdef CONFIG_PM
2121 /* LDV_COMMENT_END_PREP */
2122 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_irda_suspend" */
2123 pm_message_t var_vlsi_irda_suspend_34_p1;
2124 /* LDV_COMMENT_BEGIN_PREP */
2125 #endif
2126 #ifdef CONFIG_PM
2127 #endif
2128 #define PROC_DIR ("driver/" DRIVER_NAME)
2129 /* LDV_COMMENT_END_PREP */
2130 /* content: static int vlsi_irda_resume(struct pci_dev *pdev)*/
2131 /* LDV_COMMENT_BEGIN_PREP */
2132 #define DRIVER_NAME "vlsi_ir"
2133 #define DRIVER_VERSION "v0.5"
2134 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147"
2135 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>"
2136 #ifdef CONFIG_PROC_FS
2137 #define VLSI_PROC_FOPS (&vlsi_proc_fops)
2138 #else
2139 #define VLSI_PROC_FOPS NULL
2140 #endif
2141 #ifdef CONFIG_PM
2142 /* LDV_COMMENT_END_PREP */
2143 /* LDV_COMMENT_BEGIN_PREP */
2144 #endif
2145 #ifdef CONFIG_PM
2146 #endif
2147 #define PROC_DIR ("driver/" DRIVER_NAME)
2148 /* LDV_COMMENT_END_PREP */

2150 /** CALLBACK SECTION request_irq **/
2151 /* content: static irqreturn_t vlsi_interrupt(int irq, void *dev_instance)*/
2152 /* LDV_COMMENT_BEGIN_PREP */
2153 #define DRIVER_NAME "vlsi_ir"
2154 #define DRIVER_VERSION "v0.5"
2155 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147"
2156 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>"
2157 #ifdef CONFIG_PROC_FS
2158 #define VLSI_PROC_FOPS (&vlsi_proc_fops)
2159 #else
2160 #define VLSI_PROC_FOPS NULL
2161 #endif
2162 /* LDV_COMMENT_END_PREP */
2163 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_interrupt" */
2164 int var_vlsi_interrupt_28_p0;
2165 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_interrupt" */
2166 void * var_vlsi_interrupt_28_p1;
2167 /* LDV_COMMENT_BEGIN_PREP */
2168 #ifdef CONFIG_PM
2169 #endif
2170 #ifdef CONFIG_PM
2171 #endif
2172 #define PROC_DIR ("driver/" DRIVER_NAME)
2173 /* LDV_COMMENT_END_PREP */

2178 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
2179 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
2180 /*============================= VARIABLE INITIALIZING PART =============================*/
2181 LDV_IN_INTERRUPT=1;

2186 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
2187 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
2188 /*============================= FUNCTION CALL SECTION =============================*/
2189 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
2190 ldv_initialize();

2192 /** INIT: init_type: ST_MODULE_INIT **/
2193 /* content: static int __init vlsi_mod_init(void)*/
2194 /* LDV_COMMENT_BEGIN_PREP */
2195 #define DRIVER_NAME "vlsi_ir"
2196 #define DRIVER_VERSION "v0.5"
2197 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147"
2198 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>"
2199 #ifdef CONFIG_PROC_FS
2200 #define VLSI_PROC_FOPS (&vlsi_proc_fops)
2201 #else
2202 #define VLSI_PROC_FOPS NULL
2203 #endif
2204 #ifdef CONFIG_PM
2205 #endif
2206 #ifdef CONFIG_PM
2207 #endif
2208 #define PROC_DIR ("driver/" DRIVER_NAME)
2209 /* LDV_COMMENT_END_PREP */
2210 /* LDV_COMMENT_FUNCTION_CALL Kernel calls driver init function after driver loading to kernel. This function declared as "MODULE_INIT(function name)". */
2211 ldv_handler_precall();
2212 if(vlsi_mod_init())
2213 goto ldv_final;
2214 int ldv_s_vlsi_proc_fops_file_operations = 0;

2216 int ldv_s_vlsi_netdev_ops_net_device_ops = 0;

2219 int ldv_s_vlsi_irda_driver_pci_driver = 0;

2225 while( nondet_int()
2226 || !(ldv_s_vlsi_proc_fops_file_operations == 0)
2227 || !(ldv_s_vlsi_netdev_ops_net_device_ops == 0)
2228 || !(ldv_s_vlsi_irda_driver_pci_driver == 0)
2229 ) {

2231 switch(nondet_int()) {

2233 case 0: {

2235 /** STRUCT: struct type: file_operations, struct name: vlsi_proc_fops **/
2236 if(ldv_s_vlsi_proc_fops_file_operations==0) {

2238 /* content: static int vlsi_seq_open(struct inode *inode, struct file *file)*/
2239 /* LDV_COMMENT_BEGIN_PREP */
2240 #define DRIVER_NAME "vlsi_ir"
2241 #define DRIVER_VERSION "v0.5"
2242 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147"
2243 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>"
2244 #ifdef CONFIG_PROC_FS
2245 /* LDV_COMMENT_END_PREP */
2246 /* LDV_COMMENT_FUNCTION_CALL Function from field "open" from driver structure with callbacks "vlsi_proc_fops". Standard function test for correct return result. */
2247 ldv_handler_precall();
2248 res_vlsi_seq_open_6 = vlsi_seq_open( var_group1, var_group2);
2249 ldv_check_return_value(res_vlsi_seq_open_6);
2250 if(res_vlsi_seq_open_6)
2251 goto ldv_module_exit;
2252 /* LDV_COMMENT_BEGIN_PREP */
2253 #define VLSI_PROC_FOPS (&vlsi_proc_fops)
2254 #else
2255 #define VLSI_PROC_FOPS NULL
2256 #endif
2257 #ifdef CONFIG_PM
2258 #endif
2259 #ifdef CONFIG_PM
2260 #endif
2261 #define PROC_DIR ("driver/" DRIVER_NAME)
2262 /* LDV_COMMENT_END_PREP */
2263 ldv_s_vlsi_proc_fops_file_operations=0;

2265 }

2267 }

2269 break;
2270 case 1: {

2272 /** STRUCT: struct type: net_device_ops, struct name: vlsi_netdev_ops **/
2273 if(ldv_s_vlsi_netdev_ops_net_device_ops==0) {

2275 /* content: static int vlsi_open(struct net_device *ndev)*/
2276 /* LDV_COMMENT_BEGIN_PREP */
2277 #define DRIVER_NAME "vlsi_ir"
2278 #define DRIVER_VERSION "v0.5"
2279 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147"
2280 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>"
2281 #ifdef CONFIG_PROC_FS
2282 #define VLSI_PROC_FOPS (&vlsi_proc_fops)
2283 #else
2284 #define VLSI_PROC_FOPS NULL
2285 #endif
2286 /* LDV_COMMENT_END_PREP */
2287 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_open" from driver structure with callbacks "vlsi_netdev_ops". Standard function test for correct return result. */
2288 ldv_handler_precall();
2289 res_vlsi_open_29 = vlsi_open( var_group3);
2290 ldv_check_return_value(res_vlsi_open_29);
2291 if(res_vlsi_open_29 < 0)
2292 goto ldv_module_exit;
2293 /* LDV_COMMENT_BEGIN_PREP */
2294 #ifdef CONFIG_PM
2295 #endif
2296 #ifdef CONFIG_PM
2297 #endif
2298 #define PROC_DIR ("driver/" DRIVER_NAME)
2299 /* LDV_COMMENT_END_PREP */
2300 ldv_s_vlsi_netdev_ops_net_device_ops++;

2302 }

2304 }

2306 break;
2307 case 2: {

2309 /** STRUCT: struct type: net_device_ops, struct name: vlsi_netdev_ops **/
2310 if(ldv_s_vlsi_netdev_ops_net_device_ops==1) {

2312 /* content: static int vlsi_close(struct net_device *ndev)*/
2313 /* LDV_COMMENT_BEGIN_PREP */
2314 #define DRIVER_NAME "vlsi_ir"
2315 #define DRIVER_VERSION "v0.5"
2316 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147"
2317 #define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>"
2318 #ifdef CONFIG_PROC_FS
2319 #define VLSI_PROC_FOPS (&vlsi_proc_fops)
2320 #else
2321 #define VLSI_PROC_FOPS NULL
2322 #endif
2323 /* LDV_COMMENT_END_PREP */
2324 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_stop" from driver structure with callbacks "vlsi_netdev_ops". Standard function test for correct return result. */
2325 ldv_handler_precall();
2326 res_vlsi_close_30 = vlsi_close( var_group3);
2327 ldv_check_return_value(res_vlsi_close_30);
2328 if(res_vlsi_close_30)
2329 goto ldv_module_exit;
2330 /* LDV_COMMENT_BEGIN_PREP */
2331 #ifdef CONFIG_PM
2332 #endif
2333 #ifdef CONFIG_PM
2334 #endif
2335 #define PROC_DIR ("driver/" DRIVER_NAME)
2336 /* LDV_COMMENT_END_PREP */
2337 ldv_s_vlsi_netdev_ops_net_device_ops=0;

2339 }

2341 }

2343 break;
2344 case 3: {

2346 /** STRUCT: struct type: net_device_ops, struct name: vlsi_netdev_ops **/

2349 /* content: static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)*/
2350 /* LDV_COMMENT_BEGIN_PREP */
2351 #define DRIVER_NAME "vlsi_ir"
2352 #define DRIVER_VERSION "v0.5"
2353 #define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147"
2354 #define DRIVER_AUTHOR