Error Trace

Bug # 9

__CPAchecker_initialize()
{
95 struct kernel_symbol { unsigned long value; const char *name; } ;
33 struct module ;
19 typedef signed char __s8;
20 typedef unsigned char __u8;
22 typedef short __s16;
23 typedef unsigned short __u16;
25 typedef int __s32;
26 typedef unsigned int __u32;
30 typedef unsigned long long __u64;
15 typedef signed char s8;
16 typedef unsigned char u8;
18 typedef short s16;
19 typedef unsigned short u16;
21 typedef int s32;
22 typedef unsigned int u32;
24 typedef long long s64;
25 typedef unsigned long long u64;
14 typedef long __kernel_long_t;
15 typedef unsigned long __kernel_ulong_t;
27 typedef int __kernel_pid_t;
48 typedef unsigned int __kernel_uid32_t;
49 typedef unsigned int __kernel_gid32_t;
71 typedef __kernel_ulong_t __kernel_size_t;
72 typedef __kernel_long_t __kernel_ssize_t;
87 typedef long long __kernel_loff_t;
88 typedef __kernel_long_t __kernel_time_t;
89 typedef __kernel_long_t __kernel_clock_t;
90 typedef int __kernel_timer_t;
91 typedef int __kernel_clockid_t;
32 typedef __u16 __le16;
33 typedef __u16 __be16;
34 typedef __u32 __le32;
35 typedef __u32 __be32;
36 typedef __u64 __le64;
39 typedef __u16 __sum16;
40 typedef __u32 __wsum;
12 typedef __u32 __kernel_dev_t;
15 typedef __kernel_dev_t dev_t;
18 typedef unsigned short umode_t;
21 typedef __kernel_pid_t pid_t;
26 typedef __kernel_clockid_t clockid_t;
29 typedef _Bool bool;
31 typedef __kernel_uid32_t uid_t;
32 typedef __kernel_gid32_t gid_t;
45 typedef __kernel_loff_t loff_t;
54 typedef __kernel_size_t size_t;
59 typedef __kernel_ssize_t ssize_t;
69 typedef __kernel_time_t time_t;
102 typedef __s32 int32_t;
106 typedef __u8 uint8_t;
108 typedef __u32 uint32_t;
111 typedef __u64 uint64_t;
133 typedef unsigned long sector_t;
134 typedef unsigned long blkcnt_t;
147 typedef u64 dma_addr_t;
158 typedef unsigned int gfp_t;
159 typedef unsigned int fmode_t;
160 typedef unsigned int oom_flags_t;
163 typedef u64 phys_addr_t;
168 typedef phys_addr_t resource_size_t;
178 struct __anonstruct_atomic_t_6 { int counter; } ;
178 typedef struct __anonstruct_atomic_t_6 atomic_t;
183 struct __anonstruct_atomic64_t_7 { long counter; } ;
183 typedef struct __anonstruct_atomic64_t_7 atomic64_t;
184 struct list_head { struct list_head *next; struct list_head *prev; } ;
189 struct hlist_node ;
189 struct hlist_head { struct hlist_node *first; } ;
193 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ;
204 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ;
65 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ;
59 struct __anonstruct_ldv_1016_9 { unsigned int a; unsigned int b; } ;
59 struct __anonstruct_ldv_1031_10 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ;
59 union __anonunion_ldv_1032_8 { struct __anonstruct_ldv_1016_9 ldv_1016; struct __anonstruct_ldv_1031_10 ldv_1031; } ;
59 struct desc_struct { union __anonunion_ldv_1032_8 ldv_1032; } ;
12 typedef unsigned long pteval_t;
15 typedef unsigned long pgdval_t;
16 typedef unsigned long pgprotval_t;
18 struct __anonstruct_pte_t_11 { pteval_t pte; } ;
18 typedef struct __anonstruct_pte_t_11 pte_t;
20 struct pgprot { pgprotval_t pgprot; } ;
242 typedef struct pgprot pgprot_t;
244 struct __anonstruct_pgd_t_12 { pgdval_t pgd; } ;
244 typedef struct __anonstruct_pgd_t_12 pgd_t;
332 struct page ;
332 typedef struct page *pgtable_t;
340 struct file ;
353 struct seq_file ;
390 struct thread_struct ;
392 struct mm_struct ;
393 struct task_struct ;
394 struct cpumask ;
395 struct paravirt_callee_save { void *func; } ;
196 struct pv_irq_ops { struct paravirt_callee_save save_fl; struct paravirt_callee_save restore_fl; struct paravirt_callee_save irq_disable; struct paravirt_callee_save irq_enable; void (*safe_halt)(); void (*halt)(); void (*adjust_exception_frame)(); } ;
327 struct arch_spinlock ;
18 typedef u16 __ticket_t;
19 typedef u32 __ticketpair_t;
20 struct __raw_tickets { __ticket_t head; __ticket_t tail; } ;
32 union __anonunion_ldv_1452_15 { __ticketpair_t head_tail; struct __raw_tickets tickets; } ;
32 struct arch_spinlock { union __anonunion_ldv_1452_15 ldv_1452; } ;
33 typedef struct arch_spinlock arch_spinlock_t;
34 struct qrwlock { atomic_t cnts; arch_spinlock_t lock; } ;
14 typedef struct qrwlock arch_rwlock_t;
142 typedef void (*ctor_fn_t)();
48 struct device ;
54 struct net_device ;
400 struct file_operations ;
412 struct completion ;
416 struct pid ;
527 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ;
102 struct timespec ;
127 struct kernel_vm86_regs { struct pt_regs pt; unsigned short es; unsigned short __esh; unsigned short ds; unsigned short __dsh; unsigned short fs; unsigned short __fsh; unsigned short gs; unsigned short __gsh; } ;
79 union __anonunion_ldv_2961_20 { struct pt_regs *regs; struct kernel_vm86_regs *vm86; } ;
79 struct math_emu_info { long ___orig_eip; union __anonunion_ldv_2961_20 ldv_2961; } ;
306 struct cpumask { unsigned long bits[128U]; } ;
14 typedef struct cpumask cpumask_t;
671 typedef struct cpumask *cpumask_var_t;
162 struct seq_operations ;
294 struct i387_fsave_struct { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ;
312 struct __anonstruct_ldv_5248_25 { u64 rip; u64 rdp; } ;
312 struct __anonstruct_ldv_5254_26 { u32 fip; u32 fcs; u32 foo; u32 fos; } ;
312 union __anonunion_ldv_5255_24 { struct __anonstruct_ldv_5248_25 ldv_5248; struct __anonstruct_ldv_5254_26 ldv_5254; } ;
312 union __anonunion_ldv_5264_27 { u32 padding1[12U]; u32 sw_reserved[12U]; } ;
312 struct i387_fxsave_struct { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion_ldv_5255_24 ldv_5255; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion_ldv_5264_27 ldv_5264; } ;
346 struct i387_soft_struct { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ;
367 struct ymmh_struct { u32 ymmh_space[64U]; } ;
372 struct lwp_struct { u8 reserved[128U]; } ;
377 struct bndregs_struct { u64 bndregs[8U]; } ;
381 struct bndcsr_struct { u64 cfg_reg_u; u64 status_reg; } ;
386 struct xsave_hdr_struct { u64 xstate_bv; u64 reserved1[2U]; u64 reserved2[5U]; } ;
392 struct xsave_struct { struct i387_fxsave_struct i387; struct xsave_hdr_struct xsave_hdr; struct ymmh_struct ymmh; struct lwp_struct lwp; struct bndregs_struct bndregs; struct bndcsr_struct bndcsr; } ;
401 union thread_xstate { struct i387_fsave_struct fsave; struct i387_fxsave_struct fxsave; struct i387_soft_struct soft; struct xsave_struct xsave; } ;
409 struct fpu { unsigned int last_cpu; unsigned int has_fpu; union thread_xstate *state; } ;
465 struct kmem_cache ;
466 struct perf_event ;
467 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned long usersp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; unsigned long fs; unsigned long gs; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; struct fpu fpu; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; unsigned char fpu_counter; } ;
23 typedef atomic64_t atomic_long_t;
35 struct lockdep_map ;
55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ;
26 struct lockdep_subclass_key { char __one_byte; } ;
53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ;
59 struct lock_class { struct list_head hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ;
144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ;
205 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; } ;
530 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
32 typedef struct raw_spinlock raw_spinlock_t;
33 struct __anonstruct_ldv_6305_31 { u8 __padding[24U]; struct lockdep_map dep_map; } ;
33 union __anonunion_ldv_6306_30 { struct raw_spinlock rlock; struct __anonstruct_ldv_6305_31 ldv_6305; } ;
33 struct spinlock { union __anonunion_ldv_6306_30 ldv_6306; } ;
76 typedef struct spinlock spinlock_t;
23 struct __anonstruct_rwlock_t_32 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
23 typedef struct __anonstruct_rwlock_t_32 rwlock_t;
135 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ;
51 typedef struct seqcount seqcount_t;
259 struct __anonstruct_seqlock_t_33 { struct seqcount seqcount; spinlock_t lock; } ;
259 typedef struct __anonstruct_seqlock_t_33 seqlock_t;
433 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ;
83 struct user_namespace ;
22 struct __anonstruct_kuid_t_34 { uid_t val; } ;
22 typedef struct __anonstruct_kuid_t_34 kuid_t;
27 struct __anonstruct_kgid_t_35 { gid_t val; } ;
27 typedef struct __anonstruct_kgid_t_35 kgid_t;
127 struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; } ;
34 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ;
39 typedef struct __wait_queue_head wait_queue_head_t;
98 struct __anonstruct_nodemask_t_36 { unsigned long bits[16U]; } ;
98 typedef struct __anonstruct_nodemask_t_36 nodemask_t;
814 struct optimistic_spin_queue ;
815 struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; const char *name; void *magic; struct lockdep_map dep_map; } ;
68 struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; } ;
178 struct rw_semaphore ;
179 struct rw_semaphore { long count; raw_spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; struct optimistic_spin_queue *osq; struct lockdep_map dep_map; } ;
174 struct completion { unsigned int done; wait_queue_head_t wait; } ;
105 struct llist_node ;
64 struct llist_node { struct llist_node *next; } ;
72 struct resource { resource_size_t start; resource_size_t end; const char *name; unsigned long flags; struct resource *parent; struct resource *sibling; struct resource *child; } ;
172 struct pci_dev ;
323 union ktime { s64 tv64; } ;
59 typedef union ktime ktime_t;
412 struct tvec_base ;
413 struct timer_list { struct list_head entry; unsigned long expires; struct tvec_base *base; void (*function)(unsigned long); unsigned long data; int slack; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ;
254 struct hrtimer ;
255 enum hrtimer_restart ;
266 struct workqueue_struct ;
267 struct work_struct ;
53 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ;
106 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ;
58 struct pm_message { int event; } ;
64 typedef struct pm_message pm_message_t;
65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ;
320 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ;
327 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ;
335 struct wakeup_source ;
546 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; } ;
553 struct dev_pm_qos ;
553 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool ignore_children; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ;
614 struct dev_pm_domain { struct dev_pm_ops ops; } ;
133 struct pci_bus ;
22 struct __anonstruct_mm_context_t_101 { void *ldt; int size; unsigned short ia32_compat; struct mutex lock; void *vdso; } ;
22 typedef struct __anonstruct_mm_context_t_101 mm_context_t;
18 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ;
40 struct rb_root { struct rb_node *rb_node; } ;
87 struct vm_area_struct ;
22 struct bio_vec ;
167 struct notifier_block ;
51 struct notifier_block { int (*notifier_call)(struct notifier_block *, unsigned long, void *); struct notifier_block *next; int priority; } ;
63 struct blocking_notifier_head { struct rw_semaphore rwsem; struct notifier_block *head; } ;
906 struct ctl_table ;
835 struct nsproxy ;
836 struct ctl_table_root ;
837 struct ctl_table_header ;
838 struct ctl_dir ;
39 typedef int proc_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
59 struct ctl_table_poll { atomic_t event; wait_queue_head_t wait; } ;
98 struct ctl_table { const char *procname; void *data; int maxlen; umode_t mode; struct ctl_table *child; proc_handler *proc_handler; struct ctl_table_poll *poll; void *extra1; void *extra2; } ;
119 struct ctl_node { struct rb_node node; struct ctl_table_header *header; } ;
124 struct __anonstruct_ldv_14188_129 { struct ctl_table *ctl_table; int used; int count; int nreg; } ;
124 union __anonunion_ldv_14190_128 { struct __anonstruct_ldv_14188_129 ldv_14188; struct callback_head rcu; } ;
124 struct ctl_table_set ;
124 struct ctl_table_header { union __anonunion_ldv_14190_128 ldv_14190; struct completion *unregistering; struct ctl_table *ctl_table_arg; struct ctl_table_root *root; struct ctl_table_set *set; struct ctl_dir *parent; struct ctl_node *node; } ;
145 struct ctl_dir { struct ctl_table_header header; struct rb_root root; } ;
151 struct ctl_table_set { int (*is_seen)(struct ctl_table_set *); struct ctl_dir dir; } ;
156 struct ctl_table_root { struct ctl_table_set default_set; struct ctl_table_set * (*lookup)(struct ctl_table_root *, struct nsproxy *); int (*permissions)(struct ctl_table_header *, struct ctl_table *); } ;
37 struct cred ;
24 struct inode ;
58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ;
66 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ;
73 struct __anonstruct_ldv_14434_136 { struct arch_uprobe_task autask; unsigned long vaddr; } ;
73 struct __anonstruct_ldv_14438_137 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ;
73 union __anonunion_ldv_14439_135 { struct __anonstruct_ldv_14434_136 ldv_14434; struct __anonstruct_ldv_14438_137 ldv_14438; } ;
73 struct uprobe ;
73 struct return_instance ;
73 struct uprobe_task { enum uprobe_task_state state; union __anonunion_ldv_14439_135 ldv_14439; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ;
94 struct xol_area ;
95 struct uprobes_state { struct xol_area *xol_area; } ;
133 struct address_space ;
134 union __anonunion_ldv_14548_138 { struct address_space *mapping; void *s_mem; } ;
134 union __anonunion_ldv_14554_140 { unsigned long index; void *freelist; bool pfmemalloc; } ;
134 struct __anonstruct_ldv_14564_144 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ;
134 union __anonunion_ldv_14566_143 { atomic_t _mapcount; struct __anonstruct_ldv_14564_144 ldv_14564; int units; } ;
134 struct __anonstruct_ldv_14568_142 { union __anonunion_ldv_14566_143 ldv_14566; atomic_t _count; } ;
134 union __anonunion_ldv_14570_141 { unsigned long counters; struct __anonstruct_ldv_14568_142 ldv_14568; unsigned int active; } ;
134 struct __anonstruct_ldv_14571_139 { union __anonunion_ldv_14554_140 ldv_14554; union __anonunion_ldv_14570_141 ldv_14570; } ;
134 struct __anonstruct_ldv_14578_146 { struct page *next; int pages; int pobjects; } ;
134 struct slab ;
134 union __anonunion_ldv_14583_145 { struct list_head lru; struct __anonstruct_ldv_14578_146 ldv_14578; struct slab *slab_page; struct callback_head callback_head; pgtable_t pmd_huge_pte; } ;
134 union __anonunion_ldv_14589_147 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; struct page *first_page; } ;
134 struct page { unsigned long flags; union __anonunion_ldv_14548_138 ldv_14548; struct __anonstruct_ldv_14571_139 ldv_14571; union __anonunion_ldv_14583_145 ldv_14583; union __anonunion_ldv_14589_147 ldv_14589; unsigned long debug_flags; } ;
187 struct page_frag { struct page *page; __u32 offset; __u32 size; } ;
239 struct __anonstruct_linear_149 { struct rb_node rb; unsigned long rb_subtree_last; } ;
239 union __anonunion_shared_148 { struct __anonstruct_linear_149 linear; struct list_head nonlinear; } ;
239 struct anon_vma ;
239 struct vm_operations_struct ;
239 struct mempolicy ;
239 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; union __anonunion_shared_148 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; } ;
311 struct core_thread { struct task_struct *task; struct core_thread *next; } ;
317 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ;
330 struct task_rss_stat { int events; int count[3U]; } ;
338 struct mm_rss_stat { atomic_long_t count[3U]; } ;
343 struct kioctx_table ;
344 struct linux_binfmt ;
344 struct mmu_notifier_mm ;
344 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long shared_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; } ;
15 typedef __u64 Elf64_Addr;
16 typedef __u16 Elf64_Half;
20 typedef __u32 Elf64_Word;
21 typedef __u64 Elf64_Xword;
190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ;
198 typedef struct elf64_sym Elf64_Sym;
48 union __anonunion_ldv_14952_153 { unsigned long bitmap[4U]; struct callback_head callback_head; } ;
48 struct idr_layer { int prefix; int layer; struct idr_layer *ary[256U]; int count; union __anonunion_ldv_14952_153 ldv_14952; } ;
41 struct idr { struct idr_layer *hint; struct idr_layer *top; int layers; int cur; spinlock_t lock; int id_free_cnt; struct idr_layer *id_free; } ;
124 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ;
153 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ;
185 struct dentry ;
186 struct iattr ;
187 struct super_block ;
188 struct file_system_type ;
189 struct kernfs_open_node ;
190 struct kernfs_iattrs ;
213 struct kernfs_root ;
213 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ;
85 struct kernfs_node ;
85 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ;
89 struct kernfs_ops ;
89 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; } ;
95 union __anonunion_ldv_15096_154 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ;
95 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion_ldv_15096_154 ldv_15096; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ;
137 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); } ;
154 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ;
170 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; void *priv; struct mutex mutex; int event; struct list_head list; size_t atomic_write_len; bool mmapped; const struct vm_operations_struct *vm_ops; } ;
186 struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ;
462 struct sock ;
463 struct kobject ;
464 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ;
470 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ;
59 struct bin_attribute ;
60 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ;
37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ;
67 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ;
131 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ;
470 struct kref { atomic_t refcount; } ;
52 struct kset ;
52 struct kobj_type ;
52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ;
114 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ;
122 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ;
130 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ;
147 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ;
222 struct kernel_param ;
227 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ;
58 struct kparam_string ;
58 struct kparam_array ;
58 union __anonunion_ldv_15771_155 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ;
58 struct kernel_param { const char *name; const struct kernel_param_ops *ops; u16 perm; s16 level; union __anonunion_ldv_15771_155 ldv_15771; } ;
70 struct kparam_string { unsigned int maxlen; char *string; } ;
76 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ;
461 struct mod_arch_specific { } ;
36 struct module_param_attrs ;
36 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ;
46 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ;
72 struct exception_table_entry ;
205 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ;
212 struct module_ref { unsigned long incs; unsigned long decs; } ;
226 struct module_sect_attrs ;
226 struct module_notes_attrs ;
226 struct tracepoint ;
226 struct ftrace_event_call ;
226 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); void *module_init; void *module_core; unsigned int init_size; unsigned int core_size; unsigned int init_text_size; unsigned int core_text_size; unsigned int init_ro_size; unsigned int core_ro_size; struct mod_arch_specific arch; unsigned int taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; Elf64_Sym *symtab; Elf64_Sym *core_symtab; unsigned int num_symtab; unsigned int core_num_syms; char *strtab; char *core_strtab; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct ftrace_event_call **trace_events; unsigned int num_trace_events; struct list_head source_list; struct list_head target_list; void (*exit)(); struct module_ref *refptr; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ;
22 struct kernel_cap_struct { __u32 cap[2U]; } ;
25 typedef struct kernel_cap_struct kernel_cap_t;
218 struct plist_head { struct list_head node_list; } ;
84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ;
4 typedef unsigned long cputime_t;
25 struct sem_undo_list ;
25 struct sysv_sem { struct sem_undo_list *undo_list; } ;
24 struct __anonstruct_sigset_t_157 { unsigned long sig[1U]; } ;
24 typedef struct __anonstruct_sigset_t_157 sigset_t;
25 struct siginfo ;
17 typedef void __signalfn_t(int);
18 typedef __signalfn_t *__sighandler_t;
20 typedef void __restorefn_t();
21 typedef __restorefn_t *__sigrestore_t;
34 union sigval { int sival_int; void *sival_ptr; } ;
10 typedef union sigval sigval_t;
11 struct __anonstruct__kill_159 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ;
11 struct __anonstruct__timer_160 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ;
11 struct __anonstruct__rt_161 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ;
11 struct __anonstruct__sigchld_162 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ;
11 struct __anonstruct__sigfault_163 { void *_addr; short _addr_lsb; } ;
11 struct __anonstruct__sigpoll_164 { long _band; int _fd; } ;
11 struct __anonstruct__sigsys_165 { void *_call_addr; int _syscall; unsigned int _arch; } ;
11 union __anonunion__sifields_158 { int _pad[28U]; struct __anonstruct__kill_159 _kill; struct __anonstruct__timer_160 _timer; struct __anonstruct__rt_161 _rt; struct __anonstruct__sigchld_162 _sigchld; struct __anonstruct__sigfault_163 _sigfault; struct __anonstruct__sigpoll_164 _sigpoll; struct __anonstruct__sigsys_165 _sigsys; } ;
11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_158 _sifields; } ;
109 typedef struct siginfo siginfo_t;
11 struct user_struct ;
21 struct sigpending { struct list_head list; sigset_t signal; } ;
246 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; } ;
260 struct k_sigaction { struct sigaction sa; } ;
459 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ;
466 struct pid_namespace ;
466 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ;
56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ;
68 struct pid_link { struct hlist_node node; struct pid *pid; } ;
174 struct percpu_counter { raw_spinlock_t lock; s64 count; struct list_head list; s32 *counters; } ;
46 struct seccomp_filter ;
47 struct seccomp { int mode; struct seccomp_filter *filter; } ;
40 struct rt_mutex_waiter ;
41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ;
11 struct timerqueue_node { struct rb_node node; ktime_t expires; } ;
12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ;
50 struct hrtimer_clock_base ;
51 struct hrtimer_cpu_base ;
60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ;
65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; unsigned long state; int start_pid; void *start_site; char start_comm[16U]; } ;
132 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t resolution; ktime_t (*get_time)(); ktime_t softirq_time; ktime_t offset; } ;
163 struct hrtimer_cpu_base { raw_spinlock_t lock; unsigned int active_bases; unsigned int clock_was_set; ktime_t expires_next; int hres_active; int hang_detected; unsigned long nr_events; unsigned long nr_retries; unsigned long nr_hangs; ktime_t max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ;
463 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ;
45 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ;
39 struct assoc_array_ptr ;
39 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ;
31 typedef int32_t key_serial_t;
34 typedef uint32_t key_perm_t;
35 struct key ;
36 struct signal_struct ;
37 struct key_type ;
41 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ;
123 union __anonunion_ldv_17540_168 { struct list_head graveyard_link; struct rb_node serial_node; } ;
123 struct key_user ;
123 union __anonunion_ldv_17548_169 { time_t expiry; time_t revoked_at; } ;
123 struct __anonstruct_ldv_17561_171 { struct key_type *type; char *description; } ;
123 union __anonunion_ldv_17562_170 { struct keyring_index_key index_key; struct __anonstruct_ldv_17561_171 ldv_17561; } ;
123 union __anonunion_type_data_172 { struct list_head link; unsigned long x[2U]; void *p[2U]; int reject_error; } ;
123 union __anonunion_payload_174 { unsigned long value; void *rcudata; void *data; void *data2[2U]; } ;
123 union __anonunion_ldv_17577_173 { union __anonunion_payload_174 payload; struct assoc_array keys; } ;
123 struct key { atomic_t usage; key_serial_t serial; union __anonunion_ldv_17540_168 ldv_17540; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion_ldv_17548_169 ldv_17548; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion_ldv_17562_170 ldv_17562; union __anonunion_type_data_172 type_data; union __anonunion_ldv_17577_173 ldv_17577; } ;
356 struct audit_context ;
27 struct group_info { atomic_t usage; int ngroups; int nblocks; kgid_t small_block[32U]; kgid_t *blocks[0U]; } ;
78 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ;
125 struct futex_pi_state ;
126 struct robust_list_head ;
127 struct bio_list ;
128 struct fs_struct ;
129 struct perf_event_context ;
130 struct blk_plug ;
180 struct cfs_rq ;
181 struct task_group ;
426 struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ;
465 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ;
473 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ;
480 struct cputime { cputime_t utime; cputime_t stime; } ;
492 struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; } ;
512 struct thread_group_cputimer { struct task_cputime cputime; int running; raw_spinlock_t lock; } ;
554 struct autogroup ;
555 struct tty_struct ;
555 struct taskstats ;
555 struct tty_audit_buf ;
555 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; unsigned int audit_tty_log_passwd; struct tty_audit_buf *tty_audit_buf; struct rw_semaphore group_rwsem; oom_flags_t oom_flags; short oom_score_adj; short oom_score_adj_min; struct mutex cred_guard_mutex; } ;
735 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ;
778 struct backing_dev_info ;
779 struct reclaim_state ;
780 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ;
794 struct task_delay_info { spinlock_t lock; unsigned int flags; struct timespec blkio_start; struct timespec blkio_end; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; struct timespec freepages_start; struct timespec freepages_end; u64 freepages_delay; u32 freepages_count; } ;
1026 struct io_context ;
1060 struct pipe_inode_info ;
1061 struct uts_namespace ;
1062 struct load_weight { unsigned long weight; u32 inv_weight; } ;
1069 struct sched_avg { u32 runnable_avg_sum; u32 runnable_avg_period; u64 last_runnable_update; s64 decay_count; unsigned long load_avg_contrib; } ;
1081 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ;
1116 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ;
1148 struct rt_rq ;
1148 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ;
1164 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_new; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ;
1222 struct mem_cgroup ;
1222 struct memcg_batch_info { int do_batch; struct mem_cgroup *memcg; unsigned long nr_pages; unsigned long memsw_nr_pages; } ;
1643 struct memcg_oom_info { struct mem_cgroup *memcg; gfp_t gfp_mask; int order; unsigned char may_oom; } ;
1650 struct sched_class ;
1650 struct files_struct ;
1650 struct css_set ;
1650 struct compat_robust_list_head ;
1650 struct numa_group ;
1650 struct task_struct { volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; struct task_struct *last_wakee; unsigned long wakee_flips; unsigned long wakee_flip_decay_ts; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; unsigned char brk_randomized; u32 vmacache_seqnum; struct vm_area_struct *vmacache[4U]; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned int jobctl; unsigned int personality; unsigned char in_execve; unsigned char in_iowait; unsigned char no_new_privs; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t utimescaled; cputime_t stimescaled; cputime_t gtime; struct cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; struct timespec start_time; struct timespec real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *real_cred; const struct cred *cred; char comm[16U]; int link_count; int total_link_count; struct sysv_sem sysvsem; unsigned long last_switch_count; struct thread_struct thread; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; int (*notifier)(void *); void *notifier_data; sigset_t *notifier_mask; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct task_struct *pi_top_task; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t acct_timexpd; nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults_memory; unsigned long total_numa_faults; unsigned long *numa_faults_buffer_memory; unsigned long *numa_faults_cpu; unsigned long *numa_faults_buffer_cpu; unsigned long numa_faults_locality[2U]; unsigned long numa_pages_migrated; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; unsigned long timer_slack_ns; unsigned long default_timer_slack_ns; unsigned long trace; unsigned long trace_recursion; struct memcg_batch_info memcg_batch; unsigned int memcg_kmem_skip_account; struct memcg_oom_info memcg_oom; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; } ;
2998 enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 } ;
16 typedef enum irqreturn irqreturn_t;
359 struct proc_dir_entry ;
62 struct exception_table_entry { int insn; int fixup; } ;
450 struct tasklet_struct { struct tasklet_struct *next; unsigned long state; atomic_t count; void (*func)(unsigned long); unsigned long data; } ;
13 typedef unsigned long kernel_ulong_t;
14 struct pci_device_id { __u32 vendor; __u32 device; __u32 subvendor; __u32 subdevice; __u32 class; __u32 class_mask; kernel_ulong_t driver_data; } ;
186 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; } ;
219 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ;
628 struct klist_node ;
37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ;
67 struct path ;
68 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; struct user_namespace *user_ns; void *private; } ;
35 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ;
196 struct pinctrl ;
197 struct pinctrl_state ;
194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ;
42 struct dma_map_ops ;
42 struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; } ;
14 struct device_private ;
15 struct device_driver ;
16 struct driver_private ;
17 struct class ;
18 struct subsys_private ;
19 struct bus_type ;
20 struct device_node ;
21 struct iommu_ops ;
22 struct iommu_group ;
60 struct device_attribute ;
60 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ;
138 struct device_type ;
195 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ;
321 struct class_attribute ;
321 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ;
414 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ;
482 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ;
510 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ;
640 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ;
649 struct acpi_device ;
650 struct acpi_dev_node { struct acpi_device *companion; } ;
656 struct dma_coherent_mem ;
656 struct cma ;
656 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct dev_pin_info *pins; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct acpi_dev_node acpi_node; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; bool offline_disabled; bool offline; } ;
803 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ;
69 struct hotplug_slot ;
69 struct pci_slot { struct pci_bus *bus; struct list_head list; struct hotplug_slot *hotplug; unsigned char number; struct kobject kobj; } ;
109 typedef int pci_power_t;
136 typedef unsigned int pci_channel_state_t;
137 enum pci_channel_state { pci_channel_io_normal = 1, pci_channel_io_frozen = 2, pci_channel_io_perm_failure = 3 } ;
162 typedef unsigned short pci_dev_flags_t;
185 typedef unsigned short pci_bus_flags_t;
242 struct pcie_link_state ;
243 struct pci_vpd ;
244 struct pci_sriov ;
245 struct pci_ats ;
246 struct pci_driver ;
246 union __anonunion_ldv_22518_181 { struct pci_sriov *sriov; struct pci_dev *physfn; } ;
246 struct pci_dev { struct list_head bus_list; struct pci_bus *bus; struct pci_bus *subordinate; void *sysdata; struct proc_dir_entry *procent; struct pci_slot *slot; unsigned int devfn; unsigned short vendor; unsigned short device; unsigned short subsystem_vendor; unsigned short subsystem_device; unsigned int class; u8 revision; u8 hdr_type; u8 pcie_cap; u8 msi_cap; u8 msix_cap; unsigned char pcie_mpss; u8 rom_base_reg; u8 pin; u16 pcie_flags_reg; u8 dma_alias_devfn; struct pci_driver *driver; u64 dma_mask; struct device_dma_parameters dma_parms; pci_power_t current_state; u8 pm_cap; unsigned char pme_support; unsigned char pme_interrupt; unsigned char pme_poll; unsigned char d1_support; unsigned char d2_support; unsigned char no_d1d2; unsigned char no_d3cold; unsigned char d3cold_allowed; unsigned char mmio_always_on; unsigned char wakeup_prepared; unsigned char runtime_d3cold; unsigned int d3_delay; unsigned int d3cold_delay; struct pcie_link_state *link_state; pci_channel_state_t error_state; struct device dev; int cfg_size; unsigned int irq; struct resource resource[17U]; bool match_driver; unsigned char transparent; unsigned char multifunction; unsigned char is_added; unsigned char is_busmaster; unsigned char no_msi; unsigned char block_cfg_access; unsigned char broken_parity_status; unsigned char irq_reroute_variant; unsigned char msi_enabled; unsigned char msix_enabled; unsigned char ari_enabled; unsigned char is_managed; unsigned char needs_freset; unsigned char state_saved; unsigned char is_physfn; unsigned char is_virtfn; unsigned char reset_fn; unsigned char is_hotplug_bridge; unsigned char __aer_firmware_first_valid; unsigned char __aer_firmware_first; unsigned char broken_intx_masking; unsigned char io_window_1k; pci_dev_flags_t dev_flags; atomic_t enable_cnt; u32 saved_config_space[16U]; struct hlist_head saved_cap_space; struct bin_attribute *rom_attr; int rom_attr_enabled; struct bin_attribute *res_attr[17U]; struct bin_attribute *res_attr_wc[17U]; struct list_head msi_list; const struct attribute_group **msi_irq_groups; struct pci_vpd *vpd; union __anonunion_ldv_22518_181 ldv_22518; struct pci_ats *ats; phys_addr_t rom; size_t romlen; char *driver_override; } ;
436 struct pci_ops ;
436 struct msi_chip ;
436 struct pci_bus { struct list_head node; struct pci_bus *parent; struct list_head children; struct list_head devices; struct pci_dev *self; struct list_head slots; struct resource *resource[4U]; struct list_head resources; struct resource busn_res; struct pci_ops *ops; struct msi_chip *msi; void *sysdata; struct proc_dir_entry *procdir; unsigned char number; unsigned char primary; unsigned char max_bus_speed; unsigned char cur_bus_speed; char name[48U]; unsigned short bridge_ctl; pci_bus_flags_t bus_flags; struct device *bridge; struct device dev; struct bin_attribute *legacy_io; struct bin_attribute *legacy_mem; unsigned char is_added; } ;
553 struct pci_ops { int (*read)(struct pci_bus *, unsigned int, int, int, u32 *); int (*write)(struct pci_bus *, unsigned int, int, int, u32 ); } ;
574 struct pci_dynids { spinlock_t lock; struct list_head list; } ;
588 typedef unsigned int pci_ers_result_t;
598 struct pci_error_handlers { pci_ers_result_t (*error_detected)(struct pci_dev *, enum pci_channel_state ); pci_ers_result_t (*mmio_enabled)(struct pci_dev *); pci_ers_result_t (*link_reset)(struct pci_dev *); pci_ers_result_t (*slot_reset)(struct pci_dev *); void (*reset_notify)(struct pci_dev *, bool ); void (*resume)(struct pci_dev *); } ;
631 struct pci_driver { struct list_head node; const char *name; const struct pci_device_id *id_table; int (*probe)(struct pci_dev *, const struct pci_device_id *); void (*remove)(struct pci_dev *); int (*suspend)(struct pci_dev *, pm_message_t ); int (*suspend_late)(struct pci_dev *, pm_message_t ); int (*resume_early)(struct pci_dev *); int (*resume)(struct pci_dev *); void (*shutdown)(struct pci_dev *); int (*sriov_configure)(struct pci_dev *, int); const struct pci_error_handlers *err_handler; struct device_driver driver; struct pci_dynids dynids; } ;
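The probe/remove pair in struct pci_driver is where a driver claims and releases a struct pci_dev. A minimal registration sketch, assuming hypothetical mydev names and an illustrative vendor/device pair:

static int mydev_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        int err = pci_enable_device(pdev);
        if (err)
                return err;
        pci_set_master(pdev);                 /* enable bus mastering for DMA */
        return 0;
}

static void mydev_remove(struct pci_dev *pdev)
{
        pci_disable_device(pdev);
}

static const struct pci_device_id mydev_ids[] = {
        { PCI_DEVICE(0x1234, 0x5678) },       /* hypothetical vendor/device IDs */
        { 0 }
};

static struct pci_driver mydev_driver = {
        .name     = "mydev",
        .id_table = mydev_ids,
        .probe    = mydev_probe,
        .remove   = mydev_remove,
};
module_pci_driver(mydev_driver);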
1153 struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; } ;
93 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; nodemask_t nodes_to_scan; int nid; } ;
26 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ;
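The count_objects/scan_objects split above separates the cheap estimate from the actual reclaim: count reports how many objects could be freed, scan frees up to sc->nr_to_scan of them. A sketch, assuming hypothetical my_cached_objects/my_evict helpers:

static unsigned long my_count(struct shrinker *s, struct shrink_control *sc)
{
        return my_cached_objects;             /* hypothetical object counter */
}

static unsigned long my_scan(struct shrinker *s, struct shrink_control *sc)
{
        unsigned long freed = my_evict(sc->nr_to_scan);  /* hypothetical */
        return freed ? freed : SHRINK_STOP;   /* SHRINK_STOP: nothing reclaimable */
}

static struct shrinker my_shrinker = {
        .count_objects = my_count,
        .scan_objects  = my_scan,
        .seeks         = DEFAULT_SEEKS,
};
/* register_shrinker(&my_shrinker) at init, unregister_shrinker() at exit */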
71 struct file_ra_state ;
72 struct writeback_control ;
188 struct vm_fault { unsigned int flags; unsigned long pgoff; void *virtual_address; struct page *page; unsigned long max_pgoff; pte_t *pte; } ;
221 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*fault)(struct vm_area_struct *, struct vm_fault *); void (*map_pages)(struct vm_area_struct *, struct vm_fault *); int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); int (*migrate)(struct vm_area_struct *, const nodemask_t *, const nodemask_t *, unsigned long); int (*remap_pages)(struct vm_area_struct *, unsigned long, unsigned long, unsigned long); } ;
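A fault handler matching the int (*fault)(struct vm_area_struct *, struct vm_fault *) signature above fills vmf->page with a referenced page or returns a VM_FAULT_* code. A sketch, assuming a hypothetical my_pages array of MY_NPAGES preallocated pages:

static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        if (vmf->pgoff >= MY_NPAGES)          /* hypothetical bound */
                return VM_FAULT_SIGBUS;
        vmf->page = my_pages[vmf->pgoff];     /* hypothetical backing pages */
        get_page(vmf->page);                  /* fault path consumes a reference */
        return 0;
}

static const struct vm_operations_struct my_vm_ops = {
        .fault = my_fault,
};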
368 struct kmem_cache_cpu { void **freelist; unsigned long tid; struct page *page; struct page *partial; unsigned int stat[26U]; } ;
48 struct kmem_cache_order_objects { unsigned long x; } ;
58 struct memcg_cache_params ;
58 struct kmem_cache_node ;
58 struct kmem_cache { struct kmem_cache_cpu *cpu_slab; unsigned long flags; unsigned long min_partial; int size; int object_size; int offset; int cpu_partial; struct kmem_cache_order_objects oo; struct kmem_cache_order_objects max; struct kmem_cache_order_objects min; gfp_t allocflags; int refcount; void (*ctor)(void *); int inuse; int align; int reserved; const char *name; struct list_head list; struct kobject kobj; struct memcg_cache_params *memcg_params; int max_attr_size; struct kset *memcg_kset; int remote_node_defrag_ratio; struct kmem_cache_node *node[1024U]; } ;
501 struct __anonstruct_ldv_26538_183 { struct callback_head callback_head; struct kmem_cache *memcg_caches[0U]; } ;
501 struct __anonstruct_ldv_26544_184 { struct mem_cgroup *memcg; struct list_head list; struct kmem_cache *root_cache; atomic_t nr_pages; } ;
501 union __anonunion_ldv_26545_182 { struct __anonstruct_ldv_26538_183 ldv_26538; struct __anonstruct_ldv_26544_184 ldv_26544; } ;
501 struct memcg_cache_params { bool is_root_cache; union __anonunion_ldv_26545_182 ldv_26545; } ;
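struct kmem_cache above is the slab allocator's per-cache descriptor; drivers use it only through the create/alloc/free API (the ctor callback is optional). A sketch, assuming a hypothetical struct my_obj:

static struct kmem_cache *my_cache;

/* once, e.g. at module init */
my_cache = kmem_cache_create("my_cache", sizeof(struct my_obj),
                             0, SLAB_HWCACHE_ALIGN, NULL);

struct my_obj *obj = kmem_cache_alloc(my_cache, GFP_KERNEL);
/* ... use obj ... */
kmem_cache_free(my_cache, obj);
kmem_cache_destroy(my_cache);                 /* at module exit */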
34 struct dma_attrs { unsigned long flags[1U]; } ;
70 enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ;
77 struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; } ;
351 struct dma_map_ops { void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , struct dma_attrs *); void (*free)(struct device *, size_t , void *, dma_addr_t , struct dma_attrs *); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , struct dma_attrs *); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , struct dma_attrs *); dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , struct dma_attrs *); void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs *); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *); void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction ); int (*mapping_error)(struct device *, dma_addr_t ); int (*dma_supported)(struct device *, u64 ); int (*set_dma_mask)(struct device *, u64 ); int is_phys; } ;
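Drivers reach these ops indirectly through the generic dma_map_* wrappers, with enum dma_data_direction from above selecting the sync direction. A streaming-mapping sketch, assuming hypothetical pdev/buf/len:

dma_addr_t handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, handle))    /* always check before use */
        return -EIO;
/* hand 'handle' to the hardware, wait for the transfer to finish */
dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);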
84 struct pm_qos_request { struct plist_node node; int pm_qos_class; struct delayed_work work; } ;
48 struct pm_qos_flags_request { struct list_head node; s32 flags; } ;
53 enum dev_pm_qos_req_type { DEV_PM_QOS_RESUME_LATENCY = 1, DEV_PM_QOS_LATENCY_TOLERANCE = 2, DEV_PM_QOS_FLAGS = 3 } ;
59 union __anonunion_data_185 { struct plist_node pnode; struct pm_qos_flags_request flr; } ;
59 struct dev_pm_qos_request { enum dev_pm_qos_req_type type; union __anonunion_data_185 data; struct device *dev; } ;
68 enum pm_qos_type { PM_QOS_UNITIALIZED = 0, PM_QOS_MAX = 1, PM_QOS_MIN = 2 } ;
74 struct pm_qos_constraints { struct plist_head list; s32 target_value; s32 default_value; s32 no_constraint_value; enum pm_qos_type type; struct blocking_notifier_head *notifiers; } ;
88 struct pm_qos_flags { struct list_head list; s32 effective_flags; } ;
93 struct dev_pm_qos { struct pm_qos_constraints resume_latency; struct pm_qos_constraints latency_tolerance; struct pm_qos_flags flags; struct dev_pm_qos_request *resume_latency_req; struct dev_pm_qos_request *latency_tolerance_req; struct dev_pm_qos_request *flags_req; } ;
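struct pm_qos_request above is the handle a caller keeps for a system-wide QoS constraint. A sketch of the usual add/update/remove lifecycle (the class and the 20 us value are illustrative):

static struct pm_qos_request my_qos;

pm_qos_add_request(&my_qos, PM_QOS_CPU_DMA_LATENCY, 20);   /* cap latency at 20 us */
/* latency-sensitive work runs here */
pm_qos_update_request(&my_qos, PM_QOS_DEFAULT_VALUE);      /* relax the constraint */
pm_qos_remove_request(&my_qos);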
54 struct iovec { void *iov_base; __kernel_size_t iov_len; } ;
27 union __anonunion_ldv_28086_186 { const struct iovec *iov; const struct bio_vec *bvec; } ;
27 struct iov_iter { int type; size_t iov_offset; size_t count; union __anonunion_ldv_28086_186 ldv_28086; unsigned long nr_segs; } ;
38 typedef s32 dma_cookie_t;
1153 struct dql { unsigned int num_queued; unsigned int adj_limit; unsigned int last_obj_cnt; unsigned int limit; unsigned int num_completed; unsigned int prev_ovlimit; unsigned int prev_num_queued; unsigned int prev_last_obj_cnt; unsigned int lowest_slack; unsigned long slack_start_time; unsigned int max_limit; unsigned int min_limit; unsigned int slack_hold_time; } ;
11 typedef unsigned short __kernel_sa_family_t;
23 typedef __kernel_sa_family_t sa_family_t;
24 struct sockaddr { sa_family_t sa_family; char sa_data[14U]; } ;
43 struct __anonstruct_sync_serial_settings_188 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; } ;
43 typedef struct __anonstruct_sync_serial_settings_188 sync_serial_settings;
50 struct __anonstruct_te1_settings_189 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; unsigned int slot_map; } ;
50 typedef struct __anonstruct_te1_settings_189 te1_settings;
55 struct __anonstruct_raw_hdlc_proto_190 { unsigned short encoding; unsigned short parity; } ;
55 typedef struct __anonstruct_raw_hdlc_proto_190 raw_hdlc_proto;
65 struct __anonstruct_fr_proto_191 { unsigned int t391; unsigned int t392; unsigned int n391; unsigned int n392; unsigned int n393; unsigned short lmi; unsigned short dce; } ;
65 typedef struct __anonstruct_fr_proto_191 fr_proto;
69 struct __anonstruct_fr_proto_pvc_192 { unsigned int dlci; } ;
69 typedef struct __anonstruct_fr_proto_pvc_192 fr_proto_pvc;
74 struct __anonstruct_fr_proto_pvc_info_193 { unsigned int dlci; char master[16U]; } ;
74 typedef struct __anonstruct_fr_proto_pvc_info_193 fr_proto_pvc_info;
79 struct __anonstruct_cisco_proto_194 { unsigned int interval; unsigned int timeout; } ;
79 typedef struct __anonstruct_cisco_proto_194 cisco_proto;
117 struct ifmap { unsigned long mem_start; unsigned long mem_end; unsigned short base_addr; unsigned char irq; unsigned char dma; unsigned char port; } ;
177 union __anonunion_ifs_ifsu_195 { raw_hdlc_proto *raw_hdlc; cisco_proto *cisco; fr_proto *fr; fr_proto_pvc *fr_pvc; fr_proto_pvc_info *fr_pvc_info; sync_serial_settings *sync; te1_settings *te1; } ;
177 struct if_settings { unsigned int type; unsigned int size; union __anonunion_ifs_ifsu_195 ifs_ifsu; } ;
195 union __anonunion_ifr_ifrn_196 { char ifrn_name[16U]; } ;
195 union __anonunion_ifr_ifru_197 { struct sockaddr ifru_addr; struct sockaddr ifru_dstaddr; struct sockaddr ifru_broadaddr; struct sockaddr ifru_netmask; struct sockaddr ifru_hwaddr; short ifru_flags; int ifru_ivalue; int ifru_mtu; struct ifmap ifru_map; char ifru_slave[16U]; char ifru_newname[16U]; void *ifru_data; struct if_settings ifru_settings; } ;
195 struct ifreq { union __anonunion_ifr_ifrn_196 ifr_ifrn; union __anonunion_ifr_ifru_197 ifr_ifru; } ;
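struct ifreq is the classic userspace handle for interface ioctls; the ifr_name/ifr_hwaddr accessors are macros over the two unions above. A userspace sketch querying a MAC address:

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int get_hwaddr(const char *ifname, unsigned char mac[6])
{
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int rc;

        if (fd < 0)
                return -1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name) - 1);
        rc = ioctl(fd, SIOCGIFHWADDR, &ifr);   /* fills ifr_hwaddr on success */
        if (rc == 0)
                memcpy(mac, ifr.ifr_hwaddr.sa_data, 6);
        close(fd);
        return rc;
}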
91 struct hlist_bl_node ;
91 struct hlist_bl_head { struct hlist_bl_node *first; } ;
36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ;
114 struct __anonstruct_ldv_29075_200 { spinlock_t lock; unsigned int count; } ;
114 union __anonunion_ldv_29076_199 { struct __anonstruct_ldv_29075_200 ldv_29075; } ;
114 struct lockref { union __anonunion_ldv_29076_199 ldv_29076; } ;
49 struct nameidata ;
50 struct vfsmount ;
51 struct __anonstruct_ldv_29099_202 { u32 hash; u32 len; } ;
51 union __anonunion_ldv_29101_201 { struct __anonstruct_ldv_29099_202 ldv_29099; u64 hash_len; } ;
51 struct qstr { union __anonunion_ldv_29101_201 ldv_29101; const unsigned char *name; } ;
90 struct dentry_operations ;
90 union __anonunion_d_u_203 { struct list_head d_child; struct callback_head d_rcu; } ;
90 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; struct list_head d_lru; union __anonunion_d_u_203 d_u; struct list_head d_subdirs; struct hlist_node d_alias; } ;
142 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(struct dentry *, bool ); } ;
477 struct path { struct vfsmount *mnt; struct dentry *dentry; } ;
27 struct list_lru_node { spinlock_t lock; struct list_head list; long nr_items; } ;
30 struct list_lru { struct list_lru_node *node; nodemask_t active_nodes; } ;
58 struct __anonstruct_ldv_29462_205 { struct radix_tree_node *parent; void *private_data; } ;
58 union __anonunion_ldv_29464_204 { struct __anonstruct_ldv_29462_205 ldv_29462; struct callback_head callback_head; } ;
58 struct radix_tree_node { unsigned int path; unsigned int count; union __anonunion_ldv_29464_204 ldv_29464; struct list_head private_list; void *slots[64U]; unsigned long tags[3U][1U]; } ;
105 struct radix_tree_root { unsigned int height; gfp_t gfp_mask; struct radix_tree_node *rnode; } ;
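struct radix_tree_root is used through a small insert/lookup/delete API keyed by unsigned long indices (insertions from atomic context additionally need radix_tree_preload). A sketch with hypothetical index/item values:

RADIX_TREE(my_tree, GFP_KERNEL);        /* declares and initializes the root */

int err = radix_tree_insert(&my_tree, index, item);
void *found = radix_tree_lookup(&my_tree, index);   /* NULL if absent */
radix_tree_delete(&my_tree, index);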
45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ;
38 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ;
30 struct block_device ;
31 struct cgroup_subsys_state ;
19 struct bio_vec { struct page *bv_page; unsigned int bv_len; unsigned int bv_offset; } ;
59 struct export_operations ;
61 struct kiocb ;
62 struct poll_table_struct ;
63 struct kstatfs ;
64 struct swap_info_struct ;
69 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ;
253 struct fs_disk_quota { __s8 d_version; __s8 d_flags; __u16 d_fieldmask; __u32 d_id; __u64 d_blk_hardlimit; __u64 d_blk_softlimit; __u64 d_ino_hardlimit; __u64 d_ino_softlimit; __u64 d_bcount; __u64 d_icount; __s32 d_itimer; __s32 d_btimer; __u16 d_iwarns; __u16 d_bwarns; __s32 d_padding2; __u64 d_rtb_hardlimit; __u64 d_rtb_softlimit; __u64 d_rtbcount; __s32 d_rtbtimer; __u16 d_rtbwarns; __s16 d_padding3; char d_padding4[8U]; } ;
76 struct fs_qfilestat { __u64 qfs_ino; __u64 qfs_nblks; __u32 qfs_nextents; } ;
151 typedef struct fs_qfilestat fs_qfilestat_t;
152 struct fs_quota_stat { __s8 qs_version; __u16 qs_flags; __s8 qs_pad; fs_qfilestat_t qs_uquota; fs_qfilestat_t qs_gquota; __u32 qs_incoredqs; __s32 qs_btimelimit; __s32 qs_itimelimit; __s32 qs_rtbtimelimit; __u16 qs_bwarnlimit; __u16 qs_iwarnlimit; } ;
166 struct fs_qfilestatv { __u64 qfs_ino; __u64 qfs_nblks; __u32 qfs_nextents; __u32 qfs_pad; } ;
196 struct fs_quota_statv { __s8 qs_version; __u8 qs_pad1; __u16 qs_flags; __u32 qs_incoredqs; struct fs_qfilestatv qs_uquota; struct fs_qfilestatv qs_gquota; struct fs_qfilestatv qs_pquota; __s32 qs_btimelimit; __s32 qs_itimelimit; __s32 qs_rtbtimelimit; __u16 qs_bwarnlimit; __u16 qs_iwarnlimit; __u64 qs_pad2[8U]; } ;
212 struct dquot ;
19 typedef __kernel_uid32_t projid_t;
23 struct __anonstruct_kprojid_t_206 { projid_t val; } ;
23 typedef struct __anonstruct_kprojid_t_206 kprojid_t;
119 struct if_dqinfo { __u64 dqi_bgrace; __u64 dqi_igrace; __u32 dqi_flags; __u32 dqi_valid; } ;
152 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ;
60 typedef long long qsize_t;
61 union __anonunion_ldv_29991_207 { kuid_t uid; kgid_t gid; kprojid_t projid; } ;
61 struct kqid { union __anonunion_ldv_29991_207 ldv_29991; enum quota_type type; } ;
178 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time_t dqb_btime; time_t dqb_itime; } ;
200 struct quota_format_type ;
201 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_maxblimit; qsize_t dqi_maxilimit; void *dqi_priv; } ;
264 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ;
291 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); } ;
302 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); } ;
316 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, struct path *); int (*quota_on_meta)(struct super_block *, int, int); int (*quota_off)(struct super_block *, int); int (*quota_sync)(struct super_block *, int); int (*get_info)(struct super_block *, int, struct if_dqinfo *); int (*set_info)(struct super_block *, int, struct if_dqinfo *); int (*get_dqblk)(struct super_block *, struct kqid , struct fs_disk_quota *); int (*set_dqblk)(struct super_block *, struct kqid , struct fs_disk_quota *); int (*get_xstate)(struct super_block *, struct fs_quota_stat *); int (*set_xstate)(struct super_block *, unsigned int, int); int (*get_xstatev)(struct super_block *, struct fs_quota_statv *); int (*rm_xquota)(struct super_block *, unsigned int); } ;
334 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ;
380 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct mutex dqonoff_mutex; struct rw_semaphore dqptr_sem; struct inode *files[2U]; struct mem_dqinfo info[2U]; const struct quota_format_ops *ops[2U]; } ;
323 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *, loff_t ); int (*get_xip_mem)(struct address_space *, unsigned long, int, void **, unsigned long *); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ;
382 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; unsigned int i_mmap_writable; struct rb_root i_mmap; struct list_head i_mmap_nonlinear; struct mutex i_mmap_mutex; unsigned long nrpages; unsigned long nrshadows; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; struct backing_dev_info *backing_dev_info; spinlock_t private_lock; struct list_head private_list; void *private_data; } ;
405 struct request_queue ;
406 struct hd_struct ;
406 struct gendisk ;
406 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; struct list_head bd_inodes; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct list_head bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; } ;
478 struct posix_acl ;
479 struct inode_operations ;
479 union __anonunion_ldv_30405_210 { const unsigned int i_nlink; unsigned int __i_nlink; } ;
479 union __anonunion_ldv_30425_211 { struct hlist_head i_dentry; struct callback_head i_rcu; } ;
479 struct file_lock ;
479 struct cdev ;
479 union __anonunion_ldv_30442_212 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; } ;
479 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion_ldv_30405_210 ldv_30405; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct mutex i_mutex; unsigned long dirtied_when; struct hlist_node i_hash; struct list_head i_wb_list; struct list_head i_lru; struct list_head i_sb_list; union __anonunion_ldv_30425_211 ldv_30425; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; const struct file_operations *i_fop; struct file_lock *i_flock; struct address_space i_data; struct dquot *i_dquot[2U]; struct list_head i_devices; union __anonunion_ldv_30442_212 ldv_30442; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; void *i_private; } ;
715 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ;
723 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ;
746 union __anonunion_f_u_213 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ;
746 struct file { union __anonunion_f_u_213 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; } ;
836 typedef struct files_struct *fl_owner_t;
837 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ;
842 struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, struct file_lock *, int); void (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock **, int); } ;
855 struct net ;
860 struct nlm_lockowner ;
861 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ;
14 struct nfs4_lock_state ;
15 struct nfs4_lock_info { struct nfs4_lock_state *owner; } ;
19 struct fasync_struct ;
19 struct __anonstruct_afs_215 { struct list_head link; int state; } ;
19 union __anonunion_fl_u_214 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_215 afs; } ;
19 struct file_lock { struct file_lock *fl_next; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_214 fl_u; } ;
963 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ;
1157 struct sb_writers { struct percpu_counter counter[3U]; wait_queue_head_t wait; int frozen; wait_queue_head_t wait_unfrozen; struct lockdep_map lock_map[3U]; } ;
1173 struct super_operations ;
1173 struct xattr_handler ;
1173 struct mtd_info ;
1173 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; struct list_head s_inodes; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; } ;
1403 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ;
1441 struct dir_context { int (*actor)(void *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ;
1446 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*aio_read)(struct kiocb *, const struct iovec *, unsigned long, loff_t ); ssize_t (*aio_write)(struct kiocb *, const struct iovec *, unsigned long, loff_t ); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iterate)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*aio_fsync)(struct kiocb *, int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); int (*show_fdinfo)(struct seq_file *, struct file *); } ;
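A driver exposes a character device by filling only the struct file_operations hooks it needs and registering them; note the read/write buffer pointers above are __user pointers in the real headers (the trace drops the annotation). A minimal misc-device sketch, assuming hypothetical my_data/my_data_len:

static ssize_t my_read(struct file *f, char __user *buf, size_t len, loff_t *off)
{
        return simple_read_from_buffer(buf, len, off, my_data, my_data_len);
}

static const struct file_operations my_fops = {
        .owner  = THIS_MODULE,
        .read   = my_read,
        .llseek = default_llseek,
};

static struct miscdevice my_misc = {
        .minor = MISC_DYNAMIC_MINOR,
        .name  = "mydev",
        .fops  = &my_fops,
};
/* misc_register(&my_misc) at module init, misc_deregister(&my_misc) at exit */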
1488 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); void * (*follow_link)(struct dentry *, struct nameidata *); int (*permission)(struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); void (*put_link)(struct dentry *, struct nameidata *, void *); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *); int (*rename2)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *); int (*setxattr)(struct dentry *, const char *, const void *, size_t , int); ssize_t (*getxattr)(struct dentry *, const char *, void *, size_t ); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*removexattr)(struct dentry *, const char *); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ;
1535 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_fs)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, int); long int (*free_cached_objects)(struct super_block *, long, int); } ;
1749 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ;
39 typedef s32 compat_long_t;
44 typedef u32 compat_uptr_t;
276 struct compat_robust_list { compat_uptr_t next; } ;
280 struct compat_robust_list_head { struct compat_robust_list list; compat_long_t futex_offset; compat_uptr_t list_op_pending; } ;
140 struct sk_buff ;
15 typedef u64 netdev_features_t;
18 struct nf_conntrack { atomic_t use; } ;
137 struct nf_bridge_info { atomic_t use; unsigned int mask; struct net_device *physindev; struct net_device *physoutdev; unsigned long data[4U]; } ;
147 struct sk_buff_head { struct sk_buff *next; struct sk_buff *prev; __u32 qlen; spinlock_t lock; } ;
173 struct skb_frag_struct ;
173 typedef struct skb_frag_struct skb_frag_t;
174 struct __anonstruct_page_231 { struct page *p; } ;
174 struct skb_frag_struct { struct __anonstruct_page_231 page; __u32 page_offset; __u32 size; } ;
207 struct skb_shared_hwtstamps { ktime_t hwtstamp; ktime_t syststamp; } ;
276 struct skb_shared_info { unsigned char nr_frags; __u8 tx_flags; unsigned short gso_size; unsigned short gso_segs; unsigned short gso_type; struct sk_buff *frag_list; struct skb_shared_hwtstamps hwtstamps; __be32 ip6_frag_id; atomic_t dataref; void *destructor_arg; skb_frag_t frags[17U]; } ;
360 typedef unsigned int sk_buff_data_t;
361 struct __anonstruct_ldv_34296_233 { u32 stamp_us; u32 stamp_jiffies; } ;
361 union __anonunion_ldv_34297_232 { u64 v64; struct __anonstruct_ldv_34296_233 ldv_34296; } ;
361 struct skb_mstamp { union __anonunion_ldv_34297_232 ldv_34297; } ;
414 union __anonunion_ldv_34316_234 { ktime_t tstamp; struct skb_mstamp skb_mstamp; } ;
414 struct sec_path ;
414 struct __anonstruct_ldv_34332_236 { __u16 csum_start; __u16 csum_offset; } ;
414 union __anonunion_ldv_34333_235 { __wsum csum; struct __anonstruct_ldv_34332_236 ldv_34332; } ;
414 union __anonunion_ldv_34372_237 { unsigned int napi_id; dma_cookie_t dma_cookie; } ;
414 union __anonunion_ldv_34378_238 { __u32 mark; __u32 dropcount; __u32 reserved_tailroom; } ;
414 struct sk_buff { struct sk_buff *next; struct sk_buff *prev; union __anonunion_ldv_34316_234 ldv_34316; struct sock *sk; struct net_device *dev; char cb[48U]; unsigned long _skb_refdst; struct sec_path *sp; unsigned int len; unsigned int data_len; __u16 mac_len; __u16 hdr_len; union __anonunion_ldv_34333_235 ldv_34333; __u32 priority; unsigned char ignore_df; unsigned char cloned; unsigned char ip_summed; unsigned char nohdr; unsigned char nfctinfo; unsigned char pkt_type; unsigned char fclone; unsigned char ipvs_property; unsigned char peeked; unsigned char nf_trace; __be16 protocol; void (*destructor)(struct sk_buff *); struct nf_conntrack *nfct; struct nf_bridge_info *nf_bridge; int skb_iif; __u32 hash; __be16 vlan_proto; __u16 vlan_tci; __u16 tc_index; __u16 tc_verd; __u16 queue_mapping; unsigned char ndisc_nodetype; unsigned char pfmemalloc; unsigned char ooo_okay; unsigned char l4_hash; unsigned char wifi_acked_valid; unsigned char wifi_acked; unsigned char no_fcs; unsigned char head_frag; unsigned char encapsulation; unsigned char encap_hdr_csum; unsigned char csum_valid; unsigned char csum_complete_sw; union __anonunion_ldv_34372_237 ldv_34372; __u32 secmark; union __anonunion_ldv_34378_238 ldv_34378; __be16 inner_protocol; __u16 inner_transport_header; __u16 inner_network_header; __u16 inner_mac_header; __u16 transport_header; __u16 network_header; __u16 mac_header; sk_buff_data_t tail; sk_buff_data_t end; unsigned char *head; unsigned char *data; unsigned int truesize; atomic_t users; } ;
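On the receive path a driver wraps incoming bytes in a struct sk_buff and hands it to the stack. A sketch, assuming hypothetical dev/rx_buf/pkt_len:

struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
if (!skb)
        return;                                   /* drop on allocation failure */
skb_reserve(skb, NET_IP_ALIGN);                   /* align the IP header */
memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);   /* skb_put extends len/tail */
skb->protocol = eth_type_trans(skb, dev);         /* sets pkt_type and protocol */
netif_rx(skb);                                    /* queue for the stack */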
641 struct dst_entry ;
3161 struct ethhdr { unsigned char h_dest[6U]; unsigned char h_source[6U]; __be16 h_proto; } ;
34 struct ethtool_cmd { __u32 cmd; __u32 supported; __u32 advertising; __u16 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 transceiver; __u8 autoneg; __u8 mdio_support; __u32 maxtxpkt; __u32 maxrxpkt; __u16 speed_hi; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __u32 lp_advertising; __u32 reserved[2U]; } ;
125 struct ethtool_drvinfo { __u32 cmd; char driver[32U]; char version[32U]; char fw_version[32U]; char bus_info[32U]; char reserved1[32U]; char reserved2[12U]; __u32 n_priv_flags; __u32 n_stats; __u32 testinfo_len; __u32 eedump_len; __u32 regdump_len; } ;
187 struct ethtool_wolinfo { __u32 cmd; __u32 supported; __u32 wolopts; __u8 sopass[6U]; } ;
211 struct ethtool_regs { __u32 cmd; __u32 version; __u32 len; __u8 data[0U]; } ;
233 struct ethtool_eeprom { __u32 cmd; __u32 magic; __u32 offset; __u32 len; __u8 data[0U]; } ;
259 struct ethtool_eee { __u32 cmd; __u32 supported; __u32 advertised; __u32 lp_advertised; __u32 eee_active; __u32 eee_enabled; __u32 tx_lpi_enabled; __u32 tx_lpi_timer; __u32 reserved[2U]; } ;
288 struct ethtool_modinfo { __u32 cmd; __u32 type; __u32 eeprom_len; __u32 reserved[8U]; } ;
305 struct ethtool_coalesce { __u32 cmd; __u32 rx_coalesce_usecs; __u32 rx_max_coalesced_frames; __u32 rx_coalesce_usecs_irq; __u32 rx_max_coalesced_frames_irq; __u32 tx_coalesce_usecs; __u32 tx_max_coalesced_frames; __u32 tx_coalesce_usecs_irq; __u32 tx_max_coalesced_frames_irq; __u32 stats_block_coalesce_usecs; __u32 use_adaptive_rx_coalesce; __u32 use_adaptive_tx_coalesce; __u32 pkt_rate_low; __u32 rx_coalesce_usecs_low; __u32 rx_max_coalesced_frames_low; __u32 tx_coalesce_usecs_low; __u32 tx_max_coalesced_frames_low; __u32 pkt_rate_high; __u32 rx_coalesce_usecs_high; __u32 rx_max_coalesced_frames_high; __u32 tx_coalesce_usecs_high; __u32 tx_max_coalesced_frames_high; __u32 rate_sample_interval; } ;
404 struct ethtool_ringparam { __u32 cmd; __u32 rx_max_pending; __u32 rx_mini_max_pending; __u32 rx_jumbo_max_pending; __u32 tx_max_pending; __u32 rx_pending; __u32 rx_mini_pending; __u32 rx_jumbo_pending; __u32 tx_pending; } ;
441 struct ethtool_channels { __u32 cmd; __u32 max_rx; __u32 max_tx; __u32 max_other; __u32 max_combined; __u32 rx_count; __u32 tx_count; __u32 other_count; __u32 combined_count; } ;
469 struct ethtool_pauseparam { __u32 cmd; __u32 autoneg; __u32 rx_pause; __u32 tx_pause; } ;
568 struct ethtool_test { __u32 cmd; __u32 flags; __u32 reserved; __u32 len; __u64 data[0U]; } ;
600 struct ethtool_stats { __u32 cmd; __u32 n_stats; __u64 data[0U]; } ;
642 struct ethtool_tcpip4_spec { __be32 ip4src; __be32 ip4dst; __be16 psrc; __be16 pdst; __u8 tos; } ;
675 struct ethtool_ah_espip4_spec { __be32 ip4src; __be32 ip4dst; __be32 spi; __u8 tos; } ;
691 struct ethtool_usrip4_spec { __be32 ip4src; __be32 ip4dst; __be32 l4_4_bytes; __u8 tos; __u8 ip_ver; __u8 proto; } ;
711 union ethtool_flow_union { struct ethtool_tcpip4_spec tcp_ip4_spec; struct ethtool_tcpip4_spec udp_ip4_spec; struct ethtool_tcpip4_spec sctp_ip4_spec; struct ethtool_ah_espip4_spec ah_ip4_spec; struct ethtool_ah_espip4_spec esp_ip4_spec; struct ethtool_usrip4_spec usr_ip4_spec; struct ethhdr ether_spec; __u8 hdata[52U]; } ;
722 struct ethtool_flow_ext { __u8 padding[2U]; unsigned char h_dest[6U]; __be16 vlan_etype; __be16 vlan_tci; __be32 data[2U]; } ;
741 struct ethtool_rx_flow_spec { __u32 flow_type; union ethtool_flow_union h_u; struct ethtool_flow_ext h_ext; union ethtool_flow_union m_u; struct ethtool_flow_ext m_ext; __u64 ring_cookie; __u32 location; } ;
767 struct ethtool_rxnfc { __u32 cmd; __u32 flow_type; __u64 data; struct ethtool_rx_flow_spec fs; __u32 rule_cnt; __u32 rule_locs[0U]; } ;
933 struct ethtool_flash { __u32 cmd; __u32 region; char data[128U]; } ;
941 struct ethtool_dump { __u32 cmd; __u32 version; __u32 flag; __u32 len; __u8 data[0U]; } ;
1017 struct ethtool_ts_info { __u32 cmd; __u32 so_timestamping; __s32 phc_index; __u32 tx_types; __u32 tx_reserved[3U]; __u32 rx_filters; __u32 rx_reserved[3U]; } ;
44 enum ethtool_phys_id_state { ETHTOOL_ID_INACTIVE = 0, ETHTOOL_ID_ACTIVE = 1, ETHTOOL_ID_ON = 2, ETHTOOL_ID_OFF = 3 } ;
79 struct ethtool_ops { int (*get_settings)(struct net_device *, struct ethtool_cmd *); int (*set_settings)(struct net_device *, struct ethtool_cmd *); void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); int (*get_regs_len)(struct net_device *); void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); u32 (*get_msglevel)(struct net_device *); void (*set_msglevel)(struct net_device *, u32 ); int (*nway_reset)(struct net_device *); u32 (*get_link)(struct net_device *); int (*get_eeprom_len)(struct net_device *); int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *); int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *); void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); void (*get_strings)(struct net_device *, u32 , u8 *); int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state ); void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); int (*begin)(struct net_device *); void (*complete)(struct net_device *); u32 (*get_priv_flags)(struct net_device *); int (*set_priv_flags)(struct net_device *, u32 ); int (*get_sset_count)(struct net_device *, int); int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *); int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); int (*flash_device)(struct net_device *, struct ethtool_flash *); int (*reset)(struct net_device *, u32 *); u32 (*get_rxfh_key_size)(struct net_device *); u32 (*get_rxfh_indir_size)(struct net_device *); int (*get_rxfh)(struct net_device *, u32 *, u8 *); int (*set_rxfh)(struct net_device *, const u32 *, const u8 *); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); int (*set_dump)(struct net_device *, struct ethtool_dump *); int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *); int (*get_module_info)(struct net_device *, struct ethtool_modinfo *); int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_eee)(struct net_device *, struct ethtool_eee *); int (*set_eee)(struct net_device *, struct ethtool_eee *); } ;
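A driver implements only the ethtool hooks it supports and assigns the table to its net_device once at probe time. A sketch, assuming hypothetical mydrv/my_pdev names:

static void my_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, "mydrv", sizeof(info->driver));
        strlcpy(info->version, "1.0", sizeof(info->version));
        strlcpy(info->bus_info, pci_name(my_pdev), sizeof(info->bus_info));
}

static const struct ethtool_ops my_ethtool_ops = {
        .get_drvinfo = my_get_drvinfo,
        .get_link    = ethtool_op_get_link,   /* generic carrier helper */
};

/* at probe: ndev->ethtool_ops = &my_ethtool_ops; */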
235 struct prot_inuse ;
236 struct netns_core { struct ctl_table_header *sysctl_hdr; int sysctl_somaxconn; struct prot_inuse *inuse; } ;
38 struct u64_stats_sync { } ;
145 struct ipstats_mib { u64 mibs[36U]; struct u64_stats_sync syncp; } ;
61 struct icmp_mib { unsigned long mibs[28U]; } ;
67 struct icmpmsg_mib { atomic_long_t mibs[512U]; } ;
72 struct icmpv6_mib { unsigned long mibs[6U]; } ;
83 struct icmpv6msg_mib { atomic_long_t mibs[512U]; } ;
93 struct tcp_mib { unsigned long mibs[16U]; } ;
100 struct udp_mib { unsigned long mibs[8U]; } ;
106 struct linux_mib { unsigned long mibs[103U]; } ;
112 struct linux_xfrm_mib { unsigned long mibs[29U]; } ;
118 struct netns_mib { struct tcp_mib *tcp_statistics; struct ipstats_mib *ip_statistics; struct linux_mib *net_statistics; struct udp_mib *udp_statistics; struct udp_mib *udplite_statistics; struct icmp_mib *icmp_statistics; struct icmpmsg_mib *icmpmsg_statistics; struct proc_dir_entry *proc_net_devsnmp6; struct udp_mib *udp_stats_in6; struct udp_mib *udplite_stats_in6; struct ipstats_mib *ipv6_statistics; struct icmpv6_mib *icmpv6_statistics; struct icmpv6msg_mib *icmpv6msg_statistics; struct linux_xfrm_mib *xfrm_statistics; } ;
26 struct netns_unix { int sysctl_max_dgram_qlen; struct ctl_table_header *ctl; } ;
12 struct netns_packet { struct mutex sklist_lock; struct hlist_head sklist; } ;
14 struct netns_frags { int nqueues; struct list_head lru_list; spinlock_t lru_lock; struct percpu_counter mem; int timeout; int high_thresh; int low_thresh; } ;
180 struct tcpm_hash_bucket ;
181 struct ipv4_devconf ;
182 struct fib_rules_ops ;
183 struct fib_table ;
184 struct local_ports { seqlock_t lock; int range[2U]; } ;
22 struct ping_group_range { seqlock_t lock; kgid_t range[2U]; } ;
27 struct inet_peer_base ;
27 struct xt_table ;
27 struct netns_ipv4 { struct ctl_table_header *forw_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *ipv4_hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *xfrm4_hdr; struct ipv4_devconf *devconf_all; struct ipv4_devconf *devconf_dflt; struct fib_rules_ops *rules_ops; bool fib_has_custom_rules; struct fib_table *fib_local; struct fib_table *fib_main; struct fib_table *fib_default; int fib_num_tclassid_users; struct hlist_head *fib_table_hash; struct sock *fibnl; struct sock **icmp_sk; struct inet_peer_base *peers; struct tcpm_hash_bucket *tcp_metrics_hash; unsigned int tcp_metrics_hash_log; struct netns_frags frags; struct xt_table *iptable_filter; struct xt_table *iptable_mangle; struct xt_table *iptable_raw; struct xt_table *arptable_filter; struct xt_table *iptable_security; struct xt_table *nat_table; int sysctl_icmp_echo_ignore_all; int sysctl_icmp_echo_ignore_broadcasts; int sysctl_icmp_ignore_bogus_error_responses; int sysctl_icmp_ratelimit; int sysctl_icmp_ratemask; int sysctl_icmp_errors_use_inbound_ifaddr; struct local_ports ip_local_ports; int sysctl_tcp_ecn; int sysctl_ip_no_pmtu_disc; int sysctl_ip_fwd_use_pmtu; int sysctl_fwmark_reflect; int sysctl_tcp_fwmark_accept; struct ping_group_range ping_group_range; atomic_t dev_addr_genid; unsigned long *sysctl_local_reserved_ports; struct list_head mr_tables; struct fib_rules_ops *mr_rules_ops; atomic_t rt_genid; } ;
102 struct neighbour ;
102 struct dst_ops { unsigned short family; __be16 protocol; unsigned int gc_thresh; int (*gc)(struct dst_ops *); struct dst_entry * (*check)(struct dst_entry *, __u32 ); unsigned int (*default_advmss)(const struct dst_entry *); unsigned int (*mtu)(const struct dst_entry *); u32 * (*cow_metrics)(struct dst_entry *, unsigned long); void (*destroy)(struct dst_entry *); void (*ifdown)(struct dst_entry *, struct net_device *, int); struct dst_entry * (*negative_advice)(struct dst_entry *); void (*link_failure)(struct sk_buff *); void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32 ); void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *); int (*local_out)(struct sk_buff *); struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *); struct kmem_cache *kmem_cachep; struct percpu_counter pcpuc_entries; } ;
73 struct netns_sysctl_ipv6 { struct ctl_table_header *hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *icmp_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *xfrm6_hdr; int bindv6only; int flush_delay; int ip6_rt_max_size; int ip6_rt_gc_min_interval; int ip6_rt_gc_timeout; int ip6_rt_gc_interval; int ip6_rt_gc_elasticity; int ip6_rt_mtu_expires; int ip6_rt_min_advmss; int flowlabel_consistency; int icmpv6_time; int anycast_src_echo_reply; int fwmark_reflect; } ;
35 struct ipv6_devconf ;
35 struct rt6_info ;
35 struct rt6_statistics ;
35 struct fib6_table ;
35 struct netns_ipv6 { struct netns_sysctl_ipv6 sysctl; struct ipv6_devconf *devconf_all; struct ipv6_devconf *devconf_dflt; struct inet_peer_base *peers; struct netns_frags frags; struct xt_table *ip6table_filter; struct xt_table *ip6table_mangle; struct xt_table *ip6table_raw; struct xt_table *ip6table_security; struct xt_table *ip6table_nat; struct rt6_info *ip6_null_entry; struct rt6_statistics *rt6_stats; struct timer_list ip6_fib_timer; struct hlist_head *fib_table_hash; struct fib6_table *fib6_main_tbl; struct dst_ops ip6_dst_ops; unsigned int ip6_rt_gc_expire; unsigned long ip6_rt_last_gc; struct rt6_info *ip6_prohibit_entry; struct rt6_info *ip6_blk_hole_entry; struct fib6_table *fib6_local_tbl; struct fib_rules_ops *fib6_rules_ops; struct sock **icmp_sk; struct sock *ndisc_sk; struct sock *tcp_sk; struct sock *igmp_sk; struct list_head mr6_tables; struct fib_rules_ops *mr6_rules_ops; atomic_t dev_addr_genid; atomic_t rt_genid; } ;
80 struct netns_nf_frag { struct netns_sysctl_ipv6 sysctl; struct netns_frags frags; } ;
86 struct netns_sysctl_lowpan { struct ctl_table_header *frags_hdr; } ;
14 struct netns_ieee802154_lowpan { struct netns_sysctl_lowpan sysctl; struct netns_frags frags; u16 max_dsize; } ;
21 struct sctp_mib ;
22 struct netns_sctp { struct sctp_mib *sctp_statistics; struct proc_dir_entry *proc_net_sctp; struct ctl_table_header *sysctl_header; struct sock *ctl_sock; struct list_head local_addr_list; struct list_head addr_waitq; struct timer_list addr_wq_timer; struct list_head auto_asconf_splist; spinlock_t addr_wq_lock; spinlock_t local_addr_lock; unsigned int rto_initial; unsigned int rto_min; unsigned int rto_max; int rto_alpha; int rto_beta; int max_burst; int cookie_preserve_enable; char *sctp_hmac_alg; unsigned int valid_cookie_life; unsigned int sack_timeout; unsigned int hb_interval; int max_retrans_association; int max_retrans_path; int max_retrans_init; int pf_retrans; int sndbuf_policy; int rcvbuf_policy; int default_auto_asconf; int addip_enable; int addip_noauth; int prsctp_enable; int auth_enable; int scope_policy; int rwnd_upd_shift; unsigned long max_autoclose; } ;
133 struct netns_dccp { struct sock *v4_ctl_sk; struct sock *v6_ctl_sk; } ;
324 struct nlattr ;
337 struct nf_logger ;
338 struct netns_nf { struct proc_dir_entry *proc_netfilter; const struct nf_logger *nf_loggers[13U]; struct ctl_table_header *nf_log_dir_header; } ;
17 struct ebt_table ;
18 struct netns_xt { struct list_head tables[13U]; bool notrack_deprecated_warning; struct ebt_table *broute_table; struct ebt_table *frame_filter; struct ebt_table *frame_nat; bool ulog_warn_deprecated; bool ebt_ulog_warn_deprecated; } ;
24 struct hlist_nulls_node ;
24 struct hlist_nulls_head { struct hlist_nulls_node *first; } ;
20 struct hlist_nulls_node { struct hlist_nulls_node *next; struct hlist_nulls_node **pprev; } ;
32 struct nf_proto_net { struct ctl_table_header *ctl_table_header; struct ctl_table *ctl_table; struct ctl_table_header *ctl_compat_header; struct ctl_table *ctl_compat_table; unsigned int users; } ;
24 struct nf_generic_net { struct nf_proto_net pn; unsigned int timeout; } ;
29 struct nf_tcp_net { struct nf_proto_net pn; unsigned int timeouts[14U]; unsigned int tcp_loose; unsigned int tcp_be_liberal; unsigned int tcp_max_retrans; } ;
43 struct nf_udp_net { struct nf_proto_net pn; unsigned int timeouts[2U]; } ;
48 struct nf_icmp_net { struct nf_proto_net pn; unsigned int timeout; } ;
53 struct nf_ip_net { struct nf_generic_net generic; struct nf_tcp_net tcp; struct nf_udp_net udp; struct nf_icmp_net icmp; struct nf_icmp_net icmpv6; struct ctl_table_header *ctl_table_header; struct ctl_table *ctl_table; } ;
64 struct ct_pcpu { spinlock_t lock; struct hlist_nulls_head unconfirmed; struct hlist_nulls_head dying; struct hlist_nulls_head tmpl; } ;
72 struct ip_conntrack_stat ;
72 struct nf_ct_event_notifier ;
72 struct nf_exp_event_notifier ;
72 struct netns_ct { atomic_t count; unsigned int expect_count; struct ctl_table_header *sysctl_header; struct ctl_table_header *acct_sysctl_header; struct ctl_table_header *tstamp_sysctl_header; struct ctl_table_header *event_sysctl_header; struct ctl_table_header *helper_sysctl_header; char *slabname; unsigned int sysctl_log_invalid; unsigned int sysctl_events_retry_timeout; int sysctl_events; int sysctl_acct; int sysctl_auto_assign_helper; bool auto_assign_helper_warned; int sysctl_tstamp; int sysctl_checksum; unsigned int htable_size; seqcount_t generation; struct kmem_cache *nf_conntrack_cachep; struct hlist_nulls_head *hash; struct hlist_head *expect_hash; struct ct_pcpu *pcpu_lists; struct ip_conntrack_stat *stat; struct nf_ct_event_notifier *nf_conntrack_event_cb; struct nf_exp_event_notifier *nf_expect_event_cb; struct nf_ip_net nf_ct_proto; unsigned int labels_used; u8 label_words; struct hlist_head *nat_bysource; unsigned int nat_htable_size; } ;
111 struct nft_af_info ;
112 struct netns_nftables { struct list_head af_info; struct list_head commit_list; struct nft_af_info *ipv4; struct nft_af_info *ipv6; struct nft_af_info *inet; struct nft_af_info *arp; struct nft_af_info *bridge; u8 gencursor; u8 genctr; } ;
499 struct flow_cache_percpu { struct hlist_head *hash_table; int hash_count; u32 hash_rnd; int hash_rnd_recalc; struct tasklet_struct flush_tasklet; } ;
16 struct flow_cache { u32 hash_shift; struct flow_cache_percpu *percpu; struct notifier_block hotcpu_notifier; int low_watermark; int high_watermark; struct timer_list rnd_timer; } ;
25 struct xfrm_policy_hash { struct hlist_head *table; unsigned int hmask; } ;
17 struct netns_xfrm { struct list_head state_all; struct hlist_head *state_bydst; struct hlist_head *state_bysrc; struct hlist_head *state_byspi; unsigned int state_hmask; unsigned int state_num; struct work_struct state_hash_work; struct hlist_head state_gc_list; struct work_struct state_gc_work; struct list_head policy_all; struct hlist_head *policy_byidx; unsigned int policy_idx_hmask; struct hlist_head policy_inexact[6U]; struct xfrm_policy_hash policy_bydst[6U]; unsigned int policy_count[6U]; struct work_struct policy_hash_work; struct sock *nlsk; struct sock *nlsk_stash; u32 sysctl_aevent_etime; u32 sysctl_aevent_rseqth; int sysctl_larval_drop; u32 sysctl_acq_expires; struct ctl_table_header *sysctl_hdr; struct dst_ops xfrm4_dst_ops; struct dst_ops xfrm6_dst_ops; spinlock_t xfrm_state_lock; rwlock_t xfrm_policy_lock; struct mutex xfrm_cfg_mutex; struct flow_cache flow_cache_global; atomic_t flow_cache_genid; struct list_head flow_cache_gc_list; spinlock_t flow_cache_gc_lock; struct work_struct flow_cache_gc_work; struct work_struct flow_cache_flush_work; struct mutex flow_flush_sem; } ;
74 struct net_generic ;
75 struct netns_ipvs ;
76 struct net { atomic_t passive; atomic_t count; spinlock_t rules_mod_lock; struct list_head list; struct list_head cleanup_list; struct list_head exit_list; struct user_namespace *user_ns; unsigned int proc_inum; struct proc_dir_entry *proc_net; struct proc_dir_entry *proc_net_stat; struct ctl_table_set sysctls; struct sock *rtnl; struct sock *genl_sock; struct list_head dev_base_head; struct hlist_head *dev_name_head; struct hlist_head *dev_index_head; unsigned int dev_base_seq; int ifindex; unsigned int dev_unreg_count; struct list_head rules_ops; struct net_device *loopback_dev; struct netns_core core; struct netns_mib mib; struct netns_packet packet; struct netns_unix unx; struct netns_ipv4 ipv4; struct netns_ipv6 ipv6; struct netns_ieee802154_lowpan ieee802154_lowpan; struct netns_sctp sctp; struct netns_dccp dccp; struct netns_nf nf; struct netns_xt xt; struct netns_ct ct; struct netns_nftables nft; struct netns_nf_frag nf_frag; struct sock *nfnl; struct sock *nfnl_stash; struct sk_buff_head wext_nlevents; struct net_generic *gen; struct netns_xfrm xfrm; struct netns_ipvs *ipvs; struct sock *diag_nlsk; atomic_t fnhe_genid; } ;
400 struct dsa_chip_data { struct device *mii_bus; int sw_addr; char *port_names[12U]; s8 *rtable; } ;
46 struct dsa_platform_data { struct device *netdev; int nr_chips; struct dsa_chip_data *chip; } ;
61 struct dsa_switch ;
61 struct dsa_switch_tree { struct dsa_platform_data *pd; struct net_device *master_netdev; __be16 tag_protocol; s8 cpu_switch; s8 cpu_port; int link_poll_needed; struct work_struct link_poll_work; struct timer_list link_poll_timer; struct dsa_switch *ds[4U]; } ;
94 struct dsa_switch_driver ;
94 struct mii_bus ;
94 struct dsa_switch { struct dsa_switch_tree *dst; int index; struct dsa_chip_data *pd; struct dsa_switch_driver *drv; struct mii_bus *master_mii_bus; u32 dsa_port_mask; u32 phys_port_mask; struct mii_bus *slave_mii_bus; struct net_device *ports[12U]; } ;
146 struct dsa_switch_driver { struct list_head list; __be16 tag_protocol; int priv_size; char * (*probe)(struct mii_bus *, int); int (*setup)(struct dsa_switch *); int (*set_addr)(struct dsa_switch *, u8 *); int (*phy_read)(struct dsa_switch *, int, int); int (*phy_write)(struct dsa_switch *, int, int, u16 ); void (*poll_link)(struct dsa_switch *); void (*get_strings)(struct dsa_switch *, int, uint8_t *); void (*get_ethtool_stats)(struct dsa_switch *, int, uint64_t *); int (*get_sset_count)(struct dsa_switch *); } ;
205 struct ieee_ets { __u8 willing; __u8 ets_cap; __u8 cbs; __u8 tc_tx_bw[8U]; __u8 tc_rx_bw[8U]; __u8 tc_tsa[8U]; __u8 prio_tc[8U]; __u8 tc_reco_bw[8U]; __u8 tc_reco_tsa[8U]; __u8 reco_prio_tc[8U]; } ;
69 struct ieee_maxrate { __u64 tc_maxrate[8U]; } ;
80 struct ieee_pfc { __u8 pfc_cap; __u8 pfc_en; __u8 mbc; __u16 delay; __u64 requests[8U]; __u64 indications[8U]; } ;
100 struct cee_pg { __u8 willing; __u8 error; __u8 pg_en; __u8 tcs_supported; __u8 pg_bw[8U]; __u8 prio_pg[8U]; } ;
123 struct cee_pfc { __u8 willing; __u8 error; __u8 pfc_en; __u8 tcs_supported; } ;
138 struct dcb_app { __u8 selector; __u8 priority; __u16 protocol; } ;
167 struct dcb_peer_app_info { __u8 willing; __u8 error; } ;
40 struct dcbnl_rtnl_ops { int (*ieee_getets)(struct net_device *, struct ieee_ets *); int (*ieee_setets)(struct net_device *, struct ieee_ets *); int (*ieee_getmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_setmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_getpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_setpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_getapp)(struct net_device *, struct dcb_app *); int (*ieee_setapp)(struct net_device *, struct dcb_app *); int (*ieee_delapp)(struct net_device *, struct dcb_app *); int (*ieee_peer_getets)(struct net_device *, struct ieee_ets *); int (*ieee_peer_getpfc)(struct net_device *, struct ieee_pfc *); u8 (*getstate)(struct net_device *); u8 (*setstate)(struct net_device *, u8 ); void (*getpermhwaddr)(struct net_device *, u8 *); void (*setpgtccfgtx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgtx)(struct net_device *, int, u8 ); void (*setpgtccfgrx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgrx)(struct net_device *, int, u8 ); void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgtx)(struct net_device *, int, u8 *); void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgrx)(struct net_device *, int, u8 *); void (*setpfccfg)(struct net_device *, int, u8 ); void (*getpfccfg)(struct net_device *, int, u8 *); u8 (*setall)(struct net_device *); u8 (*getcap)(struct net_device *, int, u8 *); int (*getnumtcs)(struct net_device *, int, u8 *); int (*setnumtcs)(struct net_device *, int, u8 ); u8 (*getpfcstate)(struct net_device *); void (*setpfcstate)(struct net_device *, u8 ); void (*getbcncfg)(struct net_device *, int, u32 *); void (*setbcncfg)(struct net_device *, int, u32 ); void (*getbcnrp)(struct net_device *, int, u8 *); void (*setbcnrp)(struct net_device *, int, u8 ); u8 (*setapp)(struct net_device *, u8 , u16 , u8 ); u8 (*getapp)(struct net_device *, u8 , u16 ); u8 (*getfeatcfg)(struct net_device *, int, u8 *); u8 (*setfeatcfg)(struct net_device *, int, u8 ); u8 (*getdcbx)(struct net_device *); u8 (*setdcbx)(struct net_device *, u8 ); int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, u16 *); int (*peer_getapptable)(struct net_device *, struct dcb_app *); int (*cee_peer_getpg)(struct net_device *, struct cee_pg *); int (*cee_peer_getpfc)(struct net_device *, struct cee_pfc *); } ;
102 struct taskstats { __u16 version; __u32 ac_exitcode; __u8 ac_flag; __u8 ac_nice; __u64 cpu_count; __u64 cpu_delay_total; __u64 blkio_count; __u64 blkio_delay_total; __u64 swapin_count; __u64 swapin_delay_total; __u64 cpu_run_real_total; __u64 cpu_run_virtual_total; char ac_comm[32U]; __u8 ac_sched; __u8 ac_pad[3U]; __u32 ac_uid; __u32 ac_gid; __u32 ac_pid; __u32 ac_ppid; __u32 ac_btime; __u64 ac_etime; __u64 ac_utime; __u64 ac_stime; __u64 ac_minflt; __u64 ac_majflt; __u64 coremem; __u64 virtmem; __u64 hiwater_rss; __u64 hiwater_vm; __u64 read_char; __u64 write_char; __u64 read_syscalls; __u64 write_syscalls; __u64 read_bytes; __u64 write_bytes; __u64 cancelled_write_bytes; __u64 nvcsw; __u64 nivcsw; __u64 ac_utimescaled; __u64 ac_stimescaled; __u64 cpu_scaled_run_real_total; __u64 freepages_count; __u64 freepages_delay_total; } ;
58 struct percpu_ref ;
54 typedef void percpu_ref_func_t(struct percpu_ref *);
55 struct percpu_ref { atomic_t count; unsigned int *pcpu_count; percpu_ref_func_t *release; percpu_ref_func_t *confirm_kill; struct callback_head rcu; } ;
205 struct cgroup_root ;
206 struct cgroup_subsys ;
207 struct cgroup ;
58 struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; struct percpu_ref refcnt; struct cgroup_subsys_state *parent; struct list_head sibling; struct list_head children; int id; unsigned int flags; u64 serial_nr; struct callback_head callback_head; struct work_struct destroy_work; } ;
167 struct cgroup { struct cgroup_subsys_state self; unsigned long flags; int id; int populated_cnt; struct kernfs_node *kn; struct kernfs_node *populated_kn; unsigned int child_subsys_mask; struct cgroup_subsys_state *subsys[12U]; struct cgroup_root *root; struct list_head cset_links; struct list_head e_csets[12U]; struct list_head release_list; struct list_head pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; } ;
253 struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int hierarchy_id; struct cgroup cgrp; atomic_t nr_cgrps; struct list_head root_list; unsigned int flags; struct idr cgroup_idr; char release_agent_path[4096U]; char name[64U]; } ;
355 struct css_set { atomic_t refcount; struct hlist_node hlist; struct list_head tasks; struct list_head mg_tasks; struct list_head cgrp_links; struct cgroup *dfl_cgrp; struct cgroup_subsys_state *subsys[12U]; struct list_head mg_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct css_set *mg_dst_cset; struct list_head e_cset_node[12U]; struct callback_head callback_head; } ;
438 struct cftype { char name[64U]; int private; umode_t mode; size_t max_write_len; unsigned int flags; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 ); int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 ); ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); struct lock_class_key lockdep_key; } ;
609 struct cgroup_taskset ;
617 struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int (*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_subsys_state *, struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_subsys_state *, struct cgroup_taskset *); void (*attach)(struct cgroup_subsys_state *, struct cgroup_taskset *); void (*fork)(struct task_struct *); void (*exit)(struct cgroup_subsys_state *, struct cgroup_subsys_state *, struct task_struct *); void (*bind)(struct cgroup_subsys_state *); int disabled; int early_init; bool broken_hierarchy; bool warned_broken_hierarchy; int id; const char *name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *base_cftypes; } ;
919 struct netprio_map { struct callback_head rcu; u32 priomap_len; u32 priomap[]; } ;
3161 struct mnt_namespace ;
3162 struct ipc_namespace ;
3163 struct nsproxy { atomic_t count; struct uts_namespace *uts_ns; struct ipc_namespace *ipc_ns; struct mnt_namespace *mnt_ns; struct pid_namespace *pid_ns_for_children; struct net *net_ns; } ;
41 struct nlmsghdr { __u32 nlmsg_len; __u16 nlmsg_type; __u16 nlmsg_flags; __u32 nlmsg_seq; __u32 nlmsg_pid; } ;
145 struct nlattr { __u16 nla_len; __u16 nla_type; } ;
104 struct netlink_callback { struct sk_buff *skb; const struct nlmsghdr *nlh; int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); void *data; struct module *module; u16 family; u16 min_dump_alloc; unsigned int prev_seq; unsigned int seq; long args[6U]; } ;
180 struct ndmsg { __u8 ndm_family; __u8 ndm_pad1; __u16 ndm_pad2; __s32 ndm_ifindex; __u16 ndm_state; __u8 ndm_flags; __u8 ndm_type; } ;
39 struct rtnl_link_stats64 { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 rx_errors; __u64 tx_errors; __u64 rx_dropped; __u64 tx_dropped; __u64 multicast; __u64 collisions; __u64 rx_length_errors; __u64 rx_over_errors; __u64 rx_crc_errors; __u64 rx_frame_errors; __u64 rx_fifo_errors; __u64 rx_missed_errors; __u64 tx_aborted_errors; __u64 tx_carrier_errors; __u64 tx_fifo_errors; __u64 tx_heartbeat_errors; __u64 tx_window_errors; __u64 rx_compressed; __u64 tx_compressed; } ;
547 struct ifla_vf_info { __u32 vf; __u8 mac[32U]; __u32 vlan; __u32 qos; __u32 spoofchk; __u32 linkstate; __u32 min_tx_rate; __u32 max_tx_rate; } ;
28 struct netpoll_info ;
29 struct phy_device ;
30 struct wireless_dev ;
61 enum netdev_tx { __NETDEV_TX_MIN = -2147483648, NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 16, NETDEV_TX_LOCKED = 32 } ;
106 typedef enum netdev_tx netdev_tx_t;
125 struct net_device_stats { unsigned long rx_packets; unsigned long tx_packets; unsigned long rx_bytes; unsigned long tx_bytes; unsigned long rx_errors; unsigned long tx_errors; unsigned long rx_dropped; unsigned long tx_dropped; unsigned long multicast; unsigned long collisions; unsigned long rx_length_errors; unsigned long rx_over_errors; unsigned long rx_crc_errors; unsigned long rx_frame_errors; unsigned long rx_fifo_errors; unsigned long rx_missed_errors; unsigned long tx_aborted_errors; unsigned long tx_carrier_errors; unsigned long tx_fifo_errors; unsigned long tx_heartbeat_errors; unsigned long tx_window_errors; unsigned long rx_compressed; unsigned long tx_compressed; } ;
186 struct neigh_parms ;
187 struct netdev_hw_addr { struct list_head list; unsigned char addr[32U]; unsigned char type; bool global_use; int sync_cnt; int refcount; int synced; struct callback_head callback_head; } ;
207 struct netdev_hw_addr_list { struct list_head list; int count; } ;
212 struct hh_cache { u16 hh_len; u16 __pad; seqlock_t hh_lock; unsigned long hh_data[16U]; } ;
241 struct header_ops { int (*create)(struct sk_buff *, struct net_device *, unsigned short, const void *, const void *, unsigned int); int (*parse)(const struct sk_buff *, unsigned char *); int (*rebuild)(struct sk_buff *); int (*cache)(const struct neighbour *, struct hh_cache *, __be16 ); void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *); } ;
292 struct napi_struct { struct list_head poll_list; unsigned long state; int weight; unsigned int gro_count; int (*poll)(struct napi_struct *, int); spinlock_t poll_lock; int poll_owner; struct net_device *dev; struct sk_buff *gro_list; struct sk_buff *skb; struct list_head dev_list; struct hlist_node napi_hash_node; unsigned int napi_id; } ;
336 enum rx_handler_result { RX_HANDLER_CONSUMED = 0, RX_HANDLER_ANOTHER = 1, RX_HANDLER_EXACT = 2, RX_HANDLER_PASS = 3 } ;
384 typedef enum rx_handler_result rx_handler_result_t;
385 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **);
522 struct Qdisc ;
522 struct netdev_queue { struct net_device *dev; struct Qdisc *qdisc; struct Qdisc *qdisc_sleeping; struct kobject kobj; int numa_node; spinlock_t _xmit_lock; int xmit_lock_owner; unsigned long trans_start; unsigned long trans_timeout; unsigned long state; struct dql dql; } ;
591 struct rps_map { unsigned int len; struct callback_head rcu; u16 cpus[0U]; } ;
603 struct rps_dev_flow { u16 cpu; u16 filter; unsigned int last_qtail; } ;
615 struct rps_dev_flow_table { unsigned int mask; struct callback_head rcu; struct rps_dev_flow flows[0U]; } ;
666 struct netdev_rx_queue { struct rps_map *rps_map; struct rps_dev_flow_table *rps_flow_table; struct kobject kobj; struct net_device *dev; } ;
689 struct xps_map { unsigned int len; unsigned int alloc_len; struct callback_head rcu; u16 queues[0U]; } ;
702 struct xps_dev_maps { struct callback_head rcu; struct xps_map *cpu_map[0U]; } ;
713 struct netdev_tc_txq { u16 count; u16 offset; } ;
724 struct netdev_fcoe_hbainfo { char manufacturer[64U]; char serial_number[64U]; char hardware_version[64U]; char driver_version[64U]; char optionrom_version[64U]; char firmware_version[64U]; char model[256U]; char model_description[256U]; } ;
740 struct netdev_phys_port_id { unsigned char id[32U]; unsigned char id_len; } ;
753 struct net_device_ops { int (*ndo_init)(struct net_device *); void (*ndo_uninit)(struct net_device *); int (*ndo_open)(struct net_device *); int (*ndo_stop)(struct net_device *); netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *); u16 (*ndo_select_queue)(struct net_device *, struct sk_buff *, void *, u16 (*)(struct net_device *, struct sk_buff *)); void (*ndo_change_rx_flags)(struct net_device *, int); void (*ndo_set_rx_mode)(struct net_device *); int (*ndo_set_mac_address)(struct net_device *, void *); int (*ndo_validate_addr)(struct net_device *); int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int); int (*ndo_set_config)(struct net_device *, struct ifmap *); int (*ndo_change_mtu)(struct net_device *, int); int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *); void (*ndo_tx_timeout)(struct net_device *); struct rtnl_link_stats64 * (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *); struct net_device_stats * (*ndo_get_stats)(struct net_device *); int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16 , u16 ); int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16 , u16 ); void (*ndo_poll_controller)(struct net_device *); int (*ndo_netpoll_setup)(struct net_device *, struct netpoll_info *); void (*ndo_netpoll_cleanup)(struct net_device *); int (*ndo_busy_poll)(struct napi_struct *); int (*ndo_set_vf_mac)(struct net_device *, int, u8 *); int (*ndo_set_vf_vlan)(struct net_device *, int, u16 , u8 ); int (*ndo_set_vf_rate)(struct net_device *, int, int, int); int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool ); int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *); int (*ndo_set_vf_link_state)(struct net_device *, int, int); int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **); int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *); int (*ndo_setup_tc)(struct net_device *, u8 ); int (*ndo_fcoe_enable)(struct net_device *); int (*ndo_fcoe_disable)(struct net_device *); int (*ndo_fcoe_ddp_setup)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_ddp_done)(struct net_device *, u16 ); int (*ndo_fcoe_ddp_target)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_get_hbainfo)(struct net_device *, struct netdev_fcoe_hbainfo *); int (*ndo_fcoe_get_wwn)(struct net_device *, u64 *, int); int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16 , u32 ); int (*ndo_add_slave)(struct net_device *, struct net_device *); int (*ndo_del_slave)(struct net_device *, struct net_device *); netdev_features_t (*ndo_fix_features)(struct net_device *, netdev_features_t ); int (*ndo_set_features)(struct net_device *, netdev_features_t ); int (*ndo_neigh_construct)(struct neighbour *); void (*ndo_neigh_destroy)(struct neighbour *); int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 ); int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *); int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, int); int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *); int (*ndo_bridge_getlink)(struct sk_buff *, u32 , u32 , struct net_device *, u32 ); int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *); int (*ndo_change_carrier)(struct net_device *, bool ); int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_port_id *); void (*ndo_add_vxlan_port)(struct net_device *, sa_family_t , __be16 ); void (*ndo_del_vxlan_port)(struct net_device *, sa_family_t , __be16 ); void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *); void (*ndo_dfwd_del_station)(struct net_device *, void *); netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *, struct net_device *, void *); int (*ndo_get_lock_subclass)(struct net_device *); } ;
1187 struct __anonstruct_adj_list_250 { struct list_head upper; struct list_head lower; } ;
1187 struct __anonstruct_all_adj_list_251 { struct list_head upper; struct list_head lower; } ;
1187 struct iw_handler_def ;
1187 struct iw_public_data ;
1187 struct forwarding_accel_ops ;
1187 struct vlan_info ;
1187 struct tipc_bearer ;
1187 struct in_device ;
1187 struct dn_dev ;
1187 struct inet6_dev ;
1187 struct cpu_rmap ;
1187 struct pcpu_lstats ;
1187 struct pcpu_sw_netstats ;
1187 struct pcpu_dstats ;
1187 struct pcpu_vstats ;
1187 union __anonunion_ldv_42047_252 { void *ml_priv; struct pcpu_lstats *lstats; struct pcpu_sw_netstats *tstats; struct pcpu_dstats *dstats; struct pcpu_vstats *vstats; } ;
1187 struct garp_port ;
1187 struct mrp_port ;
1187 struct rtnl_link_ops ;
1187 struct net_device { char name[16U]; struct hlist_node name_hlist; char *ifalias; unsigned long mem_end; unsigned long mem_start; unsigned long base_addr; int irq; unsigned long state; struct list_head dev_list; struct list_head napi_list; struct list_head unreg_list; struct list_head close_list; struct __anonstruct_adj_list_250 adj_list; struct __anonstruct_all_adj_list_251 all_adj_list; netdev_features_t features; netdev_features_t hw_features; netdev_features_t wanted_features; netdev_features_t vlan_features; netdev_features_t hw_enc_features; netdev_features_t mpls_features; int ifindex; int iflink; struct net_device_stats stats; atomic_long_t rx_dropped; atomic_long_t tx_dropped; atomic_t carrier_changes; const struct iw_handler_def *wireless_handlers; struct iw_public_data *wireless_data; const struct net_device_ops *netdev_ops; const struct ethtool_ops *ethtool_ops; const struct forwarding_accel_ops *fwd_ops; const struct header_ops *header_ops; unsigned int flags; unsigned int priv_flags; unsigned short gflags; unsigned short padded; unsigned char operstate; unsigned char link_mode; unsigned char if_port; unsigned char dma; unsigned int mtu; unsigned short type; unsigned short hard_header_len; unsigned short needed_headroom; unsigned short needed_tailroom; unsigned char perm_addr[32U]; unsigned char addr_assign_type; unsigned char addr_len; unsigned short neigh_priv_len; unsigned short dev_id; unsigned short dev_port; spinlock_t addr_list_lock; struct netdev_hw_addr_list uc; struct netdev_hw_addr_list mc; struct netdev_hw_addr_list dev_addrs; struct kset *queues_kset; bool uc_promisc; unsigned int promiscuity; unsigned int allmulti; struct vlan_info *vlan_info; struct dsa_switch_tree *dsa_ptr; struct tipc_bearer *tipc_ptr; void *atalk_ptr; struct in_device *ip_ptr; struct dn_dev *dn_ptr; struct inet6_dev *ip6_ptr; void *ax25_ptr; struct wireless_dev *ieee80211_ptr; unsigned long last_rx; unsigned char *dev_addr; struct netdev_rx_queue *_rx; unsigned int num_rx_queues; unsigned int real_num_rx_queues; rx_handler_func_t *rx_handler; void *rx_handler_data; struct netdev_queue *ingress_queue; unsigned char broadcast[32U]; struct netdev_queue *_tx; unsigned int num_tx_queues; unsigned int real_num_tx_queues; struct Qdisc *qdisc; unsigned long tx_queue_len; spinlock_t tx_global_lock; struct xps_dev_maps *xps_maps; struct cpu_rmap *rx_cpu_rmap; unsigned long trans_start; int watchdog_timeo; struct timer_list watchdog_timer; int *pcpu_refcnt; struct list_head todo_list; struct hlist_node index_hlist; struct list_head link_watch_list; unsigned char reg_state; bool dismantle; unsigned short rtnl_link_state; void (*destructor)(struct net_device *); struct netpoll_info *npinfo; struct net *nd_net; union __anonunion_ldv_42047_252 ldv_42047; struct garp_port *garp_port; struct mrp_port *mrp_port; struct device dev; const struct attribute_group *sysfs_groups[4U]; const struct attribute_group *sysfs_rx_queue_group; const struct rtnl_link_ops *rtnl_link_ops; unsigned int gso_max_size; u16 gso_max_segs; const struct dcbnl_rtnl_ops *dcbnl_ops; u8 num_tc; struct netdev_tc_txq tc_to_txq[16U]; u8 prio_tc_map[16U]; unsigned int fcoe_ddp_xid; struct netprio_map *priomap; struct phy_device *phydev; struct lock_class_key *qdisc_tx_busylock; int group; struct pm_qos_request pm_qos_req; } ;
1806 struct pcpu_sw_netstats { u64 rx_packets; u64 rx_bytes; u64 tx_packets; u64 tx_bytes; struct u64_stats_sync syncp; } ;
2548 enum skb_free_reason { SKB_REASON_CONSUMED = 0, SKB_REASON_DROPPED = 1 } ;
55 struct firmware { size_t size; const u8 *data; struct page **pages; void *priv; } ;
73 struct basic_ring { u8 *ringBase; u32 lastWrite; } ;
35 struct transmit_ring { u8 *ringBase; u32 lastWrite; u32 lastRead; int writeRegister; } ;
46 struct typhoon_indexes { volatile __le32 rxHiCleared; volatile __le32 rxLoCleared; volatile __le32 rxBuffReady; volatile __le32 respCleared; volatile __le32 txLoCleared; volatile __le32 txHiCleared; volatile __le32 rxLoReady; volatile __le32 rxBuffCleared; volatile __le32 cmdCleared; volatile __le32 respReady; volatile __le32 rxHiReady; } ;
81 struct typhoon_interface { __le32 ringIndex; __le32 ringIndexHi; __le32 txLoAddr; __le32 txLoAddrHi; __le32 txLoSize; __le32 txHiAddr; __le32 txHiAddrHi; __le32 txHiSize; __le32 rxLoAddr; __le32 rxLoAddrHi; __le32 rxLoSize; __le32 rxBuffAddr; __le32 rxBuffAddrHi; __le32 rxBuffSize; __le32 cmdAddr; __le32 cmdAddrHi; __le32 cmdSize; __le32 respAddr; __le32 respAddrHi; __le32 respSize; __le32 zeroAddr; __le32 zeroAddrHi; __le32 rxHiAddr; __le32 rxHiAddrHi; __le32 rxHiSize; } ;
129 struct __anonstruct_frag_258 { __le32 addr; __le32 addrHi; } ;
129 union __anonunion_ldv_44235_257 { struct __anonstruct_frag_258 frag; u64 tx_addr; } ;
129 struct tx_desc { u8 flags; u8 numDesc; __le16 len; union __anonunion_ldv_44235_257 ldv_44235; __le32 processFlags; } ;
178 struct tcpopt_desc { u8 flags; u8 numDesc; __le16 mss_flags; __le32 respAddrLo; __le32 bytesTx; __le32 status; } ;
231 struct rx_desc { u8 flags; u8 numDesc; __le16 frameLen; u32 addr; u32 addrHi; __le32 rxStatus; __le16 filterResults; __le16 ipsecResults; __be32 vlanTag; } ;
288 struct rx_free { __le32 physAddr; __le32 physAddrHi; u32 virtAddr; u32 virtAddrHi; } ;
305 struct cmd_desc { u8 flags; u8 numDesc; __le16 cmd; u16 seqNo; __le16 parm1; __le32 parm2; __le32 parm3; } ;
351 struct resp_desc { u8 flags; u8 numDesc; __le16 cmd; __le16 seqNo; __le16 parm1; __le32 parm2; __le32 parm3; } ;
363 struct stats_resp { u8 flags; u8 numDesc; __le16 cmd; __le16 seqNo; __le16 unused; __le32 txPackets; __le64 txBytes; __le32 txDeferred; __le32 txLateCollisions; __le32 txCollisions; __le32 txCarrierLost; __le32 txMultipleCollisions; __le32 txExcessiveCollisions; __le32 txFifoUnderruns; __le32 txMulticastTxOverflows; __le32 txFiltered; __le32 rxPacketsGood; __le64 rxBytesGood; __le32 rxFifoOverruns; __le32 BadSSD; __le32 rxCrcErrors; __le32 rxOversized; __le32 rxBroadcast; __le32 rxMulticast; __le32 rxOverflow; __le32 rxFiltered; __le32 linkStatus; __le32 unused2; __le32 unused3; } ;
492 struct typhoon_file_header { u8 tag[8U]; __le32 version; __le32 numSections; __le32 startAddr; __le32 hmacDigest[5U]; } ;
522 struct typhoon_section_header { __le32 len; u16 checksum; u16 reserved; __le32 startAddr; } ;
249 struct typhoon_card_info { const char *name; const int capabilities; } ;
344 struct typhoon_shared { struct typhoon_interface iface; struct typhoon_indexes indexes; struct tx_desc txLo[128U]; struct rx_desc rxLo[32U]; struct rx_desc rxHi[32U]; struct cmd_desc cmd[16U]; struct resp_desc resp[32U]; struct rx_free rxBuff[128U]; u32 zeroWord; struct tx_desc txHi[2U]; } ;
362 struct rxbuff_ent { struct sk_buff *skb; dma_addr_t dma_addr; } ;
367 struct typhoon { struct transmit_ring txLoRing; struct pci_dev *tx_pdev; void *tx_ioaddr; u32 txlo_dma_addr; void *ioaddr; struct typhoon_indexes *indexes; u8 awaiting_resp; u8 duplex; u8 speed; u8 card_state; struct basic_ring rxLoRing; struct pci_dev *pdev; struct net_device *dev; struct napi_struct napi; struct basic_ring rxHiRing; struct basic_ring rxBuffRing; struct rxbuff_ent rxbuffers[127U]; spinlock_t command_lock; struct basic_ring cmdRing; struct basic_ring respRing; struct net_device_stats stats; struct net_device_stats stats_saved; struct typhoon_shared *shared; dma_addr_t shared_dma; __le16 xcvr_select; __le16 wol_events; __le32 offload; int capabilities; struct transmit_ring txHiRing; } ;
5467 typedef int ldv_func_ret_type___0;
1 void * __builtin_memcpy(void *, const void *, unsigned long);
1 long int __builtin_expect(long exp, long c);
33 extern struct module __this_module;
358 extern struct pv_irq_ops pv_irq_ops;
72 void set_bit(long nr, volatile unsigned long *addr);
110 void clear_bit(long nr, volatile unsigned long *addr);
204 int test_and_set_bit(long nr, volatile unsigned long *addr);
250 int test_and_clear_bit(long nr, volatile unsigned long *addr);
308 int constant_test_bit(long nr, const volatile unsigned long *addr);
7 __u32 __arch_swab32(__u32 val);
46 __u16 __fswab16(__u16 val);
57 __u32 __fswab32(__u32 val);
139 int printk(const char *, ...);
165 void __might_sleep(const char *, int, int);
391 int snprintf(char *, size_t , const char *, ...);
71 void warn_slowpath_null(const char *, const int);
23 unsigned long int __phys_addr(unsigned long);
34 void * __memcpy(void *, const void *, size_t );
55 void * memset(void *, int, size_t );
60 int memcmp(const void *, const void *, size_t );
26 size_t strlcpy(char *, const char *, size_t );
802 unsigned long int arch_local_save_flags();
155 int arch_irqs_disabled_flags(unsigned long flags);
93 void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *);
22 void _raw_spin_lock(raw_spinlock_t *);
37 int _raw_spin_trylock(raw_spinlock_t *);
39 void _raw_spin_unlock(raw_spinlock_t *);
290 raw_spinlock_t * spinlock_check(spinlock_t *lock);
301 void spin_lock(spinlock_t *lock);
365 void ldv_spin_lock_55(spinlock_t *lock);
387 int spin_trylock(spinlock_t *lock);
333 int ldv_spin_trylock_53(spinlock_t *lock);
349 void spin_unlock(spinlock_t *lock);
409 void ldv_spin_unlock_54(spinlock_t *lock);
5 void __ldv_spin_lock(spinlock_t *);
8 void ldv___ldv_spin_lock_7(spinlock_t *ldv_func_arg1);
12 void ldv___ldv_spin_lock_15(spinlock_t *ldv_func_arg1);
16 void ldv___ldv_spin_lock_17(spinlock_t *ldv_func_arg1);
20 void ldv___ldv_spin_lock_19(spinlock_t *ldv_func_arg1);
24 void ldv___ldv_spin_lock_21(spinlock_t *ldv_func_arg1);
28 void ldv___ldv_spin_lock_49(spinlock_t *ldv_func_arg1);
44 void ldv_spin_lock_addr_list_lock_of_net_device();
60 void ldv_spin_lock_command_lock_of_typhoon();
61 void ldv_spin_unlock_command_lock_of_typhoon();
62 int ldv_spin_trylock_command_lock_of_typhoon();
76 void ldv_spin_lock_lock();
84 void ldv_spin_lock_lock_of_NOT_ARG_SIGN();
100 void ldv_spin_lock_node_size_lock_of_pglist_data();
116 void ldv_spin_lock_siglock_of_sighand_struct();
155 int rx_copybreak = 200;
162 unsigned int use_mmio = 2U;
168 const int multicast_filter_limit = 32;
31 unsigned int ioread32(void *);
37 void iowrite32(u32 , void *);
72 void pci_iounmap(struct pci_dev *, void *);
17 void * pci_iomap(struct pci_dev *, int, unsigned long);
86 const char * kobject_name(const struct kobject *kobj);
380 long int schedule_timeout_uninterruptible(long);
123 int request_threaded_irq(unsigned int, irqreturn_t (*)(int, void *), irqreturn_t (*)(int, void *), unsigned long, const char *, void *);
128 int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *), unsigned long flags, const char *name, void *dev);
142 void free_irq(unsigned int, void *);
806 const char * dev_name(const struct device *dev);
837 void * dev_get_drvdata(const struct device *dev);
842 void dev_set_drvdata(struct device *dev, void *data);
924 int pci_enable_device(struct pci_dev *);
941 void pci_disable_device(struct pci_dev *);
944 void pci_set_master(struct pci_dev *);
950 int pci_set_mwi(struct pci_dev *);
952 void pci_clear_mwi(struct pci_dev *);
997 int pci_save_state(struct pci_dev *);
998 void pci_restore_state(struct pci_dev *);
1009 int pci_set_power_state(struct pci_dev *, pci_power_t );
1010 pci_power_t pci_choose_state(struct pci_dev *, pm_message_t );
1013 int __pci_enable_wake(struct pci_dev *, pci_power_t , bool , bool );
1022 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable);
1062 int pci_request_regions(struct pci_dev *, const char *);
1064 void pci_release_regions(struct pci_dev *);
1107 int __pci_register_driver(struct pci_driver *, struct module *, const char *);
1116 void pci_unregister_driver(struct pci_driver *);
912 void * lowmem_page_address(const struct page *page);
69 int valid_dma_direction(int dma_direction);
76 int is_device_dma_capable(struct device *dev);
131 void kmemcheck_mark_initialized(void *address, unsigned int n);
37 void debug_dma_map_page(struct device *, struct page *, size_t , size_t , int, dma_addr_t , bool );
44 void debug_dma_unmap_page(struct device *, dma_addr_t , size_t , int, bool );
53 void debug_dma_alloc_coherent(struct device *, size_t , dma_addr_t , void *);
56 void debug_dma_free_coherent(struct device *, size_t , void *, dma_addr_t );
59 void debug_dma_sync_single_for_cpu(struct device *, dma_addr_t , size_t , int);
63 void debug_dma_sync_single_for_device(struct device *, dma_addr_t , size_t , int);
27 extern struct device x86_dma_fallback_dev;
30 extern struct dma_map_ops *dma_ops;
32 struct dma_map_ops * get_dma_ops(struct device *dev);
10 dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
29 void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
97 void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir);
109 void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir);
61 int dma_set_mask(struct device *, u64 );
103 unsigned long int dma_alloc_coherent_mask(struct device *dev, gfp_t gfp);
115 gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp);
131 void * dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs);
160 void dma_free_attrs(struct device *dev, size_t size, void *vaddr, dma_addr_t bus, struct dma_attrs *attrs);
16 void * pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);
23 void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
30 dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction);
36 void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);
71 void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
78 void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
105 int pci_set_dma_mask(struct pci_dev *dev, u64 mask);
1426 void * pci_get_drvdata(struct pci_dev *pdev);
1431 void pci_set_drvdata(struct pci_dev *pdev, void *data);
1439 const char * pci_name(const struct pci_dev *pdev);
10 void __const_udelay(unsigned long);
46 void msleep(unsigned int);
22 __sum16 csum_fold(__wsum sum);
145 __wsum csum_partial_copy_nocheck(const void *, void *, int, __wsum );
188 unsigned int skb_frag_size(const skb_frag_t *frag);
717 void consume_skb(struct sk_buff *);
869 unsigned char * skb_end_pointer(const struct sk_buff *skb);
1445 unsigned int skb_headlen(const struct sk_buff *skb);
1565 unsigned char * skb_put(struct sk_buff *, unsigned int);
1666 void skb_reserve(struct sk_buff *skb, int len);
2016 struct sk_buff * __netdev_alloc_skb(struct net_device *, unsigned int, gfp_t );
2032 struct sk_buff * netdev_alloc_skb(struct net_device *dev, unsigned int length);
2129 struct page * skb_frag_page(const skb_frag_t *frag);
2187 void * skb_frag_address(const skb_frag_t *frag);
2609 void skb_copy_to_linear_data(struct sk_buff *skb, const void *from, const unsigned int len);
3076 bool skb_is_gso(const struct sk_buff *skb);
3118 void skb_checksum_none_assert(const struct sk_buff *skb);
113 void ethtool_cmd_speed_set(struct ethtool_cmd *ep, __u32 speed);
121 __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep);
65 u32 ethtool_op_get_link(struct net_device *);
387 void __napi_schedule(struct napi_struct *);
389 bool napi_disable_pending(struct napi_struct *n);
403 bool napi_schedule_prep(struct napi_struct *n);
439 void napi_complete(struct napi_struct *);
474 void napi_disable(struct napi_struct *n);
490 void napi_enable(struct napi_struct *n);
1621 struct netdev_queue * netdev_get_tx_queue(const struct net_device *dev, unsigned int index);
1687 void * netdev_priv(const struct net_device *dev);
1718 void netif_napi_add(struct net_device *, struct napi_struct *, int (*)(struct napi_struct *, int), int);
1975 void free_netdev(struct net_device *);
2140 void __netif_schedule(struct Qdisc *);
2156 void netif_tx_start_queue(struct netdev_queue *dev_queue);
2167 void netif_start_queue(struct net_device *dev);
2182 void netif_tx_wake_queue(struct netdev_queue *dev_queue);
2195 void netif_wake_queue(struct net_device *dev);
2210 void netif_tx_stop_queue(struct netdev_queue *dev_queue);
2226 void netif_stop_queue(struct net_device *dev);
2241 bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue);
2252 bool netif_queue_stopped(const struct net_device *dev);
2398 bool netif_running(const struct net_device *dev);
2554 void __dev_kfree_skb_irq(struct sk_buff *, enum skb_free_reason );
2555 void __dev_kfree_skb_any(struct sk_buff *, enum skb_free_reason );
2576 void dev_kfree_skb_irq(struct sk_buff *skb);
2586 void dev_kfree_skb_any(struct sk_buff *skb);
2598 int netif_receive_skb(struct sk_buff *);
2695 void netif_carrier_on(struct net_device *);
2697 void netif_carrier_off(struct net_device *);
2765 void netif_device_detach(struct net_device *);
2767 void netif_device_attach(struct net_device *);
2998 int register_netdev(struct net_device *);
2999 void unregister_netdev(struct net_device *);
3395 int netdev_err(const struct net_device *, const char *, ...);
3397 int netdev_warn(const struct net_device *, const char *, ...);
3401 int netdev_info(const struct net_device *, const char *, ...);
32 __be16 eth_type_trans(struct sk_buff *, struct net_device *);
45 int eth_mac_addr(struct net_device *, void *);
46 int eth_change_mtu(struct net_device *, int);
47 int eth_validate_addr(struct net_device *);
49 struct net_device * alloc_etherdev_mqs(int, unsigned int, unsigned int);
89 bool is_zero_ether_addr(const u8 *addr);
107 bool is_multicast_ether_addr(const u8 *addr);
160 bool is_valid_ether_addr(const u8 *addr);
356 struct sk_buff * __vlan_hwaccel_put_tag(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
14 u32 bitrev32(u32 );
11 u32 crc32_le(u32 , const unsigned char *, size_t );
42 int request_firmware(const struct firmware **, const char *, struct device *);
49 void release_firmware(const struct firmware *);
279 struct typhoon_card_info typhoon_card_info[13U] = { { "3Com Typhoon (3C990-TX)", 0 }, { "3Com Typhoon (3CR990-TX-95)", 1 }, { "3Com Typhoon (3CR990-TX-97)", 3 }, { "3Com Typhoon (3C990SVR)", 0 }, { "3Com Typhoon (3CR990SVR95)", 1 }, { "3Com Typhoon (3CR990SVR97)", 3 }, { "3Com Typhoon2 (3C990B-TX-M)", 4 }, { "3Com Typhoon2 (3C990BSVR)", 4 }, { "3Com Typhoon (3CR990-FX-95)", 9 }, { "3Com Typhoon (3CR990-FX-97)", 11 }, { "3Com Typhoon (3CR990-FX-95 Server)", 9 }, { "3Com Typhoon (3CR990-FX-97 Server)", 11 }, { "3Com Typhoon2 (3C990B-FX-97)", 12 } };
314 const struct pci_device_id typhoon_pci_tbl[14U] = { { 4279U, 39168U, 4294967295U, 4294967295U, 0U, 0U, 0UL }, { 4279U, 39170U, 4294967295U, 4294967295U, 0U, 0U, 1UL }, { 4279U, 39171U, 4294967295U, 4294967295U, 0U, 0U, 2UL }, { 4279U, 39172U, 4294967295U, 4096U, 0U, 0U, 6UL }, { 4279U, 39172U, 4294967295U, 4354U, 0U, 0U, 12UL }, { 4279U, 39172U, 4294967295U, 8192U, 0U, 0U, 7UL }, { 4279U, 39173U, 4294967295U, 4353U, 0U, 0U, 8UL }, { 4279U, 39173U, 4294967295U, 4354U, 0U, 0U, 9UL }, { 4279U, 39173U, 4294967295U, 8449U, 0U, 0U, 10UL }, { 4279U, 39173U, 4294967295U, 8450U, 0U, 0U, 11UL }, { 4279U, 39176U, 4294967295U, 4294967295U, 0U, 0U, 4UL }, { 4279U, 39177U, 4294967295U, 4294967295U, 0U, 0U, 5UL }, { 4279U, 39178U, 4294967295U, 4294967295U, 0U, 0U, 3UL }, { 0U, 0U, 0U, 0U, 0U, 0U, 0UL } };
343 const struct pci_device_id __mod_pci__typhoon_pci_tbl_device_table = { };
444 void typhoon_inc_index(u32 *index, const int count, const int num_entries);
455 void typhoon_inc_cmd_index(u32 *index, const int count);
461 void typhoon_inc_resp_index(u32 *index, const int count);
467 void typhoon_inc_rxfree_index(u32 *index, const int count);
480 void typhoon_inc_rx_index(u32 *index, const int count);
488 int typhoon_reset(void *ioaddr, int wait_type);
543 int typhoon_wait_status(void *ioaddr, u32 wait_value);
560 void typhoon_media_status(struct net_device *dev, struct resp_desc *resp);
569 void typhoon_hello(struct typhoon *tp);
590 int typhoon_process_response(struct typhoon *tp, int resp_size, struct resp_desc *resp_save);
649 int typhoon_num_free(int lastWrite, int lastRead, int ringSize);
660 int typhoon_num_free_cmd(struct typhoon *tp);
669 int typhoon_num_free_resp(struct typhoon *tp);
678 int typhoon_num_free_tx(struct transmit_ring *ring);
685 int typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd, int num_resp, struct resp_desc *resp);
800 void typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing, u32 ring_dma);
821 netdev_tx_t typhoon_start_tx(struct sk_buff *skb, struct net_device *dev);
968 void typhoon_set_rx_mode(struct net_device *dev);
1007 int typhoon_do_get_stats(struct typhoon *tp);
1059 struct net_device_stats * typhoon_get_stats(struct net_device *dev);
1078 void typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info);
1107 int typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
1165 int typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd);
1215 void typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
1229 int typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
1246 void typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering);
1255 const struct ethtool_ops typhoon_ethtool_ops = { &typhoon_get_settings, &typhoon_set_settings, &typhoon_get_drvinfo, 0, 0, &typhoon_get_wol, &typhoon_set_wol, 0, 0, 0, &ethtool_op_get_link, 0, 0, 0, 0, 0, &typhoon_get_ringparam, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1266 int typhoon_wait_interrupt(void *ioaddr);
1287 void typhoon_init_interface(struct typhoon *tp);
1357 void typhoon_init_rings(struct typhoon *tp);
1373 const struct firmware *typhoon_fw = 0;
1376 int typhoon_request_firmware(struct typhoon *tp);
1434 int typhoon_download_firmware(struct typhoon *tp);
1575 int typhoon_boot_3XP(struct typhoon *tp, u32 initial_status);
1611 u32 typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing, volatile __le32 *index);
1647 void typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing, volatile __le32 *index);
1664 void typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx);
1691 int typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx);
1737 int typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 *ready, volatile __le32 *cleared, int budget);
1814 void typhoon_fill_free_ring(struct typhoon *tp);
1828 int typhoon_poll(struct napi_struct *napi, int budget);
1869 irqreturn_t typhoon_interrupt(int irq, void *dev_instance);
1893 void typhoon_free_rx_rings(struct typhoon *tp);
1909 int typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events);
1946 int typhoon_wakeup(struct typhoon *tp, int wait_type);
1967 int typhoon_start_runtime(struct typhoon *tp);
2059 int typhoon_stop_runtime(struct typhoon *tp, int wait_type);
2120 void typhoon_tx_timeout(struct net_device *dev);
2148 int typhoon_open(struct net_device *dev);
2197 int typhoon_close(struct net_device *dev);
2224 int typhoon_resume(struct pci_dev *pdev);
2253 int typhoon_suspend(struct pci_dev *pdev, pm_message_t state);
2312 int typhoon_test_mmio(struct pci_dev *pdev);
2357 const struct net_device_ops typhoon_netdev_ops = { 0, 0, &typhoon_open, &typhoon_close, &typhoon_start_tx, 0, 0, &typhoon_set_rx_mode, &eth_mac_addr, &eth_validate_addr, 0, 0, &eth_change_mtu, 0, &typhoon_tx_timeout, 0, &typhoon_get_stats, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2370 int typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
2621 void typhoon_remove_one(struct pci_dev *pdev);
2639 struct pci_driver typhoon_driver = { { 0, 0 }, "typhoon", (const struct pci_device_id *)(&typhoon_pci_tbl), &typhoon_init_one, &typhoon_remove_one, &typhoon_suspend, 0, 0, &typhoon_resume, 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { { { { { { 0U } }, 0U, 0U, 0, { 0, { 0, 0 }, 0, 0, 0UL } } } }, { 0, 0 } } };
2651 int typhoon_init();
2657 void typhoon_cleanup();
2682 void ldv_check_final_state();
2685 void ldv_check_return_value(int);
2688 void ldv_check_return_value_probe(int);
2691 void ldv_initialize();
2694 void ldv_handler_precall();
2697 int nondet_int();
2700 int LDV_IN_INTERRUPT = 0;
2703 void ldv_main0_sequence_infinite_withcheck_stateful();
10 void ldv_error();
25 int ldv_undef_int();
59 void __builtin_trap();
8 int ldv_spin__xmit_lock_of_netdev_queue = 0;
11 void ldv_spin_lock__xmit_lock_of_netdev_queue();
20 void ldv_spin_unlock__xmit_lock_of_netdev_queue();
29 int ldv_spin_trylock__xmit_lock_of_netdev_queue();
55 void ldv_spin_unlock_wait__xmit_lock_of_netdev_queue();
62 int ldv_spin_is_locked__xmit_lock_of_netdev_queue();
83 int ldv_spin_can_lock__xmit_lock_of_netdev_queue();
90 int ldv_spin_is_contended__xmit_lock_of_netdev_queue();
111 int ldv_atomic_dec_and_lock__xmit_lock_of_netdev_queue();
133 int ldv_spin_addr_list_lock_of_net_device = 0;
145 void ldv_spin_unlock_addr_list_lock_of_net_device();
154 int ldv_spin_trylock_addr_list_lock_of_net_device();
180 void ldv_spin_unlock_wait_addr_list_lock_of_net_device();
187 int ldv_spin_is_locked_addr_list_lock_of_net_device();
208 int ldv_spin_can_lock_addr_list_lock_of_net_device();
215 int ldv_spin_is_contended_addr_list_lock_of_net_device();
236 int ldv_atomic_dec_and_lock_addr_list_lock_of_net_device();
258 int ldv_spin_alloc_lock_of_task_struct = 0;
261 void ldv_spin_lock_alloc_lock_of_task_struct();
270 void ldv_spin_unlock_alloc_lock_of_task_struct();
279 int ldv_spin_trylock_alloc_lock_of_task_struct();
305 void ldv_spin_unlock_wait_alloc_lock_of_task_struct();
312 int ldv_spin_is_locked_alloc_lock_of_task_struct();
333 int ldv_spin_can_lock_alloc_lock_of_task_struct();
340 int ldv_spin_is_contended_alloc_lock_of_task_struct();
361 int ldv_atomic_dec_and_lock_alloc_lock_of_task_struct();
383 int ldv_spin_command_lock_of_typhoon = 0;
430 void ldv_spin_unlock_wait_command_lock_of_typhoon();
437 int ldv_spin_is_locked_command_lock_of_typhoon();
458 int ldv_spin_can_lock_command_lock_of_typhoon();
465 int ldv_spin_is_contended_command_lock_of_typhoon();
486 int ldv_atomic_dec_and_lock_command_lock_of_typhoon();
508 int ldv_spin_i_lock_of_inode = 0;
511 void ldv_spin_lock_i_lock_of_inode();
520 void ldv_spin_unlock_i_lock_of_inode();
529 int ldv_spin_trylock_i_lock_of_inode();
555 void ldv_spin_unlock_wait_i_lock_of_inode();
562 int ldv_spin_is_locked_i_lock_of_inode();
583 int ldv_spin_can_lock_i_lock_of_inode();
590 int ldv_spin_is_contended_i_lock_of_inode();
611 int ldv_atomic_dec_and_lock_i_lock_of_inode();
633 int ldv_spin_lock = 0;
645 void ldv_spin_unlock_lock();
654 int ldv_spin_trylock_lock();
680 void ldv_spin_unlock_wait_lock();
687 int ldv_spin_is_locked_lock();
708 int ldv_spin_can_lock_lock();
715 int ldv_spin_is_contended_lock();
736 int ldv_atomic_dec_and_lock_lock();
758 int ldv_spin_lock_of_NOT_ARG_SIGN = 0;
770 void ldv_spin_unlock_lock_of_NOT_ARG_SIGN();
779 int ldv_spin_trylock_lock_of_NOT_ARG_SIGN();
805 void ldv_spin_unlock_wait_lock_of_NOT_ARG_SIGN();
812 int ldv_spin_is_locked_lock_of_NOT_ARG_SIGN();
833 int ldv_spin_can_lock_lock_of_NOT_ARG_SIGN();
840 int ldv_spin_is_contended_lock_of_NOT_ARG_SIGN();
861 int ldv_atomic_dec_and_lock_lock_of_NOT_ARG_SIGN();
883 int ldv_spin_lru_lock_of_netns_frags = 0;
886 void ldv_spin_lock_lru_lock_of_netns_frags();
895 void ldv_spin_unlock_lru_lock_of_netns_frags();
904 int ldv_spin_trylock_lru_lock_of_netns_frags();
930 void ldv_spin_unlock_wait_lru_lock_of_netns_frags();
937 int ldv_spin_is_locked_lru_lock_of_netns_frags();
958 int ldv_spin_can_lock_lru_lock_of_netns_frags();
965 int ldv_spin_is_contended_lru_lock_of_netns_frags();
986 int ldv_atomic_dec_and_lock_lru_lock_of_netns_frags();
1008 int ldv_spin_node_size_lock_of_pglist_data = 0;
1020 void ldv_spin_unlock_node_size_lock_of_pglist_data();
1029 int ldv_spin_trylock_node_size_lock_of_pglist_data();
1055 void ldv_spin_unlock_wait_node_size_lock_of_pglist_data();
1062 int ldv_spin_is_locked_node_size_lock_of_pglist_data();
1083 int ldv_spin_can_lock_node_size_lock_of_pglist_data();
1090 int ldv_spin_is_contended_node_size_lock_of_pglist_data();
1111 int ldv_atomic_dec_and_lock_node_size_lock_of_pglist_data();
1133 int ldv_spin_ptl = 0;
1136 void ldv_spin_lock_ptl();
1145 void ldv_spin_unlock_ptl();
1154 int ldv_spin_trylock_ptl();
1180 void ldv_spin_unlock_wait_ptl();
1187 int ldv_spin_is_locked_ptl();
1208 int ldv_spin_can_lock_ptl();
1215 int ldv_spin_is_contended_ptl();
1236 int ldv_atomic_dec_and_lock_ptl();
1258 int ldv_spin_siglock_of_sighand_struct = 0;
1270 void ldv_spin_unlock_siglock_of_sighand_struct();
1279 int ldv_spin_trylock_siglock_of_sighand_struct();
1305 void ldv_spin_unlock_wait_siglock_of_sighand_struct();
1312 int ldv_spin_is_locked_siglock_of_sighand_struct();
1333 int ldv_spin_can_lock_siglock_of_sighand_struct();
1340 int ldv_spin_is_contended_siglock_of_sighand_struct();
1361 int ldv_atomic_dec_and_lock_siglock_of_sighand_struct();
1383 int ldv_spin_tx_global_lock_of_net_device = 0;
1386 void ldv_spin_lock_tx_global_lock_of_net_device();
1395 void ldv_spin_unlock_tx_global_lock_of_net_device();
1404 int ldv_spin_trylock_tx_global_lock_of_net_device();
1430 void ldv_spin_unlock_wait_tx_global_lock_of_net_device();
1437 int ldv_spin_is_locked_tx_global_lock_of_net_device();
1458 int ldv_spin_can_lock_tx_global_lock_of_net_device();
1465 int ldv_spin_is_contended_tx_global_lock_of_net_device();
1486 int ldv_atomic_dec_and_lock_tx_global_lock_of_net_device();
return ;
}
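Note on the lock model: the ldv_spin_* integers declared above are the verifier's encoding of spin-lock state. Reconstructed from the behaviour visible later in this trace (ldv_initialize() sets every flag to 1; locking requires the flag to be 1 and sets it to 2; trylock on a flag that is not 1 reaches ldv_error()), the model for one lock looks roughly like the sketch below. The names mirror the generated code, but the bodies are illustrative, not the exact model source.

void ldv_error(void);
int ldv_undef_int(void);

int ldv_spin_command_lock_of_typhoon = 0;          /* 0 = not initialized */

void ldv_initialize(void)
{
        ldv_spin_command_lock_of_typhoon = 1;      /* 1 = lock is free */
}

void ldv_spin_lock_command_lock_of_typhoon(void)
{
        if (ldv_spin_command_lock_of_typhoon != 1)
                ldv_error();                       /* lock already held */
        ldv_spin_command_lock_of_typhoon = 2;      /* 2 = held by this thread */
}

int ldv_spin_trylock_command_lock_of_typhoon(void)
{
        if (ldv_spin_command_lock_of_typhoon != 1)
                ldv_error();                       /* trylock while held: the bug class checked here */
        if (ldv_undef_int()) {                     /* nondeterministic success */
                ldv_spin_command_lock_of_typhoon = 2;
                return 1;
        }
        return 0;
}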
-entry_point
{
2705 struct net_device *var_group1;
2706 struct ethtool_cmd *var_group2;
2707 struct ethtool_drvinfo *var_group3;
2708 struct ethtool_wolinfo *var_group4;
2709 struct ethtool_ringparam *var_group5;
2710 int res_typhoon_open_47;
2711 int res_typhoon_close_48;
2712 struct sk_buff *var_group6;
2713 struct pci_dev *var_group7;
2714 const struct pci_device_id *var_typhoon_init_one_52_p1;
2715 int res_typhoon_init_one_52;
2716 pm_message_t var_typhoon_suspend_50_p1;
2717 int var_typhoon_interrupt_40_p0;
2718 void *var_typhoon_interrupt_40_p1;
2719 int ldv_s_typhoon_netdev_ops_net_device_ops;
2720 int ldv_s_typhoon_driver_pci_driver;
2721 int tmp;
2722 int tmp___0;
2723 int tmp___1;
3757 ldv_s_typhoon_netdev_ops_net_device_ops = 0;
3760 ldv_s_typhoon_driver_pci_driver = 0;
3686 LDV_IN_INTERRUPT = 1;
3695 -ldv_initialize()
{
1513 ldv_spin__xmit_lock_of_netdev_queue = 1;
1515 ldv_spin_addr_list_lock_of_net_device = 1;
1517 ldv_spin_alloc_lock_of_task_struct = 1;
1519 ldv_spin_command_lock_of_typhoon = 1;
1521 ldv_spin_i_lock_of_inode = 1;
1523 ldv_spin_lock = 1;
1525 ldv_spin_lock_of_NOT_ARG_SIGN = 1;
1527 ldv_spin_lru_lock_of_netns_frags = 1;
1529 ldv_spin_node_size_lock_of_pglist_data = 1;
1531 ldv_spin_ptl = 1;
1533 ldv_spin_siglock_of_sighand_struct = 1;
1535 ldv_spin_tx_global_lock_of_net_device = 1;
1536 return ;;
}
3752 ldv_handler_precall() { /* Function call is skipped due to function is undefined */}
3753 -typhoon_init()
{
2653 int tmp;
2653 tmp = __pci_register_driver(&typhoon_driver, &__this_module, "typhoon") { /* Function call is skipped due to function is undefined */}
2653 return tmp;;
}
3753 assume(!(tmp != 0));
3766 goto ldv_45041;
3766 tmp___1 = nondet_int() { /* Function call is skipped due to function is undefined */}
3766 assume(tmp___1 != 0);
3770 goto ldv_45040;
3767 ldv_45040:;
3771 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}
3771 switch (tmp___0)
3772 assume(!(tmp___0 == 0));
3843 assume(!(tmp___0 == 1));
3913 assume(!(tmp___0 == 2));
3983 assume(!(tmp___0 == 3));
4053 assume(!(tmp___0 == 4));
4123 assume(!(tmp___0 == 5));
4193 assume(!(tmp___0 == 6));
4266 assume(!(tmp___0 == 7));
4339 assume(!(tmp___0 == 8));
4409 assume(!(tmp___0 == 9));
4479 assume(!(tmp___0 == 10));
4549 assume(!(tmp___0 == 11));
4619 assume(!(tmp___0 == 12));
4692 assume(!(tmp___0 == 13));
4762 assume(tmp___0 == 14);
4818 ldv_handler_precall() { /* Function call is skipped due to function is undefined */}
4819 -typhoon_suspend(var_group7, var_typhoon_suspend_50_p1)
{
2255 struct net_device *dev;
2256 void *tmp;
2257 struct typhoon *tp;
2258 void *tmp___0;
2259 struct cmd_desc xp_cmd;
2260 bool tmp___1;
2261 int tmp___2;
2262 int tmp___3;
2263 int tmp___4;
2264 struct cmd_desc *_ptr;
2265 __u16 tmp___5;
2266 __u32 tmp___6;
2267 int tmp___7;
2268 struct cmd_desc *_ptr___0;
2269 int tmp___8;
2270 pci_power_t tmp___9;
2271 int tmp___10;
2255 -pci_get_drvdata(pdev)
{
1428 void *tmp;
1428 -dev_get_drvdata((const struct device *)(&(pdev->dev)))
{
839 void *__CPAchecker_TMP_0 = (void *)(dev->driver_data);
839 return __CPAchecker_TMP_0;;
}
1428 return tmp;;
}
2255 dev = (struct net_device *)tmp;
2256 -netdev_priv((const struct net_device *)dev)
{
1689 return ((void *)dev) + 3264U;;
}
2256 tp = (struct typhoon *)tmp___0;
2261 -netif_running((const struct net_device *)dev)
{
2400 int tmp;
2400 -constant_test_bit(0L, (const volatile unsigned long *)(&(dev->state)))
{
310 return ((int)(((unsigned long)(*(addr + ((unsigned long)(nr >> 6))))) >> (((int)nr) & 63))) & 1;;
}
2400 return tmp != 0;;
}
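Note: constant_test_bit() above is the standard word-indexed bit test. With 64-bit longs, nr >> 6 selects which unsigned long of the bitmap holds the bit and nr & 63 selects the bit inside that word; netif_running() uses it to test bit 0 (__LINK_STATE_START) of dev->state. A minimal standalone sketch of the same arithmetic, with illustrative values:

#include <stdio.h>

static int test_bit(long nr, const unsigned long *addr)
{
        /* nr >> 6 picks the 64-bit word, nr & 63 the bit within it */
        return (int)((addr[nr >> 6] >> (nr & 63)) & 1);
}

int main(void)
{
        unsigned long state[2] = { 0x1UL, 0x0UL };  /* bit 0 set, bit 64 clear */
        printf("%d %d\n", test_bit(0, state), test_bit(64, state));  /* prints "1 0" */
        return 0;
}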
2261 assume(!(tmp___1 == 0));
2261 tmp___2 = 0;
2261 assume(tmp___2 == 0);
2265 int __CPAchecker_TMP_0 = (int)(tp->wol_events);
2265 assume((__CPAchecker_TMP_0 & 1) == 0);
2268 netif_device_detach(dev) { /* Function call is skipped due to function is undefined */}
2270 -typhoon_stop_runtime(tp, 1)
{
2061 struct typhoon_indexes *indexes;
2062 struct transmit_ring *txLo;
2063 void *ioaddr;
2064 struct cmd_desc xp_cmd;
2065 int i;
2066 struct cmd_desc *_ptr;
2067 struct cmd_desc *_ptr___0;
2068 size_t __len;
2069 void *__ret;
2070 struct cmd_desc *_ptr___1;
2071 int tmp;
2072 int tmp___0;
2061 indexes = tp->indexes;
2062 txLo = &(tp->txLoRing);
2063 ioaddr = tp->ioaddr;
2071 iowrite32(0U, ioaddr + 8UL) { /* Function call is skipped due to function is undefined */}
2073 _ptr = &xp_cmd;
2073 memset((void *)_ptr, 0, 16UL) { /* Function call is skipped due to function is undefined */}
2073 _ptr->flags = 130U;
2073 _ptr->cmd = 4U;
2074 -typhoon_issue_command(tp, 1, &xp_cmd, 0, (struct resp_desc *)0)
{
688 struct typhoon_indexes *indexes;
689 struct basic_ring *ring;
690 struct resp_desc local_resp;
691 int i;
692 int err;
693 int got_resp;
694 int freeCmd;
695 int freeResp;
696 int len;
697 int wrap_len;
698 long tmp;
699 size_t __len;
700 void *__ret;
701 struct cmd_desc *wrap_ptr;
702 size_t __len___0;
703 void *__ret___0;
704 long tmp___0;
705 long tmp___1;
688 indexes = tp->indexes;
689 ring = &(tp->cmdRing);
691 err = 0;
696 -ldv_spin_lock_55(&(tp->command_lock))
{
5487 -ldv_spin_lock_command_lock_of_typhoon()
{
389 assume(ldv_spin_command_lock_of_typhoon == 1);
391 ldv_spin_command_lock_of_typhoon = 2;
392 return ;;
}
5489 -spin_lock(lock)
{
303 _raw_spin_lock(&(lock->ldv_6306.rlock)) { /* Function call is skipped due to function is undefined */}
304 return ;;
}
5490 return ;;
}
698 -typhoon_num_free_cmd(tp)
{
662 int lastWrite;
663 int cmdCleared;
664 int tmp;
662 lastWrite = (int)(tp->cmdRing.lastWrite);
663 int __CPAchecker_TMP_0 = (int)(tp->indexes->cmdCleared);
663 cmdCleared = __CPAchecker_TMP_0;
665 -typhoon_num_free(lastWrite, cmdCleared, 16)
{
654 lastWrite = (int)(((unsigned long)lastWrite) / 16UL);
655 lastRead = (int)(((unsigned long)lastRead) / 16UL);
656 return (((ringSize + lastRead) - lastWrite) + -1) % ringSize;;
}
665 return tmp;;
}
699 -typhoon_num_free_resp(tp)
{
671 int respReady;
672 int respCleared;
673 int tmp;
671 int __CPAchecker_TMP_0 = (int)(tp->indexes->respReady);
671 respReady = __CPAchecker_TMP_0;
672 int __CPAchecker_TMP_1 = (int)(tp->indexes->respCleared);
672 respCleared = __CPAchecker_TMP_1;
674 -typhoon_num_free(respReady, respCleared, 32)
{
654 lastWrite = (int)(((unsigned long)lastWrite) / 16UL);
655 lastRead = (int)(((unsigned long)lastRead) / 16UL);
656 return (((ringSize + lastRead) - lastWrite) + -1) % ringSize;;
}
674 return tmp;;
}
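Note: typhoon_num_free() treats lastWrite and lastRead as byte offsets into a ring of 16-byte descriptors: it converts both to slot indices and keeps one slot permanently unused so a full ring can be distinguished from an empty one. A runnable sketch of the same computation, with illustrative values:

#include <stdio.h>

static int typhoon_num_free(int lastWrite, int lastRead, int ringSize)
{
        lastWrite /= 16;        /* byte offset -> slot index */
        lastRead /= 16;
        return ((ringSize + lastRead) - lastWrite - 1) % ringSize;
}

int main(void)
{
        printf("%d\n", typhoon_num_free(0, 0, 16));       /* empty 16-entry ring: 15 */
        printf("%d\n", typhoon_num_free(4 * 16, 0, 16));  /* writer 4 slots ahead: 11 */
        return 0;
}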
701 assume(!(freeCmd < num_cmd));
701 assume(!(freeResp < num_resp));
708 int __CPAchecker_TMP_1 = (int)(cmd->flags);
708 assume((__CPAchecker_TMP_1 & 64) != 0);
712 tp->awaiting_resp = 1U;
713 assume(((unsigned long)resp) == 0UL);
714 resp = &local_resp;
715 num_resp = 1;
719 wrap_len = 0;
720 len = (int)(((unsigned int)num_cmd) * 16U);
721 -__builtin_expect(((ring->lastWrite) + ((u32 )len)) > 256U, 0L)
{
51 return exp;;
}
721 assume(!(tmp != 0L));
726 __len = (size_t )len;
726 void *__CPAchecker_TMP_2 = (void *)(ring->ringBase);
726 unsigned long __CPAchecker_TMP_3 = (unsigned long)(ring->lastWrite);
726 __ret = __builtin_memcpy(__CPAchecker_TMP_2 + __CPAchecker_TMP_3, (const void *)cmd, __len) { /* Function call is skipped due to function is undefined */}
727 -__builtin_expect(wrap_len != 0, 0L)
{
51 return exp;;
}
727 assume(!(tmp___0 != 0L));
733 -typhoon_inc_cmd_index(&(ring->lastWrite), num_cmd)
{
457 -typhoon_inc_index(index, count, 16)
{
450 *index = (*index) + (((u32 )((unsigned long)count)) * 16U);
451 *index = (u32 )(((unsigned long)(*index)) % (((unsigned long)num_entries) * 16UL));
452 return ;;
}
458 return ;;
}
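Note: typhoon_inc_index() advances a ring index by count 16-byte descriptors and wraps it modulo num_entries * 16, so the index always remains a byte offset inside the ring. A short sketch, with illustrative values:

#include <stdio.h>
#include <stdint.h>

static void typhoon_inc_index(uint32_t *index, int count, int num_entries)
{
        *index += (uint32_t)count * 16U;        /* advance by whole descriptors */
        *index %= (uint32_t)num_entries * 16U;  /* wrap at the end of the ring */
}

int main(void)
{
        uint32_t idx = 15U * 16U;          /* last slot of a 16-entry ring */
        typhoon_inc_index(&idx, 1, 16);
        printf("%u\n", idx);               /* wraps back to 0 */
        return 0;
}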
737 Ignored inline assembler code
738 iowrite32(ring->lastWrite, (tp->ioaddr) + 40UL) { /* Function call is skipped due to function is undefined */}
739 -__builtin_expect(use_mmio != 0U, 1L)
{
51 return exp;;
}
739 assume(tmp___1 != 0L);
739 ioread32((tp->ioaddr) + 52UL) { /* Function call is skipped due to function is undefined */}
741 int __CPAchecker_TMP_5 = (int)(cmd->flags);
741 assume(!((__CPAchecker_TMP_5 & 64) == 0));
760 got_resp = 0;
761 i = 0;
761 goto ldv_44570;
761 assume(i <= 9999);
761 assume(got_resp == 0);
763 goto ldv_44569;
762 ldv_44569:;
762 unsigned int __CPAchecker_TMP_6 = (unsigned int)(indexes->respCleared);
762 unsigned int __CPAchecker_TMP_7 = (unsigned int)(indexes->respReady);
762 assume(__CPAchecker_TMP_6 != __CPAchecker_TMP_7);
763 -typhoon_process_response(tp, num_resp, resp)
{
592 struct typhoon_indexes *indexes;
593 struct resp_desc *resp;
594 u8 *base;
595 int count;
596 int len;
597 int wrap_len;
598 u32 cleared;
599 u32 ready;
600 long tmp;
601 size_t __len;
602 void *__ret;
603 size_t __len___0;
604 void *__ret___0;
605 long tmp___0;
593 indexes = tp->indexes;
595 base = tp->respRing.ringBase;
600 cleared = indexes->respCleared;
601 ready = indexes->respReady;
602 goto ldv_44524;
602 assume(cleared != ready);
604 goto ldv_44523;
603 ldv_44523:;
603 resp = ((struct resp_desc *)base) + ((unsigned long)cleared);
604 int __CPAchecker_TMP_0 = (int)(resp->numDesc);
604 count = __CPAchecker_TMP_0 + 1;
605 assume(((unsigned long)resp_save) != 0UL);
605 unsigned int __CPAchecker_TMP_1 = (unsigned int)(resp->seqNo);
605 assume(!(__CPAchecker_TMP_1 != 0U));
625 unsigned int __CPAchecker_TMP_2 = (unsigned int)(resp->cmd);
625 assume(!(__CPAchecker_TMP_2 == 27U));
627 unsigned int __CPAchecker_TMP_3 = (unsigned int)(resp->cmd);
627 assume(__CPAchecker_TMP_3 == 87U);
628 -typhoon_hello(tp)
{
571 struct basic_ring *ring;
572 struct cmd_desc *cmd;
573 struct cmd_desc *_ptr;
574 int tmp;
571 ring = &(tp->cmdRing);
578 -ldv_spin_trylock_53(&(tp->command_lock))
{
5467 ldv_func_ret_type___0 ldv_func_res;
5468 int tmp;
5469 int tmp___0;
5469 -spin_trylock(lock)
{
389 int tmp;
389 tmp = _raw_spin_trylock(&(lock->ldv_6306.rlock)) { /* Function call is skipped due to function is undefined */}
389 return tmp;;
}
5469 ldv_func_res = tmp;
5471 -ldv_spin_trylock_command_lock_of_typhoon()
{
406 int is_spin_held_by_another_thread;
409 assume(!(ldv_spin_command_lock_of_typhoon == 1));
409 -ldv_error()
{
15 LDV_ERROR:;
12 goto LDV_ERROR;
}
}
}
}
}
}
}
}
}
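Summary of the reported bug: typhoon_suspend() calls typhoon_stop_runtime(), which issues a command through typhoon_issue_command(); that function acquires tp->command_lock (via ldv_spin_lock_55) and, still holding it, polls for a response in typhoon_process_response(). When the response carries cmd == 87, typhoon_process_response() calls typhoon_hello(), which attempts spin_trylock() on the same tp->command_lock from the same thread; the verifier's lock model treats any (try)lock of a lock the current thread already holds as an error, which is the ldv_error() reached above. A self-contained sketch of the pattern, with names mirroring the driver but illustrative bodies:

#include <stdio.h>
#include <stdlib.h>

static int command_lock = 1;               /* 1 = free, 2 = held */

static void ldv_error(void)
{
        puts("BUG: nested acquisition of command_lock");
        exit(1);
}

static void spin_lock(int *l)
{
        if (*l != 1)
                ldv_error();
        *l = 2;
}

static int spin_trylock(int *l)
{
        if (*l != 1)
                ldv_error();               /* already held by this thread */
        *l = 2;
        return 1;
}

static void typhoon_hello(void)
{
        spin_trylock(&command_lock);       /* nested acquisition -> reported bug */
}

static void typhoon_process_response(void)
{
        typhoon_hello();                   /* cmd == 87 response path */
}

int main(void)
{
        spin_lock(&command_lock);          /* typhoon_issue_command() */
        typhoon_process_response();        /* still under command_lock */
        return 0;
}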
Source code
1 #ifndef _ASM_X86_BITOPS_H
2 #define _ASM_X86_BITOPS_H
3
4 /*
5  * Copyright 1992, Linus Torvalds.
6  *
7  * Note: inlines with more than a single statement should be marked
8  * __always_inline to avoid problems with older gcc's inlining heuristics.
9  */
10
11 #ifndef _LINUX_BITOPS_H
12 #error only <linux/bitops.h> can be included directly
13 #endif
14
15 #include <linux/compiler.h>
16 #include <asm/alternative.h>
17 #include <asm/rmwcc.h>
18 #include <asm/barrier.h>
19
20 #if BITS_PER_LONG == 32
21 # define _BITOPS_LONG_SHIFT 5
22 #elif BITS_PER_LONG == 64
23 # define _BITOPS_LONG_SHIFT 6
24 #else
25 # error "Unexpected BITS_PER_LONG"
26 #endif
27
28 #define BIT_64(n) (U64_C(1) << (n))
29
30 /*
31  * These have to be done with inline assembly: that way the bit-setting
32  * is guaranteed to be atomic. All bit operations return 0 if the bit
33  * was cleared before the operation and != 0 if it was not.
34  *
35  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
36  */
37
38 #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
39 /* Technically wrong, but this avoids compilation errors on some gcc
40    versions. */
41 #define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
42 #else
43 #define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
44 #endif
45
46 #define ADDR BITOP_ADDR(addr)
47
48 /*
49  * We do the locked ops that don't return the old value as
50  * a mask operation on a byte.
51  */
52 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
53 #define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
54 #define CONST_MASK(nr) (1 << ((nr) & 7))
55
56 /**
57  * set_bit - Atomically set a bit in memory
58  * @nr: the bit to set
59  * @addr: the address to start counting from
60  *
61  * This function is atomic and may not be reordered. See __set_bit()
62  * if you do not require the atomic guarantees.
63  *
64  * Note: there are no guarantees that this function will not be reordered
65  * on non x86 architectures, so if you are writing portable code,
66  * make sure not to rely on its reordering guarantees.
67  *
68  * Note that @nr may be almost arbitrarily large; this function is not
69  * restricted to acting on a single-word quantity.
70  */
71 static __always_inline void
72 set_bit(long nr, volatile unsigned long *addr)
73 {
74 	if (IS_IMMEDIATE(nr)) {
75 		asm volatile(LOCK_PREFIX "orb %1,%0"
76 			: CONST_MASK_ADDR(nr, addr)
77 			: "iq" ((u8)CONST_MASK(nr))
78 			: "memory");
79 	} else {
80 		asm volatile(LOCK_PREFIX "bts %1,%0"
81 			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
82 	}
83 }
84
85 /**
86  * __set_bit - Set a bit in memory
87  * @nr: the bit to set
88  * @addr: the address to start counting from
89  *
90  * Unlike set_bit(), this function is non-atomic and may be reordered.
91  * If it's called on the same region of memory simultaneously, the effect
92  * may be that only one operation succeeds.
93  */
94 static inline void __set_bit(long nr, volatile unsigned long *addr)
95 {
96 	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
97 }
98
99 /**
100  * clear_bit - Clears a bit in memory
101  * @nr: Bit to clear
102  * @addr: Address to start counting from
103  *
104  * clear_bit() is atomic and may not be reordered. However, it does
105  * not contain a memory barrier, so if it is used for locking purposes,
106  * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
107  * in order to ensure changes are visible on other processors.
108  */
109 static __always_inline void
110 clear_bit(long nr, volatile unsigned long *addr)
111 {
112 	if (IS_IMMEDIATE(nr)) {
113 		asm volatile(LOCK_PREFIX "andb %1,%0"
114 			: CONST_MASK_ADDR(nr, addr)
115 			: "iq" ((u8)~CONST_MASK(nr)));
116 	} else {
117 		asm volatile(LOCK_PREFIX "btr %1,%0"
118 			: BITOP_ADDR(addr)
119 			: "Ir" (nr));
120 	}
121 }
122
123 /*
124  * clear_bit_unlock - Clears a bit in memory
125  * @nr: Bit to clear
126  * @addr: Address to start counting from
127  *
128  * clear_bit() is atomic and implies release semantics before the memory
129  * operation. It can be used for an unlock.
130  */
131 static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
132 {
133 	barrier();
134 	clear_bit(nr, addr);
135 }
136
137 static inline void __clear_bit(long nr, volatile unsigned long *addr)
138 {
139 	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
140 }
141
142 /*
143  * __clear_bit_unlock - Clears a bit in memory
144  * @nr: Bit to clear
145  * @addr: Address to start counting from
146  *
147  * __clear_bit() is non-atomic and implies release semantics before the memory
148  * operation. It can be used for an unlock if no other CPUs can concurrently
149  * modify other bits in the word.
150  *
151  * No memory barrier is required here, because x86 cannot reorder stores past
152  * older loads. Same principle as spin_unlock.
153  */
154 static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
155 {
156 	barrier();
157 	__clear_bit(nr, addr);
158 }
159
160 /**
161  * __change_bit - Toggle a bit in memory
162  * @nr: the bit to change
163  * @addr: the address to start counting from
164  *
165  * Unlike change_bit(), this function is non-atomic and may be reordered.
166  * If it's called on the same region of memory simultaneously, the effect
167  * may be that only one operation succeeds.
168  */
169 static inline void __change_bit(long nr, volatile unsigned long *addr)
170 {
171 	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
172 }
173
174 /**
175  * change_bit - Toggle a bit in memory
176  * @nr: Bit to change
177  * @addr: Address to start counting from
178  *
179  * change_bit() is atomic and may not be reordered.
180  * Note that @nr may be almost arbitrarily large; this function is not
181  * restricted to acting on a single-word quantity.
182  */
183 static inline void change_bit(long nr, volatile unsigned long *addr)
184 {
185 	if (IS_IMMEDIATE(nr)) {
186 		asm volatile(LOCK_PREFIX "xorb %1,%0"
187 			: CONST_MASK_ADDR(nr, addr)
188 			: "iq" ((u8)CONST_MASK(nr)));
189 	} else {
190 		asm volatile(LOCK_PREFIX "btc %1,%0"
191 			: BITOP_ADDR(addr)
192 			: "Ir" (nr));
193 	}
194 }
195
196 /**
197  * test_and_set_bit - Set a bit and return its old value
198  * @nr: Bit to set
199  * @addr: Address to count from
200  *
201  * This operation is atomic and cannot be reordered.
202  * It also implies a memory barrier.
203  */
204 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
205 {
206 	GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
207 }
208
209 /**
210  * test_and_set_bit_lock - Set a bit and return its old value for lock
211  * @nr: Bit to set
212  * @addr: Address to count from
213  *
214  * This is the same as test_and_set_bit on x86.
215  */
216 static __always_inline int
217 test_and_set_bit_lock(long nr, volatile unsigned long *addr)
218 {
219 	return test_and_set_bit(nr, addr);
220 }
221
222 /**
223  * __test_and_set_bit - Set a bit and return its old value
224  * @nr: Bit to set
225  * @addr: Address to count from
226  *
227  * This operation is non-atomic and can be reordered.
228  * If two examples of this operation race, one can appear to succeed
229  * but actually fail. You must protect multiple accesses with a lock.
230  */
231 static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
232 {
233 	int oldbit;
234
235 	asm("bts %2,%1\n\t"
236 	    "sbb %0,%0"
237 	    : "=r" (oldbit), ADDR
238 	    : "Ir" (nr));
239 	return oldbit;
240 }
241
242 /**
243  * test_and_clear_bit - Clear a bit and return its old value
244  * @nr: Bit to clear
245  * @addr: Address to count from
246  *
247  * This operation is atomic and cannot be reordered.
248  * It also implies a memory barrier.
249  */
250 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
251 {
252 	GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
253 }
254
255 /**
256  * __test_and_clear_bit - Clear a bit and return its old value
257  * @nr: Bit to clear
258  * @addr: Address to count from
259  *
260  * This operation is non-atomic and can be reordered.
261  * If two examples of this operation race, one can appear to succeed
262  * but actually fail. You must protect multiple accesses with a lock.
263  *
264  * Note: the operation is performed atomically with respect to
265  * the local CPU, but not other CPUs. Portable code should not
266  * rely on this behaviour.
267  * KVM relies on this behaviour on x86 for modifying memory that is also
268  * accessed from a hypervisor on the same CPU if running in a VM: don't change
269  * this without also updating arch/x86/kernel/kvm.c
270  */
271 static inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
272 {
273 	int oldbit;
274
275 	asm volatile("btr %2,%1\n\t"
276 		     "sbb %0,%0"
277 		     : "=r" (oldbit), ADDR
278 		     : "Ir" (nr));
279 	return oldbit;
280 }
281
282 /* WARNING: non atomic and it can be reordered! */
283 static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
284 {
285 	int oldbit;
286
287 	asm volatile("btc %2,%1\n\t"
288 		     "sbb %0,%0"
289 		     : "=r" (oldbit), ADDR
290 		     : "Ir" (nr) : "memory");
291
292 	return oldbit;
293 }
294
295 /**
296  * test_and_change_bit - Change a bit and return its old value
297  * @nr: Bit to change
298  * @addr: Address to count from
299  *
300  * This operation is atomic and cannot be reordered.
301  * It also implies a memory barrier.
302  */
303 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
304 {
305 	GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
306 }
307
308 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
309 {
310 	return ((1UL << (nr & (BITS_PER_LONG-1))) &
311 		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
312 }
313
314 static inline int variable_test_bit(long nr, volatile const unsigned long *addr)
315 {
316 	int oldbit;
317
318 	asm volatile("bt %2,%1\n\t"
319 		     "sbb %0,%0"
320 		     : "=r" (oldbit)
321 		     : "m" (*(unsigned long *)addr), "Ir" (nr));
322
323 	return oldbit;
324 }
325
326 #if 0 /* Fool kernel-doc since it doesn't do macros yet */
327 /**
328  * test_bit - Determine whether a bit is set
329  * @nr: bit number to test
330  * @addr: Address to start counting from
331  */
332 static int test_bit(int nr, const volatile unsigned long *addr);
333 #endif
334
335 #define test_bit(nr, addr) \
336 	(__builtin_constant_p((nr)) \
337 	 ? constant_test_bit((nr), (addr)) \
338 	 : variable_test_bit((nr), (addr)))
339
340 /**
341  * __ffs - find first set bit in word
342  * @word: The word to search
343  *
344  * Undefined if no bit exists, so code should check against 0 first.
345  */
346 static inline unsigned long __ffs(unsigned long word)
347 {
348 	asm("rep; bsf %1,%0"
349 	    : "=r" (word)
350 	    : "rm" (word));
351 	return word;
352 }
353
354 /**
355  * ffz - find first zero bit in word
356  * @word: The word to search
357  *
358  * Undefined if no zero exists, so code should check against ~0UL first.
359  */
360 static inline unsigned long ffz(unsigned long word)
361 {
362 	asm("rep; bsf %1,%0"
363 	    : "=r" (word)
364 	    : "r" (~word));
365 	return word;
366 }
367
368 /*
369  * __fls: find last set bit in word
370  * @word: The word to search
371  *
372  * Undefined if no set bit exists, so code should check against 0 first.
373  */
374 static inline unsigned long __fls(unsigned long word)
375 {
376 	asm("bsr %1,%0"
377 	    : "=r" (word)
378 	    : "rm" (word));
379 	return word;
380 }
381
382 #undef ADDR
383
384 #ifdef __KERNEL__
385 /**
386  * ffs - find first set bit in word
387  * @x: the word to search
388  *
389  * This is defined the same way as the libc and compiler builtin ffs
390  * routines, therefore differs in spirit from the other bitops.
391  *
392  * ffs(value) returns 0 if value is 0 or the position of the first
393  * set bit if value is nonzero. The first (least significant) bit
394  * is at position 1.
395  */
396 static inline int ffs(int x)
397 {
398 	int r;
399
400 #ifdef CONFIG_X86_64
401 	/*
402 	 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
403 	 * dest reg is undefined if x==0, but their CPU architect says its
404 	 * value is written to set it to the same as before, except that the
405 	 * top 32 bits will be cleared.
406 	 *
407 	 * We cannot do this on 32 bits because at the very least some
408 	 * 486 CPUs did not behave this way.
409 	 */
410 	asm("bsfl %1,%0"
411 	    : "=r" (r)
412 	    : "rm" (x), "0" (-1));
413 #elif defined(CONFIG_X86_CMOV)
414 	asm("bsfl %1,%0\n\t"
415 	    "cmovzl %2,%0"
416 	    : "=&r" (r) : "rm" (x), "r" (-1));
417 #else
418 	asm("bsfl %1,%0\n\t"
419 	    "jnz 1f\n\t"
420 	    "movl $-1,%0\n"
421 	    "1:" : "=r" (r) : "rm" (x));
422 #endif
423 	return r + 1;
424 }
425
426 /**
427  * fls - find last set bit in word
428  * @x: the word to search
429  *
430  * This is defined in a similar way as the libc and compiler builtin
431  * ffs, but returns the position of the most significant set bit.
432  *
433  * fls(value) returns 0 if value is 0 or the position of the last
434  * set bit if value is nonzero. The last (most significant) bit is
435  * at position 32.
436  */
437 static inline int fls(int x)
438 {
439 	int r;
440
441 #ifdef CONFIG_X86_64
442 	/*
443 	 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
444 	 * dest reg is undefined if x==0, but their CPU architect says its
445 	 * value is written to set it to the same as before, except that the
446 	 * top 32 bits will be cleared.
447 	 *
448 	 * We cannot do this on 32 bits because at the very least some
449 	 * 486 CPUs did not behave this way.
450 	 */
451 	asm("bsrl %1,%0"
452 	    : "=r" (r)
453 	    : "rm" (x), "0" (-1));
454 #elif defined(CONFIG_X86_CMOV)
455 	asm("bsrl %1,%0\n\t"
456 	    "cmovzl %2,%0"
457 	    : "=&r" (r) : "rm" (x), "rm" (-1));
458 #else
459 	asm("bsrl %1,%0\n\t"
460 	    "jnz 1f\n\t"
461 	    "movl $-1,%0\n"
462 	    "1:" : "=r" (r) : "rm" (x));
463 #endif
464 	return r + 1;
465 }
466
467 /**
468  * fls64 - find last set bit in a 64-bit word
469  * @x: the word to search
470  *
471  * This is defined in a similar way as the libc and compiler builtin
472  * ffsll, but returns the position of the most significant set bit.
473  *
474  * fls64(value) returns 0 if value is 0 or the position of the last
475  * set bit if value is nonzero. The last (most significant) bit is
476  * at position 64.
477  */
478 #ifdef CONFIG_X86_64
479 static __always_inline int fls64(__u64 x)
480 {
481 	int bitpos = -1;
482 	/*
483 	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
484 	 * dest reg is undefined if x==0, but their CPU architect says its
485 	 * value is written to set it to the same as before.
486 	 */
487 	asm("bsrq %1,%q0"
488 	    : "+r" (bitpos)
489 	    : "rm" (x));
490 	return bitpos + 1;
491 }
492 #else
493 #include <asm-generic/bitops/fls64.h>
494 #endif
495
496 #include <asm-generic/bitops/find.h>
497
498 #include <asm-generic/bitops/sched.h>
499
500 #define ARCH_HAS_FAST_MULTIPLIER 1
501
502 #include <asm/arch_hweight.h>
503
504 #include <asm-generic/bitops/const_hweight.h>
505
506 #include <asm-generic/bitops/le.h>
507
508 #include <asm-generic/bitops/ext2-atomic-setbit.h>
509
510 #endif /* __KERNEL__ */
511 #endif /* _ASM_X86_BITOPS_H */
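The IS_IMMEDIATE()/CONST_MASK_ADDR()/CONST_MASK() trio above turns a compile-time-constant bit number into a locked byte-wide or/and/xor on byte nr>>3 with mask 1<<(nr&7), instead of a bts/btr/btc on the whole long. A small user-space sketch of that equivalence (an editor's illustration, not part of the verified sources; it assumes a little-endian machine, which is why x86 can mix the byte view and the long view):

#include <assert.h>
#include <string.h>

/* Byte-wise set, mirroring the CONST_MASK() path of set_bit(). */
static void set_bit_bytewise(long nr, unsigned char *addr)
{
	addr[nr >> 3] |= (unsigned char)(1u << (nr & 7));
}

int main(void)
{
	unsigned long words[2];
	unsigned char bytes[sizeof(words)];

	memset(words, 0, sizeof(words));
	memset(bytes, 0, sizeof(bytes));

	/* Word view: what the bts path does for bit 42. */
	words[42 / (8 * sizeof(long))] |= 1UL << (42 % (8 * sizeof(long)));
	/* Byte view: what the locked orb path does for bit 42. */
	set_bit_bytewise(42, bytes);

	/* Identical memory images on little-endian. */
	assert(memcmp(words, bytes, sizeof(words)) == 0);
	return 0;
}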
1
2 #include <linux/kernel.h>
3 #include <linux/spinlock.h>
4
5 extern void __ldv_spin_lock(spinlock_t *lock);
6 extern void __ldv_spin_unlock(spinlock_t *lock);
7 extern int __ldv_spin_trylock(spinlock_t *lock);
8 extern void __ldv_spin_unlock_wait(spinlock_t *lock);
9 extern void __ldv_spin_can_lock(spinlock_t *lock);
10 extern int __ldv_atomic_dec_and_lock(spinlock_t *lock);
11
12 extern void ldv_spin_lock__xmit_lock_of_netdev_queue(void);
13 extern void ldv_spin_unlock__xmit_lock_of_netdev_queue(void);
14 extern int ldv_spin_trylock__xmit_lock_of_netdev_queue(void);
15 extern void ldv_spin_unlock_wait__xmit_lock_of_netdev_queue(void);
16 extern int ldv_spin_is_locked__xmit_lock_of_netdev_queue(void);
17 extern int ldv_spin_can_lock__xmit_lock_of_netdev_queue(void);
18 extern int ldv_spin_is_contended__xmit_lock_of_netdev_queue(void);
19 extern int ldv_atomic_dec_and_lock__xmit_lock_of_netdev_queue(void);
20 extern void ldv_spin_lock_addr_list_lock_of_net_device(void);
21 extern void ldv_spin_unlock_addr_list_lock_of_net_device(void);
22 extern int ldv_spin_trylock_addr_list_lock_of_net_device(void);
23 extern void ldv_spin_unlock_wait_addr_list_lock_of_net_device(void);
24 extern int ldv_spin_is_locked_addr_list_lock_of_net_device(void);
25 extern int ldv_spin_can_lock_addr_list_lock_of_net_device(void);
26 extern int ldv_spin_is_contended_addr_list_lock_of_net_device(void);
27 extern int ldv_atomic_dec_and_lock_addr_list_lock_of_net_device(void);
28 extern void ldv_spin_lock_alloc_lock_of_task_struct(void);
29 extern void ldv_spin_unlock_alloc_lock_of_task_struct(void);
30 extern int ldv_spin_trylock_alloc_lock_of_task_struct(void);
31 extern void ldv_spin_unlock_wait_alloc_lock_of_task_struct(void);
32 extern int ldv_spin_is_locked_alloc_lock_of_task_struct(void);
33 extern int ldv_spin_can_lock_alloc_lock_of_task_struct(void);
34 extern int ldv_spin_is_contended_alloc_lock_of_task_struct(void);
35 extern int ldv_atomic_dec_and_lock_alloc_lock_of_task_struct(void);
36 extern void ldv_spin_lock_command_lock_of_typhoon(void);
37 extern void ldv_spin_unlock_command_lock_of_typhoon(void);
38 extern int ldv_spin_trylock_command_lock_of_typhoon(void);
39 extern void ldv_spin_unlock_wait_command_lock_of_typhoon(void);
40 extern int ldv_spin_is_locked_command_lock_of_typhoon(void);
41 extern int ldv_spin_can_lock_command_lock_of_typhoon(void);
42 extern int ldv_spin_is_contended_command_lock_of_typhoon(void);
43 extern int ldv_atomic_dec_and_lock_command_lock_of_typhoon(void);
44 extern void ldv_spin_lock_i_lock_of_inode(void);
45 extern void ldv_spin_unlock_i_lock_of_inode(void);
46 extern int ldv_spin_trylock_i_lock_of_inode(void);
47 extern void ldv_spin_unlock_wait_i_lock_of_inode(void);
48 extern int ldv_spin_is_locked_i_lock_of_inode(void);
49 extern int ldv_spin_can_lock_i_lock_of_inode(void);
50 extern int ldv_spin_is_contended_i_lock_of_inode(void);
51 extern int ldv_atomic_dec_and_lock_i_lock_of_inode(void);
52 extern void ldv_spin_lock_lock(void);
53 extern void ldv_spin_unlock_lock(void);
54 extern int ldv_spin_trylock_lock(void);
55 extern void ldv_spin_unlock_wait_lock(void);
56 extern int ldv_spin_is_locked_lock(void);
57 extern int ldv_spin_can_lock_lock(void);
58 extern int ldv_spin_is_contended_lock(void);
59 extern int ldv_atomic_dec_and_lock_lock(void);
60 extern void ldv_spin_lock_lock_of_NOT_ARG_SIGN(void);
61 extern void ldv_spin_unlock_lock_of_NOT_ARG_SIGN(void);
62 extern int ldv_spin_trylock_lock_of_NOT_ARG_SIGN(void);
63 extern void ldv_spin_unlock_wait_lock_of_NOT_ARG_SIGN(void);
64 extern int ldv_spin_is_locked_lock_of_NOT_ARG_SIGN(void);
65 extern int ldv_spin_can_lock_lock_of_NOT_ARG_SIGN(void);
66 extern int ldv_spin_is_contended_lock_of_NOT_ARG_SIGN(void);
67 extern int ldv_atomic_dec_and_lock_lock_of_NOT_ARG_SIGN(void);
68 extern void ldv_spin_lock_lru_lock_of_netns_frags(void);
69 extern void ldv_spin_unlock_lru_lock_of_netns_frags(void);
70 extern int ldv_spin_trylock_lru_lock_of_netns_frags(void);
71 extern void ldv_spin_unlock_wait_lru_lock_of_netns_frags(void);
72 extern int ldv_spin_is_locked_lru_lock_of_netns_frags(void);
73 extern int ldv_spin_can_lock_lru_lock_of_netns_frags(void);
74 extern int ldv_spin_is_contended_lru_lock_of_netns_frags(void);
75 extern int ldv_atomic_dec_and_lock_lru_lock_of_netns_frags(void);
76 extern void ldv_spin_lock_node_size_lock_of_pglist_data(void);
77 extern void ldv_spin_unlock_node_size_lock_of_pglist_data(void);
78 extern int ldv_spin_trylock_node_size_lock_of_pglist_data(void);
79 extern void ldv_spin_unlock_wait_node_size_lock_of_pglist_data(void);
80 extern int ldv_spin_is_locked_node_size_lock_of_pglist_data(void);
81 extern int ldv_spin_can_lock_node_size_lock_of_pglist_data(void);
82 extern int ldv_spin_is_contended_node_size_lock_of_pglist_data(void);
83 extern int ldv_atomic_dec_and_lock_node_size_lock_of_pglist_data(void);
84 extern void ldv_spin_lock_ptl(void);
85 extern void ldv_spin_unlock_ptl(void);
86 extern int ldv_spin_trylock_ptl(void);
87 extern void ldv_spin_unlock_wait_ptl(void);
88 extern int ldv_spin_is_locked_ptl(void);
89 extern int ldv_spin_can_lock_ptl(void);
90 extern int ldv_spin_is_contended_ptl(void);
91 extern int ldv_atomic_dec_and_lock_ptl(void);
92 extern void ldv_spin_lock_siglock_of_sighand_struct(void);
93 extern void ldv_spin_unlock_siglock_of_sighand_struct(void);
94 extern int ldv_spin_trylock_siglock_of_sighand_struct(void);
95 extern void ldv_spin_unlock_wait_siglock_of_sighand_struct(void);
96 extern int ldv_spin_is_locked_siglock_of_sighand_struct(void);
97 extern int ldv_spin_can_lock_siglock_of_sighand_struct(void);
98 extern int ldv_spin_is_contended_siglock_of_sighand_struct(void);
99 extern int ldv_atomic_dec_and_lock_siglock_of_sighand_struct(void);
100 extern void ldv_spin_lock_tx_global_lock_of_net_device(void);
101 extern void ldv_spin_unlock_tx_global_lock_of_net_device(void);
102 extern int ldv_spin_trylock_tx_global_lock_of_net_device(void);
103 extern void ldv_spin_unlock_wait_tx_global_lock_of_net_device(void);
104 extern int ldv_spin_is_locked_tx_global_lock_of_net_device(void);
105 extern int ldv_spin_can_lock_tx_global_lock_of_net_device(void);
106 extern int ldv_spin_is_contended_tx_global_lock_of_net_device(void);
107 extern int ldv_atomic_dec_and_lock_tx_global_lock_of_net_device(void);
108
109 /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
110 /*
111 	Written 2002-2004 by David Dillow <dave@thedillows.org>
112 	Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
113 	Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
114
115 	This software may be used and distributed according to the terms of
116 	the GNU General Public License (GPL), incorporated herein by reference.
117 	Drivers based on or derived from this code fall under the GPL and must
118 	retain the authorship, copyright and license notice. This file is not
119 	a complete program and may only be used when the entire operating
120 	system is licensed under the GPL.
121
122 	This software is available on a public web site. It may enable
123 	cryptographic capabilities of the 3Com hardware, and may be
124 	exported from the United States under License Exception "TSU"
125 	pursuant to 15 C.F.R. Section 740.13(e).
126
127 	This work was funded by the National Library of Medicine under
128 	the Department of Energy project number 0274DD06D1 and NLM project
129 	number Y1-LM-2015-01.
130
131 	This driver is designed for the 3Com 3CR990 Family of cards with the
132 	3XP Processor. It has been tested on x86 and sparc64.
133
134 	KNOWN ISSUES:
135 	*) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
136 		issue. Hopefully 3Com will fix it.
137 	*) Waiting for a command response takes 8ms due to non-preemptable
138 		polling. Only significant for getting stats and creating
139 		SAs, but an ugly wart nevertheless.
140
141 	TODO:
142 	*) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
143 	*) Add more support for ethtool (especially for NIC stats)
144 	*) Allow disabling of RX checksum offloading
145 	*) Fix MAC changing to work while the interface is up
146 		(Need to put commands on the TX ring, which changes
147 		the locking)
148 	*) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
149 		http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
150 */
151
152 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
153  * Setting to > 1518 effectively disables this feature.
154  */
155 static int rx_copybreak = 200;
156
157 /* Should we use MMIO or Port IO?
158  * 0: Port IO
159  * 1: MMIO
160  * 2: Try MMIO, fallback to Port IO
161  */
162 static unsigned int use_mmio = 2;
163
164 /* end user-configurable values */
165
166 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
167  */
168 static const int multicast_filter_limit = 32;
169
170 /* Operational parameters that are set at compile time. */
171
172 /* Keep the ring sizes a power of two for compile efficiency.
173  * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
174  * Making the Tx ring too large decreases the effectiveness of channel
175  * bonding and packet priority.
176  * There are no ill effects from too-large receive rings.
177  *
178  * We don't currently use the Hi Tx ring so, don't make it very big.
179  *
180  * Beware that if we start using the Hi Tx ring, we will need to change
181  * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
182  */
183 #define TXHI_ENTRIES 2
184 #define TXLO_ENTRIES 128
185 #define RX_ENTRIES 32
186 #define COMMAND_ENTRIES 16
187 #define RESPONSE_ENTRIES 32
188
189 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc))
190 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc))
191
192 /* The 3XP will preload and remove 64 entries from the free buffer
193  * list, and we need one entry to keep the ring from wrapping, so
194  * to keep this a power of two, we use 128 entries.
195  */
196 #define RXFREE_ENTRIES 128
197 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1)
198
199 /* Operational parameters that usually are not changed. */
200
201 /* Time in jiffies before concluding the transmitter is hung. */
202 #define TX_TIMEOUT (2*HZ)
203
204 #define PKT_BUF_SZ 1536
205 #define FIRMWARE_NAME "3com/typhoon.bin"
206
207 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt
208
209 #include <linux/module.h>
210 #include <linux/kernel.h>
211 #include <linux/sched.h>
212 #include <linux/string.h>
213 #include <linux/timer.h>
214 #include <linux/errno.h>
215 #include <linux/ioport.h>
216 #include <linux/interrupt.h>
217 #include <linux/pci.h>
218 #include <linux/netdevice.h>
219 #include <linux/etherdevice.h>
220 #include <linux/skbuff.h>
221 #include <linux/mm.h>
222 #include <linux/init.h>
223 #include <linux/delay.h>
224 #include <linux/ethtool.h>
225 #include <linux/if_vlan.h>
226 #include <linux/crc32.h>
227 #include <linux/bitops.h>
228 #include <asm/processor.h>
229 #include <asm/io.h>
230 #include <asm/uaccess.h>
231 #include <linux/in6.h>
232 #include <linux/dma-mapping.h>
233 #include <linux/firmware.h>
234
235 #include "typhoon.h"
236
237 MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
238 MODULE_VERSION("1.0");
239 MODULE_LICENSE("GPL");
240 MODULE_FIRMWARE(FIRMWARE_NAME);
241 MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
242 MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
243 		 "the buffer given back to the NIC. Default "
244 		 "is 200.");
245 MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
246 		 "Default is to try MMIO and fallback to PIO.");
247 module_param(rx_copybreak, int, 0);
248 module_param(use_mmio, int, 0);
249
250 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
251 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
252 #undef NETIF_F_TSO
253 #endif
254
255 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
256 #error TX ring too small!
257 #endif
258
259 struct typhoon_card_info {
260 	const char *name;
261 	const int capabilities;
262 };
263
264 #define TYPHOON_CRYPTO_NONE 0x00
265 #define TYPHOON_CRYPTO_DES 0x01
266 #define TYPHOON_CRYPTO_3DES 0x02
267 #define TYPHOON_CRYPTO_VARIABLE 0x04
268 #define TYPHOON_FIBER 0x08
269 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10
270
271 enum typhoon_cards {
272 	TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
273 	TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
274 	TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
275 	TYPHOON_FXM,
276 };
277
278 /* directly indexed by enum typhoon_cards, above */
279 static struct typhoon_card_info typhoon_card_info[] = {
280 	{ "3Com Typhoon (3C990-TX)",
281 		TYPHOON_CRYPTO_NONE},
282 	{ "3Com Typhoon (3CR990-TX-95)",
283 		TYPHOON_CRYPTO_DES},
284 	{ "3Com Typhoon (3CR990-TX-97)",
285 		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
286 	{ "3Com Typhoon (3C990SVR)",
287 		TYPHOON_CRYPTO_NONE},
288 	{ "3Com Typhoon (3CR990SVR95)",
289 		TYPHOON_CRYPTO_DES},
290 	{ "3Com Typhoon (3CR990SVR97)",
291 		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
292 	{ "3Com Typhoon2 (3C990B-TX-M)",
293 		TYPHOON_CRYPTO_VARIABLE},
294 	{ "3Com Typhoon2 (3C990BSVR)",
295 		TYPHOON_CRYPTO_VARIABLE},
296 	{ "3Com Typhoon (3CR990-FX-95)",
297 		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
298 	{ "3Com Typhoon (3CR990-FX-97)",
299 		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
300 	{ "3Com Typhoon (3CR990-FX-95 Server)",
301 		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
302 	{ "3Com Typhoon (3CR990-FX-97 Server)",
303 		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
304 	{ "3Com Typhoon2 (3C990B-FX-97)",
305 		TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
306 };
307
308 /* Notes on the new subsystem numbering scheme:
309  * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
310  * bit 4 indicates if this card has secured firmware (we don't support it)
311  * bit 8 indicates if this is a (0) copper or (1) fiber card
312  * bits 12-16 indicate card type: (0) client and (1) server
313  */
314 static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
315 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
316 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
317 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
318 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
319 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
320 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
321 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
322 	  PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
323 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
324 	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
325 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
326 	  PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
327 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
328 	  PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
329 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
330 	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
331 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
332 	  PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
333 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
334 	  PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
335 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
336 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
337 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
338 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
339 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
340 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
341 	{ 0, }
342 };
343 MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
344
345 /* Define the shared memory area
346  * Align everything the 3XP will normally be using.
347  * We'll need to move/align txHi if we start using that ring.
348  */
349 #define __3xp_aligned ____cacheline_aligned
350 struct typhoon_shared {
351 	struct typhoon_interface iface;
352 	struct typhoon_indexes indexes __3xp_aligned;
353 	struct tx_desc txLo[TXLO_ENTRIES] __3xp_aligned;
354 	struct rx_desc rxLo[RX_ENTRIES] __3xp_aligned;
355 	struct rx_desc rxHi[RX_ENTRIES] __3xp_aligned;
356 	struct cmd_desc cmd[COMMAND_ENTRIES] __3xp_aligned;
357 	struct resp_desc resp[RESPONSE_ENTRIES] __3xp_aligned;
358 	struct rx_free rxBuff[RXFREE_ENTRIES] __3xp_aligned;
359 	u32 zeroWord;
360 	struct tx_desc txHi[TXHI_ENTRIES];
361 } __packed;
362
363 struct rxbuff_ent {
364 	struct sk_buff *skb;
365 	dma_addr_t dma_addr;
366 };
367
368 struct typhoon {
369 	/* Tx cache line section */
370 	struct transmit_ring txLoRing ____cacheline_aligned;
371 	struct pci_dev * tx_pdev;
372 	void __iomem *tx_ioaddr;
373 	u32 txlo_dma_addr;
374
375 	/* Irq/Rx cache line section */
376 	void __iomem *ioaddr ____cacheline_aligned;
377 	struct typhoon_indexes *indexes;
378 	u8 awaiting_resp;
379 	u8 duplex;
380 	u8 speed;
381 	u8 card_state;
382 	struct basic_ring rxLoRing;
383 	struct pci_dev * pdev;
384 	struct net_device * dev;
385 	struct napi_struct napi;
386 	struct basic_ring rxHiRing;
387 	struct basic_ring rxBuffRing;
388 	struct rxbuff_ent rxbuffers[RXENT_ENTRIES];
389
390 	/* general section */
391 	spinlock_t command_lock ____cacheline_aligned;
392 	struct basic_ring cmdRing;
393 	struct basic_ring respRing;
394 	struct net_device_stats stats;
395 	struct net_device_stats stats_saved;
396 	struct typhoon_shared * shared;
397 	dma_addr_t shared_dma;
398 	__le16 xcvr_select;
399 	__le16 wol_events;
400 	__le32 offload;
401
402 	/* unused stuff (future use) */
403 	int capabilities;
404 	struct transmit_ring txHiRing;
405 };
406
407 enum completion_wait_values {
408 	NoWait = 0, WaitNoSleep, WaitSleep,
409 };
410
411 /* These are the values for the typhoon.card_state variable.
412  * These determine where the statistics will come from in get_stats().
413  * The sleep image does not support the statistics we need.
414  */
415 enum state_values {
416 	Sleeping = 0, Running,
417 };
418
419 /* PCI writes are not guaranteed to be posted in order, but outstanding writes
420  * cannot pass a read, so this forces current writes to post.
421  */
422 #define typhoon_post_pci_writes(x) \
423 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
424
425 /* We'll wait up to six seconds for a reset, and half a second normally.
426  */
427 #define TYPHOON_UDELAY 50
428 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ)
429 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY)
430 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY)
431
432 #if defined(NETIF_F_TSO)
433 #define skb_tso_size(x) (skb_shinfo(x)->gso_size)
434 #define TSO_NUM_DESCRIPTORS 2
435 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT
436 #else
437 #define NETIF_F_TSO 0
438 #define skb_tso_size(x) 0
439 #define TSO_NUM_DESCRIPTORS 0
440 #define TSO_OFFLOAD_ON 0
441 #endif
442
443 static inline void
444 typhoon_inc_index(u32 *index, const int count, const int num_entries)
445 {
446 	/* Increment a ring index -- we can use this for all rings except
447 	 * the Rx rings, as they use different size descriptors
448 	 * otherwise, everything is the same size as a cmd_desc
449 	 */
450 	*index += count * sizeof(struct cmd_desc);
451 	*index %= num_entries * sizeof(struct cmd_desc);
452 }
453
454 static inline void
455 typhoon_inc_cmd_index(u32 *index, const int count)
456 {
457 	typhoon_inc_index(index, count, COMMAND_ENTRIES);
458 }
459
460 static inline void
461 typhoon_inc_resp_index(u32 *index, const int count)
462 {
463 	typhoon_inc_index(index, count, RESPONSE_ENTRIES);
464 }
465
466 static inline void
467 typhoon_inc_rxfree_index(u32 *index, const int count)
468 {
469 	typhoon_inc_index(index, count, RXFREE_ENTRIES);
470 }
471
472 static inline void
473 typhoon_inc_tx_index(u32 *index, const int count)
474 {
475 	/* if we start using the Hi Tx ring, this needs updating */
476 	typhoon_inc_index(index, count, TXLO_ENTRIES);
477 }
478
479 static inline void
480 typhoon_inc_rx_index(u32 *index, const int count)
481 {
482 	/* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
483 	*index += count * sizeof(struct rx_desc);
484 	*index %= RX_ENTRIES * sizeof(struct rx_desc);
485 }
486
487 static int
488 typhoon_reset(void __iomem *ioaddr, int wait_type)
489 {
490 	int i, err = 0;
491 	int timeout;
492
493 	if(wait_type == WaitNoSleep)
494 		timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
495 	else
496 		timeout = TYPHOON_RESET_TIMEOUT_SLEEP;
497
498 	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
499 	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
500
501 	iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
502 	typhoon_post_pci_writes(ioaddr);
503 	udelay(1);
504 	iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);
505
506 	if(wait_type != NoWait) {
507 		for(i = 0; i < timeout; i++) {
508 			if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
509 			   TYPHOON_STATUS_WAITING_FOR_HOST)
510 				goto out;
511
512 			if(wait_type == WaitSleep)
513 				schedule_timeout_uninterruptible(1);
514 			else
515 				udelay(TYPHOON_UDELAY);
516 		}
517
518 		err = -ETIMEDOUT;
519 	}
520
521 out:
522 	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
523 	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
524
525 	/* The 3XP seems to need a little extra time to complete the load
526 	 * of the sleep image before we can reliably boot it. Failure to
527 	 * do this occasionally results in a hung adapter after boot in
528 	 * typhoon_init_one() while trying to read the MAC address or
529 	 * putting the card to sleep. 3Com's driver waits 5ms, but
530 	 * that seems to be overkill. However, if we can sleep, we might
531 	 * as well give it that much time. Otherwise, we'll give it 500us,
532 	 * which should be enough (I've seen it work well at 100us, but still
533 	 * saw occasional problems.)
534 	 */
535 	if(wait_type == WaitSleep)
536 		msleep(5);
537 	else
538 		udelay(500);
539 	return err;
540 }
541
542 static int
543 typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
544 {
545 	int i, err = 0;
546
547 	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
548 		if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
549 			goto out;
550 		udelay(TYPHOON_UDELAY);
551 	}
552
553 	err = -ETIMEDOUT;
554
555 out:
556 	return err;
557 }
558
559 static inline void
560 typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
561 {
562 	if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
563 		netif_carrier_off(dev);
564 	else
565 		netif_carrier_on(dev);
566 }
567
568 static inline void
569 typhoon_hello(struct typhoon *tp)
570 {
571 	struct basic_ring *ring = &tp->cmdRing;
572 	struct cmd_desc *cmd;
573
574 	/* We only get a hello request if we've not sent anything to the
575 	 * card in a long while. If the lock is held, then we're in the
576 	 * process of issuing a command, so we don't need to respond.
577 	 */
578 	if(spin_trylock(&tp->command_lock)) {
579 		cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
580 		typhoon_inc_cmd_index(&ring->lastWrite, 1);
581
582 		INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
583 		wmb();
584 		iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
585 		spin_unlock(&tp->command_lock);
586 	}
587 }
588
589 static int
590 typhoon_process_response(struct typhoon *tp, int resp_size,
591 		struct resp_desc *resp_save)
592 {
593 	struct typhoon_indexes *indexes = tp->indexes;
594 	struct resp_desc *resp;
595 	u8 *base = tp->respRing.ringBase;
596 	int count, len, wrap_len;
597 	u32 cleared;
598 	u32 ready;
599
600 	cleared = le32_to_cpu(indexes->respCleared);
601 	ready = le32_to_cpu(indexes->respReady);
602 	while(cleared != ready) {
603 		resp = (struct resp_desc *)(base + cleared);
604 		count = resp->numDesc + 1;
605 		if(resp_save && resp->seqNo) {
606 			if(count > resp_size) {
607 				resp_save->flags = TYPHOON_RESP_ERROR;
608 				goto cleanup;
609 			}
610
611 			wrap_len = 0;
612 			len = count * sizeof(*resp);
613 			if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
614 				wrap_len = cleared + len - RESPONSE_RING_SIZE;
615 				len = RESPONSE_RING_SIZE - cleared;
616 			}
617
618 			memcpy(resp_save, resp, len);
619 			if(unlikely(wrap_len)) {
620 				resp_save += len / sizeof(*resp);
621 				memcpy(resp_save, base, wrap_len);
622 			}
623
624 			resp_save = NULL;
625 		} else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
626 			typhoon_media_status(tp->dev, resp);
627 		} else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
628 			typhoon_hello(tp);
629 		} else {
630 			netdev_err(tp->dev,
631 				   "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
632 				   le16_to_cpu(resp->cmd),
633 				   resp->numDesc, resp->flags,
634 				   le16_to_cpu(resp->parm1),
635 				   le32_to_cpu(resp->parm2),
636 				   le32_to_cpu(resp->parm3));
637 		}
638
639 cleanup:
640 		typhoon_inc_resp_index(&cleared, count);
641 	}
642
643 	indexes->respCleared = cpu_to_le32(cleared);
644 	wmb();
645 	return resp_save == NULL;
646 }
647
648 static inline int
649 typhoon_num_free(int lastWrite, int lastRead, int ringSize)
650 {
651 	/* this works for all descriptors but rx_desc, as they are a
652 	 * different size than the cmd_desc -- everyone else is the same
653 	 */
654 	lastWrite /= sizeof(struct cmd_desc);
655 	lastRead /= sizeof(struct cmd_desc);
656 	return (ringSize + lastRead - lastWrite - 1) % ringSize;
657 }
658
659 static inline int
660 typhoon_num_free_cmd(struct typhoon *tp)
661 {
662 	int lastWrite = tp->cmdRing.lastWrite;
663 	int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
664
665 	return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
666 }
667
668 static inline int
669 typhoon_num_free_resp(struct typhoon *tp)
670 {
671 	int respReady = le32_to_cpu(tp->indexes->respReady);
672 	int respCleared = le32_to_cpu(tp->indexes->respCleared);
673
674 	return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
675 }
676
677 static inline int
678 typhoon_num_free_tx(struct transmit_ring *ring)
679 {
680 	/* if we start using the Hi Tx ring, this needs updating */
681 	return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
682 }
683
684 static int
685 typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
686 		int num_resp, struct resp_desc *resp)
687 {
688 	struct typhoon_indexes *indexes = tp->indexes;
689 	struct basic_ring *ring = &tp->cmdRing;
690 	struct resp_desc local_resp;
691 	int i, err = 0;
692 	int got_resp;
693 	int freeCmd, freeResp;
694 	int len, wrap_len;
695
696 	spin_lock(&tp->command_lock);
697
698 	freeCmd = typhoon_num_free_cmd(tp);
699 	freeResp = typhoon_num_free_resp(tp);
700
701 	if(freeCmd < num_cmd || freeResp < num_resp) {
702 		netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
703 			   freeCmd, num_cmd, freeResp, num_resp);
704 		err = -ENOMEM;
705 		goto out;
706 	}
707
708 	if(cmd->flags & TYPHOON_CMD_RESPOND) {
709 		/* If we're expecting a response, but the caller hasn't given
710 		 * us a place to put it, we'll provide one.
711 		 */
712 		tp->awaiting_resp = 1;
713 		if(resp == NULL) {
714 			resp = &local_resp;
715 			num_resp = 1;
716 		}
717 	}
718
719 	wrap_len = 0;
720 	len = num_cmd * sizeof(*cmd);
721 	if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
722 		wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
723 		len = COMMAND_RING_SIZE - ring->lastWrite;
724 	}
725
726 	memcpy(ring->ringBase + ring->lastWrite, cmd, len);
727 	if(unlikely(wrap_len)) {
728 		struct cmd_desc *wrap_ptr = cmd;
729 		wrap_ptr += len / sizeof(*cmd);
730 		memcpy(ring->ringBase, wrap_ptr, wrap_len);
731 	}
732
733 	typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
734
735 	/* "I feel a presence... another warrior is on the mesa."
736 	 */
737 	wmb();
738 	iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
739 	typhoon_post_pci_writes(tp->ioaddr);
740
741 	if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
742 		goto out;
743
744 	/* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
745 	 * preempt or do anything other than take interrupts. So, don't
746 	 * wait for a response unless you have to.
747 	 *
748 	 * I've thought about trying to sleep here, but we're called
749 	 * from many contexts that don't allow that. Also, given the way
750 	 * 3Com has implemented irq coalescing, we would likely timeout --
751 	 * this has been observed in real life!
752 	 *
753 	 * The big killer is we have to wait to get stats from the card,
754 	 * though we could go to a periodic refresh of those if we don't
755 	 * mind them getting somewhat stale. The rest of the waiting
756 	 * commands occur during open/close/suspend/resume, so they aren't
757 	 * time critical. Creating SAs in the future will also have to
758 	 * wait here.
759 	 */
760 	got_resp = 0;
761 	for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
762 		if(indexes->respCleared != indexes->respReady)
763 			got_resp = typhoon_process_response(tp, num_resp,
764 								resp);
765 		udelay(TYPHOON_UDELAY);
766 	}
767
768 	if(!got_resp) {
769 		err = -ETIMEDOUT;
770 		goto out;
771 	}
772
773 	/* Collect the error response even if we don't care about the
774 	 * rest of the response
775 	 */
776 	if(resp->flags & TYPHOON_RESP_ERROR)
777 		err = -EIO;
778
779 out:
780 	if(tp->awaiting_resp) {
781 		tp->awaiting_resp = 0;
782 		smp_wmb();
783
784 		/* Ugh. If a response was added to the ring between
785 		 * the call to typhoon_process_response() and the clearing
786 		 * of tp->awaiting_resp, we could have missed the interrupt
787 		 * and it could hang in the ring an indeterminate amount of
788 		 * time. So, check for it, and interrupt ourselves if this
789 		 * is the case.
790 		 */
791 		if(indexes->respCleared != indexes->respReady)
792 			iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
793 	}
794
795 	spin_unlock(&tp->command_lock);
796 	return err;
797 }
798
799 static inline void
800 typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
801 		u32 ring_dma)
802 {
803 	struct tcpopt_desc *tcpd;
804 	u32 tcpd_offset = ring_dma;
805
806 	tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
807 	tcpd_offset += txRing->lastWrite;
808 	tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
809 	typhoon_inc_tx_index(&txRing->lastWrite, 1);
810
811 	tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
812 	tcpd->numDesc = 1;
813 	tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
814 	tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
815 	tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
816 	tcpd->bytesTx = cpu_to_le32(skb->len);
817 	tcpd->status = 0;
818 }
819
820 static netdev_tx_t
821 typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
822 {
823 	struct typhoon *tp = netdev_priv(dev);
824 	struct transmit_ring *txRing;
825 	struct tx_desc *txd, *first_txd;
826 	dma_addr_t skb_dma;
827 	int numDesc;
828
829 	/* we have two rings to choose from, but we only use txLo for now
830 	 * If we start using the Hi ring as well, we'll need to update
831 	 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
832 	 * and TXHI_ENTRIES to match, as well as update the TSO code below
833 	 * to get the right DMA address
834 	 */
835 	txRing = &tp->txLoRing;
836
837 	/* We need one descriptor for each fragment of the sk_buff, plus the
838 	 * one for the ->data area of it.
839 	 *
840 	 * The docs say a maximum of 16 fragment descriptors per TCP option
841 	 * descriptor, then make a new packet descriptor and option descriptor
842 	 * for the next 16 fragments. The engineers say just an option
843 	 * descriptor is needed. I've tested up to 26 fragments with a single
844 	 * packet descriptor/option descriptor combo, so I use that for now.
845 	 *
846 	 * If problems develop with TSO, check this first.
847 	 */
848 	numDesc = skb_shinfo(skb)->nr_frags + 1;
849 	if (skb_is_gso(skb))
850 		numDesc++;
851
852 	/* When checking for free space in the ring, we need to also
853 	 * account for the initial Tx descriptor, and we always must leave
854 	 * at least one descriptor unused in the ring so that it doesn't
855 	 * wrap and look empty.
856 	 *
857 	 * The only time we should loop here is when we hit the race
858 	 * between marking the queue awake and updating the cleared index.
859 	 * Just loop and it will appear. This comes from the acenic driver.
860 	 */
861 	while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
862 		smp_rmb();
863
864 	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
865 	typhoon_inc_tx_index(&txRing->lastWrite, 1);
866
867 	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
868 	first_txd->numDesc = 0;
869 	first_txd->len = 0;
870 	first_txd->tx_addr = (u64)((unsigned long) skb);
871 	first_txd->processFlags = 0;
872
873 	if(skb->ip_summed == CHECKSUM_PARTIAL) {
874 		/* The 3XP will figure out if this is UDP/TCP */
875 		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
876 		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
877 		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
878 	}
879
880 	if(vlan_tx_tag_present(skb)) {
881 		first_txd->processFlags |=
882 		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
883 		first_txd->processFlags |=
884 		    cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
885 				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
886 	}
887
888 	if (skb_is_gso(skb)) {
889 		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
890 		first_txd->numDesc++;
891
892 		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
893 	}
894
895 	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
896 	typhoon_inc_tx_index(&txRing->lastWrite, 1);
897
898 	/* No need to worry about padding packet -- the firmware pads
899 	 * it with zeros to ETH_ZLEN for us.
900 	 */
901 	if(skb_shinfo(skb)->nr_frags == 0) {
902 		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
903 				       PCI_DMA_TODEVICE);
904 		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
905 		txd->len = cpu_to_le16(skb->len);
906 		txd->frag.addr = cpu_to_le32(skb_dma);
907 		txd->frag.addrHi = 0;
908 		first_txd->numDesc++;
909 	} else {
910 		int i, len;
911
912 		len = skb_headlen(skb);
913 		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
914 					 PCI_DMA_TODEVICE);
915 		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
916 		txd->len = cpu_to_le16(len);
917 		txd->frag.addr = cpu_to_le32(skb_dma);
918 		txd->frag.addrHi = 0;
919 		first_txd->numDesc++;
920
921 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
922 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
923 			void *frag_addr;
924
925 			txd = (struct tx_desc *) (txRing->ringBase +
926 						txRing->lastWrite);
927 			typhoon_inc_tx_index(&txRing->lastWrite, 1);
928
929 			len = skb_frag_size(frag);
930 			frag_addr = skb_frag_address(frag);
931 			skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
932 					 PCI_DMA_TODEVICE);
933 			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
934 			txd->len = cpu_to_le16(len);
935 			txd->frag.addr = cpu_to_le32(skb_dma);
936 			txd->frag.addrHi = 0;
937 			first_txd->numDesc++;
938 		}
939 	}
940
941 	/* Kick the 3XP
942 	 */
943 	wmb();
944 	iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);
945
946 	/* If we don't have room to put the worst case packet on the
947 	 * queue, then we must stop the queue. We need 2 extra
948 	 * descriptors -- one to prevent ring wrap, and one for the
949 	 * Tx header.
950 	 */
951 	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;
952
953 	if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
954 		netif_stop_queue(dev);
955
956 		/* A Tx complete IRQ could have gotten between, making
957 		 * the ring free again. Only need to recheck here, since
958 		 * Tx is serialized.
959 		 */
960 		if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
961 			netif_wake_queue(dev);
962 	}
963
964 	return NETDEV_TX_OK;
965 }
966
967 static void
968 typhoon_set_rx_mode(struct net_device *dev)
969 {
970 	struct typhoon *tp = netdev_priv(dev);
971 	struct cmd_desc xp_cmd;
972 	u32 mc_filter[2];
973 	__le16 filter;
974
975 	filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
976 	if(dev->flags & IFF_PROMISC) {
977 		filter |= TYPHOON_RX_FILTER_PROMISCOUS;
978 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
979 		   (dev->flags & IFF_ALLMULTI)) {
980 		/* Too many to match, or accept all multicasts. */
981 		filter |= TYPHOON_RX_FILTER_ALL_MCAST;
982 	} else if (!netdev_mc_empty(dev)) {
983 		struct netdev_hw_addr *ha;
984
985 		memset(mc_filter, 0, sizeof(mc_filter));
986 		netdev_for_each_mc_addr(ha, dev) {
987 			int bit = ether_crc(ETH_ALEN, ha->addr) & 0x3f;
988 			mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
989 		}
990
991 		INIT_COMMAND_NO_RESPONSE(&xp_cmd,
992 					 TYPHOON_CMD_SET_MULTICAST_HASH);
993 		xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
994 		xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
995 		xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
996 		typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
997
998 		filter |= TYPHOON_RX_FILTER_MCAST_HASH;
999 	}
1000
1001 	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
1002 	xp_cmd.parm1 = filter;
1003 	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1004 }
1005
1006 static int
1007 typhoon_do_get_stats(struct typhoon *tp)
1008 {
1009 	struct net_device_stats *stats = &tp->stats;
1010 	struct net_device_stats *saved = &tp->stats_saved;
1011 	struct cmd_desc xp_cmd;
1012 	struct resp_desc xp_resp[7];
1013 	struct stats_resp *s = (struct stats_resp *) xp_resp;
1014 	int err;
1015
1016 	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
1017 	err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
1018 	if(err < 0)
1019 		return err;
1020
1021 	/* 3Com's Linux driver uses txMultipleCollisions as its
1022 	 * collisions value, but there is some other collision info as well...
1023 	 *
1024 	 * The extra status reported would be a good candidate for
1025 	 * ethtool_ops->get_{strings,stats}()
1026 	 */
1027 	stats->tx_packets = le32_to_cpu(s->txPackets) +
1028 			saved->tx_packets;
1029 	stats->tx_bytes = le64_to_cpu(s->txBytes) +
1030 			saved->tx_bytes;
1031 	stats->tx_errors = le32_to_cpu(s->txCarrierLost) +
1032 			saved->tx_errors;
1033 	stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost) +
1034 			saved->tx_carrier_errors;
1035 	stats->collisions = le32_to_cpu(s->txMultipleCollisions) +
1036 			saved->collisions;
1037 	stats->rx_packets = le32_to_cpu(s->rxPacketsGood) +
1038 			saved->rx_packets;
1039 	stats->rx_bytes = le64_to_cpu(s->rxBytesGood) +
1040 			saved->rx_bytes;
1041 	stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns) +
1042 			saved->rx_fifo_errors;
1043 	stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
1044 			le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors) +
1045 			saved->rx_errors;
1046 	stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors) +
1047 			saved->rx_crc_errors;
1048 	stats->rx_length_errors = le32_to_cpu(s->rxOversized) +
1049 			saved->rx_length_errors;
1050 	tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
1051 			SPEED_100 : SPEED_10;
1052 	tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
1053 DUPLEX_FULL : DUPLEX_HALF; 1054 1055 return 0; 1056 } 1057 1058 static struct net_device_stats * 1059 typhoon_get_stats(struct net_device *dev) 1060 { 1061 struct typhoon *tp = netdev_priv(dev); 1062 struct net_device_stats *stats = &tp->stats; 1063 struct net_device_stats *saved = &tp->stats_saved; 1064 1065 smp_rmb(); 1066 if(tp->card_state == Sleeping) 1067 return saved; 1068 1069 if(typhoon_do_get_stats(tp) < 0) { 1070 netdev_err(dev, "error getting stats\n"); 1071 return saved; 1072 } 1073 1074 return stats; 1075 } 1076 1077 static void 1078 typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 1079 { 1080 struct typhoon *tp = netdev_priv(dev); 1081 struct pci_dev *pci_dev = tp->pdev; 1082 struct cmd_desc xp_cmd; 1083 struct resp_desc xp_resp[3]; 1084 1085 smp_rmb(); 1086 if(tp->card_state == Sleeping) { 1087 strlcpy(info->fw_version, "Sleep image", 1088 sizeof(info->fw_version)); 1089 } else { 1090 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS); 1091 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) { 1092 strlcpy(info->fw_version, "Unknown runtime", 1093 sizeof(info->fw_version)); 1094 } else { 1095 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2); 1096 snprintf(info->fw_version, sizeof(info->fw_version), 1097 "%02x.%03x.%03x", sleep_ver >> 24, 1098 (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff); 1099 } 1100 } 1101 1102 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 1103 strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info)); 1104 } 1105 1106 static int 1107 typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1108 { 1109 struct typhoon *tp = netdev_priv(dev); 1110 1111 cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 1112 SUPPORTED_Autoneg; 1113 1114 switch (tp->xcvr_select) { 1115 case TYPHOON_XCVR_10HALF: 1116 cmd->advertising = ADVERTISED_10baseT_Half; 1117 break; 1118 case TYPHOON_XCVR_10FULL: 1119 cmd->advertising = ADVERTISED_10baseT_Full; 1120 break; 1121 case TYPHOON_XCVR_100HALF: 1122 cmd->advertising = ADVERTISED_100baseT_Half; 1123 break; 1124 case TYPHOON_XCVR_100FULL: 1125 cmd->advertising = ADVERTISED_100baseT_Full; 1126 break; 1127 case TYPHOON_XCVR_AUTONEG: 1128 cmd->advertising = ADVERTISED_10baseT_Half | 1129 ADVERTISED_10baseT_Full | 1130 ADVERTISED_100baseT_Half | 1131 ADVERTISED_100baseT_Full | 1132 ADVERTISED_Autoneg; 1133 break; 1134 } 1135 1136 if(tp->capabilities & TYPHOON_FIBER) { 1137 cmd->supported |= SUPPORTED_FIBRE; 1138 cmd->advertising |= ADVERTISED_FIBRE; 1139 cmd->port = PORT_FIBRE; 1140 } else { 1141 cmd->supported |= SUPPORTED_10baseT_Half | 1142 SUPPORTED_10baseT_Full | 1143 SUPPORTED_TP; 1144 cmd->advertising |= ADVERTISED_TP; 1145 cmd->port = PORT_TP; 1146 } 1147 1148 /* need to get stats to make these link speed/duplex valid */ 1149 typhoon_do_get_stats(tp); 1150 ethtool_cmd_speed_set(cmd, tp->speed); 1151 cmd->duplex = tp->duplex; 1152 cmd->phy_address = 0; 1153 cmd->transceiver = XCVR_INTERNAL; 1154 if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG) 1155 cmd->autoneg = AUTONEG_ENABLE; 1156 else 1157 cmd->autoneg = AUTONEG_DISABLE; 1158 cmd->maxtxpkt = 1; 1159 cmd->maxrxpkt = 1; 1160 1161 return 0; 1162 } 1163 1164 static int 1165 typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1166 { 1167 struct typhoon *tp = netdev_priv(dev); 1168 u32 speed = ethtool_cmd_speed(cmd); 1169 struct cmd_desc xp_cmd; 1170 __le16 xcvr; 1171 int err; 1172 1173 err = -EINVAL; 1174 if (cmd->autoneg == AUTONEG_ENABLE) { 1175 xcvr = 
TYPHOON_XCVR_AUTONEG; 1176 } else { 1177 if (cmd->duplex == DUPLEX_HALF) { 1178 if (speed == SPEED_10) 1179 xcvr = TYPHOON_XCVR_10HALF; 1180 else if (speed == SPEED_100) 1181 xcvr = TYPHOON_XCVR_100HALF; 1182 else 1183 goto out; 1184 } else if (cmd->duplex == DUPLEX_FULL) { 1185 if (speed == SPEED_10) 1186 xcvr = TYPHOON_XCVR_10FULL; 1187 else if (speed == SPEED_100) 1188 xcvr = TYPHOON_XCVR_100FULL; 1189 else 1190 goto out; 1191 } else 1192 goto out; 1193 } 1194 1195 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT); 1196 xp_cmd.parm1 = xcvr; 1197 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); 1198 if(err < 0) 1199 goto out; 1200 1201 tp->xcvr_select = xcvr; 1202 if(cmd->autoneg == AUTONEG_ENABLE) { 1203 tp->speed = 0xff; /* invalid */ 1204 tp->duplex = 0xff; /* invalid */ 1205 } else { 1206 tp->speed = speed; 1207 tp->duplex = cmd->duplex; 1208 } 1209 1210 out: 1211 return err; 1212 } 1213 1214 static void 1215 typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 1216 { 1217 struct typhoon *tp = netdev_priv(dev); 1218 1219 wol->supported = WAKE_PHY | WAKE_MAGIC; 1220 wol->wolopts = 0; 1221 if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT) 1222 wol->wolopts |= WAKE_PHY; 1223 if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) 1224 wol->wolopts |= WAKE_MAGIC; 1225 memset(&wol->sopass, 0, sizeof(wol->sopass)); 1226 } 1227 1228 static int 1229 typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 1230 { 1231 struct typhoon *tp = netdev_priv(dev); 1232 1233 if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC)) 1234 return -EINVAL; 1235 1236 tp->wol_events = 0; 1237 if(wol->wolopts & WAKE_PHY) 1238 tp->wol_events |= TYPHOON_WAKE_LINK_EVENT; 1239 if(wol->wolopts & WAKE_MAGIC) 1240 tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT; 1241 1242 return 0; 1243 } 1244 1245 static void 1246 typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) 1247 { 1248 ering->rx_max_pending = RXENT_ENTRIES; 1249 ering->tx_max_pending = TXLO_ENTRIES - 1; 1250 1251 ering->rx_pending = RXENT_ENTRIES; 1252 ering->tx_pending = TXLO_ENTRIES - 1; 1253 } 1254 1255 static const struct ethtool_ops typhoon_ethtool_ops = { 1256 .get_settings = typhoon_get_settings, 1257 .set_settings = typhoon_set_settings, 1258 .get_drvinfo = typhoon_get_drvinfo, 1259 .get_wol = typhoon_get_wol, 1260 .set_wol = typhoon_set_wol, 1261 .get_link = ethtool_op_get_link, 1262 .get_ringparam = typhoon_get_ringparam, 1263 }; 1264 1265 static int 1266 typhoon_wait_interrupt(void __iomem *ioaddr) 1267 { 1268 int i, err = 0; 1269 1270 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) { 1271 if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) & 1272 TYPHOON_INTR_BOOTCMD) 1273 goto out; 1274 udelay(TYPHOON_UDELAY); 1275 } 1276 1277 err = -ETIMEDOUT; 1278 1279 out: 1280 iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS); 1281 return err; 1282 } 1283 1284 #define shared_offset(x) offsetof(struct typhoon_shared, x) 1285 1286 static void 1287 typhoon_init_interface(struct typhoon *tp) 1288 { 1289 struct typhoon_interface *iface = &tp->shared->iface; 1290 dma_addr_t shared_dma; 1291 1292 memset(tp->shared, 0, sizeof(struct typhoon_shared)); 1293 1294 /* The *Hi members of iface are all init'd to zero by the memset(). 
1295 */ 1296 shared_dma = tp->shared_dma + shared_offset(indexes); 1297 iface->ringIndex = cpu_to_le32(shared_dma); 1298 1299 shared_dma = tp->shared_dma + shared_offset(txLo); 1300 iface->txLoAddr = cpu_to_le32(shared_dma); 1301 iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc)); 1302 1303 shared_dma = tp->shared_dma + shared_offset(txHi); 1304 iface->txHiAddr = cpu_to_le32(shared_dma); 1305 iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc)); 1306 1307 shared_dma = tp->shared_dma + shared_offset(rxBuff); 1308 iface->rxBuffAddr = cpu_to_le32(shared_dma); 1309 iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES * 1310 sizeof(struct rx_free)); 1311 1312 shared_dma = tp->shared_dma + shared_offset(rxLo); 1313 iface->rxLoAddr = cpu_to_le32(shared_dma); 1314 iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc)); 1315 1316 shared_dma = tp->shared_dma + shared_offset(rxHi); 1317 iface->rxHiAddr = cpu_to_le32(shared_dma); 1318 iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc)); 1319 1320 shared_dma = tp->shared_dma + shared_offset(cmd); 1321 iface->cmdAddr = cpu_to_le32(shared_dma); 1322 iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE); 1323 1324 shared_dma = tp->shared_dma + shared_offset(resp); 1325 iface->respAddr = cpu_to_le32(shared_dma); 1326 iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE); 1327 1328 shared_dma = tp->shared_dma + shared_offset(zeroWord); 1329 iface->zeroAddr = cpu_to_le32(shared_dma); 1330 1331 tp->indexes = &tp->shared->indexes; 1332 tp->txLoRing.ringBase = (u8 *) tp->shared->txLo; 1333 tp->txHiRing.ringBase = (u8 *) tp->shared->txHi; 1334 tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo; 1335 tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi; 1336 tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff; 1337 tp->cmdRing.ringBase = (u8 *) tp->shared->cmd; 1338 tp->respRing.ringBase = (u8 *) tp->shared->resp; 1339 1340 tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY; 1341 tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY; 1342 1343 tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr); 1344 tp->card_state = Sleeping; 1345 1346 tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM; 1347 tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON; 1348 tp->offload |= TYPHOON_OFFLOAD_VLAN; 1349 1350 spin_lock_init(&tp->command_lock); 1351 1352 /* Force the writes to the shared memory area out before continuing. 
*/ 1353 wmb(); 1354 } 1355 1356 static void 1357 typhoon_init_rings(struct typhoon *tp) 1358 { 1359 memset(tp->indexes, 0, sizeof(struct typhoon_indexes)); 1360 1361 tp->txLoRing.lastWrite = 0; 1362 tp->txHiRing.lastWrite = 0; 1363 tp->rxLoRing.lastWrite = 0; 1364 tp->rxHiRing.lastWrite = 0; 1365 tp->rxBuffRing.lastWrite = 0; 1366 tp->cmdRing.lastWrite = 0; 1367 tp->respRing.lastWrite = 0; 1368 1369 tp->txLoRing.lastRead = 0; 1370 tp->txHiRing.lastRead = 0; 1371 } 1372 1373 static const struct firmware *typhoon_fw; 1374 1375 static int 1376 typhoon_request_firmware(struct typhoon *tp) 1377 { 1378 const struct typhoon_file_header *fHdr; 1379 const struct typhoon_section_header *sHdr; 1380 const u8 *image_data; 1381 u32 numSections; 1382 u32 section_len; 1383 u32 remaining; 1384 int err; 1385 1386 if (typhoon_fw) 1387 return 0; 1388 1389 err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev); 1390 if (err) { 1391 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n", 1392 FIRMWARE_NAME); 1393 return err; 1394 } 1395 1396 image_data = (u8 *) typhoon_fw->data; 1397 remaining = typhoon_fw->size; 1398 if (remaining < sizeof(struct typhoon_file_header)) 1399 goto invalid_fw; 1400 1401 fHdr = (struct typhoon_file_header *) image_data; 1402 if (memcmp(fHdr->tag, "TYPHOON", 8)) 1403 goto invalid_fw; 1404 1405 numSections = le32_to_cpu(fHdr->numSections); 1406 image_data += sizeof(struct typhoon_file_header); 1407 remaining -= sizeof(struct typhoon_file_header); 1408 1409 while (numSections--) { 1410 if (remaining < sizeof(struct typhoon_section_header)) 1411 goto invalid_fw; 1412 1413 sHdr = (struct typhoon_section_header *) image_data; 1414 image_data += sizeof(struct typhoon_section_header); 1415 section_len = le32_to_cpu(sHdr->len); 1416 1417 if (remaining < section_len) 1418 goto invalid_fw; 1419 1420 image_data += section_len; 1421 remaining -= section_len; 1422 } 1423 1424 return 0; 1425 1426 invalid_fw: 1427 netdev_err(tp->dev, "Invalid firmware image\n"); 1428 release_firmware(typhoon_fw); 1429 typhoon_fw = NULL; 1430 return -EINVAL; 1431 } 1432 1433 static int 1434 typhoon_download_firmware(struct typhoon *tp) 1435 { 1436 void __iomem *ioaddr = tp->ioaddr; 1437 struct pci_dev *pdev = tp->pdev; 1438 const struct typhoon_file_header *fHdr; 1439 const struct typhoon_section_header *sHdr; 1440 const u8 *image_data; 1441 void *dpage; 1442 dma_addr_t dpage_dma; 1443 __sum16 csum; 1444 u32 irqEnabled; 1445 u32 irqMasked; 1446 u32 numSections; 1447 u32 section_len; 1448 u32 len; 1449 u32 load_addr; 1450 u32 hmac; 1451 int i; 1452 int err; 1453 1454 image_data = (u8 *) typhoon_fw->data; 1455 fHdr = (struct typhoon_file_header *) image_data; 1456 1457 /* Cannot just map the firmware image using pci_map_single() as 1458 * the firmware is vmalloc()'d and may not be physically contiguous, 1459 * so we allocate some consistent memory to copy the sections into. 
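 *
 * In outline, the download loop below bounces every section through this
 * single page -- a condensed sketch of the code that follows, not new
 * logic:
 *
 *	dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
 *	while (section_len) {
 *		len = min_t(u32, section_len, PAGE_SIZE);
 *		... copy len bytes into dpage, write len, a checksum, and
 *		... dpage_dma to the boot registers, then issue
 *		... TYPHOON_BOOTCMD_SEG_AVAILABLE and advance by len
 *	}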
1460 */ 1461 err = -ENOMEM; 1462 dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma); 1463 if(!dpage) { 1464 netdev_err(tp->dev, "no DMA mem for firmware\n"); 1465 goto err_out; 1466 } 1467 1468 irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE); 1469 iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD, 1470 ioaddr + TYPHOON_REG_INTR_ENABLE); 1471 irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK); 1472 iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD, 1473 ioaddr + TYPHOON_REG_INTR_MASK); 1474 1475 err = -ETIMEDOUT; 1476 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) { 1477 netdev_err(tp->dev, "card ready timeout\n"); 1478 goto err_out_irq; 1479 } 1480 1481 numSections = le32_to_cpu(fHdr->numSections); 1482 load_addr = le32_to_cpu(fHdr->startAddr); 1483 1484 iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS); 1485 iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR); 1486 hmac = le32_to_cpu(fHdr->hmacDigest[0]); 1487 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0); 1488 hmac = le32_to_cpu(fHdr->hmacDigest[1]); 1489 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1); 1490 hmac = le32_to_cpu(fHdr->hmacDigest[2]); 1491 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2); 1492 hmac = le32_to_cpu(fHdr->hmacDigest[3]); 1493 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3); 1494 hmac = le32_to_cpu(fHdr->hmacDigest[4]); 1495 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4); 1496 typhoon_post_pci_writes(ioaddr); 1497 iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND); 1498 1499 image_data += sizeof(struct typhoon_file_header); 1500 1501 /* The ioread32() in typhoon_wait_interrupt() will force the 1502 * last write to the command register to post, so 1503 * we don't need a typhoon_post_pci_writes() after it. 1504 */ 1505 for(i = 0; i < numSections; i++) { 1506 sHdr = (struct typhoon_section_header *) image_data; 1507 image_data += sizeof(struct typhoon_section_header); 1508 load_addr = le32_to_cpu(sHdr->startAddr); 1509 section_len = le32_to_cpu(sHdr->len); 1510 1511 while(section_len) { 1512 len = min_t(u32, section_len, PAGE_SIZE); 1513 1514 if(typhoon_wait_interrupt(ioaddr) < 0 || 1515 ioread32(ioaddr + TYPHOON_REG_STATUS) != 1516 TYPHOON_STATUS_WAITING_FOR_SEGMENT) { 1517 netdev_err(tp->dev, "segment ready timeout\n"); 1518 goto err_out_irq; 1519 } 1520 1521 /* Do a pseudo IPv4 checksum on the data -- we first 1522 * need to convert each u16 to cpu order before 1523 * summing. Fortunately, due to the properties of 1524 * the checksum, we can do this once, at the end. 
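 *
 * csum_partial_copy_nocheck() copies each chunk into dpage while
 * computing the 32-bit one's-complement partial sum, and csum_fold()
 * collapses that sum to 16 bits by adding the high and low halves and
 * complementing. As a worked illustration (not a value from this
 * driver): folding 0x12345 gives 0x2345 + 0x1 = 0x2346, so the checksum
 * is the complement 0xdcb9. And since byte-swapping every u16 addend
 * simply byte-swaps a one's-complement sum, the single le16_to_cpu() on
 * the folded result below is equivalent to converting each u16 first.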
1525 */ 1526 csum = csum_fold(csum_partial_copy_nocheck(image_data, 1527 dpage, len, 1528 0)); 1529 1530 iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH); 1531 iowrite32(le16_to_cpu((__force __le16)csum), 1532 ioaddr + TYPHOON_REG_BOOT_CHECKSUM); 1533 iowrite32(load_addr, 1534 ioaddr + TYPHOON_REG_BOOT_DEST_ADDR); 1535 iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI); 1536 iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO); 1537 typhoon_post_pci_writes(ioaddr); 1538 iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE, 1539 ioaddr + TYPHOON_REG_COMMAND); 1540 1541 image_data += len; 1542 load_addr += len; 1543 section_len -= len; 1544 } 1545 } 1546 1547 if(typhoon_wait_interrupt(ioaddr) < 0 || 1548 ioread32(ioaddr + TYPHOON_REG_STATUS) != 1549 TYPHOON_STATUS_WAITING_FOR_SEGMENT) { 1550 netdev_err(tp->dev, "final segment ready timeout\n"); 1551 goto err_out_irq; 1552 } 1553 1554 iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND); 1555 1556 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) { 1557 netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n", 1558 ioread32(ioaddr + TYPHOON_REG_STATUS)); 1559 goto err_out_irq; 1560 } 1561 1562 err = 0; 1563 1564 err_out_irq: 1565 iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK); 1566 iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE); 1567 1568 pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma); 1569 1570 err_out: 1571 return err; 1572 } 1573 1574 static int 1575 typhoon_boot_3XP(struct typhoon *tp, u32 initial_status) 1576 { 1577 void __iomem *ioaddr = tp->ioaddr; 1578 1579 if(typhoon_wait_status(ioaddr, initial_status) < 0) { 1580 netdev_err(tp->dev, "boot ready timeout\n"); 1581 goto out_timeout; 1582 } 1583 1584 iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI); 1585 iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO); 1586 typhoon_post_pci_writes(ioaddr); 1587 iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD, 1588 ioaddr + TYPHOON_REG_COMMAND); 1589 1590 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) { 1591 netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n", 1592 ioread32(ioaddr + TYPHOON_REG_STATUS)); 1593 goto out_timeout; 1594 } 1595 1596 /* Clear the Transmit and Command ready registers 1597 */ 1598 iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY); 1599 iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY); 1600 iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY); 1601 typhoon_post_pci_writes(ioaddr); 1602 iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND); 1603 1604 return 0; 1605 1606 out_timeout: 1607 return -ETIMEDOUT; 1608 } 1609 1610 static u32 1611 typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing, 1612 volatile __le32 * index) 1613 { 1614 u32 lastRead = txRing->lastRead; 1615 struct tx_desc *tx; 1616 dma_addr_t skb_dma; 1617 int dma_len; 1618 int type; 1619 1620 while(lastRead != le32_to_cpu(*index)) { 1621 tx = (struct tx_desc *) (txRing->ringBase + lastRead); 1622 type = tx->flags & TYPHOON_TYPE_MASK; 1623 1624 if(type == TYPHOON_TX_DESC) { 1625 /* This tx_desc describes a packet. 1626 */ 1627 unsigned long ptr = tx->tx_addr; 1628 struct sk_buff *skb = (struct sk_buff *) ptr; 1629 dev_kfree_skb_irq(skb); 1630 } else if(type == TYPHOON_FRAG_DESC) { 1631 /* This tx_desc describes a memory mapping. Free it. 
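 *
 * (Both descriptor types are the same size, so the loop can advance
 * lastRead uniformly. typhoon_inc_tx_index(), defined elsewhere in this
 * file, bumps the byte offset and wraps it at the end of the ring --
 * along the lines of
 *
 *	*index = (*index + count * sizeof(struct tx_desc)) %
 *			(TXLO_ENTRIES * sizeof(struct tx_desc));
 *
 * which is why lastRead and the cleared index are byte offsets rather
 * than entry counts.)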
1632 */ 1633 skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr); 1634 dma_len = le16_to_cpu(tx->len); 1635 pci_unmap_single(tp->pdev, skb_dma, dma_len, 1636 PCI_DMA_TODEVICE); 1637 } 1638 1639 tx->flags = 0; 1640 typhoon_inc_tx_index(&lastRead, 1); 1641 } 1642 1643 return lastRead; 1644 } 1645 1646 static void 1647 typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing, 1648 volatile __le32 * index) 1649 { 1650 u32 lastRead; 1651 int numDesc = MAX_SKB_FRAGS + 1; 1652 1653 /* This will need changing if we start to use the Hi Tx ring. */ 1654 lastRead = typhoon_clean_tx(tp, txRing, index); 1655 if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite, 1656 lastRead, TXLO_ENTRIES) > (numDesc + 2)) 1657 netif_wake_queue(tp->dev); 1658 1659 txRing->lastRead = lastRead; 1660 smp_wmb(); 1661 } 1662 1663 static void 1664 typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx) 1665 { 1666 struct typhoon_indexes *indexes = tp->indexes; 1667 struct rxbuff_ent *rxb = &tp->rxbuffers[idx]; 1668 struct basic_ring *ring = &tp->rxBuffRing; 1669 struct rx_free *r; 1670 1671 if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) == 1672 le32_to_cpu(indexes->rxBuffCleared)) { 1673 /* no room in ring, just drop the skb 1674 */ 1675 dev_kfree_skb_any(rxb->skb); 1676 rxb->skb = NULL; 1677 return; 1678 } 1679 1680 r = (struct rx_free *) (ring->ringBase + ring->lastWrite); 1681 typhoon_inc_rxfree_index(&ring->lastWrite, 1); 1682 r->virtAddr = idx; 1683 r->physAddr = cpu_to_le32(rxb->dma_addr); 1684 1685 /* Tell the card about it */ 1686 wmb(); 1687 indexes->rxBuffReady = cpu_to_le32(ring->lastWrite); 1688 } 1689 1690 static int 1691 typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx) 1692 { 1693 struct typhoon_indexes *indexes = tp->indexes; 1694 struct rxbuff_ent *rxb = &tp->rxbuffers[idx]; 1695 struct basic_ring *ring = &tp->rxBuffRing; 1696 struct rx_free *r; 1697 struct sk_buff *skb; 1698 dma_addr_t dma_addr; 1699 1700 rxb->skb = NULL; 1701 1702 if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) == 1703 le32_to_cpu(indexes->rxBuffCleared)) 1704 return -ENOMEM; 1705 1706 skb = netdev_alloc_skb(tp->dev, PKT_BUF_SZ); 1707 if(!skb) 1708 return -ENOMEM; 1709 1710 #if 0 1711 /* Please, 3com, fix the firmware to allow DMA to a unaligned 1712 * address! Pretty please? 1713 */ 1714 skb_reserve(skb, 2); 1715 #endif 1716 1717 dma_addr = pci_map_single(tp->pdev, skb->data, 1718 PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 1719 1720 /* Since no card does 64 bit DAC, the high bits will never 1721 * change from zero. 
1722 */ 1723 r = (struct rx_free *) (ring->ringBase + ring->lastWrite); 1724 typhoon_inc_rxfree_index(&ring->lastWrite, 1); 1725 r->virtAddr = idx; 1726 r->physAddr = cpu_to_le32(dma_addr); 1727 rxb->skb = skb; 1728 rxb->dma_addr = dma_addr; 1729 1730 /* Tell the card about it */ 1731 wmb(); 1732 indexes->rxBuffReady = cpu_to_le32(ring->lastWrite); 1733 return 0; 1734 } 1735 1736 static int 1737 typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready, 1738 volatile __le32 * cleared, int budget) 1739 { 1740 struct rx_desc *rx; 1741 struct sk_buff *skb, *new_skb; 1742 struct rxbuff_ent *rxb; 1743 dma_addr_t dma_addr; 1744 u32 local_ready; 1745 u32 rxaddr; 1746 int pkt_len; 1747 u32 idx; 1748 __le32 csum_bits; 1749 int received; 1750 1751 received = 0; 1752 local_ready = le32_to_cpu(*ready); 1753 rxaddr = le32_to_cpu(*cleared); 1754 while(rxaddr != local_ready && budget > 0) { 1755 rx = (struct rx_desc *) (rxRing->ringBase + rxaddr); 1756 idx = rx->addr; 1757 rxb = &tp->rxbuffers[idx]; 1758 skb = rxb->skb; 1759 dma_addr = rxb->dma_addr; 1760 1761 typhoon_inc_rx_index(&rxaddr, 1); 1762 1763 if(rx->flags & TYPHOON_RX_ERROR) { 1764 typhoon_recycle_rx_skb(tp, idx); 1765 continue; 1766 } 1767 1768 pkt_len = le16_to_cpu(rx->frameLen); 1769 1770 if(pkt_len < rx_copybreak && 1771 (new_skb = netdev_alloc_skb(tp->dev, pkt_len + 2)) != NULL) { 1772 skb_reserve(new_skb, 2); 1773 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, 1774 PKT_BUF_SZ, 1775 PCI_DMA_FROMDEVICE); 1776 skb_copy_to_linear_data(new_skb, skb->data, pkt_len); 1777 pci_dma_sync_single_for_device(tp->pdev, dma_addr, 1778 PKT_BUF_SZ, 1779 PCI_DMA_FROMDEVICE); 1780 skb_put(new_skb, pkt_len); 1781 typhoon_recycle_rx_skb(tp, idx); 1782 } else { 1783 new_skb = skb; 1784 skb_put(new_skb, pkt_len); 1785 pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ, 1786 PCI_DMA_FROMDEVICE); 1787 typhoon_alloc_rx_skb(tp, idx); 1788 } 1789 new_skb->protocol = eth_type_trans(new_skb, tp->dev); 1790 csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD | 1791 TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD); 1792 if(csum_bits == 1793 (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) || 1794 csum_bits == 1795 (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) { 1796 new_skb->ip_summed = CHECKSUM_UNNECESSARY; 1797 } else 1798 skb_checksum_none_assert(new_skb); 1799 1800 if (rx->rxStatus & TYPHOON_RX_VLAN) 1801 __vlan_hwaccel_put_tag(new_skb, htons(ETH_P_8021Q), 1802 ntohl(rx->vlanTag) & 0xffff); 1803 netif_receive_skb(new_skb); 1804 1805 received++; 1806 budget--; 1807 } 1808 *cleared = cpu_to_le32(rxaddr); 1809 1810 return received; 1811 } 1812 1813 static void 1814 typhoon_fill_free_ring(struct typhoon *tp) 1815 { 1816 u32 i; 1817 1818 for(i = 0; i < RXENT_ENTRIES; i++) { 1819 struct rxbuff_ent *rxb = &tp->rxbuffers[i]; 1820 if(rxb->skb) 1821 continue; 1822 if(typhoon_alloc_rx_skb(tp, i) < 0) 1823 break; 1824 } 1825 } 1826 1827 static int 1828 typhoon_poll(struct napi_struct *napi, int budget) 1829 { 1830 struct typhoon *tp = container_of(napi, struct typhoon, napi); 1831 struct typhoon_indexes *indexes = tp->indexes; 1832 int work_done; 1833 1834 rmb(); 1835 if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared) 1836 typhoon_process_response(tp, 0, NULL); 1837 1838 if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead) 1839 typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared); 1840 1841 work_done = 0; 1842 1843 if(indexes->rxHiCleared != indexes->rxHiReady) { 1844 work_done += typhoon_rx(tp, &tp->rxHiRing, 
&indexes->rxHiReady, 1845 &indexes->rxHiCleared, budget); 1846 } 1847 1848 if(indexes->rxLoCleared != indexes->rxLoReady) { 1849 work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady, 1850 &indexes->rxLoCleared, budget - work_done); 1851 } 1852 1853 if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) { 1854 /* rxBuff ring is empty, try to fill it. */ 1855 typhoon_fill_free_ring(tp); 1856 } 1857 1858 if (work_done < budget) { 1859 napi_complete(napi); 1860 iowrite32(TYPHOON_INTR_NONE, 1861 tp->ioaddr + TYPHOON_REG_INTR_MASK); 1862 typhoon_post_pci_writes(tp->ioaddr); 1863 } 1864 1865 return work_done; 1866 } 1867 1868 static irqreturn_t 1869 typhoon_interrupt(int irq, void *dev_instance) 1870 { 1871 struct net_device *dev = dev_instance; 1872 struct typhoon *tp = netdev_priv(dev); 1873 void __iomem *ioaddr = tp->ioaddr; 1874 u32 intr_status; 1875 1876 intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS); 1877 if(!(intr_status & TYPHOON_INTR_HOST_INT)) 1878 return IRQ_NONE; 1879 1880 iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS); 1881 1882 if (napi_schedule_prep(&tp->napi)) { 1883 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); 1884 typhoon_post_pci_writes(ioaddr); 1885 __napi_schedule(&tp->napi); 1886 } else { 1887 netdev_err(dev, "Error, poll already scheduled\n"); 1888 } 1889 return IRQ_HANDLED; 1890 } 1891 1892 static void 1893 typhoon_free_rx_rings(struct typhoon *tp) 1894 { 1895 u32 i; 1896 1897 for(i = 0; i < RXENT_ENTRIES; i++) { 1898 struct rxbuff_ent *rxb = &tp->rxbuffers[i]; 1899 if(rxb->skb) { 1900 pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ, 1901 PCI_DMA_FROMDEVICE); 1902 dev_kfree_skb(rxb->skb); 1903 rxb->skb = NULL; 1904 } 1905 } 1906 } 1907 1908 static int 1909 typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events) 1910 { 1911 struct pci_dev *pdev = tp->pdev; 1912 void __iomem *ioaddr = tp->ioaddr; 1913 struct cmd_desc xp_cmd; 1914 int err; 1915 1916 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS); 1917 xp_cmd.parm1 = events; 1918 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); 1919 if(err < 0) { 1920 netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n", 1921 err); 1922 return err; 1923 } 1924 1925 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP); 1926 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); 1927 if(err < 0) { 1928 netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err); 1929 return err; 1930 } 1931 1932 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0) 1933 return -ETIMEDOUT; 1934 1935 /* Since we cannot monitor the status of the link while sleeping, 1936 * tell the world it went away. 1937 */ 1938 netif_carrier_off(tp->dev); 1939 1940 pci_enable_wake(tp->pdev, state, 1); 1941 pci_disable_device(pdev); 1942 return pci_set_power_state(pdev, state); 1943 } 1944 1945 static int 1946 typhoon_wakeup(struct typhoon *tp, int wait_type) 1947 { 1948 struct pci_dev *pdev = tp->pdev; 1949 void __iomem *ioaddr = tp->ioaddr; 1950 1951 pci_set_power_state(pdev, PCI_D0); 1952 pci_restore_state(pdev); 1953 1954 /* Post 2.x.x versions of the Sleep Image require a reset before 1955 * we can download the Runtime Image. But let's not make users of 1956 * the old firmware pay for the reset. 
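 *
 * Concretely: the code below always issues TYPHOON_BOOTCMD_WAKEUP, then
 * falls back to a full typhoon_reset() either when the sleep image never
 * reaches TYPHOON_STATUS_WAITING_FOR_HOST or when probe flagged the card
 * with TYPHOON_WAKEUP_NEEDS_RESET (set for 1.1+ sleep images).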
1957 */ 1958 iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND); 1959 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 || 1960 (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET)) 1961 return typhoon_reset(ioaddr, wait_type); 1962 1963 return 0; 1964 } 1965 1966 static int 1967 typhoon_start_runtime(struct typhoon *tp) 1968 { 1969 struct net_device *dev = tp->dev; 1970 void __iomem *ioaddr = tp->ioaddr; 1971 struct cmd_desc xp_cmd; 1972 int err; 1973 1974 typhoon_init_rings(tp); 1975 typhoon_fill_free_ring(tp); 1976 1977 err = typhoon_download_firmware(tp); 1978 if(err < 0) { 1979 netdev_err(tp->dev, "cannot load runtime on 3XP\n"); 1980 goto error_out; 1981 } 1982 1983 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) { 1984 netdev_err(tp->dev, "cannot boot 3XP\n"); 1985 err = -EIO; 1986 goto error_out; 1987 } 1988 1989 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE); 1990 xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ); 1991 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); 1992 if(err < 0) 1993 goto error_out; 1994 1995 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS); 1996 xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0])); 1997 xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2])); 1998 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); 1999 if(err < 0) 2000 goto error_out; 2001 2002 /* Disable IRQ coalescing -- we can reenable it when 3Com gives 2003 * us some more information on how to control it. 2004 */ 2005 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL); 2006 xp_cmd.parm1 = 0; 2007 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); 2008 if(err < 0) 2009 goto error_out; 2010 2011 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT); 2012 xp_cmd.parm1 = tp->xcvr_select; 2013 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); 2014 if(err < 0) 2015 goto error_out; 2016 2017 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE); 2018 xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q); 2019 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); 2020 if(err < 0) 2021 goto error_out; 2022 2023 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS); 2024 xp_cmd.parm2 = tp->offload; 2025 xp_cmd.parm3 = tp->offload; 2026 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); 2027 if(err < 0) 2028 goto error_out; 2029 2030 typhoon_set_rx_mode(dev); 2031 2032 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE); 2033 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); 2034 if(err < 0) 2035 goto error_out; 2036 2037 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE); 2038 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); 2039 if(err < 0) 2040 goto error_out; 2041 2042 tp->card_state = Running; 2043 smp_wmb(); 2044 2045 iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE); 2046 iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK); 2047 typhoon_post_pci_writes(ioaddr); 2048 2049 return 0; 2050 2051 error_out: 2052 typhoon_reset(ioaddr, WaitNoSleep); 2053 typhoon_free_rx_rings(tp); 2054 typhoon_init_rings(tp); 2055 return err; 2056 } 2057 2058 static int 2059 typhoon_stop_runtime(struct typhoon *tp, int wait_type) 2060 { 2061 struct typhoon_indexes *indexes = tp->indexes; 2062 struct transmit_ring *txLo = &tp->txLoRing; 2063 void __iomem *ioaddr = tp->ioaddr; 2064 struct cmd_desc xp_cmd; 2065 int i; 2066 2067 /* Disable interrupts early, since we can't schedule a poll 2068 * when called with !netif_running(). 
This will be posted 2069 * when we force the posting of the command. 2070 */ 2071 iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE); 2072 2073 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE); 2074 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); 2075 2076 /* Wait 1/2 sec for any outstanding transmits to occur 2077 * We'll cleanup after the reset if this times out. 2078 */ 2079 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) { 2080 if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite)) 2081 break; 2082 udelay(TYPHOON_UDELAY); 2083 } 2084 2085 if(i == TYPHOON_WAIT_TIMEOUT) 2086 netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n"); 2087 2088 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE); 2089 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); 2090 2091 /* save the statistics so when we bring the interface up again, 2092 * the values reported to userspace are correct. 2093 */ 2094 tp->card_state = Sleeping; 2095 smp_wmb(); 2096 typhoon_do_get_stats(tp); 2097 memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats)); 2098 2099 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT); 2100 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); 2101 2102 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0) 2103 netdev_err(tp->dev, "timed out waiting for 3XP to halt\n"); 2104 2105 if(typhoon_reset(ioaddr, wait_type) < 0) { 2106 netdev_err(tp->dev, "unable to reset 3XP\n"); 2107 return -ETIMEDOUT; 2108 } 2109 2110 /* cleanup any outstanding Tx packets */ 2111 if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) { 2112 indexes->txLoCleared = cpu_to_le32(txLo->lastWrite); 2113 typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared); 2114 } 2115 2116 return 0; 2117 } 2118 2119 static void 2120 typhoon_tx_timeout(struct net_device *dev) 2121 { 2122 struct typhoon *tp = netdev_priv(dev); 2123 2124 if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) { 2125 netdev_warn(dev, "could not reset in tx timeout\n"); 2126 goto truly_dead; 2127 } 2128 2129 /* If we ever start using the Hi ring, it will need cleaning too */ 2130 typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared); 2131 typhoon_free_rx_rings(tp); 2132 2133 if(typhoon_start_runtime(tp) < 0) { 2134 netdev_err(dev, "could not start runtime in tx timeout\n"); 2135 goto truly_dead; 2136 } 2137 2138 netif_wake_queue(dev); 2139 return; 2140 2141 truly_dead: 2142 /* Reset the hardware, and turn off carrier to avoid more timeouts */ 2143 typhoon_reset(tp->ioaddr, NoWait); 2144 netif_carrier_off(dev); 2145 } 2146 2147 static int 2148 typhoon_open(struct net_device *dev) 2149 { 2150 struct typhoon *tp = netdev_priv(dev); 2151 int err; 2152 2153 err = typhoon_request_firmware(tp); 2154 if (err) 2155 goto out; 2156 2157 err = typhoon_wakeup(tp, WaitSleep); 2158 if(err < 0) { 2159 netdev_err(dev, "unable to wakeup device\n"); 2160 goto out_sleep; 2161 } 2162 2163 err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED, 2164 dev->name, dev); 2165 if(err < 0) 2166 goto out_sleep; 2167 2168 napi_enable(&tp->napi); 2169 2170 err = typhoon_start_runtime(tp); 2171 if(err < 0) { 2172 napi_disable(&tp->napi); 2173 goto out_irq; 2174 } 2175 2176 netif_start_queue(dev); 2177 return 0; 2178 2179 out_irq: 2180 free_irq(dev->irq, dev); 2181 2182 out_sleep: 2183 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) { 2184 netdev_err(dev, "unable to reboot into sleep img\n"); 2185 typhoon_reset(tp->ioaddr, NoWait); 2186 goto out; 2187 } 2188 2189 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) 2190 netdev_err(dev, 
"unable to go back to sleep\n"); 2191 2192 out: 2193 return err; 2194 } 2195 2196 static int 2197 typhoon_close(struct net_device *dev) 2198 { 2199 struct typhoon *tp = netdev_priv(dev); 2200 2201 netif_stop_queue(dev); 2202 napi_disable(&tp->napi); 2203 2204 if(typhoon_stop_runtime(tp, WaitSleep) < 0) 2205 netdev_err(dev, "unable to stop runtime\n"); 2206 2207 /* Make sure there is no irq handler running on a different CPU. */ 2208 free_irq(dev->irq, dev); 2209 2210 typhoon_free_rx_rings(tp); 2211 typhoon_init_rings(tp); 2212 2213 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) 2214 netdev_err(dev, "unable to boot sleep image\n"); 2215 2216 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) 2217 netdev_err(dev, "unable to put card to sleep\n"); 2218 2219 return 0; 2220 } 2221 2222 #ifdef CONFIG_PM 2223 static int 2224 typhoon_resume(struct pci_dev *pdev) 2225 { 2226 struct net_device *dev = pci_get_drvdata(pdev); 2227 struct typhoon *tp = netdev_priv(dev); 2228 2229 /* If we're down, resume when we are upped. 2230 */ 2231 if(!netif_running(dev)) 2232 return 0; 2233 2234 if(typhoon_wakeup(tp, WaitNoSleep) < 0) { 2235 netdev_err(dev, "critical: could not wake up in resume\n"); 2236 goto reset; 2237 } 2238 2239 if(typhoon_start_runtime(tp) < 0) { 2240 netdev_err(dev, "critical: could not start runtime in resume\n"); 2241 goto reset; 2242 } 2243 2244 netif_device_attach(dev); 2245 return 0; 2246 2247 reset: 2248 typhoon_reset(tp->ioaddr, NoWait); 2249 return -EBUSY; 2250 } 2251 2252 static int 2253 typhoon_suspend(struct pci_dev *pdev, pm_message_t state) 2254 { 2255 struct net_device *dev = pci_get_drvdata(pdev); 2256 struct typhoon *tp = netdev_priv(dev); 2257 struct cmd_desc xp_cmd; 2258 2259 /* If we're down, we're already suspended. 2260 */ 2261 if(!netif_running(dev)) 2262 return 0; 2263 2264 /* TYPHOON_OFFLOAD_VLAN is always on now, so this doesn't work */ 2265 if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) 2266 netdev_warn(dev, "cannot do WAKE_MAGIC with VLAN offloading\n"); 2267 2268 netif_device_detach(dev); 2269 2270 if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) { 2271 netdev_err(dev, "unable to stop runtime\n"); 2272 goto need_resume; 2273 } 2274 2275 typhoon_free_rx_rings(tp); 2276 typhoon_init_rings(tp); 2277 2278 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) { 2279 netdev_err(dev, "unable to boot sleep image\n"); 2280 goto need_resume; 2281 } 2282 2283 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS); 2284 xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0])); 2285 xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2])); 2286 if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) { 2287 netdev_err(dev, "unable to set mac address in suspend\n"); 2288 goto need_resume; 2289 } 2290 2291 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER); 2292 xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST; 2293 if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) { 2294 netdev_err(dev, "unable to set rx filter in suspend\n"); 2295 goto need_resume; 2296 } 2297 2298 if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) { 2299 netdev_err(dev, "unable to put card to sleep\n"); 2300 goto need_resume; 2301 } 2302 2303 return 0; 2304 2305 need_resume: 2306 typhoon_resume(pdev); 2307 return -EBUSY; 2308 } 2309 #endif 2310 2311 static int 2312 typhoon_test_mmio(struct pci_dev *pdev) 2313 { 2314 void __iomem *ioaddr = pci_iomap(pdev, 1, 128); 2315 int mode = 0; 2316 u32 val; 2317 2318 if(!ioaddr) 2319 
goto out; 2320 2321 if(ioread32(ioaddr + TYPHOON_REG_STATUS) != 2322 TYPHOON_STATUS_WAITING_FOR_HOST) 2323 goto out_unmap; 2324 2325 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); 2326 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS); 2327 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE); 2328 2329 /* Ok, see if we can change our interrupt status register by 2330 * sending ourselves an interrupt. If so, then MMIO works. 2331 * The 50usec delay is arbitrary -- it could probably be smaller. 2332 */ 2333 val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS); 2334 if((val & TYPHOON_INTR_SELF) == 0) { 2335 iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT); 2336 ioread32(ioaddr + TYPHOON_REG_INTR_STATUS); 2337 udelay(50); 2338 val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS); 2339 if(val & TYPHOON_INTR_SELF) 2340 mode = 1; 2341 } 2342 2343 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); 2344 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS); 2345 iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE); 2346 ioread32(ioaddr + TYPHOON_REG_INTR_STATUS); 2347 2348 out_unmap: 2349 pci_iounmap(pdev, ioaddr); 2350 2351 out: 2352 if(!mode) 2353 pr_info("%s: falling back to port IO\n", pci_name(pdev)); 2354 return mode; 2355 } 2356 2357 static const struct net_device_ops typhoon_netdev_ops = { 2358 .ndo_open = typhoon_open, 2359 .ndo_stop = typhoon_close, 2360 .ndo_start_xmit = typhoon_start_tx, 2361 .ndo_set_rx_mode = typhoon_set_rx_mode, 2362 .ndo_tx_timeout = typhoon_tx_timeout, 2363 .ndo_get_stats = typhoon_get_stats, 2364 .ndo_validate_addr = eth_validate_addr, 2365 .ndo_set_mac_address = eth_mac_addr, 2366 .ndo_change_mtu = eth_change_mtu, 2367 }; 2368 2369 static int 2370 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 2371 { 2372 struct net_device *dev; 2373 struct typhoon *tp; 2374 int card_id = (int) ent->driver_data; 2375 void __iomem *ioaddr; 2376 void *shared; 2377 dma_addr_t shared_dma; 2378 struct cmd_desc xp_cmd; 2379 struct resp_desc xp_resp[3]; 2380 int err = 0; 2381 const char *err_msg; 2382 2383 dev = alloc_etherdev(sizeof(*tp)); 2384 if(dev == NULL) { 2385 err_msg = "unable to alloc new net device"; 2386 err = -ENOMEM; 2387 goto error_out; 2388 } 2389 SET_NETDEV_DEV(dev, &pdev->dev); 2390 2391 err = pci_enable_device(pdev); 2392 if(err < 0) { 2393 err_msg = "unable to enable device"; 2394 goto error_out_dev; 2395 } 2396 2397 err = pci_set_mwi(pdev); 2398 if(err < 0) { 2399 err_msg = "unable to set MWI"; 2400 goto error_out_disable; 2401 } 2402 2403 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2404 if(err < 0) { 2405 err_msg = "No usable DMA configuration"; 2406 goto error_out_mwi; 2407 } 2408 2409 /* sanity checks on IO and MMIO BARs 2410 */ 2411 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) { 2412 err_msg = "region #1 not a PCI IO resource, aborting"; 2413 err = -ENODEV; 2414 goto error_out_mwi; 2415 } 2416 if(pci_resource_len(pdev, 0) < 128) { 2417 err_msg = "Invalid PCI IO region size, aborting"; 2418 err = -ENODEV; 2419 goto error_out_mwi; 2420 } 2421 if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { 2422 err_msg = "region #1 not a PCI MMIO resource, aborting"; 2423 err = -ENODEV; 2424 goto error_out_mwi; 2425 } 2426 if(pci_resource_len(pdev, 1) < 128) { 2427 err_msg = "Invalid PCI MMIO region size, aborting"; 2428 err = -ENODEV; 2429 goto error_out_mwi; 2430 } 2431 2432 err = pci_request_regions(pdev, KBUILD_MODNAME); 2433 if(err < 0) { 2434 err_msg = "could not request 
regions"; 2435 goto error_out_mwi; 2436 } 2437 2438 /* map our registers 2439 */ 2440 if(use_mmio != 0 && use_mmio != 1) 2441 use_mmio = typhoon_test_mmio(pdev); 2442 2443 ioaddr = pci_iomap(pdev, use_mmio, 128); 2444 if (!ioaddr) { 2445 err_msg = "cannot remap registers, aborting"; 2446 err = -EIO; 2447 goto error_out_regions; 2448 } 2449 2450 /* allocate pci dma space for rx and tx descriptor rings 2451 */ 2452 shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared), 2453 &shared_dma); 2454 if(!shared) { 2455 err_msg = "could not allocate DMA memory"; 2456 err = -ENOMEM; 2457 goto error_out_remap; 2458 } 2459 2460 dev->irq = pdev->irq; 2461 tp = netdev_priv(dev); 2462 tp->shared = shared; 2463 tp->shared_dma = shared_dma; 2464 tp->pdev = pdev; 2465 tp->tx_pdev = pdev; 2466 tp->ioaddr = ioaddr; 2467 tp->tx_ioaddr = ioaddr; 2468 tp->dev = dev; 2469 2470 /* Init sequence: 2471 * 1) Reset the adapter to clear any bad juju 2472 * 2) Reload the sleep image 2473 * 3) Boot the sleep image 2474 * 4) Get the hardware address. 2475 * 5) Put the card to sleep. 2476 */ 2477 if (typhoon_reset(ioaddr, WaitSleep) < 0) { 2478 err_msg = "could not reset 3XP"; 2479 err = -EIO; 2480 goto error_out_dma; 2481 } 2482 2483 /* Now that we've reset the 3XP and are sure it's not going to 2484 * write all over memory, enable bus mastering, and save our 2485 * state for resuming after a suspend. 2486 */ 2487 pci_set_master(pdev); 2488 pci_save_state(pdev); 2489 2490 typhoon_init_interface(tp); 2491 typhoon_init_rings(tp); 2492 2493 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) { 2494 err_msg = "cannot boot 3XP sleep image"; 2495 err = -EIO; 2496 goto error_out_reset; 2497 } 2498 2499 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS); 2500 if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) { 2501 err_msg = "cannot read MAC address"; 2502 err = -EIO; 2503 goto error_out_reset; 2504 } 2505 2506 *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1)); 2507 *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2)); 2508 2509 if(!is_valid_ether_addr(dev->dev_addr)) { 2510 err_msg = "Could not obtain valid ethernet address, aborting"; 2511 goto error_out_reset; 2512 } 2513 2514 /* Read the Sleep Image version last, so the response is valid 2515 * later when we print out the version reported. 2516 */ 2517 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS); 2518 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) { 2519 err_msg = "Could not get Sleep Image version"; 2520 goto error_out_reset; 2521 } 2522 2523 tp->capabilities = typhoon_card_info[card_id].capabilities; 2524 tp->xcvr_select = TYPHOON_XCVR_AUTONEG; 2525 2526 /* Typhoon 1.0 Sleep Images return one response descriptor to the 2527 * READ_VERSIONS command. Those versions are OK after waking up 2528 * from sleep without needing a reset. Typhoon 1.1+ Sleep Images 2529 * seem to need a little extra help to get started. Since we don't 2530 * know how to nudge it along, just kick it. 2531 */ 2532 if(xp_resp[0].numDesc != 0) 2533 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET; 2534 2535 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) { 2536 err_msg = "cannot put adapter to sleep"; 2537 err = -EIO; 2538 goto error_out_reset; 2539 } 2540 2541 /* The chip-specific entries in the device structure. 
*/ 2542 dev->netdev_ops = &typhoon_netdev_ops; 2543 netif_napi_add(dev, &tp->napi, typhoon_poll, 16); 2544 dev->watchdog_timeo = TX_TIMEOUT; 2545 2546 dev->ethtool_ops = &typhoon_ethtool_ops; 2547 2548 /* We can handle scatter gather, up to 16 entries, and 2549 * we can do IP checksumming (only version 4, doh...) 2550 * 2551 * There's no way to turn off the RX VLAN offloading and stripping 2552 * on the current 3XP firmware -- it does not respect the offload 2553 * settings -- so we only allow the user to toggle the TX processing. 2554 */ 2555 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | 2556 NETIF_F_HW_VLAN_CTAG_TX; 2557 dev->features = dev->hw_features | 2558 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM; 2559 2560 if(register_netdev(dev) < 0) { 2561 err_msg = "unable to register netdev"; 2562 goto error_out_reset; 2563 } 2564 2565 pci_set_drvdata(pdev, dev); 2566 2567 netdev_info(dev, "%s at %s 0x%llx, %pM\n", 2568 typhoon_card_info[card_id].name, 2569 use_mmio ? "MMIO" : "IO", 2570 (unsigned long long)pci_resource_start(pdev, use_mmio), 2571 dev->dev_addr); 2572 2573 /* xp_resp still contains the response to the READ_VERSIONS command. 2574 * For debugging, let the user know what version he has. 2575 */ 2576 if(xp_resp[0].numDesc == 0) { 2577 /* This is the Typhoon 1.0 type Sleep Image, last 16 bits 2578 * of version is Month/Day of build. 2579 */ 2580 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff; 2581 netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n", 2582 monthday >> 8, monthday & 0xff); 2583 } else if(xp_resp[0].numDesc == 2) { 2584 /* This is the Typhoon 1.1+ type Sleep Image 2585 */ 2586 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2); 2587 u8 *ver_string = (u8 *) &xp_resp[1]; 2588 ver_string[25] = 0; 2589 netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n", 2590 sleep_ver >> 24, (sleep_ver >> 12) & 0xfff, 2591 sleep_ver & 0xfff, ver_string); 2592 } else { 2593 netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n", 2594 xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2)); 2595 } 2596 2597 return 0; 2598 2599 error_out_reset: 2600 typhoon_reset(ioaddr, NoWait); 2601 2602 error_out_dma: 2603 pci_free_consistent(pdev, sizeof(struct typhoon_shared), 2604 shared, shared_dma); 2605 error_out_remap: 2606 pci_iounmap(pdev, ioaddr); 2607 error_out_regions: 2608 pci_release_regions(pdev); 2609 error_out_mwi: 2610 pci_clear_mwi(pdev); 2611 error_out_disable: 2612 pci_disable_device(pdev); 2613 error_out_dev: 2614 free_netdev(dev); 2615 error_out: 2616 pr_err("%s: %s\n", pci_name(pdev), err_msg); 2617 return err; 2618 } 2619 2620 static void 2621 typhoon_remove_one(struct pci_dev *pdev) 2622 { 2623 struct net_device *dev = pci_get_drvdata(pdev); 2624 struct typhoon *tp = netdev_priv(dev); 2625 2626 unregister_netdev(dev); 2627 pci_set_power_state(pdev, PCI_D0); 2628 pci_restore_state(pdev); 2629 typhoon_reset(tp->ioaddr, NoWait); 2630 pci_iounmap(pdev, tp->ioaddr); 2631 pci_free_consistent(pdev, sizeof(struct typhoon_shared), 2632 tp->shared, tp->shared_dma); 2633 pci_release_regions(pdev); 2634 pci_clear_mwi(pdev); 2635 pci_disable_device(pdev); 2636 free_netdev(dev); 2637 } 2638 2639 static struct pci_driver typhoon_driver = { 2640 .name = KBUILD_MODNAME, 2641 .id_table = typhoon_pci_tbl, 2642 .probe = typhoon_init_one, 2643 .remove = typhoon_remove_one, 2644 #ifdef CONFIG_PM 2645 .suspend = typhoon_suspend, 2646 .resume = typhoon_resume, 2647 #endif 2648 }; 2649 2650 static int __init 2651 typhoon_init(void) 2652 { 2653 return 
pci_register_driver(&typhoon_driver); 2654 } 2655 2656 static void __exit 2657 typhoon_cleanup(void) 2658 { 2659 release_firmware(typhoon_fw); 2660 pci_unregister_driver(&typhoon_driver); 2661 } 2662 2663 module_init(typhoon_init); 2664 module_exit(typhoon_cleanup); 2665 2666 2667 2668 2669 2670 /* LDV_COMMENT_BEGIN_MAIN */ 2671 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful 2672 2673 /*###########################################################################*/ 2674 2675 /*############## Driver Environment Generator 0.2 output ####################*/ 2676 2677 /*###########################################################################*/ 2678 2679 2680 2681 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test whether all kernel resources are correctly released by the driver before it is unloaded. */ 2682 void ldv_check_final_state(void); 2683 2684 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */ 2685 void ldv_check_return_value(int res); 2686 2687 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */ 2688 void ldv_check_return_value_probe(int res); 2689 2690 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */ 2691 void ldv_initialize(void); 2692 2693 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */ 2694 void ldv_handler_precall(void); 2695 2696 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */ 2697 int nondet_int(void); 2698 2699 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */ 2700 int LDV_IN_INTERRUPT; 2701 2702 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */ 2703 void ldv_main0_sequence_infinite_withcheck_stateful(void) { 2704 2705 2706 2707 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */ 2708 /*============================= VARIABLE DECLARATION PART =============================*/ 2709 /** STRUCT: struct type: ethtool_ops, struct name: typhoon_ethtool_ops **/ 2710 /* content: static int typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)*/ 2711 /* LDV_COMMENT_BEGIN_PREP */ 2712 #define TXHI_ENTRIES 2 2713 #define TXLO_ENTRIES 128 2714 #define RX_ENTRIES 32 2715 #define COMMAND_ENTRIES 16 2716 #define RESPONSE_ENTRIES 32 2717 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) 2718 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) 2719 #define RXFREE_ENTRIES 128 2720 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1) 2721 #define TX_TIMEOUT (2*HZ) 2722 #define PKT_BUF_SZ 1536 2723 #define FIRMWARE_NAME "3com/typhoon.bin" 2724 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt 2725 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 2726 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO 2727 #undef NETIF_F_TSO 2728 #endif 2729 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) 2730 #error TX ring too small! 
2731 #endif 2732 #define TYPHOON_CRYPTO_NONE 0x00 2733 #define TYPHOON_CRYPTO_DES 0x01 2734 #define TYPHOON_CRYPTO_3DES 0x02 2735 #define TYPHOON_CRYPTO_VARIABLE 0x04 2736 #define TYPHOON_FIBER 0x08 2737 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10 2738 #define __3xp_aligned ____cacheline_aligned 2739 #define typhoon_post_pci_writes(x) \ 2740 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) 2741 #define TYPHOON_UDELAY 50 2742 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ) 2743 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY) 2744 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY) 2745 #if defined(NETIF_F_TSO) 2746 #define skb_tso_size(x) (skb_shinfo(x)->gso_size) 2747 #define TSO_NUM_DESCRIPTORS 2 2748 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT 2749 #else 2750 #define NETIF_F_TSO 0 2751 #define skb_tso_size(x) 0 2752 #define TSO_NUM_DESCRIPTORS 0 2753 #define TSO_OFFLOAD_ON 0 2754 #endif 2755 /* LDV_COMMENT_END_PREP */ 2756 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "typhoon_get_settings" */ 2757 struct net_device * var_group1; 2758 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "typhoon_get_settings" */ 2759 struct ethtool_cmd * var_group2; 2760 /* LDV_COMMENT_BEGIN_PREP */ 2761 #define shared_offset(x) offsetof(struct typhoon_shared, x) 2762 #if 0 2763 #endif 2764 #ifdef CONFIG_PM 2765 #endif 2766 #ifdef CONFIG_PM 2767 #endif 2768 /* LDV_COMMENT_END_PREP */ 2769 /* content: static int typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)*/ 2770 /* LDV_COMMENT_BEGIN_PREP */ 2771 #define TXHI_ENTRIES 2 2772 #define TXLO_ENTRIES 128 2773 #define RX_ENTRIES 32 2774 #define COMMAND_ENTRIES 16 2775 #define RESPONSE_ENTRIES 32 2776 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) 2777 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) 2778 #define RXFREE_ENTRIES 128 2779 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1) 2780 #define TX_TIMEOUT (2*HZ) 2781 #define PKT_BUF_SZ 1536 2782 #define FIRMWARE_NAME "3com/typhoon.bin" 2783 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt 2784 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 2785 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO 2786 #undef NETIF_F_TSO 2787 #endif 2788 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) 2789 #error TX ring too small! 
2790 #endif 2791 #define TYPHOON_CRYPTO_NONE 0x00 2792 #define TYPHOON_CRYPTO_DES 0x01 2793 #define TYPHOON_CRYPTO_3DES 0x02 2794 #define TYPHOON_CRYPTO_VARIABLE 0x04 2795 #define TYPHOON_FIBER 0x08 2796 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10 2797 #define __3xp_aligned ____cacheline_aligned 2798 #define typhoon_post_pci_writes(x) \ 2799 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) 2800 #define TYPHOON_UDELAY 50 2801 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ) 2802 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY) 2803 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY) 2804 #if defined(NETIF_F_TSO) 2805 #define skb_tso_size(x) (skb_shinfo(x)->gso_size) 2806 #define TSO_NUM_DESCRIPTORS 2 2807 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT 2808 #else 2809 #define NETIF_F_TSO 0 2810 #define skb_tso_size(x) 0 2811 #define TSO_NUM_DESCRIPTORS 0 2812 #define TSO_OFFLOAD_ON 0 2813 #endif 2814 /* LDV_COMMENT_END_PREP */ 2815 /* LDV_COMMENT_BEGIN_PREP */ 2816 #define shared_offset(x) offsetof(struct typhoon_shared, x) 2817 #if 0 2818 #endif 2819 #ifdef CONFIG_PM 2820 #endif 2821 #ifdef CONFIG_PM 2822 #endif 2823 /* LDV_COMMENT_END_PREP */ 2824 /* content: static void typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)*/ 2825 /* LDV_COMMENT_BEGIN_PREP */ 2826 #define TXHI_ENTRIES 2 2827 #define TXLO_ENTRIES 128 2828 #define RX_ENTRIES 32 2829 #define COMMAND_ENTRIES 16 2830 #define RESPONSE_ENTRIES 32 2831 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) 2832 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) 2833 #define RXFREE_ENTRIES 128 2834 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1) 2835 #define TX_TIMEOUT (2*HZ) 2836 #define PKT_BUF_SZ 1536 2837 #define FIRMWARE_NAME "3com/typhoon.bin" 2838 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt 2839 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 2840 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO 2841 #undef NETIF_F_TSO 2842 #endif 2843 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) 2844 #error TX ring too small! 
2845 #endif 2846 #define TYPHOON_CRYPTO_NONE 0x00 2847 #define TYPHOON_CRYPTO_DES 0x01 2848 #define TYPHOON_CRYPTO_3DES 0x02 2849 #define TYPHOON_CRYPTO_VARIABLE 0x04 2850 #define TYPHOON_FIBER 0x08 2851 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10 2852 #define __3xp_aligned ____cacheline_aligned 2853 #define typhoon_post_pci_writes(x) \ 2854 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) 2855 #define TYPHOON_UDELAY 50 2856 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ) 2857 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY) 2858 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY) 2859 #if defined(NETIF_F_TSO) 2860 #define skb_tso_size(x) (skb_shinfo(x)->gso_size) 2861 #define TSO_NUM_DESCRIPTORS 2 2862 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT 2863 #else 2864 #define NETIF_F_TSO 0 2865 #define skb_tso_size(x) 0 2866 #define TSO_NUM_DESCRIPTORS 0 2867 #define TSO_OFFLOAD_ON 0 2868 #endif 2869 /* LDV_COMMENT_END_PREP */ 2870 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "typhoon_get_drvinfo" */ 2871 struct ethtool_drvinfo * var_group3; 2872 /* LDV_COMMENT_BEGIN_PREP */ 2873 #define shared_offset(x) offsetof(struct typhoon_shared, x) 2874 #if 0 2875 #endif 2876 #ifdef CONFIG_PM 2877 #endif 2878 #ifdef CONFIG_PM 2879 #endif 2880 /* LDV_COMMENT_END_PREP */ 2881 /* content: static void typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)*/ 2882 /* LDV_COMMENT_BEGIN_PREP */ 2883 #define TXHI_ENTRIES 2 2884 #define TXLO_ENTRIES 128 2885 #define RX_ENTRIES 32 2886 #define COMMAND_ENTRIES 16 2887 #define RESPONSE_ENTRIES 32 2888 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) 2889 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) 2890 #define RXFREE_ENTRIES 128 2891 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1) 2892 #define TX_TIMEOUT (2*HZ) 2893 #define PKT_BUF_SZ 1536 2894 #define FIRMWARE_NAME "3com/typhoon.bin" 2895 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt 2896 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 2897 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO 2898 #undef NETIF_F_TSO 2899 #endif 2900 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) 2901 #error TX ring too small! 
2902 #endif 2903 #define TYPHOON_CRYPTO_NONE 0x00 2904 #define TYPHOON_CRYPTO_DES 0x01 2905 #define TYPHOON_CRYPTO_3DES 0x02 2906 #define TYPHOON_CRYPTO_VARIABLE 0x04 2907 #define TYPHOON_FIBER 0x08 2908 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10 2909 #define __3xp_aligned ____cacheline_aligned 2910 #define typhoon_post_pci_writes(x) \ 2911 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) 2912 #define TYPHOON_UDELAY 50 2913 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ) 2914 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY) 2915 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY) 2916 #if defined(NETIF_F_TSO) 2917 #define skb_tso_size(x) (skb_shinfo(x)->gso_size) 2918 #define TSO_NUM_DESCRIPTORS 2 2919 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT 2920 #else 2921 #define NETIF_F_TSO 0 2922 #define skb_tso_size(x) 0 2923 #define TSO_NUM_DESCRIPTORS 0 2924 #define TSO_OFFLOAD_ON 0 2925 #endif 2926 /* LDV_COMMENT_END_PREP */ 2927 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "typhoon_get_wol" */ 2928 struct ethtool_wolinfo * var_group4; 2929 /* LDV_COMMENT_BEGIN_PREP */ 2930 #define shared_offset(x) offsetof(struct typhoon_shared, x) 2931 #if 0 2932 #endif 2933 #ifdef CONFIG_PM 2934 #endif 2935 #ifdef CONFIG_PM 2936 #endif 2937 /* LDV_COMMENT_END_PREP */ 2938 /* content: static int typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)*/ 2939 /* LDV_COMMENT_BEGIN_PREP */ 2940 #define TXHI_ENTRIES 2 2941 #define TXLO_ENTRIES 128 2942 #define RX_ENTRIES 32 2943 #define COMMAND_ENTRIES 16 2944 #define RESPONSE_ENTRIES 32 2945 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) 2946 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) 2947 #define RXFREE_ENTRIES 128 2948 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1) 2949 #define TX_TIMEOUT (2*HZ) 2950 #define PKT_BUF_SZ 1536 2951 #define FIRMWARE_NAME "3com/typhoon.bin" 2952 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt 2953 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 2954 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO 2955 #undef NETIF_F_TSO 2956 #endif 2957 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) 2958 #error TX ring too small! 
2959 #endif 2960 #define TYPHOON_CRYPTO_NONE 0x00 2961 #define TYPHOON_CRYPTO_DES 0x01 2962 #define TYPHOON_CRYPTO_3DES 0x02 2963 #define TYPHOON_CRYPTO_VARIABLE 0x04 2964 #define TYPHOON_FIBER 0x08 2965 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10 2966 #define __3xp_aligned ____cacheline_aligned 2967 #define typhoon_post_pci_writes(x) \ 2968 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) 2969 #define TYPHOON_UDELAY 50 2970 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ) 2971 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY) 2972 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY) 2973 #if defined(NETIF_F_TSO) 2974 #define skb_tso_size(x) (skb_shinfo(x)->gso_size) 2975 #define TSO_NUM_DESCRIPTORS 2 2976 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT 2977 #else 2978 #define NETIF_F_TSO 0 2979 #define skb_tso_size(x) 0 2980 #define TSO_NUM_DESCRIPTORS 0 2981 #define TSO_OFFLOAD_ON 0 2982 #endif 2983 /* LDV_COMMENT_END_PREP */ 2984 /* LDV_COMMENT_BEGIN_PREP */ 2985 #define shared_offset(x) offsetof(struct typhoon_shared, x) 2986 #if 0 2987 #endif 2988 #ifdef CONFIG_PM 2989 #endif 2990 #ifdef CONFIG_PM 2991 #endif 2992 /* LDV_COMMENT_END_PREP */ 2993 /* content: static void typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)*/ 2994 /* LDV_COMMENT_BEGIN_PREP */ 2995 #define TXHI_ENTRIES 2 2996 #define TXLO_ENTRIES 128 2997 #define RX_ENTRIES 32 2998 #define COMMAND_ENTRIES 16 2999 #define RESPONSE_ENTRIES 32 3000 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) 3001 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) 3002 #define RXFREE_ENTRIES 128 3003 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1) 3004 #define TX_TIMEOUT (2*HZ) 3005 #define PKT_BUF_SZ 1536 3006 #define FIRMWARE_NAME "3com/typhoon.bin" 3007 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt 3008 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 3009 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO 3010 #undef NETIF_F_TSO 3011 #endif 3012 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) 3013 #error TX ring too small! 
3014 #endif 3015 #define TYPHOON_CRYPTO_NONE 0x00 3016 #define TYPHOON_CRYPTO_DES 0x01 3017 #define TYPHOON_CRYPTO_3DES 0x02 3018 #define TYPHOON_CRYPTO_VARIABLE 0x04 3019 #define TYPHOON_FIBER 0x08 3020 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10 3021 #define __3xp_aligned ____cacheline_aligned 3022 #define typhoon_post_pci_writes(x) \ 3023 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) 3024 #define TYPHOON_UDELAY 50 3025 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ) 3026 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY) 3027 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY) 3028 #if defined(NETIF_F_TSO) 3029 #define skb_tso_size(x) (skb_shinfo(x)->gso_size) 3030 #define TSO_NUM_DESCRIPTORS 2 3031 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT 3032 #else 3033 #define NETIF_F_TSO 0 3034 #define skb_tso_size(x) 0 3035 #define TSO_NUM_DESCRIPTORS 0 3036 #define TSO_OFFLOAD_ON 0 3037 #endif 3038 /* LDV_COMMENT_END_PREP */ 3039 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "typhoon_get_ringparam" */ 3040 struct ethtool_ringparam * var_group5; 3041 /* LDV_COMMENT_BEGIN_PREP */ 3042 #define shared_offset(x) offsetof(struct typhoon_shared, x) 3043 #if 0 3044 #endif 3045 #ifdef CONFIG_PM 3046 #endif 3047 #ifdef CONFIG_PM 3048 #endif 3049 /* LDV_COMMENT_END_PREP */ 3050 3051 /** STRUCT: struct type: net_device_ops, struct name: typhoon_netdev_ops **/ 3052 /* content: static int typhoon_open(struct net_device *dev)*/ 3053 /* LDV_COMMENT_BEGIN_PREP */ 3054 #define TXHI_ENTRIES 2 3055 #define TXLO_ENTRIES 128 3056 #define RX_ENTRIES 32 3057 #define COMMAND_ENTRIES 16 3058 #define RESPONSE_ENTRIES 32 3059 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) 3060 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) 3061 #define RXFREE_ENTRIES 128 3062 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1) 3063 #define TX_TIMEOUT (2*HZ) 3064 #define PKT_BUF_SZ 1536 3065 #define FIRMWARE_NAME "3com/typhoon.bin" 3066 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt 3067 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 3068 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO 3069 #undef NETIF_F_TSO 3070 #endif 3071 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) 3072 #error TX ring too small! 
3073 #endif 3074 #define TYPHOON_CRYPTO_NONE 0x00 3075 #define TYPHOON_CRYPTO_DES 0x01 3076 #define TYPHOON_CRYPTO_3DES 0x02 3077 #define TYPHOON_CRYPTO_VARIABLE 0x04 3078 #define TYPHOON_FIBER 0x08 3079 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10 3080 #define __3xp_aligned ____cacheline_aligned 3081 #define typhoon_post_pci_writes(x) \ 3082 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) 3083 #define TYPHOON_UDELAY 50 3084 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ) 3085 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY) 3086 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY) 3087 #if defined(NETIF_F_TSO) 3088 #define skb_tso_size(x) (skb_shinfo(x)->gso_size) 3089 #define TSO_NUM_DESCRIPTORS 2 3090 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT 3091 #else 3092 #define NETIF_F_TSO 0 3093 #define skb_tso_size(x) 0 3094 #define TSO_NUM_DESCRIPTORS 0 3095 #define TSO_OFFLOAD_ON 0 3096 #endif 3097 #define shared_offset(x) offsetof(struct typhoon_shared, x) 3098 #if 0 3099 #endif 3100 /* LDV_COMMENT_END_PREP */ 3101 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "typhoon_open" */ 3102 static int res_typhoon_open_47; 3103 /* LDV_COMMENT_BEGIN_PREP */ 3104 #ifdef CONFIG_PM 3105 #endif 3106 #ifdef CONFIG_PM 3107 #endif 3108 /* LDV_COMMENT_END_PREP */ 3109 /* content: static int typhoon_close(struct net_device *dev)*/ 3110 /* LDV_COMMENT_BEGIN_PREP */ 3111 #define TXHI_ENTRIES 2 3112 #define TXLO_ENTRIES 128 3113 #define RX_ENTRIES 32 3114 #define COMMAND_ENTRIES 16 3115 #define RESPONSE_ENTRIES 32 3116 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) 3117 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) 3118 #define RXFREE_ENTRIES 128 3119 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1) 3120 #define TX_TIMEOUT (2*HZ) 3121 #define PKT_BUF_SZ 1536 3122 #define FIRMWARE_NAME "3com/typhoon.bin" 3123 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt 3124 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 3125 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO 3126 #undef NETIF_F_TSO 3127 #endif 3128 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) 3129 #error TX ring too small! 
3130 #endif 3131 #define TYPHOON_CRYPTO_NONE 0x00 3132 #define TYPHOON_CRYPTO_DES 0x01 3133 #define TYPHOON_CRYPTO_3DES 0x02 3134 #define TYPHOON_CRYPTO_VARIABLE 0x04 3135 #define TYPHOON_FIBER 0x08 3136 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10 3137 #define __3xp_aligned ____cacheline_aligned 3138 #define typhoon_post_pci_writes(x) \ 3139 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) 3140 #define TYPHOON_UDELAY 50 3141 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ) 3142 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY) 3143 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY) 3144 #if defined(NETIF_F_TSO) 3145 #define skb_tso_size(x) (skb_shinfo(x)->gso_size) 3146 #define TSO_NUM_DESCRIPTORS 2 3147 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT 3148 #else 3149 #define NETIF_F_TSO 0 3150 #define skb_tso_size(x) 0 3151 #define TSO_NUM_DESCRIPTORS 0 3152 #define TSO_OFFLOAD_ON 0 3153 #endif 3154 #define shared_offset(x) offsetof(struct typhoon_shared, x) 3155 #if 0 3156 #endif 3157 /* LDV_COMMENT_END_PREP */ 3158 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "typhoon_close" */ 3159 static int res_typhoon_close_48; 3160 /* LDV_COMMENT_BEGIN_PREP */ 3161 #ifdef CONFIG_PM 3162 #endif 3163 #ifdef CONFIG_PM 3164 #endif 3165 /* LDV_COMMENT_END_PREP */ 3166 /* content: static netdev_tx_t typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)*/ 3167 /* LDV_COMMENT_BEGIN_PREP */ 3168 #define TXHI_ENTRIES 2 3169 #define TXLO_ENTRIES 128 3170 #define RX_ENTRIES 32 3171 #define COMMAND_ENTRIES 16 3172 #define RESPONSE_ENTRIES 32 3173 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) 3174 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) 3175 #define RXFREE_ENTRIES 128 3176 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1) 3177 #define TX_TIMEOUT (2*HZ) 3178 #define PKT_BUF_SZ 1536 3179 #define FIRMWARE_NAME "3com/typhoon.bin" 3180 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt 3181 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 3182 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO 3183 #undef NETIF_F_TSO 3184 #endif 3185 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) 3186 #error TX ring too small! 
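/* Annotation sketch: the timeout macros above encode the same 6-second
   budget two ways. TYPHOON_RESET_TIMEOUT_SLEEP is 6*HZ jiffies for callers
   that may sleep; TYPHOON_RESET_TIMEOUT_NOSLEEP is (6 * 1000000) / 50 =
   120000 iterations of udelay(TYPHOON_UDELAY) for atomic context; and
   TYPHOON_WAIT_TIMEOUT is the same construction for a half-second budget
   (10000 polls of 50 us). A hypothetical busy-wait poll built from them
   (TYPHOON_REG_STATUS is an assumed register name):

        static int example_wait_ready(void __iomem *ioaddr, u32 wanted)
        {
                int i;

                for (i = 0; i < TYPHOON_RESET_TIMEOUT_NOSLEEP; i++) {
                        if (ioread32(ioaddr + TYPHOON_REG_STATUS) == wanted)
                                return 0;       // device reached the state
                        udelay(TYPHOON_UDELAY); // 50 us per poll, ~6 s total
                }
                return -ETIMEDOUT;
        }
*/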
3187 #endif 3188 #define TYPHOON_CRYPTO_NONE 0x00 3189 #define TYPHOON_CRYPTO_DES 0x01 3190 #define TYPHOON_CRYPTO_3DES 0x02 3191 #define TYPHOON_CRYPTO_VARIABLE 0x04 3192 #define TYPHOON_FIBER 0x08 3193 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10 3194 #define __3xp_aligned ____cacheline_aligned 3195 #define typhoon_post_pci_writes(x) \ 3196 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) 3197 #define TYPHOON_UDELAY 50 3198 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ) 3199 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY) 3200 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY) 3201 #if defined(NETIF_F_TSO) 3202 #define skb_tso_size(x) (skb_shinfo(x)->gso_size) 3203 #define TSO_NUM_DESCRIPTORS 2 3204 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT 3205 #else 3206 #define NETIF_F_TSO 0 3207 #define skb_tso_size(x) 0 3208 #define TSO_NUM_DESCRIPTORS 0 3209 #define TSO_OFFLOAD_ON 0 3210 #endif 3211 /* LDV_COMMENT_END_PREP */ 3212 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "typhoon_start_tx" */ 3213 struct sk_buff * var_group6; 3214 /* LDV_COMMENT_BEGIN_PREP */ 3215 #define shared_offset(x) offsetof(struct typhoon_shared, x) 3216 #if 0 3217 #endif 3218 #ifdef CONFIG_PM 3219 #endif 3220 #ifdef CONFIG_PM 3221 #endif 3222 /* LDV_COMMENT_END_PREP */ 3223 /* content: static void typhoon_set_rx_mode(struct net_device *dev)*/ 3224 /* LDV_COMMENT_BEGIN_PREP */ 3225 #define TXHI_ENTRIES 2 3226 #define TXLO_ENTRIES 128 3227 #define RX_ENTRIES 32 3228 #define COMMAND_ENTRIES 16 3229 #define RESPONSE_ENTRIES 32 3230 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) 3231 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) 3232 #define RXFREE_ENTRIES 128 3233 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1) 3234 #define TX_TIMEOUT (2*HZ) 3235 #define PKT_BUF_SZ 1536 3236 #define FIRMWARE_NAME "3com/typhoon.bin" 3237 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt 3238 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 3239 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO 3240 #undef NETIF_F_TSO 3241 #endif 3242 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) 3243 #error TX ring too small! 
3244 #endif 3245 #define TYPHOON_CRYPTO_NONE 0x00 3246 #define TYPHOON_CRYPTO_DES 0x01 3247 #define TYPHOON_CRYPTO_3DES 0x02 3248 #define TYPHOON_CRYPTO_VARIABLE 0x04 3249 #define TYPHOON_FIBER 0x08 3250 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10 3251 #define __3xp_aligned ____cacheline_aligned 3252 #define typhoon_post_pci_writes(x) \ 3253 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) 3254 #define TYPHOON_UDELAY 50 3255 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ) 3256 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY) 3257 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY) 3258 #if defined(NETIF_F_TSO) 3259 #define skb_tso_size(x) (skb_shinfo(x)->gso_size) 3260 #define TSO_NUM_DESCRIPTORS 2 3261 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT 3262 #else 3263 #define NETIF_F_TSO 0 3264 #define skb_tso_size(x) 0 3265 #define TSO_NUM_DESCRIPTORS 0 3266 #define TSO_OFFLOAD_ON 0 3267 #endif 3268 /* LDV_COMMENT_END_PREP */ 3269 /* LDV_COMMENT_BEGIN_PREP */ 3270 #define shared_offset(x) offsetof(struct typhoon_shared, x) 3271 #if 0 3272 #endif 3273 #ifdef CONFIG_PM 3274 #endif 3275 #ifdef CONFIG_PM 3276 #endif 3277 /* LDV_COMMENT_END_PREP */ 3278 /* content: static void typhoon_tx_timeout(struct net_device *dev)*/ 3279 /* LDV_COMMENT_BEGIN_PREP */ 3280 #define TXHI_ENTRIES 2 3281 #define TXLO_ENTRIES 128 3282 #define RX_ENTRIES 32 3283 #define COMMAND_ENTRIES 16 3284 #define RESPONSE_ENTRIES 32 3285 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) 3286 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) 3287 #define RXFREE_ENTRIES 128 3288 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1) 3289 #define TX_TIMEOUT (2*HZ) 3290 #define PKT_BUF_SZ 1536 3291 #define FIRMWARE_NAME "3com/typhoon.bin" 3292 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt 3293 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 3294 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO 3295 #undef NETIF_F_TSO 3296 #endif 3297 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) 3298 #error TX ring too small! 
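/* Annotation sketch: the #if defined(NETIF_F_TSO) block repeated above lets
   the driver reference its TSO hooks unconditionally. When the kernel lacks
   TSO support (or the MAX_SKB_FRAGS check #undef'ed it), NETIF_F_TSO,
   skb_tso_size(), TSO_NUM_DESCRIPTORS and TSO_OFFLOAD_ON all become 0, so
   the TSO code paths fold away at compile time. For example, a descriptor
   count that stays correct either way (hypothetical helper):

        static int example_descriptors_needed(struct sk_buff *skb)
        {
                int count = skb_shinfo(skb)->nr_frags + 1;

                if (skb_tso_size(skb))  // constant 0 when TSO is compiled out
                        count += TSO_NUM_DESCRIPTORS;
                return count;
        }
*/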
3299 #endif 3300 #define TYPHOON_CRYPTO_NONE 0x00 3301 #define TYPHOON_CRYPTO_DES 0x01 3302 #define TYPHOON_CRYPTO_3DES 0x02 3303 #define TYPHOON_CRYPTO_VARIABLE 0x04 3304 #define TYPHOON_FIBER 0x08 3305 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10 3306 #define __3xp_aligned ____cacheline_aligned 3307 #define typhoon_post_pci_writes(x) \ 3308 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) 3309 #define TYPHOON_UDELAY 50 3310 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ) 3311 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY) 3312 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY) 3313 #if defined(NETIF_F_TSO) 3314 #define skb_tso_size(x) (skb_shinfo(x)->gso_size) 3315 #define TSO_NUM_DESCRIPTORS 2 3316 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT 3317 #else 3318 #define NETIF_F_TSO 0 3319 #define skb_tso_size(x) 0 3320 #define TSO_NUM_DESCRIPTORS 0 3321 #define TSO_OFFLOAD_ON 0 3322 #endif 3323 #define shared_offset(x) offsetof(struct typhoon_shared, x) 3324 #if 0 3325 #endif 3326 /* LDV_COMMENT_END_PREP */ 3327 /* LDV_COMMENT_BEGIN_PREP */ 3328 #ifdef CONFIG_PM 3329 #endif 3330 #ifdef CONFIG_PM 3331 #endif 3332 /* LDV_COMMENT_END_PREP */ 3333 /* content: static struct net_device_stats * typhoon_get_stats(struct net_device *dev)*/ 3334 /* LDV_COMMENT_BEGIN_PREP */ 3335 #define TXHI_ENTRIES 2 3336 #define TXLO_ENTRIES 128 3337 #define RX_ENTRIES 32 3338 #define COMMAND_ENTRIES 16 3339 #define RESPONSE_ENTRIES 32 3340 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) 3341 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) 3342 #define RXFREE_ENTRIES 128 3343 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1) 3344 #define TX_TIMEOUT (2*HZ) 3345 #define PKT_BUF_SZ 1536 3346 #define FIRMWARE_NAME "3com/typhoon.bin" 3347 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt 3348 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 3349 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO 3350 #undef NETIF_F_TSO 3351 #endif 3352 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) 3353 #error TX ring too small! 
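/* Annotation sketch: TX_TIMEOUT above (2*HZ, i.e. two seconds) is the kind
   of value a driver assigns to dev->watchdog_timeo; the networking core
   invokes the ndo_tx_timeout callback exercised by this harness when a
   stopped TX queue makes no progress for that long. Hypothetical wiring:

        static void example_setup_watchdog(struct net_device *dev)
        {
                // after this much TX inactivity the core calls
                // dev->netdev_ops->ndo_tx_timeout(dev)
                dev->watchdog_timeo = TX_TIMEOUT;
        }
*/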
3354 #endif 3355 #define TYPHOON_CRYPTO_NONE 0x00 3356 #define TYPHOON_CRYPTO_DES 0x01 3357 #define TYPHOON_CRYPTO_3DES 0x02 3358 #define TYPHOON_CRYPTO_VARIABLE 0x04 3359 #define TYPHOON_FIBER 0x08 3360 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10 3361 #define __3xp_aligned ____cacheline_aligned 3362 #define typhoon_post_pci_writes(x) \ 3363 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) 3364 #define TYPHOON_UDELAY 50 3365 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ) 3366 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY) 3367 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY) 3368 #if defined(NETIF_F_TSO) 3369 #define skb_tso_size(x) (skb_shinfo(x)->gso_size) 3370 #define TSO_NUM_DESCRIPTORS 2 3371 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT 3372 #else 3373 #define NETIF_F_TSO 0 3374 #define skb_tso_size(x) 0 3375 #define TSO_NUM_DESCRIPTORS 0 3376 #define TSO_OFFLOAD_ON 0 3377 #endif 3378 /* LDV_COMMENT_END_PREP */ 3379 /* LDV_COMMENT_BEGIN_PREP */ 3380 #define shared_offset(x) offsetof(struct typhoon_shared, x) 3381 #if 0 3382 #endif 3383 #ifdef CONFIG_PM 3384 #endif 3385 #ifdef CONFIG_PM 3386 #endif 3387 /* LDV_COMMENT_END_PREP */ 3388 3389 /** STRUCT: struct type: pci_driver, struct name: typhoon_driver **/ 3390 /* content: static int typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)*/ 3391 /* LDV_COMMENT_BEGIN_PREP */ 3392 #define TXHI_ENTRIES 2 3393 #define TXLO_ENTRIES 128 3394 #define RX_ENTRIES 32 3395 #define COMMAND_ENTRIES 16 3396 #define RESPONSE_ENTRIES 32 3397 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) 3398 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) 3399 #define RXFREE_ENTRIES 128 3400 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1) 3401 #define TX_TIMEOUT (2*HZ) 3402 #define PKT_BUF_SZ 1536 3403 #define FIRMWARE_NAME "3com/typhoon.bin" 3404 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt 3405 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 3406 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO 3407 #undef NETIF_F_TSO 3408 #endif 3409 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) 3410 #error TX ring too small! 
3411 #endif 3412 #define TYPHOON_CRYPTO_NONE 0x00 3413 #define TYPHOON_CRYPTO_DES 0x01 3414 #define TYPHOON_CRYPTO_3DES 0x02 3415 #define TYPHOON_CRYPTO_VARIABLE 0x04 3416 #define TYPHOON_FIBER 0x08 3417 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10 3418 #define __3xp_aligned ____cacheline_aligned 3419 #define typhoon_post_pci_writes(x) \ 3420 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) 3421 #define TYPHOON_UDELAY 50 3422 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ) 3423 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY) 3424 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY) 3425 #if defined(NETIF_F_TSO) 3426 #define skb_tso_size(x) (skb_shinfo(x)->gso_size) 3427 #define TSO_NUM_DESCRIPTORS 2 3428 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT 3429 #else 3430 #define NETIF_F_TSO 0 3431 #define skb_tso_size(x) 0 3432 #define TSO_NUM_DESCRIPTORS 0 3433 #define TSO_OFFLOAD_ON 0 3434 #endif 3435 #define shared_offset(x) offsetof(struct typhoon_shared, x) 3436 #if 0 3437 #endif 3438 #ifdef CONFIG_PM 3439 #endif 3440 /* LDV_COMMENT_END_PREP */ 3441 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "typhoon_init_one" */ 3442 struct pci_dev * var_group7; 3443 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "typhoon_init_one" */ 3444 const struct pci_device_id * var_typhoon_init_one_52_p1; 3445 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "typhoon_init_one" */ 3446 static int res_typhoon_init_one_52; 3447 /* LDV_COMMENT_BEGIN_PREP */ 3448 #ifdef CONFIG_PM 3449 #endif 3450 /* LDV_COMMENT_END_PREP */ 3451 /* content: static void typhoon_remove_one(struct pci_dev *pdev)*/ 3452 /* LDV_COMMENT_BEGIN_PREP */ 3453 #define TXHI_ENTRIES 2 3454 #define TXLO_ENTRIES 128 3455 #define RX_ENTRIES 32 3456 #define COMMAND_ENTRIES 16 3457 #define RESPONSE_ENTRIES 32 3458 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) 3459 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) 3460 #define RXFREE_ENTRIES 128 3461 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1) 3462 #define TX_TIMEOUT (2*HZ) 3463 #define PKT_BUF_SZ 1536 3464 #define FIRMWARE_NAME "3com/typhoon.bin" 3465 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt 3466 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 3467 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO 3468 #undef NETIF_F_TSO 3469 #endif 3470 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) 3471 #error TX ring too small! 
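/* Annotation note: the harness declares one shared variable per distinct
   parameter type (var_group7 stands in for every struct pci_dev * argument,
   var_typhoon_init_one_52_p1 for the matched pci_device_id) plus one res_*
   variable per return value it must consult later. res_typhoon_init_one_52
   is what typically lets the scenario call typhoon_remove_one() only after
   a probe that returned 0, mirroring the PCI core's probe/remove contract.
   Schematically:

        // if (res_typhoon_init_one_52 == 0)         // probe succeeded,
        //         typhoon_remove_one(var_group7);   // so remove is legal
*/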
3472 #endif 3473 #define TYPHOON_CRYPTO_NONE 0x00 3474 #define TYPHOON_CRYPTO_DES 0x01 3475 #define TYPHOON_CRYPTO_3DES 0x02 3476 #define TYPHOON_CRYPTO_VARIABLE 0x04 3477 #define TYPHOON_FIBER 0x08 3478 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10 3479 #define __3xp_aligned ____cacheline_aligned 3480 #define typhoon_post_pci_writes(x) \ 3481 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) 3482 #define TYPHOON_UDELAY 50 3483 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ) 3484 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY) 3485 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY) 3486 #if defined(NETIF_F_TSO) 3487 #define skb_tso_size(x) (skb_shinfo(x)->gso_size) 3488 #define TSO_NUM_DESCRIPTORS 2 3489 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT 3490 #else 3491 #define NETIF_F_TSO 0 3492 #define skb_tso_size(x) 0 3493 #define TSO_NUM_DESCRIPTORS 0 3494 #define TSO_OFFLOAD_ON 0 3495 #endif 3496 #define shared_offset(x) offsetof(struct typhoon_shared, x) 3497 #if 0 3498 #endif 3499 #ifdef CONFIG_PM 3500 #endif 3501 /* LDV_COMMENT_END_PREP */ 3502 /* LDV_COMMENT_BEGIN_PREP */ 3503 #ifdef CONFIG_PM 3504 #endif 3505 /* LDV_COMMENT_END_PREP */ 3506 /* content: static int typhoon_suspend(struct pci_dev *pdev, pm_message_t state)*/ 3507 /* LDV_COMMENT_BEGIN_PREP */ 3508 #define TXHI_ENTRIES 2 3509 #define TXLO_ENTRIES 128 3510 #define RX_ENTRIES 32 3511 #define COMMAND_ENTRIES 16 3512 #define RESPONSE_ENTRIES 32 3513 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) 3514 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) 3515 #define RXFREE_ENTRIES 128 3516 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1) 3517 #define TX_TIMEOUT (2*HZ) 3518 #define PKT_BUF_SZ 1536 3519 #define FIRMWARE_NAME "3com/typhoon.bin" 3520 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt 3521 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 3522 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO 3523 #undef NETIF_F_TSO 3524 #endif 3525 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) 3526 #error TX ring too small! 
3527 #endif 3528 #define TYPHOON_CRYPTO_NONE 0x00 3529 #define TYPHOON_CRYPTO_DES 0x01 3530 #define TYPHOON_CRYPTO_3DES 0x02 3531 #define TYPHOON_CRYPTO_VARIABLE 0x04 3532 #define TYPHOON_FIBER 0x08 3533 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10 3534 #define __3xp_aligned ____cacheline_aligned 3535 #define typhoon_post_pci_writes(x) \ 3536 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) 3537 #define TYPHOON_UDELAY 50 3538 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ) 3539 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY) 3540 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY) 3541 #if defined(NETIF_F_TSO) 3542 #define skb_tso_size(x) (skb_shinfo(x)->gso_size) 3543 #define TSO_NUM_DESCRIPTORS 2 3544 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT 3545 #else 3546 #define NETIF_F_TSO 0 3547 #define skb_tso_size(x) 0 3548 #define TSO_NUM_DESCRIPTORS 0 3549 #define TSO_OFFLOAD_ON 0 3550 #endif 3551 #define shared_offset(x) offsetof(struct typhoon_shared, x) 3552 #if 0 3553 #endif 3554 #ifdef CONFIG_PM 3555 /* LDV_COMMENT_END_PREP */ 3556 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "typhoon_suspend" */ 3557 pm_message_t var_typhoon_suspend_50_p1; 3558 /* LDV_COMMENT_BEGIN_PREP */ 3559 #endif 3560 #ifdef CONFIG_PM 3561 #endif 3562 /* LDV_COMMENT_END_PREP */ 3563 /* content: static int typhoon_resume(struct pci_dev *pdev)*/ 3564 /* LDV_COMMENT_BEGIN_PREP */ 3565 #define TXHI_ENTRIES 2 3566 #define TXLO_ENTRIES 128 3567 #define RX_ENTRIES 32 3568 #define COMMAND_ENTRIES 16 3569 #define RESPONSE_ENTRIES 32 3570 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) 3571 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) 3572 #define RXFREE_ENTRIES 128 3573 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1) 3574 #define TX_TIMEOUT (2*HZ) 3575 #define PKT_BUF_SZ 1536 3576 #define FIRMWARE_NAME "3com/typhoon.bin" 3577 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt 3578 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 3579 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO 3580 #undef NETIF_F_TSO 3581 #endif 3582 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) 3583 #error TX ring too small! 
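/* Annotation note: typhoon_suspend()/typhoon_resume() exist only when
   CONFIG_PM is set, so the declaration of the pm_message_t argument above
   sits inside the same #ifdef CONFIG_PM as the calls that use it; with
   CONFIG_PM unset, neither the variable nor those invocations are compiled.
   A sketch of the driver-side shape being modeled (the body is illustrative
   boilerplate, not taken from the driver):

        #ifdef CONFIG_PM
        static int example_suspend(struct pci_dev *pdev, pm_message_t state)
        {
                pci_save_state(pdev);
                pci_set_power_state(pdev, pci_choose_state(pdev, state));
                return 0;
        }
        #endif
*/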
3584 #endif 3585 #define TYPHOON_CRYPTO_NONE 0x00 3586 #define TYPHOON_CRYPTO_DES 0x01 3587 #define TYPHOON_CRYPTO_3DES 0x02 3588 #define TYPHOON_CRYPTO_VARIABLE 0x04 3589 #define TYPHOON_FIBER 0x08 3590 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10 3591 #define __3xp_aligned ____cacheline_aligned 3592 #define typhoon_post_pci_writes(x) \ 3593 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) 3594 #define TYPHOON_UDELAY 50 3595 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ) 3596 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY) 3597 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY) 3598 #if defined(NETIF_F_TSO) 3599 #define skb_tso_size(x) (skb_shinfo(x)->gso_size) 3600 #define TSO_NUM_DESCRIPTORS 2 3601 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT 3602 #else 3603 #define NETIF_F_TSO 0 3604 #define skb_tso_size(x) 0 3605 #define TSO_NUM_DESCRIPTORS 0 3606 #define TSO_OFFLOAD_ON 0 3607 #endif 3608 #define shared_offset(x) offsetof(struct typhoon_shared, x) 3609 #if 0 3610 #endif 3611 #ifdef CONFIG_PM 3612 /* LDV_COMMENT_END_PREP */ 3613 /* LDV_COMMENT_BEGIN_PREP */ 3614 #endif 3615 #ifdef CONFIG_PM 3616 #endif 3617 /* LDV_COMMENT_END_PREP */ 3618 3619 /** CALLBACK SECTION request_irq **/ 3620 /* content: static irqreturn_t typhoon_interrupt(int irq, void *dev_instance)*/ 3621 /* LDV_COMMENT_BEGIN_PREP */ 3622 #define TXHI_ENTRIES 2 3623 #define TXLO_ENTRIES 128 3624 #define RX_ENTRIES 32 3625 #define COMMAND_ENTRIES 16 3626 #define RESPONSE_ENTRIES 32 3627 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) 3628 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) 3629 #define RXFREE_ENTRIES 128 3630 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1) 3631 #define TX_TIMEOUT (2*HZ) 3632 #define PKT_BUF_SZ 1536 3633 #define FIRMWARE_NAME "3com/typhoon.bin" 3634 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt 3635 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 3636 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO 3637 #undef NETIF_F_TSO 3638 #endif 3639 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) 3640 #error TX ring too small! 
3641 #endif 3642 #define TYPHOON_CRYPTO_NONE 0x00 3643 #define TYPHOON_CRYPTO_DES 0x01 3644 #define TYPHOON_CRYPTO_3DES 0x02 3645 #define TYPHOON_CRYPTO_VARIABLE 0x04 3646 #define TYPHOON_FIBER 0x08 3647 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10 3648 #define __3xp_aligned ____cacheline_aligned 3649 #define typhoon_post_pci_writes(x) \ 3650 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) 3651 #define TYPHOON_UDELAY 50 3652 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ) 3653 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY) 3654 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY) 3655 #if defined(NETIF_F_TSO) 3656 #define skb_tso_size(x) (skb_shinfo(x)->gso_size) 3657 #define TSO_NUM_DESCRIPTORS 2 3658 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT 3659 #else 3660 #define NETIF_F_TSO 0 3661 #define skb_tso_size(x) 0 3662 #define TSO_NUM_DESCRIPTORS 0 3663 #define TSO_OFFLOAD_ON 0 3664 #endif 3665 #define shared_offset(x) offsetof(struct typhoon_shared, x) 3666 #if 0 3667 #endif 3668 /* LDV_COMMENT_END_PREP */ 3669 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "typhoon_interrupt" */ 3670 int var_typhoon_interrupt_40_p0; 3671 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "typhoon_interrupt" */ 3672 void * var_typhoon_interrupt_40_p1; 3673 /* LDV_COMMENT_BEGIN_PREP */ 3674 #ifdef CONFIG_PM 3675 #endif 3676 #ifdef CONFIG_PM 3677 #endif 3678 /* LDV_COMMENT_END_PREP */ 3679 3680 3681 3682 3683 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */ 3684 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */ 3685 /*============================= VARIABLE INITIALIZING PART =============================*/ 3686 LDV_IN_INTERRUPT=1; 3687 3688 3689 3690 3691 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */ 3692 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */ 3693 /*============================= FUNCTION CALL SECTION =============================*/ 3694 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */ 3695 ldv_initialize(); 3696 3697 /** INIT: init_type: ST_MODULE_INIT **/ 3698 /* content: static int __init typhoon_init(void)*/ 3699 /* LDV_COMMENT_BEGIN_PREP */ 3700 #define TXHI_ENTRIES 2 3701 #define TXLO_ENTRIES 128 3702 #define RX_ENTRIES 32 3703 #define COMMAND_ENTRIES 16 3704 #define RESPONSE_ENTRIES 32 3705 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) 3706 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) 3707 #define RXFREE_ENTRIES 128 3708 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1) 3709 #define TX_TIMEOUT (2*HZ) 3710 #define PKT_BUF_SZ 1536 3711 #define FIRMWARE_NAME "3com/typhoon.bin" 3712 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt 3713 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 3714 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO 3715 #undef NETIF_F_TSO 3716 #endif 3717 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) 3718 #error TX ring too small! 
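/* Annotation note: the initialization sequence below mirrors module
   loading. LDV_IN_INTERRUPT=1 above primes the model's context flag for the
   request_irq scenario; ldv_initialize() resets the model state;
   ldv_handler_precall() marks the next call as entry into driver code; and
   a nonzero return from typhoon_init() jumps straight to ldv_final, because
   the kernel never invokes any other callback of a module whose
   module_init() failed. */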
3719 #endif 3720 #define TYPHOON_CRYPTO_NONE 0x00 3721 #define TYPHOON_CRYPTO_DES 0x01 3722 #define TYPHOON_CRYPTO_3DES 0x02 3723 #define TYPHOON_CRYPTO_VARIABLE 0x04 3724 #define TYPHOON_FIBER 0x08 3725 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10 3726 #define __3xp_aligned ____cacheline_aligned 3727 #define typhoon_post_pci_writes(x) \ 3728 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) 3729 #define TYPHOON_UDELAY 50 3730 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ) 3731 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY) 3732 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY) 3733 #if defined(NETIF_F_TSO) 3734 #define skb_tso_size(x) (skb_shinfo(x)->gso_size) 3735 #define TSO_NUM_DESCRIPTORS 2 3736 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT 3737 #else 3738 #define NETIF_F_TSO 0 3739 #define skb_tso_size(x) 0 3740 #define TSO_NUM_DESCRIPTORS 0 3741 #define TSO_OFFLOAD_ON 0 3742 #endif 3743 #define shared_offset(x) offsetof(struct typhoon_shared, x) 3744 #if 0 3745 #endif 3746 #ifdef CONFIG_PM 3747 #endif 3748 #ifdef CONFIG_PM 3749 #endif 3750 /* LDV_COMMENT_END_PREP */ 3751 /* LDV_COMMENT_FUNCTION_CALL Kernel calls driver init function after driver loading to kernel. This function declared as "MODULE_INIT(function name)". */ 3752 ldv_handler_precall(); 3753 if(typhoon_init()) 3754 goto ldv_final; 3755 3756 3757 int ldv_s_typhoon_netdev_ops_net_device_ops = 0; 3758 3759 3760 int ldv_s_typhoon_driver_pci_driver = 0; 3761 3762 3763 3764 3765 3766 while( nondet_int() 3767 || !(ldv_s_typhoon_netdev_ops_net_device_ops == 0) 3768 || !(ldv_s_typhoon_driver_pci_driver == 0) 3769 ) { 3770 3771 switch(nondet_int()) { 3772 3773 case 0: { 3774 3775 /** STRUCT: struct type: ethtool_ops, struct name: typhoon_ethtool_ops **/ 3776 3777 3778 /* content: static int typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)*/ 3779 /* LDV_COMMENT_BEGIN_PREP */ 3780 #define TXHI_ENTRIES 2 3781 #define TXLO_ENTRIES 128 3782 #define RX_ENTRIES 32 3783 #define COMMAND_ENTRIES 16 3784 #define RESPONSE_ENTRIES 32 3785 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) 3786 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) 3787 #define RXFREE_ENTRIES 128 3788 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1) 3789 #define TX_TIMEOUT (2*HZ) 3790 #define PKT_BUF_SZ 1536 3791 #define FIRMWARE_NAME "3com/typhoon.bin" 3792 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt 3793 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 3794 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO 3795 #undef NETIF_F_TSO 3796 #endif 3797 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) 3798 #error TX ring too small! 
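/* Annotation sketch: the while/switch above is LDV's nondeterministic
   environment model. Each iteration nondet_int() picks one registered
   callback; the ldv_s_* counters declared above force paired callbacks into
   a legal order (ndo_open before ndo_stop, probe before remove), and the
   loop may terminate only once every counter is back to zero. The pattern,
   reduced to a single pair of callbacks (stub names are illustrative):

        static int nondet_int(void);      // unconstrained in the model
        static int example_open(void);    // illustrative callback stubs
        static void example_close(void);

        static void example_environment_model(void)
        {
                int opened = 0;           // mirrors an ldv_s_* counter

                while (nondet_int() || opened != 0) {
                        switch (nondet_int()) {
                        case 0:           // ndo_open
                                if (opened == 0 && example_open() == 0)
                                        opened = 1;
                                break;
                        case 1:           // ndo_stop, legal only after open
                                if (opened == 1) {
                                        example_close();
                                        opened = 0;
                                }
                                break;
                        default:
                                break;
                        }
                }
        }
*/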
3799 #endif 3800 #define TYPHOON_CRYPTO_NONE 0x00 3801 #define TYPHOON_CRYPTO_DES 0x01 3802 #define TYPHOON_CRYPTO_3DES 0x02 3803 #define TYPHOON_CRYPTO_VARIABLE 0x04 3804 #define TYPHOON_FIBER 0x08 3805 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10 3806 #define __3xp_aligned ____cacheline_aligned 3807 #define typhoon_post_pci_writes(x) \ 3808 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) 3809 #define TYPHOON_UDELAY 50 3810 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ) 3811 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY) 3812 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY) 3813 #if defined(NETIF_F_TSO) 3814 #define skb_tso_size(x) (skb_shinfo(x)->gso_size) 3815 #define TSO_NUM_DESCRIPTORS 2 3816 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT 3817 #else 3818 #define NETIF_F_TSO 0 3819 #define skb_tso_size(x) 0 3820 #define TSO_NUM_DESCRIPTORS 0 3821 #define TSO_OFFLOAD_ON 0 3822 #endif 3823 /* LDV_COMMENT_END_PREP */ 3824 /* LDV_COMMENT_FUNCTION_CALL Function from field "get_settings" from driver structure with callbacks "typhoon_ethtool_ops" */ 3825 ldv_handler_precall(); 3826 typhoon_get_settings( var_group1, var_group2); 3827 /* LDV_COMMENT_BEGIN_PREP */ 3828 #define shared_offset(x) offsetof(struct typhoon_shared, x) 3829 #if 0 3830 #endif 3831 #ifdef CONFIG_PM 3832 #endif 3833 #ifdef CONFIG_PM 3834 #endif 3835 /* LDV_COMMENT_END_PREP */ 3836 3837 3838 3839 3840 } 3841 3842 break; 3843 case 1: { 3844 3845 /** STRUCT: struct type: ethtool_ops, struct name: typhoon_ethtool_ops **/ 3846 3847 3848 /* content: static int typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)*/ 3849 /* LDV_COMMENT_BEGIN_PREP */ 3850 #define TXHI_ENTRIES 2 3851 #define TXLO_ENTRIES 128 3852 #define RX_ENTRIES 32 3853 #define COMMAND_ENTRIES 16 3854 #define RESPONSE_ENTRIES 32 3855 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) 3856 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) 3857 #define RXFREE_ENTRIES 128 3858 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1) 3859 #define TX_TIMEOUT (2*HZ) 3860 #define PKT_BUF_SZ 1536 3861 #define FIRMWARE_NAME "3com/typhoon.bin" 3862 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt 3863 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 3864 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO 3865 #undef NETIF_F_TSO 3866 #endif 3867 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) 3868 #error TX ring too small! 
3869 #endif 3870 #define TYPHOON_CRYPTO_NONE 0x00 3871 #define TYPHOON_CRYPTO_DES 0x01 3872 #define TYPHOON_CRYPTO_3DES 0x02 3873 #define TYPHOON_CRYPTO_VARIABLE 0x04 3874 #define TYPHOON_FIBER 0x08 3875 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10 3876 #define __3xp_aligned ____cacheline_aligned 3877 #define typhoon_post_pci_writes(x) \ 3878 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) 3879 #define TYPHOON_UDELAY 50 3880 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ) 3881 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY) 3882 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY) 3883 #if defined(NETIF_F_TSO) 3884 #define skb_tso_size(x) (skb_shinfo(x)->gso_size) 3885 #define TSO_NUM_DESCRIPTORS 2 3886 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT 3887 #else 3888 #define NETIF_F_TSO 0 3889 #define skb_tso_size(x) 0 3890 #define TSO_NUM_DESCRIPTORS 0 3891 #define TSO_OFFLOAD_ON 0 3892 #endif 3893 /* LDV_COMMENT_END_PREP */ 3894 /* LDV_COMMENT_FUNCTION_CALL Function from field "set_settings" from driver structure with callbacks "typhoon_ethtool_ops" */ 3895 ldv_handler_precall(); 3896 typhoon_set_settings( var_group1, var_group2); 3897 /* LDV_COMMENT_BEGIN_PREP */ 3898 #define shared_offset(x) offsetof(struct typhoon_shared, x) 3899 #if 0 3900 #endif 3901 #ifdef CONFIG_PM 3902 #endif 3903 #ifdef CONFIG_PM 3904 #endif 3905 /* LDV_COMMENT_END_PREP */ 3906 3907 3908 3909 3910 } 3911 3912 break; 3913 case 2: { 3914 3915 /** STRUCT: struct type: ethtool_ops, struct name: typhoon_ethtool_ops **/ 3916 3917 3918 /* content: static void typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)*/ 3919 /* LDV_COMMENT_BEGIN_PREP */ 3920 #define TXHI_ENTRIES 2 3921 #define TXLO_ENTRIES 128 3922 #define RX_ENTRIES 32 3923 #define COMMAND_ENTRIES 16 3924 #define RESPONSE_ENTRIES 32 3925 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) 3926 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) 3927 #define RXFREE_ENTRIES 128 3928 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1) 3929 #define TX_TIMEOUT (2*HZ) 3930 #define PKT_BUF_SZ 1536 3931 #define FIRMWARE_NAME "3com/typhoon.bin" 3932 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt 3933 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 3934 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO 3935 #undef NETIF_F_TSO 3936 #endif 3937 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) 3938 #error TX ring too small! 
3939 #endif 3940 #define TYPHOON_CRYPTO_NONE 0x00 3941 #define TYPHOON_CRYPTO_DES 0x01 3942 #define TYPHOON_CRYPTO_3DES 0x02 3943 #define TYPHOON_CRYPTO_VARIABLE 0x04 3944 #define TYPHOON_FIBER 0x08 3945 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10 3946 #define __3xp_aligned ____cacheline_aligned 3947 #define typhoon_post_pci_writes(x) \ 3948 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) 3949 #define TYPHOON_UDELAY 50 3950 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ) 3951 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY) 3952 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY) 3953 #if defined(NETIF_F_TSO) 3954 #define skb_tso_size(x) (skb_shinfo(x)->gso_size) 3955 #define TSO_NUM_DESCRIPTORS 2 3956 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT 3957 #else 3958 #define NETIF_F_TSO 0 3959 #define skb_tso_size(x) 0 3960 #define TSO_NUM_DESCRIPTORS 0 3961 #define TSO_OFFLOAD_ON 0 3962 #endif 3963 /* LDV_COMMENT_END_PREP */ 3964 /* LDV_COMMENT_FUNCTION_CALL Function from field "get_drvinfo" from driver structure with callbacks "typhoon_ethtool_ops" */ 3965 ldv_handler_precall(); 3966 typhoon_get_drvinfo( var_group1, var_group3); 3967 /* LDV_COMMENT_BEGIN_PREP */ 3968 #define shared_offset(x) offsetof(struct typhoon_shared, x) 3969 #if 0 3970 #endif 3971 #ifdef CONFIG_PM 3972 #endif 3973 #ifdef CONFIG_PM 3974 #endif 3975 /* LDV_COMMENT_END_PREP */ 3976 3977 3978 3979 3980 } 3981 3982 break; 3983 case 3: { 3984 3985 /** STRUCT: struct type: ethtool_ops, struct name: typhoon_ethtool_ops **/ 3986 3987 3988 /* content: static void typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)*/ 3989 /* LDV_COMMENT_BEGIN_PREP */ 3990 #define TXHI_ENTRIES 2 3991 #define TXLO_ENTRIES 128 3992 #define RX_ENTRIES 32 3993 #define COMMAND_ENTRIES 16 3994 #define RESPONSE_ENTRIES 32 3995 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) 3996 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) 3997 #define RXFREE_ENTRIES 128 3998 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1) 3999 #define TX_TIMEOUT (2*HZ) 4000 #define PKT_BUF_SZ 1536 4001 #define FIRMWARE_NAME "3com/typhoon.bin" 4002 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt 4003 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 4004 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO 4005 #undef NETIF_F_TSO 4006 #endif 4007 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) 4008 #error TX ring too small! 
4009 #endif 4010 #define TYPHOON_CRYPTO_NONE 0x00 4011 #define TYPHOON_CRYPTO_DES 0x01 4012 #define TYPHOON_CRYPTO_3DES 0x02 4013 #define TYPHOON_CRYPTO_VARIABLE 0x04 4014 #define TYPHOON_FIBER 0x08 4015 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10 4016 #define __3xp_aligned ____cacheline_aligned 4017 #define typhoon_post_pci_writes(x) \ 4018 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) 4019 #define TYPHOON_UDELAY 50 4020 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ) 4021 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY) 4022 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY) 4023 #if defined(NETIF_F_TSO) 4024 #define skb_tso_size(x) (skb_shinfo(x)->gso_size) 4025 #define TSO_NUM_DESCRIPTORS 2 4026 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT 4027 #else 4028 #define NETIF_F_TSO 0 4029 #define skb_tso_size(x) 0 4030 #define TSO_NUM_DESCRIPTORS 0 4031 #define TSO_OFFLOAD_ON 0 4032 #endif 4033 /* LDV_COMMENT_END_PREP */ 4034 /* LDV_COMMENT_FUNCTION_CALL Function from field "get_wol" from driver structure with callbacks "typhoon_ethtool_ops" */ 4035 ldv_handler_precall(); 4036 typhoon_get_wol( var_group1, var_group4); 4037 /* LDV_COMMENT_BEGIN_PREP */ 4038 #define shared_offset(x) offsetof(struct typhoon_shared, x) 4039 #if 0 4040 #endif 4041 #ifdef CONFIG_PM 4042 #endif 4043 #ifdef CONFIG_PM 4044 #endif 4045 /* LDV_COMMENT_END_PREP */ 4046 4047 4048 4049 4050 } 4051 4052 break; 4053 case 4: { 4054 4055 /** STRUCT: struct type: ethtool_ops, struct name: typhoon_ethtool_ops **/ 4056 4057 4058 /* content: static int typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)*/ 4059 /* LDV_COMMENT_BEGIN_PREP */ 4060 #define TXHI_ENTRIES 2 4061 #define TXLO_ENTRIES 128 4062 #define RX_ENTRIES 32 4063 #define COMMAND_ENTRIES 16 4064 #define RESPONSE_ENTRIES 32 4065 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) 4066 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) 4067 #define RXFREE_ENTRIES 128 4068 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1) 4069 #define TX_TIMEOUT (2*HZ) 4070 #define PKT_BUF_SZ 1536 4071 #define FIRMWARE_NAME "3com/typhoon.bin" 4072 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt 4073 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 4074 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO 4075 #undef NETIF_F_TSO 4076 #endif 4077 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) 4078 #error TX ring too small! 
4079 #endif 4080 #define TYPHOON_CRYPTO_NONE 0x00 4081 #define TYPHOON_CRYPTO_DES 0x01 4082 #define TYPHOON_CRYPTO_3DES 0x02 4083 #define TYPHOON_CRYPTO_VARIABLE 0x04 4084 #define TYPHOON_FIBER 0x08 4085 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10 4086 #define __3xp_aligned ____cacheline_aligned 4087 #define typhoon_post_pci_writes(x) \ 4088 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) 4089 #define TYPHOON_UDELAY 50 4090 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ) 4091 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY) 4092 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY) 4093 #if defined(NETIF_F_TSO) 4094 #define skb_tso_size(x) (skb_shinfo(x)->gso_size) 4095 #define TSO_NUM_DESCRIPTORS 2 4096 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT 4097 #else 4098 #define NETIF_F_TSO 0 4099 #define skb_tso_size(x) 0 4100 #define TSO_NUM_DESCRIPTORS 0 4101 #define TSO_OFFLOAD_ON 0 4102 #endif 4103 /* LDV_COMMENT_END_PREP */ 4104 /* LDV_COMMENT_FUNCTION_CALL Function from field "set_wol" from driver structure with callbacks "typhoon_ethtool_ops" */ 4105 ldv_handler_precall(); 4106 typhoon_set_wol( var_group1, var_group4); 4107 /* LDV_COMMENT_BEGIN_PREP */ 4108 #define shared_offset(x) offsetof(struct typhoon_shared, x) 4109 #if 0 4110 #endif 4111 #ifdef CONFIG_PM 4112 #endif 4113 #ifdef CONFIG_PM 4114 #endif 4115 /* LDV_COMMENT_END_PREP */ 4116 4117 4118 4119 4120 } 4121 4122 break; 4123 case 5: { 4124 4125 /** STRUCT: struct type: ethtool_ops, struct name: typhoon_ethtool_ops **/ 4126 4127 4128 /* content: static void typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)*/ 4129 /* LDV_COMMENT_BEGIN_PREP */ 4130 #define TXHI_ENTRIES 2 4131 #define TXLO_ENTRIES 128 4132 #define RX_ENTRIES 32 4133 #define COMMAND_ENTRIES 16 4134 #define RESPONSE_ENTRIES 32 4135 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) 4136 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) 4137 #define RXFREE_ENTRIES 128 4138 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1) 4139 #define TX_TIMEOUT (2*HZ) 4140 #define PKT_BUF_SZ 1536 4141 #define FIRMWARE_NAME "3com/typhoon.bin" 4142 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt 4143 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 4144 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO 4145 #undef NETIF_F_TSO 4146 #endif 4147 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) 4148 #error TX ring too small! 
4149 #endif 4150 #define TYPHOON_CRYPTO_NONE 0x00 4151 #define TYPHOON_CRYPTO_DES 0x01 4152 #define TYPHOON_CRYPTO_3DES 0x02 4153 #define TYPHOON_CRYPTO_VARIABLE 0x04 4154 #define TYPHOON_FIBER 0x08 4155 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10 4156 #define __3xp_aligned ____cacheline_aligned 4157 #define typhoon_post_pci_writes(x) \ 4158 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) 4159 #define TYPHOON_UDELAY 50 4160 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ) 4161 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY) 4162 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY) 4163 #if defined(NETIF_F_TSO) 4164 #define skb_tso_size(x) (skb_shinfo(x)->gso_size) 4165 #define TSO_NUM_DESCRIPTORS 2 4166 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT 4167 #else 4168 #define NETIF_F_TSO 0 4169 #define skb_tso_size(x) 0 4170 #define TSO_NUM_DESCRIPTORS 0 4171 #define TSO_OFFLOAD_ON 0 4172 #endif 4173 /* LDV_COMMENT_END_PREP */ 4174 /* LDV_COMMENT_FUNCTION_CALL Function from field "get_ringparam" from driver structure with callbacks "typhoon_ethtool_ops" */ 4175 ldv_handler_precall(); 4176 typhoon_get_ringparam( var_group1, var_group5); 4177 /* LDV_COMMENT_BEGIN_PREP */ 4178 #define shared_offset(x) offsetof(struct typhoon_shared, x) 4179 #if 0 4180 #endif 4181 #ifdef CONFIG_PM 4182 #endif 4183 #ifdef CONFIG_PM 4184 #endif 4185 /* LDV_COMMENT_END_PREP */ 4186 4187 4188 4189 4190 } 4191 4192 break; 4193 case 6: { 4194 4195 /** STRUCT: struct type: net_device_ops, struct name: typhoon_netdev_ops **/ 4196 if(ldv_s_typhoon_netdev_ops_net_device_ops==0) { 4197 4198 /* content: static int typhoon_open(struct net_device *dev)*/ 4199 /* LDV_COMMENT_BEGIN_PREP */ 4200 #define TXHI_ENTRIES 2 4201 #define TXLO_ENTRIES 128 4202 #define RX_ENTRIES 32 4203 #define COMMAND_ENTRIES 16 4204 #define RESPONSE_ENTRIES 32 4205 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) 4206 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) 4207 #define RXFREE_ENTRIES 128 4208 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1) 4209 #define TX_TIMEOUT (2*HZ) 4210 #define PKT_BUF_SZ 1536 4211 #define FIRMWARE_NAME "3com/typhoon.bin" 4212 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt 4213 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 4214 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO 4215 #undef NETIF_F_TSO 4216 #endif 4217 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) 4218 #error TX ring too small! 4219 #endif 4220 #define TYPHOON_CRYPTO_NONE 0x00 4221 #define TYPHOON_CRYPTO_DES 0x01 4222 #define TYPHOON_CRYPTO_3DES 0x02 4223 #define TYPHOON_CRYPTO_VARIABLE 0x04 4224 #define TYPHOON_FIBER 0x08 4225 #define TYPHOON_WAKEUP_NEEDS_RESET 0