Error Trace

[Home]

Bug # 168

Show/hide error trace
Error trace
Function bodies
Blocks
  • Others...
    Function bodies without model function calls
    Initialization function calls
    Initialization function bodies
    Entry point
    Entry point body
    Function calls
    Skipped function calls
    Formal parameter names
    Declarations
    Assumes
    Assume conditions
    Returns
    Return values
    DEG initialization
    DEG function calls
    Model function calls
    Model function bodies
    Model asserts
    Model state changes
    Model function function calls
    Model function function bodies
    Model returns
    Model others
    Indentation
    Line numbers
    Expand signs
-__CPAchecker_initialize()
{
19 typedef signed char __s8;
20 typedef unsigned char __u8;
23 typedef unsigned short __u16;
25 typedef int __s32;
26 typedef unsigned int __u32;
29 typedef long long __s64;
30 typedef unsigned long long __u64;
15 typedef signed char s8;
16 typedef unsigned char u8;
18 typedef short s16;
19 typedef unsigned short u16;
21 typedef int s32;
22 typedef unsigned int u32;
24 typedef long long s64;
25 typedef unsigned long long u64;
14 typedef long __kernel_long_t;
15 typedef unsigned long __kernel_ulong_t;
27 typedef int __kernel_pid_t;
48 typedef unsigned int __kernel_uid32_t;
49 typedef unsigned int __kernel_gid32_t;
71 typedef __kernel_ulong_t __kernel_size_t;
72 typedef __kernel_long_t __kernel_ssize_t;
87 typedef long long __kernel_loff_t;
88 typedef __kernel_long_t __kernel_time_t;
89 typedef __kernel_long_t __kernel_clock_t;
90 typedef int __kernel_timer_t;
91 typedef int __kernel_clockid_t;
28 typedef __u16 __le16;
29 typedef __u16 __be16;
30 typedef __u32 __le32;
31 typedef __u32 __be32;
32 typedef __u64 __le64;
36 typedef __u32 __wsum;
280 struct kernel_symbol { unsigned long value; const char *name; } ;
34 struct module ;
12 typedef __u32 __kernel_dev_t;
15 typedef __kernel_dev_t dev_t;
18 typedef unsigned short umode_t;
21 typedef __kernel_pid_t pid_t;
26 typedef __kernel_clockid_t clockid_t;
29 typedef _Bool bool;
31 typedef __kernel_uid32_t uid_t;
32 typedef __kernel_gid32_t gid_t;
45 typedef __kernel_loff_t loff_t;
54 typedef __kernel_size_t size_t;
59 typedef __kernel_ssize_t ssize_t;
69 typedef __kernel_time_t time_t;
102 typedef __s32 int32_t;
106 typedef __u8 uint8_t;
108 typedef __u32 uint32_t;
111 typedef __u64 uint64_t;
133 typedef unsigned long sector_t;
134 typedef unsigned long blkcnt_t;
152 typedef u64 dma_addr_t;
157 typedef unsigned int gfp_t;
158 typedef unsigned int fmode_t;
161 typedef u64 phys_addr_t;
166 typedef phys_addr_t resource_size_t;
176 struct __anonstruct_atomic_t_6 { int counter; } ;
176 typedef struct __anonstruct_atomic_t_6 atomic_t;
181 struct __anonstruct_atomic64_t_7 { long counter; } ;
181 typedef struct __anonstruct_atomic64_t_7 atomic64_t;
182 struct list_head { struct list_head *next; struct list_head *prev; } ;
187 struct hlist_node ;
187 struct hlist_head { struct hlist_node *first; } ;
191 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ;
202 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ;
115 typedef void (*ctor_fn_t)();
83 struct ctl_table ;
58 struct device ;
64 struct net_device ;
465 struct file_operations ;
477 struct completion ;
478 struct pt_regs ;
546 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ;
114 struct timespec ;
115 struct compat_timespec ;
116 struct pollfd ;
117 struct __anonstruct_futex_27 { u32 *uaddr; u32 val; u32 flags; u32 bitset; u64 time; u32 *uaddr2; } ;
117 struct __anonstruct_nanosleep_28 { clockid_t clockid; struct timespec *rmtp; struct compat_timespec *compat_rmtp; u64 expires; } ;
117 struct __anonstruct_poll_29 { struct pollfd *ufds; int nfds; int has_timeout; unsigned long tv_sec; unsigned long tv_nsec; } ;
117 union __anonunion____missing_field_name_26 { struct __anonstruct_futex_27 futex; struct __anonstruct_nanosleep_28 nanosleep; struct __anonstruct_poll_29 poll; } ;
117 struct restart_block { long int (*fn)(struct restart_block *); union __anonunion____missing_field_name_26 __annonCompField4; } ;
50 struct task_struct ;
39 struct page ;
26 struct mm_struct ;
288 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ;
66 struct __anonstruct____missing_field_name_32 { unsigned int a; unsigned int b; } ;
66 struct __anonstruct____missing_field_name_33 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ;
66 union __anonunion____missing_field_name_31 { struct __anonstruct____missing_field_name_32 __annonCompField5; struct __anonstruct____missing_field_name_33 __annonCompField6; } ;
66 struct desc_struct { union __anonunion____missing_field_name_31 __annonCompField7; } ;
13 typedef unsigned long pteval_t;
14 typedef unsigned long pmdval_t;
16 typedef unsigned long pgdval_t;
17 typedef unsigned long pgprotval_t;
19 struct __anonstruct_pte_t_34 { pteval_t pte; } ;
19 typedef struct __anonstruct_pte_t_34 pte_t;
21 struct pgprot { pgprotval_t pgprot; } ;
256 typedef struct pgprot pgprot_t;
258 struct __anonstruct_pgd_t_35 { pgdval_t pgd; } ;
258 typedef struct __anonstruct_pgd_t_35 pgd_t;
297 struct __anonstruct_pmd_t_37 { pmdval_t pmd; } ;
297 typedef struct __anonstruct_pmd_t_37 pmd_t;
423 typedef struct page *pgtable_t;
434 struct file ;
445 struct seq_file ;
481 struct thread_struct ;
483 struct cpumask ;
20 struct qspinlock { atomic_t val; } ;
33 typedef struct qspinlock arch_spinlock_t;
34 struct qrwlock { atomic_t cnts; arch_spinlock_t wait_lock; } ;
14 typedef struct qrwlock arch_rwlock_t;
247 struct math_emu_info { long ___orig_eip; struct pt_regs *regs; } ;
83 struct static_key { atomic_t enabled; } ;
23 typedef atomic64_t atomic_long_t;
359 struct cpumask { unsigned long bits[128U]; } ;
15 typedef struct cpumask cpumask_t;
654 typedef struct cpumask *cpumask_var_t;
22 struct tracepoint_func { void *func; void *data; int prio; } ;
28 struct tracepoint { const char *name; struct static_key key; int (*regfunc)(); void (*unregfunc)(); struct tracepoint_func *funcs; } ;
233 struct fregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ;
26 struct __anonstruct____missing_field_name_61 { u64 rip; u64 rdp; } ;
26 struct __anonstruct____missing_field_name_62 { u32 fip; u32 fcs; u32 foo; u32 fos; } ;
26 union __anonunion____missing_field_name_60 { struct __anonstruct____missing_field_name_61 __annonCompField13; struct __anonstruct____missing_field_name_62 __annonCompField14; } ;
26 union __anonunion____missing_field_name_63 { u32 padding1[12U]; u32 sw_reserved[12U]; } ;
26 struct fxregs_state { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion____missing_field_name_60 __annonCompField15; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion____missing_field_name_63 __annonCompField16; } ;
66 struct swregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ;
227 struct xstate_header { u64 xfeatures; u64 xcomp_bv; u64 reserved[6U]; } ;
233 struct xregs_state { struct fxregs_state i387; struct xstate_header header; u8 extended_state_area[0U]; } ;
254 union fpregs_state { struct fregs_state fsave; struct fxregs_state fxsave; struct swregs_state soft; struct xregs_state xsave; u8 __padding[4096U]; } ;
271 struct fpu { unsigned int last_cpu; unsigned char fpstate_active; unsigned char fpregs_active; union fpregs_state state; } ;
180 struct seq_operations ;
386 struct perf_event ;
391 struct __anonstruct_mm_segment_t_75 { unsigned long seg; } ;
391 typedef struct __anonstruct_mm_segment_t_75 mm_segment_t;
392 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; u32 status; unsigned long fsbase; unsigned long gsbase; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; mm_segment_t addr_limit; unsigned char sig_on_uaccess_err; unsigned char uaccess_err; struct fpu fpu; } ;
48 struct thread_info { unsigned long flags; } ;
303 enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 } ;
16 typedef enum irqreturn irqreturn_t;
10 struct lockdep_map ;
55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ;
28 struct lockdep_subclass_key { char __one_byte; } ;
53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ;
59 struct lock_class { struct hlist_node hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ;
144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ;
207 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; unsigned int pin_count; } ;
34 struct vm_area_struct ;
15 struct llist_node ;
64 struct llist_node { struct llist_node *next; } ;
70 struct resource { resource_size_t start; resource_size_t end; const char *name; unsigned long flags; unsigned long desc; struct resource *parent; struct resource *sibling; struct resource *child; } ;
236 struct pci_dev ;
33 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
32 typedef struct raw_spinlock raw_spinlock_t;
33 struct __anonstruct____missing_field_name_141 { u8 __padding[24U]; struct lockdep_map dep_map; } ;
33 union __anonunion____missing_field_name_140 { struct raw_spinlock rlock; struct __anonstruct____missing_field_name_141 __annonCompField19; } ;
33 struct spinlock { union __anonunion____missing_field_name_140 __annonCompField20; } ;
76 typedef struct spinlock spinlock_t;
23 struct __anonstruct_rwlock_t_142 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
23 typedef struct __anonstruct_rwlock_t_142 rwlock_t;
416 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ;
52 typedef struct seqcount seqcount_t;
407 struct __anonstruct_seqlock_t_157 { struct seqcount seqcount; spinlock_t lock; } ;
407 typedef struct __anonstruct_seqlock_t_157 seqlock_t;
601 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ;
7 typedef __s64 time64_t;
28 typedef s64 ktime_t;
109 struct timer_list { struct hlist_node entry; unsigned long expires; void (*function)(unsigned long); unsigned long data; u32 flags; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ;
254 struct hrtimer ;
255 enum hrtimer_restart ;
38 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ;
43 typedef struct __wait_queue_head wait_queue_head_t;
1225 struct completion { unsigned int done; wait_queue_head_t wait; } ;
1144 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ;
41 struct rb_root { struct rb_node *rb_node; } ;
83 struct user_namespace ;
22 struct __anonstruct_kuid_t_162 { uid_t val; } ;
22 typedef struct __anonstruct_kuid_t_162 kuid_t;
27 struct __anonstruct_kgid_t_163 { gid_t val; } ;
27 typedef struct __anonstruct_kgid_t_163 kgid_t;
835 struct nsproxy ;
836 struct ctl_table_root ;
837 struct ctl_table_header ;
838 struct ctl_dir ;
39 typedef int proc_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
61 struct ctl_table_poll { atomic_t event; wait_queue_head_t wait; } ;
100 struct ctl_table { const char *procname; void *data; int maxlen; umode_t mode; struct ctl_table *child; proc_handler *proc_handler; struct ctl_table_poll *poll; void *extra1; void *extra2; } ;
121 struct ctl_node { struct rb_node node; struct ctl_table_header *header; } ;
126 struct __anonstruct____missing_field_name_165 { struct ctl_table *ctl_table; int used; int count; int nreg; } ;
126 union __anonunion____missing_field_name_164 { struct __anonstruct____missing_field_name_165 __annonCompField21; struct callback_head rcu; } ;
126 struct ctl_table_set ;
126 struct ctl_table_header { union __anonunion____missing_field_name_164 __annonCompField22; struct completion *unregistering; struct ctl_table *ctl_table_arg; struct ctl_table_root *root; struct ctl_table_set *set; struct ctl_dir *parent; struct ctl_node *node; } ;
147 struct ctl_dir { struct ctl_table_header header; struct rb_root root; } ;
153 struct ctl_table_set { int (*is_seen)(struct ctl_table_set *); struct ctl_dir dir; } ;
158 struct ctl_table_root { struct ctl_table_set default_set; struct ctl_table_set * (*lookup)(struct ctl_table_root *); void (*set_ownership)(struct ctl_table_header *, struct ctl_table *, kuid_t *, kgid_t *); int (*permissions)(struct ctl_table_header *, struct ctl_table *); } ;
278 struct workqueue_struct ;
279 struct work_struct ;
54 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ;
107 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ;
58 struct pm_message { int event; } ;
64 typedef struct pm_message pm_message_t;
65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ;
320 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ;
327 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ;
335 struct wakeup_source ;
336 struct wake_irq ;
337 struct pm_domain_data ;
338 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; struct pm_domain_data *domain_data; } ;
556 struct dev_pm_qos ;
556 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool in_dpm_list; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; bool no_pm_callbacks; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; bool ignore_children; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; unsigned int links_count; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ;
618 struct dev_pm_domain { struct dev_pm_ops ops; void (*detach)(struct device *, bool ); int (*activate)(struct device *); void (*sync)(struct device *); void (*dismiss)(struct device *); } ;
97 struct __anonstruct_nodemask_t_166 { unsigned long bits[16U]; } ;
97 typedef struct __anonstruct_nodemask_t_166 nodemask_t;
144 struct pci_bus ;
13 struct optimistic_spin_queue { atomic_t tail; } ;
39 struct mutex { atomic_long_t owner; spinlock_t wait_lock; struct optimistic_spin_queue osq; struct list_head wait_list; void *magic; struct lockdep_map dep_map; } ;
70 struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; } ;
38 struct ldt_struct ;
38 struct vdso_image ;
38 struct __anonstruct_mm_context_t_167 { struct ldt_struct *ldt; unsigned short ia32_compat; struct mutex lock; void *vdso; const struct vdso_image *vdso_image; atomic_t perf_rdpmc_allowed; u16 pkey_allocation_map; s16 execute_only_pkey; void *bd_addr; } ;
38 typedef struct __anonstruct_mm_context_t_167 mm_context_t;
22 struct bio_vec ;
249 typedef unsigned int isolate_mode_t;
744 struct rw_semaphore ;
745 struct rw_semaphore { atomic_long_t count; struct list_head wait_list; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct task_struct *owner; struct lockdep_map dep_map; } ;
64 struct irq_domain ;
422 union __anonunion____missing_field_name_209 { unsigned long bitmap[1U]; struct callback_head callback_head; } ;
422 struct idr_layer { int prefix; int layer; struct idr_layer *ary[64U]; int count; union __anonunion____missing_field_name_209 __annonCompField33; } ;
40 struct idr { struct idr_layer *hint; struct idr_layer *top; int layers; int cur; spinlock_t lock; int id_free_cnt; struct idr_layer *id_free; } ;
149 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ;
192 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ;
229 struct dentry ;
230 struct iattr ;
231 struct super_block ;
232 struct file_system_type ;
233 struct kernfs_open_node ;
234 struct kernfs_iattrs ;
257 struct kernfs_root ;
257 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ;
85 struct kernfs_node ;
85 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ;
89 struct kernfs_ops ;
89 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; struct kernfs_node *notify_next; } ;
96 union __anonunion____missing_field_name_218 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ;
96 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion____missing_field_name_218 __annonCompField34; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ;
138 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); } ;
157 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ;
173 struct vm_operations_struct ;
173 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; void *priv; struct mutex mutex; struct mutex prealloc_mutex; int event; struct list_head list; char *prealloc_buf; size_t atomic_write_len; bool mmapped; const struct vm_operations_struct *vm_ops; } ;
191 struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; bool prealloc; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ;
286 struct inode ;
511 struct sock ;
512 struct kobject ;
513 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ;
519 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ;
135 struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; } ;
36 struct bin_attribute ;
37 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ;
37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ;
92 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ;
165 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ;
530 struct kref { atomic_t refcount; } ;
52 struct kset ;
52 struct kobj_type ;
52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ;
115 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ;
123 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ;
131 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ;
148 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ;
224 struct proc_dir_entry ;
133 struct exception_table_entry { int insn; int fixup; int handler; } ;
61 struct timerqueue_node { struct rb_node node; ktime_t expires; } ;
12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ;
50 struct hrtimer_clock_base ;
51 struct hrtimer_cpu_base ;
60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ;
65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; u8 state; u8 is_rel; int start_pid; void *start_site; char start_comm[16U]; } ;
125 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t (*get_time)(); ktime_t offset; } ;
158 struct hrtimer_cpu_base { raw_spinlock_t lock; seqcount_t seq; struct hrtimer *running; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; bool migration_enabled; bool nohz_active; unsigned char in_hrtirq; unsigned char hres_active; unsigned char hang_detected; ktime_t expires_next; struct hrtimer *next_timer; unsigned int nr_events; unsigned int nr_retries; unsigned int nr_hangs; unsigned int max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ;
506 struct tasklet_struct { struct tasklet_struct *next; unsigned long state; atomic_t count; void (*func)(unsigned long); unsigned long data; } ;
37 struct cred ;
58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ;
66 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ;
73 struct __anonstruct____missing_field_name_236 { struct arch_uprobe_task autask; unsigned long vaddr; } ;
73 struct __anonstruct____missing_field_name_237 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ;
73 union __anonunion____missing_field_name_235 { struct __anonstruct____missing_field_name_236 __annonCompField45; struct __anonstruct____missing_field_name_237 __annonCompField46; } ;
73 struct uprobe ;
73 struct return_instance ;
73 struct uprobe_task { enum uprobe_task_state state; union __anonunion____missing_field_name_235 __annonCompField47; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ;
95 struct return_instance { struct uprobe *uprobe; unsigned long func; unsigned long stack; unsigned long orig_ret_vaddr; bool chained; struct return_instance *next; } ;
111 struct xol_area ;
112 struct uprobes_state { struct xol_area *xol_area; } ;
151 struct address_space ;
152 struct mem_cgroup ;
153 union __anonunion____missing_field_name_238 { struct address_space *mapping; void *s_mem; atomic_t compound_mapcount; } ;
153 union __anonunion____missing_field_name_239 { unsigned long index; void *freelist; } ;
153 struct __anonstruct____missing_field_name_243 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ;
153 union __anonunion____missing_field_name_242 { atomic_t _mapcount; unsigned int active; struct __anonstruct____missing_field_name_243 __annonCompField50; int units; } ;
153 struct __anonstruct____missing_field_name_241 { union __anonunion____missing_field_name_242 __annonCompField51; atomic_t _refcount; } ;
153 union __anonunion____missing_field_name_240 { unsigned long counters; struct __anonstruct____missing_field_name_241 __annonCompField52; } ;
153 struct dev_pagemap ;
153 struct __anonstruct____missing_field_name_245 { struct page *next; int pages; int pobjects; } ;
153 struct __anonstruct____missing_field_name_246 { unsigned long compound_head; unsigned int compound_dtor; unsigned int compound_order; } ;
153 struct __anonstruct____missing_field_name_247 { unsigned long __pad; pgtable_t pmd_huge_pte; } ;
153 union __anonunion____missing_field_name_244 { struct list_head lru; struct dev_pagemap *pgmap; struct __anonstruct____missing_field_name_245 __annonCompField54; struct callback_head callback_head; struct __anonstruct____missing_field_name_246 __annonCompField55; struct __anonstruct____missing_field_name_247 __annonCompField56; } ;
153 struct kmem_cache ;
153 union __anonunion____missing_field_name_248 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; } ;
153 struct page { unsigned long flags; union __anonunion____missing_field_name_238 __annonCompField48; union __anonunion____missing_field_name_239 __annonCompField49; union __anonunion____missing_field_name_240 __annonCompField53; union __anonunion____missing_field_name_244 __annonCompField57; union __anonunion____missing_field_name_248 __annonCompField58; struct mem_cgroup *mem_cgroup; } ;
197 struct page_frag { struct page *page; __u32 offset; __u32 size; } ;
282 struct userfaultfd_ctx ;
282 struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; } ;
289 struct __anonstruct_shared_249 { struct rb_node rb; unsigned long rb_subtree_last; } ;
289 struct anon_vma ;
289 struct mempolicy ;
289 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; struct __anonstruct_shared_249 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ;
362 struct core_thread { struct task_struct *task; struct core_thread *next; } ;
367 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ;
381 struct task_rss_stat { int events; int count[4U]; } ;
389 struct mm_rss_stat { atomic_long_t count[4U]; } ;
394 struct kioctx_table ;
395 struct linux_binfmt ;
395 struct mmu_notifier_mm ;
395 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; atomic_long_t nr_pmds; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long data_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct user_namespace *user_ns; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; atomic_long_t hugetlb_usage; struct work_struct async_put_work; } ;
560 struct vm_fault ;
614 struct vdso_image { void *data; unsigned long size; unsigned long alt; unsigned long alt_len; long sym_vvar_start; long sym_vvar_page; long sym_hpet_page; long sym_pvclock_page; long sym_VDSO32_NOTE_MASK; long sym___kernel_sigreturn; long sym___kernel_rt_sigreturn; long sym___kernel_vsyscall; long sym_int80_landing_pad; } ;
15 typedef __u64 Elf64_Addr;
16 typedef __u16 Elf64_Half;
18 typedef __u64 Elf64_Off;
20 typedef __u32 Elf64_Word;
21 typedef __u64 Elf64_Xword;
190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ;
198 typedef struct elf64_sym Elf64_Sym;
219 struct elf64_hdr { unsigned char e_ident[16U]; Elf64_Half e_type; Elf64_Half e_machine; Elf64_Word e_version; Elf64_Addr e_entry; Elf64_Off e_phoff; Elf64_Off e_shoff; Elf64_Word e_flags; Elf64_Half e_ehsize; Elf64_Half e_phentsize; Elf64_Half e_phnum; Elf64_Half e_shentsize; Elf64_Half e_shnum; Elf64_Half e_shstrndx; } ;
235 typedef struct elf64_hdr Elf64_Ehdr;
314 struct elf64_shdr { Elf64_Word sh_name; Elf64_Word sh_type; Elf64_Xword sh_flags; Elf64_Addr sh_addr; Elf64_Off sh_offset; Elf64_Xword sh_size; Elf64_Word sh_link; Elf64_Word sh_info; Elf64_Xword sh_addralign; Elf64_Xword sh_entsize; } ;
326 typedef struct elf64_shdr Elf64_Shdr;
53 struct kernel_param ;
58 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ;
62 struct kparam_string ;
62 struct kparam_array ;
62 union __anonunion____missing_field_name_254 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ;
62 struct kernel_param { const char *name; struct module *mod; const struct kernel_param_ops *ops; const u16 perm; s8 level; u8 flags; union __anonunion____missing_field_name_254 __annonCompField59; } ;
83 struct kparam_string { unsigned int maxlen; char *string; } ;
89 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ;
24 struct latch_tree_node { struct rb_node node[2U]; } ;
211 struct mod_arch_specific { } ;
39 struct module_param_attrs ;
39 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ;
50 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ;
277 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ;
284 struct mod_tree_node { struct module *mod; struct latch_tree_node node; } ;
291 struct module_layout { void *base; unsigned int size; unsigned int text_size; unsigned int ro_size; unsigned int ro_after_init_size; struct mod_tree_node mtn; } ;
307 struct mod_kallsyms { Elf64_Sym *symtab; unsigned int num_symtab; char *strtab; } ;
321 struct klp_modinfo { Elf64_Ehdr hdr; Elf64_Shdr *sechdrs; char *secstrings; unsigned int symndx; } ;
329 struct module_sect_attrs ;
329 struct module_notes_attrs ;
329 struct trace_event_call ;
329 struct trace_enum_map ;
329 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct mutex param_lock; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; bool async_probe_requested; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); struct module_layout core_layout; struct module_layout init_layout; struct mod_arch_specific arch; unsigned long taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; struct mod_kallsyms *kallsyms; struct mod_kallsyms core_kallsyms; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct trace_event_call **trace_events; unsigned int num_trace_events; struct trace_enum_map **trace_enums; unsigned int num_trace_enums; bool klp; bool klp_alive; struct klp_modinfo *klp_info; struct list_head source_list; struct list_head target_list; void (*exit)(); atomic_t refcnt; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ;
13 typedef unsigned long kernel_ulong_t;
14 struct pci_device_id { __u32 vendor; __u32 device; __u32 subvendor; __u32 subdevice; __u32 class; __u32 class_mask; kernel_ulong_t driver_data; } ;
187 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; __u32 cls; __u32 cls_msk; } ;
230 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ;
675 struct klist_node ;
37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ;
93 struct hlist_bl_node ;
93 struct hlist_bl_head { struct hlist_bl_node *first; } ;
36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ;
114 struct __anonstruct____missing_field_name_311 { spinlock_t lock; int count; } ;
114 union __anonunion____missing_field_name_310 { struct __anonstruct____missing_field_name_311 __annonCompField60; } ;
114 struct lockref { union __anonunion____missing_field_name_310 __annonCompField61; } ;
77 struct path ;
78 struct vfsmount ;
79 struct __anonstruct____missing_field_name_313 { u32 hash; u32 len; } ;
79 union __anonunion____missing_field_name_312 { struct __anonstruct____missing_field_name_313 __annonCompField62; u64 hash_len; } ;
79 struct qstr { union __anonunion____missing_field_name_312 __annonCompField63; const unsigned char *name; } ;
65 struct dentry_operations ;
65 union __anonunion____missing_field_name_314 { struct list_head d_lru; wait_queue_head_t *d_wait; } ;
65 union __anonunion_d_u_315 { struct hlist_node d_alias; struct hlist_bl_node d_in_lookup_hash; struct callback_head d_rcu; } ;
65 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; union __anonunion____missing_field_name_314 __annonCompField64; struct list_head d_child; struct list_head d_subdirs; union __anonunion_d_u_315 d_u; } ;
121 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); int (*d_init)(struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(const struct path *, bool ); struct dentry * (*d_real)(struct dentry *, const struct inode *, unsigned int); } ;
592 struct path { struct vfsmount *mnt; struct dentry *dentry; } ;
19 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; int nid; struct mem_cgroup *memcg; } ;
27 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ;
80 struct list_lru_one { struct list_head list; long nr_items; } ;
32 struct list_lru_memcg { struct list_lru_one *lru[0U]; } ;
37 struct list_lru_node { spinlock_t lock; struct list_lru_one lru; struct list_lru_memcg *memcg_lrus; } ;
47 struct list_lru { struct list_lru_node *node; struct list_head list; } ;
63 union __anonunion____missing_field_name_316 { struct list_head private_list; struct callback_head callback_head; } ;
63 struct radix_tree_node { unsigned char shift; unsigned char offset; unsigned char count; unsigned char exceptional; struct radix_tree_node *parent; void *private_data; union __anonunion____missing_field_name_316 __annonCompField65; void *slots[64U]; unsigned long tags[3U][1U]; } ;
105 struct radix_tree_root { gfp_t gfp_mask; struct radix_tree_node *rnode; } ;
519 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ;
526 struct pid_namespace ;
526 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ;
56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ;
68 struct pid_link { struct hlist_node node; struct pid *pid; } ;
22 struct kernel_cap_struct { __u32 cap[2U]; } ;
25 typedef struct kernel_cap_struct kernel_cap_t;
45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ;
38 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ;
44 enum rcu_sync_type { RCU_SYNC = 0, RCU_SCHED_SYNC = 1, RCU_BH_SYNC = 2 } ;
50 struct rcu_sync { int gp_state; int gp_count; wait_queue_head_t gp_wait; int cb_state; struct callback_head cb_head; enum rcu_sync_type gp_type; } ;
66 struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int *read_count; struct rw_semaphore rw_sem; wait_queue_head_t writer; int readers_block; } ;
144 struct delayed_call { void (*fn)(void *); void *arg; } ;
282 struct backing_dev_info ;
283 struct bdi_writeback ;
285 struct export_operations ;
287 struct iovec ;
288 struct kiocb ;
289 struct pipe_inode_info ;
290 struct poll_table_struct ;
291 struct kstatfs ;
292 struct swap_info_struct ;
293 struct iov_iter ;
294 struct fscrypt_info ;
295 struct fscrypt_operations ;
76 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ;
210 struct percpu_counter { raw_spinlock_t lock; s64 count; struct list_head list; s32 *counters; } ;
213 struct dquot ;
214 struct kqid ;
19 typedef __kernel_uid32_t projid_t;
23 struct __anonstruct_kprojid_t_320 { projid_t val; } ;
23 typedef struct __anonstruct_kprojid_t_320 kprojid_t;
181 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ;
66 typedef long long qsize_t;
67 union __anonunion____missing_field_name_321 { kuid_t uid; kgid_t gid; kprojid_t projid; } ;
67 struct kqid { union __anonunion____missing_field_name_321 __annonCompField66; enum quota_type type; } ;
194 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time64_t dqb_btime; time64_t dqb_itime; } ;
216 struct quota_format_type ;
217 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_max_spc_limit; qsize_t dqi_max_ino_limit; void *dqi_priv; } ;
282 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ;
309 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); int (*get_next_id)(struct super_block *, struct kqid *); } ;
321 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); int (*get_projid)(struct inode *, kprojid_t *); int (*get_next_id)(struct super_block *, struct kqid *); } ;
338 struct qc_dqblk { int d_fieldmask; u64 d_spc_hardlimit; u64 d_spc_softlimit; u64 d_ino_hardlimit; u64 d_ino_softlimit; u64 d_space; u64 d_ino_count; s64 d_ino_timer; s64 d_spc_timer; int d_ino_warns; int d_spc_warns; u64 d_rt_spc_hardlimit; u64 d_rt_spc_softlimit; u64 d_rt_space; s64 d_rt_spc_timer; int d_rt_spc_warns; } ;
361 struct qc_type_state { unsigned int flags; unsigned int spc_timelimit; unsigned int ino_timelimit; unsigned int rt_spc_timelimit; unsigned int spc_warnlimit; unsigned int ino_warnlimit; unsigned int rt_spc_warnlimit; unsigned long long ino; blkcnt_t blocks; blkcnt_t nextents; } ;
407 struct qc_state { unsigned int s_incoredqs; struct qc_type_state s_state[3U]; } ;
418 struct qc_info { int i_fieldmask; unsigned int i_flags; unsigned int i_spc_timelimit; unsigned int i_ino_timelimit; unsigned int i_rt_spc_timelimit; unsigned int i_spc_warnlimit; unsigned int i_ino_warnlimit; unsigned int i_rt_spc_warnlimit; } ;
431 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, const struct path *); int (*quota_off)(struct super_block *, int); int (*quota_enable)(struct super_block *, unsigned int); int (*quota_disable)(struct super_block *, unsigned int); int (*quota_sync)(struct super_block *, int); int (*set_info)(struct super_block *, int, struct qc_info *); int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); } ;
447 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ;
511 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct inode *files[3U]; struct mem_dqinfo info[3U]; const struct quota_format_ops *ops[3U]; } ;
540 struct writeback_control ;
541 struct kiocb { struct file *ki_filp; loff_t ki_pos; void (*ki_complete)(struct kiocb *, long, long); void *private; int ki_flags; } ;
317 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); bool (*isolate_page)(struct page *, isolate_mode_t ); void (*putback_page)(struct page *); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ;
376 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; atomic_t i_mmap_writable; struct rb_root i_mmap; struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; unsigned long nrexceptional; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; spinlock_t private_lock; gfp_t gfp_mask; struct list_head private_list; void *private_data; } ;
398 struct request_queue ;
399 struct hd_struct ;
399 struct gendisk ;
399 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct list_head bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; } ;
514 struct posix_acl ;
541 struct inode_operations ;
541 union __anonunion____missing_field_name_326 { const unsigned int i_nlink; unsigned int __i_nlink; } ;
541 union __anonunion____missing_field_name_327 { struct hlist_head i_dentry; struct callback_head i_rcu; } ;
541 struct file_lock_context ;
541 struct cdev ;
541 union __anonunion____missing_field_name_328 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; unsigned int i_dir_seq; } ;
541 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion____missing_field_name_326 __annonCompField67; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct rw_semaphore i_rwsem; unsigned long dirtied_when; unsigned long dirtied_time_when; struct hlist_node i_hash; struct list_head i_io_list; struct bdi_writeback *i_wb; int i_wb_frn_winner; u16 i_wb_frn_avg_time; u16 i_wb_frn_history; struct list_head i_lru; struct list_head i_sb_list; struct list_head i_wb_list; union __anonunion____missing_field_name_327 __annonCompField68; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; const struct file_operations *i_fop; struct file_lock_context *i_flctx; struct address_space i_data; struct list_head i_devices; union __anonunion____missing_field_name_328 __annonCompField69; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; struct fscrypt_info *i_crypt_info; void *i_private; } ;
797 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ;
805 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ;
828 union __anonunion_f_u_329 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ;
828 struct file { union __anonunion_f_u_329 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; } ;
913 typedef void *fl_owner_t;
914 struct file_lock ;
915 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ;
921 struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); fl_owner_t (*lm_get_owner)(fl_owner_t ); void (*lm_put_owner)(fl_owner_t ); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, int); bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); } ;
942 struct net ;
948 struct nlm_lockowner ;
949 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ;
14 struct nfs4_lock_state ;
15 struct nfs4_lock_info { struct nfs4_lock_state *owner; } ;
19 struct fasync_struct ;
19 struct __anonstruct_afs_331 { struct list_head link; int state; } ;
19 union __anonunion_fl_u_330 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_331 afs; } ;
19 struct file_lock { struct file_lock *fl_next; struct list_head fl_list; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_330 fl_u; } ;
1001 struct file_lock_context { spinlock_t flc_lock; struct list_head flc_flock; struct list_head flc_posix; struct list_head flc_lease; } ;
1068 struct files_struct ;
1221 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ;
1256 struct sb_writers { int frozen; wait_queue_head_t wait_unfrozen; struct percpu_rw_semaphore rw_sem[3U]; } ;
1286 struct super_operations ;
1286 struct xattr_handler ;
1286 struct mtd_info ;
1286 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_iflags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; const struct fscrypt_operations *s_cop; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; unsigned int s_quota_types; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; struct user_namespace *s_user_ns; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; struct work_struct destroy_work; struct mutex s_sync_lock; int s_stack_depth; spinlock_t s_inode_list_lock; struct list_head s_inodes; spinlock_t s_inode_wblist_lock; struct list_head s_inodes_wb; } ;
1570 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ;
1583 struct dir_context ;
1608 struct dir_context { int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ;
1615 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iterate)(struct file *, struct dir_context *); int (*iterate_shared)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **, void **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); void (*show_fdinfo)(struct seq_file *, struct file *); ssize_t (*copy_file_range)(struct file *, loff_t , struct file *, loff_t , size_t , unsigned int); int (*clone_file_range)(struct file *, loff_t , struct file *, loff_t , u64 ); ssize_t (*dedupe_file_range)(struct file *, u64 , u64 , struct file *, u64 ); } ;
1683 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); int (*permission)(struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ;
1753 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_super)(struct super_block *); int (*freeze_fs)(struct super_block *); int (*thaw_super)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); struct dquot ** (*get_dquots)(struct inode *); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ;
1995 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ;
3167 struct assoc_array_ptr ;
3167 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ;
31 typedef int32_t key_serial_t;
34 typedef uint32_t key_perm_t;
35 struct key ;
36 struct user_struct ;
37 struct signal_struct ;
38 struct key_type ;
42 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ;
91 union key_payload { void *rcu_data0; void *data[4U]; } ;
128 union __anonunion____missing_field_name_332 { struct list_head graveyard_link; struct rb_node serial_node; } ;
128 struct key_user ;
128 union __anonunion____missing_field_name_333 { time_t expiry; time_t revoked_at; } ;
128 struct __anonstruct____missing_field_name_335 { struct key_type *type; char *description; } ;
128 union __anonunion____missing_field_name_334 { struct keyring_index_key index_key; struct __anonstruct____missing_field_name_335 __annonCompField72; } ;
128 struct __anonstruct____missing_field_name_337 { struct list_head name_link; struct assoc_array keys; } ;
128 union __anonunion____missing_field_name_336 { union key_payload payload; struct __anonstruct____missing_field_name_337 __annonCompField74; int reject_error; } ;
128 struct key { atomic_t usage; key_serial_t serial; union __anonunion____missing_field_name_332 __annonCompField70; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion____missing_field_name_333 __annonCompField71; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion____missing_field_name_334 __annonCompField73; union __anonunion____missing_field_name_336 __annonCompField75; int (*restrict_link)(struct key *, const struct key_type *, const union key_payload *); } ;
377 struct audit_context ;
27 struct group_info { atomic_t usage; int ngroups; kgid_t gid[0U]; } ;
85 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; kernel_cap_t cap_ambient; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ;
368 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; const struct file *file; void *private; } ;
30 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ;
222 struct pinctrl ;
223 struct pinctrl_state ;
194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *init_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ;
84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ;
4 typedef unsigned long cputime_t;
26 struct sem_undo_list ;
26 struct sysv_sem { struct sem_undo_list *undo_list; } ;
26 struct sysv_shm { struct list_head shm_clist; } ;
24 struct __anonstruct_sigset_t_338 { unsigned long sig[1U]; } ;
24 typedef struct __anonstruct_sigset_t_338 sigset_t;
25 struct siginfo ;
17 typedef void __signalfn_t(int);
18 typedef __signalfn_t *__sighandler_t;
20 typedef void __restorefn_t();
21 typedef __restorefn_t *__sigrestore_t;
38 union sigval { int sival_int; void *sival_ptr; } ;
10 typedef union sigval sigval_t;
11 struct __anonstruct__kill_340 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ;
11 struct __anonstruct__timer_341 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ;
11 struct __anonstruct__rt_342 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ;
11 struct __anonstruct__sigchld_343 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ;
11 struct __anonstruct__addr_bnd_346 { void *_lower; void *_upper; } ;
11 union __anonunion____missing_field_name_345 { struct __anonstruct__addr_bnd_346 _addr_bnd; __u32 _pkey; } ;
11 struct __anonstruct__sigfault_344 { void *_addr; short _addr_lsb; union __anonunion____missing_field_name_345 __annonCompField76; } ;
11 struct __anonstruct__sigpoll_347 { long _band; int _fd; } ;
11 struct __anonstruct__sigsys_348 { void *_call_addr; int _syscall; unsigned int _arch; } ;
11 union __anonunion__sifields_339 { int _pad[28U]; struct __anonstruct__kill_340 _kill; struct __anonstruct__timer_341 _timer; struct __anonstruct__rt_342 _rt; struct __anonstruct__sigchld_343 _sigchld; struct __anonstruct__sigfault_344 _sigfault; struct __anonstruct__sigpoll_347 _sigpoll; struct __anonstruct__sigsys_348 _sigsys; } ;
11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_339 _sifields; } ;
118 typedef struct siginfo siginfo_t;
22 struct sigpending { struct list_head list; sigset_t signal; } ;
274 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; } ;
288 struct k_sigaction { struct sigaction sa; } ;
43 struct seccomp_filter ;
44 struct seccomp { int mode; struct seccomp_filter *filter; } ;
40 struct rt_mutex_waiter ;
41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ;
12 enum kcov_mode { KCOV_MODE_DISABLED = 0, KCOV_MODE_TRACE = 1 } ;
17 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ;
45 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ;
41 struct percpu_ref ;
55 typedef void percpu_ref_func_t(struct percpu_ref *);
68 struct percpu_ref { atomic_long_t count; unsigned long percpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic; struct callback_head rcu; } ;
607 struct cgroup ;
608 struct sk_buff ;
14 struct bpf_prog ;
14 struct cgroup_bpf { struct bpf_prog *prog[3U]; struct bpf_prog *effective[3U]; } ;
44 struct cgroup_root ;
45 struct cgroup_subsys ;
46 struct cgroup_taskset ;
90 struct cgroup_file { struct kernfs_node *kn; } ;
91 struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; struct percpu_ref refcnt; struct cgroup_subsys_state *parent; struct list_head sibling; struct list_head children; int id; unsigned int flags; u64 serial_nr; atomic_t online_cnt; struct callback_head callback_head; struct work_struct destroy_work; } ;
142 struct css_set { atomic_t refcount; struct hlist_node hlist; struct list_head tasks; struct list_head mg_tasks; struct list_head cgrp_links; struct cgroup *dfl_cgrp; struct cgroup_subsys_state *subsys[13U]; struct list_head mg_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct cgroup *mg_dst_cgrp; struct css_set *mg_dst_cset; struct list_head e_cset_node[13U]; struct list_head task_iters; bool dead; struct callback_head callback_head; } ;
222 struct cgroup { struct cgroup_subsys_state self; unsigned long flags; int id; int level; int populated_cnt; struct kernfs_node *kn; struct cgroup_file procs_file; struct cgroup_file events_file; u16 subtree_control; u16 subtree_ss_mask; u16 old_subtree_control; u16 old_subtree_ss_mask; struct cgroup_subsys_state *subsys[13U]; struct cgroup_root *root; struct list_head cset_links; struct list_head e_csets[13U]; struct list_head pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; struct work_struct release_agent_work; struct cgroup_bpf bpf; int ancestor_ids[]; } ;
310 struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int hierarchy_id; struct cgroup cgrp; int cgrp_ancestor_id_storage; atomic_t nr_cgrps; struct list_head root_list; unsigned int flags; struct idr cgroup_idr; char release_agent_path[4096U]; char name[64U]; } ;
349 struct cftype { char name[64U]; unsigned long private; size_t max_write_len; unsigned int flags; unsigned int file_offset; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 ); int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 ); ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); struct lock_class_key lockdep_key; } ;
434 struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int (*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_released)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); void (*css_reset)(struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_taskset *); void (*attach)(struct cgroup_taskset *); void (*post_attach)(); int (*can_fork)(struct task_struct *); void (*cancel_fork)(struct task_struct *); void (*fork)(struct task_struct *); void (*exit)(struct task_struct *); void (*free)(struct task_struct *); void (*bind)(struct cgroup_subsys_state *); bool early_init; bool implicit_on_dfl; bool broken_hierarchy; bool warned_broken_hierarchy; int id; const char *name; const char *legacy_name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *dfl_cftypes; struct cftype *legacy_cftypes; unsigned int depends_on; } ;
128 struct futex_pi_state ;
129 struct robust_list_head ;
130 struct bio_list ;
131 struct fs_struct ;
132 struct perf_event_context ;
133 struct blk_plug ;
134 struct nameidata ;
188 struct cfs_rq ;
189 struct task_group ;
515 struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ;
563 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ;
571 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ;
578 struct prev_cputime { cputime_t utime; cputime_t stime; raw_spinlock_t lock; } ;
603 struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; } ;
619 struct task_cputime_atomic { atomic64_t utime; atomic64_t stime; atomic64_t sum_exec_runtime; } ;
641 struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; bool running; bool checking_timer; } ;
686 struct autogroup ;
687 struct tty_struct ;
687 struct taskstats ;
687 struct tty_audit_buf ;
687 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; seqlock_t stats_lock; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; struct tty_audit_buf *tty_audit_buf; bool oom_flag_origin; short oom_score_adj; short oom_score_adj_min; struct mm_struct *oom_mm; struct mutex cred_guard_mutex; } ;
863 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; unsigned long unix_inflight; atomic_long_t pipe_bufs; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ;
908 struct reclaim_state ;
909 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ;
924 struct task_delay_info { spinlock_t lock; unsigned int flags; u64 blkio_start; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; u64 freepages_start; u64 freepages_delay; u32 freepages_count; } ;
981 struct wake_q_node { struct wake_q_node *next; } ;
1226 struct io_context ;
1260 struct uts_namespace ;
1261 struct load_weight { unsigned long weight; u32 inv_weight; } ;
1269 struct sched_avg { u64 last_update_time; u64 load_sum; u32 util_sum; u32 period_contrib; unsigned long load_avg; unsigned long util_avg; } ;
1327 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ;
1362 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ;
1399 struct rt_rq ;
1399 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; unsigned short on_rq; unsigned short on_list; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ;
1417 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ;
1481 struct tlbflush_unmap_batch { struct cpumask cpumask; bool flush_required; bool writable; } ;
1500 struct sched_class ;
1500 struct compat_robust_list_head ;
1500 struct numa_group ;
1500 struct kcov ;
1500 struct task_struct { struct thread_info thread_info; volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; unsigned int cpu; unsigned int wakee_flips; unsigned long wakee_flip_decay_ts; struct task_struct *last_wakee; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; unsigned long rcu_tasks_nvcsw; bool rcu_tasks_holdout; struct list_head rcu_tasks_holdout_list; int rcu_tasks_idle_cpu; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; u32 vmacache_seqnum; struct vm_area_struct *vmacache[4U]; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned long jobctl; unsigned int personality; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; unsigned char sched_migrated; unsigned char sched_remote_wakeup; unsigned char; unsigned char in_execve; unsigned char in_iowait; unsigned char restore_sigmask; unsigned char memcg_may_oom; unsigned char memcg_kmem_skip_account; unsigned char brk_randomized; unsigned long atomic_flags; struct restart_block restart_block; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t gtime; struct 
prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; u64 start_time; u64 real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *ptracer_cred; const struct cred *real_cred; const struct cred *cred; char comm[16U]; struct nameidata *nameidata; struct sysv_sem sysvsem; struct sysv_shm sysvshm; unsigned long last_switch_count; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; unsigned int sas_ss_flags; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct wake_q_node wake_q; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; unsigned int in_ubsan; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t acct_timexpd; 
nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; int closid; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults; unsigned long total_numa_faults; unsigned long numa_faults_locality[3U]; unsigned long numa_pages_migrated; struct tlbflush_unmap_batch tlb_ubc; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; u64 timer_slack_ns; u64 default_timer_slack_ns; unsigned int kasan_depth; unsigned long trace; unsigned long trace_recursion; enum kcov_mode kcov_mode; unsigned int kcov_size; void *kcov_area; struct kcov *kcov; struct mem_cgroup *memcg_in_oom; gfp_t memcg_oom_gfp_mask; int memcg_oom_order; unsigned int memcg_nr_pages_over_high; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; unsigned long task_state_change; int pagefault_disabled; struct task_struct *oom_reaper_list; atomic_t stack_refcount; struct thread_struct thread; } ;
76 struct dma_map_ops ;
76 struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; } ;
24 struct device_private ;
25 struct device_driver ;
26 struct driver_private ;
27 struct class ;
28 struct subsys_private ;
29 struct bus_type ;
30 struct device_node ;
31 struct fwnode_handle ;
32 struct iommu_ops ;
33 struct iommu_group ;
34 struct iommu_fwspec ;
62 struct device_attribute ;
62 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ;
143 struct device_type ;
202 enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ;
208 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; enum probe_type probe_type; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ;
358 struct class_attribute ;
358 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **class_groups; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ;
453 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ;
523 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ;
551 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ;
723 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ;
786 enum dl_dev_state { DL_DEV_NO_DRIVER = 0, DL_DEV_PROBING = 1, DL_DEV_DRIVER_BOUND = 2, DL_DEV_UNBINDING = 3 } ;
793 struct dev_links_info { struct list_head suppliers; struct list_head consumers; enum dl_dev_state status; } ;
813 struct dma_coherent_mem ;
813 struct cma ;
813 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_links_info links; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct irq_domain *msi_domain; struct dev_pin_info *pins; struct list_head msi_list; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct fwnode_handle *fwnode; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; struct iommu_fwspec *iommu_fwspec; bool offline_disabled; bool offline; } ;
971 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ;
70 struct hotplug_slot ;
70 struct pci_slot { struct pci_bus *bus; struct list_head list; struct hotplug_slot *hotplug; unsigned char number; struct kobject kobj; } ;
108 typedef int pci_power_t;
135 typedef unsigned int pci_channel_state_t;
136 enum pci_channel_state { pci_channel_io_normal = 1, pci_channel_io_frozen = 2, pci_channel_io_perm_failure = 3 } ;
161 typedef unsigned short pci_dev_flags_t;
188 typedef unsigned short pci_bus_flags_t;
246 struct pcie_link_state ;
247 struct pci_vpd ;
248 struct pci_sriov ;
250 struct pci_driver ;
250 union __anonunion____missing_field_name_388 { struct pci_sriov *sriov; struct pci_dev *physfn; } ;
250 struct pci_dev { struct list_head bus_list; struct pci_bus *bus; struct pci_bus *subordinate; void *sysdata; struct proc_dir_entry *procent; struct pci_slot *slot; unsigned int devfn; unsigned short vendor; unsigned short device; unsigned short subsystem_vendor; unsigned short subsystem_device; unsigned int class; u8 revision; u8 hdr_type; u16 aer_cap; u8 pcie_cap; u8 msi_cap; u8 msix_cap; unsigned char pcie_mpss; u8 rom_base_reg; u8 pin; u16 pcie_flags_reg; unsigned long *dma_alias_mask; struct pci_driver *driver; u64 dma_mask; struct device_dma_parameters dma_parms; pci_power_t current_state; u8 pm_cap; unsigned char pme_support; unsigned char pme_interrupt; unsigned char pme_poll; unsigned char d1_support; unsigned char d2_support; unsigned char no_d1d2; unsigned char no_d3cold; unsigned char bridge_d3; unsigned char d3cold_allowed; unsigned char mmio_always_on; unsigned char wakeup_prepared; unsigned char runtime_d3cold; unsigned char ignore_hotplug; unsigned char hotplug_user_indicators; unsigned int d3_delay; unsigned int d3cold_delay; struct pcie_link_state *link_state; pci_channel_state_t error_state; struct device dev; int cfg_size; unsigned int irq; struct resource resource[17U]; bool match_driver; unsigned char transparent; unsigned char multifunction; unsigned char is_added; unsigned char is_busmaster; unsigned char no_msi; unsigned char no_64bit_msi; unsigned char block_cfg_access; unsigned char broken_parity_status; unsigned char irq_reroute_variant; unsigned char msi_enabled; unsigned char msix_enabled; unsigned char ari_enabled; unsigned char ats_enabled; unsigned char is_managed; unsigned char needs_freset; unsigned char state_saved; unsigned char is_physfn; unsigned char is_virtfn; unsigned char reset_fn; unsigned char is_hotplug_bridge; unsigned char __aer_firmware_first_valid; unsigned char __aer_firmware_first; unsigned char broken_intx_masking; unsigned char io_window_1k; unsigned char irq_managed; unsigned char has_secondary_link; 
unsigned char non_compliant_bars; pci_dev_flags_t dev_flags; atomic_t enable_cnt; u32 saved_config_space[16U]; struct hlist_head saved_cap_space; struct bin_attribute *rom_attr; int rom_attr_enabled; struct bin_attribute *res_attr[17U]; struct bin_attribute *res_attr_wc[17U]; unsigned char ptm_root; unsigned char ptm_enabled; u8 ptm_granularity; const struct attribute_group **msi_irq_groups; struct pci_vpd *vpd; union __anonunion____missing_field_name_388 __annonCompField89; u16 ats_cap; u8 ats_stu; atomic_t ats_ref_cnt; phys_addr_t rom; size_t romlen; char *driver_override; } ;
419 struct pci_ops ;
419 struct msi_controller ;
482 struct pci_bus { struct list_head node; struct pci_bus *parent; struct list_head children; struct list_head devices; struct pci_dev *self; struct list_head slots; struct resource *resource[4U]; struct list_head resources; struct resource busn_res; struct pci_ops *ops; struct msi_controller *msi; void *sysdata; struct proc_dir_entry *procdir; unsigned char number; unsigned char primary; unsigned char max_bus_speed; unsigned char cur_bus_speed; char name[48U]; unsigned short bridge_ctl; pci_bus_flags_t bus_flags; struct device *bridge; struct device dev; struct bin_attribute *legacy_io; struct bin_attribute *legacy_mem; unsigned char is_added; } ;
606 struct pci_ops { int (*add_bus)(struct pci_bus *); void (*remove_bus)(struct pci_bus *); void * (*map_bus)(struct pci_bus *, unsigned int, int); int (*read)(struct pci_bus *, unsigned int, int, int, u32 *); int (*write)(struct pci_bus *, unsigned int, int, int, u32 ); } ;
636 struct pci_dynids { spinlock_t lock; struct list_head list; } ;
650 typedef unsigned int pci_ers_result_t;
660 struct pci_error_handlers { pci_ers_result_t (*error_detected)(struct pci_dev *, enum pci_channel_state ); pci_ers_result_t (*mmio_enabled)(struct pci_dev *); pci_ers_result_t (*link_reset)(struct pci_dev *); pci_ers_result_t (*slot_reset)(struct pci_dev *); void (*reset_notify)(struct pci_dev *, bool ); void (*resume)(struct pci_dev *); } ;
693 struct pci_driver { struct list_head node; const char *name; const struct pci_device_id *id_table; int (*probe)(struct pci_dev *, const struct pci_device_id *); void (*remove)(struct pci_dev *); int (*suspend)(struct pci_dev *, pm_message_t ); int (*suspend_late)(struct pci_dev *, pm_message_t ); int (*resume_early)(struct pci_dev *); int (*resume)(struct pci_dev *); void (*shutdown)(struct pci_dev *); int (*sriov_configure)(struct pci_dev *, int); const struct pci_error_handlers *err_handler; struct device_driver driver; struct pci_dynids dynids; } ;
273 struct vm_fault { struct vm_area_struct *vma; unsigned int flags; gfp_t gfp_mask; unsigned long pgoff; unsigned long address; pmd_t *pmd; pte_t orig_pte; struct page *cow_page; struct mem_cgroup *memcg; struct page *page; pte_t *pte; spinlock_t *ptl; pgtable_t prealloc_pte; } ;
322 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*mremap)(struct vm_area_struct *); int (*fault)(struct vm_area_struct *, struct vm_fault *); int (*pmd_fault)(struct vm_area_struct *, unsigned long, pmd_t *, unsigned int); void (*map_pages)(struct vm_fault *, unsigned long, unsigned long); int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ;
1322 struct kvec ;
2439 struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; } ;
21 struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; } ;
96 enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ;
158 struct dma_map_ops { void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , unsigned long); void (*free)(struct device *, size_t , void *, dma_addr_t , unsigned long); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , unsigned long); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , unsigned long); dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , unsigned long); void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); dma_addr_t (*map_resource)(struct device *, phys_addr_t , size_t , enum dma_data_direction , unsigned long); void (*unmap_resource)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long); void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction ); int (*mapping_error)(struct device *, dma_addr_t ); int (*dma_supported)(struct device *, u64 ); int (*set_dma_mask)(struct device *, u64 ); int is_phys; } ;
56 struct iovec { void *iov_base; __kernel_size_t iov_len; } ;
21 struct kvec { void *iov_base; size_t iov_len; } ;
29 union __anonunion____missing_field_name_399 { const struct iovec *iov; const struct kvec *kvec; const struct bio_vec *bvec; struct pipe_inode_info *pipe; } ;
29 union __anonunion____missing_field_name_400 { unsigned long nr_segs; int idx; } ;
29 struct iov_iter { int type; size_t iov_offset; size_t count; union __anonunion____missing_field_name_399 __annonCompField90; union __anonunion____missing_field_name_400 __annonCompField91; } ;
1426 struct dql { unsigned int num_queued; unsigned int adj_limit; unsigned int last_obj_cnt; unsigned int limit; unsigned int num_completed; unsigned int prev_ovlimit; unsigned int prev_num_queued; unsigned int prev_last_obj_cnt; unsigned int lowest_slack; unsigned long slack_start_time; unsigned int max_limit; unsigned int min_limit; unsigned int slack_hold_time; } ;
11 typedef unsigned short __kernel_sa_family_t;
23 typedef __kernel_sa_family_t sa_family_t;
24 struct sockaddr { sa_family_t sa_family; char sa_data[14U]; } ;
43 struct __anonstruct_sync_serial_settings_402 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; } ;
43 typedef struct __anonstruct_sync_serial_settings_402 sync_serial_settings;
50 struct __anonstruct_te1_settings_403 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; unsigned int slot_map; } ;
50 typedef struct __anonstruct_te1_settings_403 te1_settings;
55 struct __anonstruct_raw_hdlc_proto_404 { unsigned short encoding; unsigned short parity; } ;
55 typedef struct __anonstruct_raw_hdlc_proto_404 raw_hdlc_proto;
65 struct __anonstruct_fr_proto_405 { unsigned int t391; unsigned int t392; unsigned int n391; unsigned int n392; unsigned int n393; unsigned short lmi; unsigned short dce; } ;
65 typedef struct __anonstruct_fr_proto_405 fr_proto;
69 struct __anonstruct_fr_proto_pvc_406 { unsigned int dlci; } ;
69 typedef struct __anonstruct_fr_proto_pvc_406 fr_proto_pvc;
74 struct __anonstruct_fr_proto_pvc_info_407 { unsigned int dlci; char master[16U]; } ;
74 typedef struct __anonstruct_fr_proto_pvc_info_407 fr_proto_pvc_info;
79 struct __anonstruct_cisco_proto_408 { unsigned int interval; unsigned int timeout; } ;
79 typedef struct __anonstruct_cisco_proto_408 cisco_proto;
117 struct ifmap { unsigned long mem_start; unsigned long mem_end; unsigned short base_addr; unsigned char irq; unsigned char dma; unsigned char port; } ;
197 union __anonunion_ifs_ifsu_409 { raw_hdlc_proto *raw_hdlc; cisco_proto *cisco; fr_proto *fr; fr_proto_pvc *fr_pvc; fr_proto_pvc_info *fr_pvc_info; sync_serial_settings *sync; te1_settings *te1; } ;
197 struct if_settings { unsigned int type; unsigned int size; union __anonunion_ifs_ifsu_409 ifs_ifsu; } ;
216 union __anonunion_ifr_ifrn_410 { char ifrn_name[16U]; } ;
216 union __anonunion_ifr_ifru_411 { struct sockaddr ifru_addr; struct sockaddr ifru_dstaddr; struct sockaddr ifru_broadaddr; struct sockaddr ifru_netmask; struct sockaddr ifru_hwaddr; short ifru_flags; int ifru_ivalue; int ifru_mtu; struct ifmap ifru_map; char ifru_slave[16U]; char ifru_newname[16U]; void *ifru_data; struct if_settings ifru_settings; } ;
216 struct ifreq { union __anonunion_ifr_ifrn_410 ifr_ifrn; union __anonunion_ifr_ifru_411 ifr_ifru; } ;
18 typedef s32 compat_time_t;
39 typedef s32 compat_long_t;
45 typedef u32 compat_uptr_t;
46 struct compat_timespec { compat_time_t tv_sec; s32 tv_nsec; } ;
278 struct compat_robust_list { compat_uptr_t next; } ;
282 struct compat_robust_list_head { struct compat_robust_list list; compat_long_t futex_offset; compat_uptr_t list_op_pending; } ;
161 struct in6_addr ;
15 typedef u64 netdev_features_t;
70 union __anonunion_in6_u_437 { __u8 u6_addr8[16U]; __be16 u6_addr16[8U]; __be32 u6_addr32[4U]; } ;
70 struct in6_addr { union __anonunion_in6_u_437 in6_u; } ;
46 struct ethhdr { unsigned char h_dest[6U]; unsigned char h_source[6U]; __be16 h_proto; } ;
227 struct pipe_buf_operations ;
227 struct pipe_buffer { struct page *page; unsigned int offset; unsigned int len; const struct pipe_buf_operations *ops; unsigned int flags; unsigned long private; } ;
27 struct pipe_inode_info { struct mutex mutex; wait_queue_head_t wait; unsigned int nrbufs; unsigned int curbuf; unsigned int buffers; unsigned int readers; unsigned int writers; unsigned int files; unsigned int waiting_writers; unsigned int r_counter; unsigned int w_counter; struct page *tmp_page; struct fasync_struct *fasync_readers; struct fasync_struct *fasync_writers; struct pipe_buffer *bufs; struct user_struct *user; } ;
63 struct pipe_buf_operations { int can_merge; int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); void (*release)(struct pipe_inode_info *, struct pipe_buffer *); int (*steal)(struct pipe_inode_info *, struct pipe_buffer *); void (*get)(struct pipe_inode_info *, struct pipe_buffer *); } ;
272 struct napi_struct ;
273 struct nf_conntrack { atomic_t use; } ;
254 union __anonunion____missing_field_name_451 { __be32 ipv4_daddr; struct in6_addr ipv6_daddr; char neigh_header[8U]; } ;
254 struct nf_bridge_info { atomic_t use; unsigned char orig_proto; unsigned char pkt_otherhost; unsigned char in_prerouting; unsigned char bridged_dnat; __u16 frag_max_size; struct net_device *physindev; struct net_device *physoutdev; union __anonunion____missing_field_name_451 __annonCompField100; } ;
278 struct sk_buff_head { struct sk_buff *next; struct sk_buff *prev; __u32 qlen; spinlock_t lock; } ;
310 struct skb_frag_struct ;
310 typedef struct skb_frag_struct skb_frag_t;
311 struct __anonstruct_page_452 { struct page *p; } ;
311 struct skb_frag_struct { struct __anonstruct_page_452 page; __u32 page_offset; __u32 size; } ;
344 struct skb_shared_hwtstamps { ktime_t hwtstamp; } ;
410 struct skb_shared_info { unsigned char nr_frags; __u8 tx_flags; unsigned short gso_size; unsigned short gso_segs; unsigned short gso_type; struct sk_buff *frag_list; struct skb_shared_hwtstamps hwtstamps; u32 tskey; __be32 ip6_frag_id; atomic_t dataref; void *destructor_arg; skb_frag_t frags[17U]; } ;
500 typedef unsigned int sk_buff_data_t;
501 struct __anonstruct____missing_field_name_454 { u32 stamp_us; u32 stamp_jiffies; } ;
501 union __anonunion____missing_field_name_453 { u64 v64; struct __anonstruct____missing_field_name_454 __annonCompField101; } ;
501 struct skb_mstamp { union __anonunion____missing_field_name_453 __annonCompField102; } ;
564 union __anonunion____missing_field_name_457 { ktime_t tstamp; struct skb_mstamp skb_mstamp; } ;
564 struct __anonstruct____missing_field_name_456 { struct sk_buff *next; struct sk_buff *prev; union __anonunion____missing_field_name_457 __annonCompField103; } ;
564 union __anonunion____missing_field_name_455 { struct __anonstruct____missing_field_name_456 __annonCompField104; struct rb_node rbnode; } ;
564 union __anonunion____missing_field_name_458 { struct net_device *dev; unsigned long dev_scratch; } ;
564 struct sec_path ;
564 struct __anonstruct____missing_field_name_460 { __u16 csum_start; __u16 csum_offset; } ;
564 union __anonunion____missing_field_name_459 { __wsum csum; struct __anonstruct____missing_field_name_460 __annonCompField107; } ;
564 union __anonunion____missing_field_name_461 { unsigned int napi_id; unsigned int sender_cpu; } ;
564 union __anonunion____missing_field_name_462 { __u32 mark; __u32 reserved_tailroom; } ;
564 union __anonunion____missing_field_name_463 { __be16 inner_protocol; __u8 inner_ipproto; } ;
564 struct sk_buff { union __anonunion____missing_field_name_455 __annonCompField105; struct sock *sk; union __anonunion____missing_field_name_458 __annonCompField106; char cb[48U]; unsigned long _skb_refdst; void (*destructor)(struct sk_buff *); struct sec_path *sp; struct nf_conntrack *nfct; struct nf_bridge_info *nf_bridge; unsigned int len; unsigned int data_len; __u16 mac_len; __u16 hdr_len; __u16 queue_mapping; __u8 __cloned_offset[0U]; unsigned char cloned; unsigned char nohdr; unsigned char fclone; unsigned char peeked; unsigned char head_frag; unsigned char xmit_more; unsigned char __unused; __u32 headers_start[0U]; __u8 __pkt_type_offset[0U]; unsigned char pkt_type; unsigned char pfmemalloc; unsigned char ignore_df; unsigned char nfctinfo; unsigned char nf_trace; unsigned char ip_summed; unsigned char ooo_okay; unsigned char l4_hash; unsigned char sw_hash; unsigned char wifi_acked_valid; unsigned char wifi_acked; unsigned char no_fcs; unsigned char encapsulation; unsigned char encap_hdr_csum; unsigned char csum_valid; unsigned char csum_complete_sw; unsigned char csum_level; unsigned char csum_bad; unsigned char ndisc_nodetype; unsigned char ipvs_property; unsigned char inner_protocol_type; unsigned char remcsum_offload; unsigned char offload_fwd_mark; __u16 tc_index; __u16 tc_verd; union __anonunion____missing_field_name_459 __annonCompField108; __u32 priority; int skb_iif; __u32 hash; __be16 vlan_proto; __u16 vlan_tci; union __anonunion____missing_field_name_461 __annonCompField109; __u32 secmark; union __anonunion____missing_field_name_462 __annonCompField110; union __anonunion____missing_field_name_463 __annonCompField111; __u16 inner_transport_header; __u16 inner_network_header; __u16 inner_mac_header; __be16 protocol; __u16 transport_header; __u16 network_header; __u16 mac_header; __u32 headers_end[0U]; sk_buff_data_t tail; sk_buff_data_t end; unsigned char *head; unsigned char *data; unsigned int truesize; atomic_t users; } ;
845 struct dst_entry ;
39 struct ethtool_cmd { __u32 cmd; __u32 supported; __u32 advertising; __u16 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 transceiver; __u8 autoneg; __u8 mdio_support; __u32 maxtxpkt; __u32 maxrxpkt; __u16 speed_hi; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __u32 lp_advertising; __u32 reserved[2U]; } ;
130 struct ethtool_drvinfo { __u32 cmd; char driver[32U]; char version[32U]; char fw_version[32U]; char bus_info[32U]; char erom_version[32U]; char reserved2[12U]; __u32 n_priv_flags; __u32 n_stats; __u32 testinfo_len; __u32 eedump_len; __u32 regdump_len; } ;
194 struct ethtool_wolinfo { __u32 cmd; __u32 supported; __u32 wolopts; __u8 sopass[6U]; } ;
238 struct ethtool_tunable { __u32 cmd; __u32 id; __u32 type_id; __u32 len; void *data[0U]; } ;
256 struct ethtool_regs { __u32 cmd; __u32 version; __u32 len; __u8 data[0U]; } ;
285 struct ethtool_eeprom { __u32 cmd; __u32 magic; __u32 offset; __u32 len; __u8 data[0U]; } ;
311 struct ethtool_eee { __u32 cmd; __u32 supported; __u32 advertised; __u32 lp_advertised; __u32 eee_active; __u32 eee_enabled; __u32 tx_lpi_enabled; __u32 tx_lpi_timer; __u32 reserved[2U]; } ;
340 struct ethtool_modinfo { __u32 cmd; __u32 type; __u32 eeprom_len; __u32 reserved[8U]; } ;
357 struct ethtool_coalesce { __u32 cmd; __u32 rx_coalesce_usecs; __u32 rx_max_coalesced_frames; __u32 rx_coalesce_usecs_irq; __u32 rx_max_coalesced_frames_irq; __u32 tx_coalesce_usecs; __u32 tx_max_coalesced_frames; __u32 tx_coalesce_usecs_irq; __u32 tx_max_coalesced_frames_irq; __u32 stats_block_coalesce_usecs; __u32 use_adaptive_rx_coalesce; __u32 use_adaptive_tx_coalesce; __u32 pkt_rate_low; __u32 rx_coalesce_usecs_low; __u32 rx_max_coalesced_frames_low; __u32 tx_coalesce_usecs_low; __u32 tx_max_coalesced_frames_low; __u32 pkt_rate_high; __u32 rx_coalesce_usecs_high; __u32 rx_max_coalesced_frames_high; __u32 tx_coalesce_usecs_high; __u32 tx_max_coalesced_frames_high; __u32 rate_sample_interval; } ;
456 struct ethtool_ringparam { __u32 cmd; __u32 rx_max_pending; __u32 rx_mini_max_pending; __u32 rx_jumbo_max_pending; __u32 tx_max_pending; __u32 rx_pending; __u32 rx_mini_pending; __u32 rx_jumbo_pending; __u32 tx_pending; } ;
493 struct ethtool_channels { __u32 cmd; __u32 max_rx; __u32 max_tx; __u32 max_other; __u32 max_combined; __u32 rx_count; __u32 tx_count; __u32 other_count; __u32 combined_count; } ;
521 struct ethtool_pauseparam { __u32 cmd; __u32 autoneg; __u32 rx_pause; __u32 tx_pause; } ;
627 struct ethtool_test { __u32 cmd; __u32 flags; __u32 reserved; __u32 len; __u64 data[0U]; } ;
659 struct ethtool_stats { __u32 cmd; __u32 n_stats; __u64 data[0U]; } ;
701 struct ethtool_tcpip4_spec { __be32 ip4src; __be32 ip4dst; __be16 psrc; __be16 pdst; __u8 tos; } ;
734 struct ethtool_ah_espip4_spec { __be32 ip4src; __be32 ip4dst; __be32 spi; __u8 tos; } ;
750 struct ethtool_usrip4_spec { __be32 ip4src; __be32 ip4dst; __be32 l4_4_bytes; __u8 tos; __u8 ip_ver; __u8 proto; } ;
770 struct ethtool_tcpip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be16 psrc; __be16 pdst; __u8 tclass; } ;
788 struct ethtool_ah_espip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be32 spi; __u8 tclass; } ;
804 struct ethtool_usrip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be32 l4_4_bytes; __u8 tclass; __u8 l4_proto; } ;
820 union ethtool_flow_union { struct ethtool_tcpip4_spec tcp_ip4_spec; struct ethtool_tcpip4_spec udp_ip4_spec; struct ethtool_tcpip4_spec sctp_ip4_spec; struct ethtool_ah_espip4_spec ah_ip4_spec; struct ethtool_ah_espip4_spec esp_ip4_spec; struct ethtool_usrip4_spec usr_ip4_spec; struct ethtool_tcpip6_spec tcp_ip6_spec; struct ethtool_tcpip6_spec udp_ip6_spec; struct ethtool_tcpip6_spec sctp_ip6_spec; struct ethtool_ah_espip6_spec ah_ip6_spec; struct ethtool_ah_espip6_spec esp_ip6_spec; struct ethtool_usrip6_spec usr_ip6_spec; struct ethhdr ether_spec; __u8 hdata[52U]; } ;
837 struct ethtool_flow_ext { __u8 padding[2U]; unsigned char h_dest[6U]; __be16 vlan_etype; __be16 vlan_tci; __be32 data[2U]; } ;
856 struct ethtool_rx_flow_spec { __u32 flow_type; union ethtool_flow_union h_u; struct ethtool_flow_ext h_ext; union ethtool_flow_union m_u; struct ethtool_flow_ext m_ext; __u64 ring_cookie; __u32 location; } ;
906 struct ethtool_rxnfc { __u32 cmd; __u32 flow_type; __u64 data; struct ethtool_rx_flow_spec fs; __u32 rule_cnt; __u32 rule_locs[0U]; } ;
1077 struct ethtool_flash { __u32 cmd; __u32 region; char data[128U]; } ;
1085 struct ethtool_dump { __u32 cmd; __u32 version; __u32 flag; __u32 len; __u8 data[0U]; } ;
1161 struct ethtool_ts_info { __u32 cmd; __u32 so_timestamping; __s32 phc_index; __u32 tx_types; __u32 tx_reserved[3U]; __u32 rx_filters; __u32 rx_reserved[3U]; } ;
1537 struct ethtool_link_settings { __u32 cmd; __u32 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 autoneg; __u8 mdio_support; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __s8 link_mode_masks_nwords; __u32 reserved[8U]; __u32 link_mode_masks[0U]; } ;
39 enum ethtool_phys_id_state { ETHTOOL_ID_INACTIVE = 0, ETHTOOL_ID_ACTIVE = 1, ETHTOOL_ID_ON = 2, ETHTOOL_ID_OFF = 3 } ;
97 struct __anonstruct_link_modes_467 { unsigned long supported[1U]; unsigned long advertising[1U]; unsigned long lp_advertising[1U]; } ;
97 struct ethtool_link_ksettings { struct ethtool_link_settings base; struct __anonstruct_link_modes_467 link_modes; } ;
158 struct ethtool_ops { int (*get_settings)(struct net_device *, struct ethtool_cmd *); int (*set_settings)(struct net_device *, struct ethtool_cmd *); void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); int (*get_regs_len)(struct net_device *); void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); u32 (*get_msglevel)(struct net_device *); void (*set_msglevel)(struct net_device *, u32 ); int (*nway_reset)(struct net_device *); u32 (*get_link)(struct net_device *); int (*get_eeprom_len)(struct net_device *); int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *); int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *); void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); void (*get_strings)(struct net_device *, u32 , u8 *); int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state ); void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); int (*begin)(struct net_device *); void (*complete)(struct net_device *); u32 (*get_priv_flags)(struct net_device *); int (*set_priv_flags)(struct net_device *, u32 ); int (*get_sset_count)(struct net_device *, int); int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *); int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); int (*flash_device)(struct net_device *, struct ethtool_flash *); int (*reset)(struct net_device *, u32 *); u32 
(*get_rxfh_key_size)(struct net_device *); u32 (*get_rxfh_indir_size)(struct net_device *); int (*get_rxfh)(struct net_device *, u32 *, u8 *, u8 *); int (*set_rxfh)(struct net_device *, const u32 *, const u8 *, const u8 ); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); int (*set_dump)(struct net_device *, struct ethtool_dump *); int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *); int (*get_module_info)(struct net_device *, struct ethtool_modinfo *); int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_eee)(struct net_device *, struct ethtool_eee *); int (*set_eee)(struct net_device *, struct ethtool_eee *); int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *); int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); int (*get_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *); int (*set_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *); int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *); int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); } ;
375 struct prot_inuse ;
376 struct netns_core { struct ctl_table_header *sysctl_hdr; int sysctl_somaxconn; struct prot_inuse *inuse; } ;
38 struct u64_stats_sync { } ;
164 struct ipstats_mib { u64 mibs[36U]; struct u64_stats_sync syncp; } ;
61 struct icmp_mib { unsigned long mibs[28U]; } ;
67 struct icmpmsg_mib { atomic_long_t mibs[512U]; } ;
72 struct icmpv6_mib { unsigned long mibs[6U]; } ;
83 struct icmpv6msg_mib { atomic_long_t mibs[512U]; } ;
93 struct tcp_mib { unsigned long mibs[16U]; } ;
100 struct udp_mib { unsigned long mibs[9U]; } ;
106 struct linux_mib { unsigned long mibs[118U]; } ;
112 struct linux_xfrm_mib { unsigned long mibs[29U]; } ;
118 struct netns_mib { struct tcp_mib *tcp_statistics; struct ipstats_mib *ip_statistics; struct linux_mib *net_statistics; struct udp_mib *udp_statistics; struct udp_mib *udplite_statistics; struct icmp_mib *icmp_statistics; struct icmpmsg_mib *icmpmsg_statistics; struct proc_dir_entry *proc_net_devsnmp6; struct udp_mib *udp_stats_in6; struct udp_mib *udplite_stats_in6; struct ipstats_mib *ipv6_statistics; struct icmpv6_mib *icmpv6_statistics; struct icmpv6msg_mib *icmpv6msg_statistics; struct linux_xfrm_mib *xfrm_statistics; } ;
26 struct netns_unix { int sysctl_max_dgram_qlen; struct ctl_table_header *ctl; } ;
12 struct netns_packet { struct mutex sklist_lock; struct hlist_head sklist; } ;
14 struct netns_frags { struct percpu_counter mem; int timeout; int high_thresh; int low_thresh; int max_dist; } ;
187 struct ipv4_devconf ;
188 struct fib_rules_ops ;
189 struct fib_table ;
190 struct local_ports { seqlock_t lock; int range[2U]; bool warned; } ;
24 struct ping_group_range { seqlock_t lock; kgid_t range[2U]; } ;
29 struct inet_peer_base ;
29 struct xt_table ;
29 struct netns_ipv4 { struct ctl_table_header *forw_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *ipv4_hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *xfrm4_hdr; struct ipv4_devconf *devconf_all; struct ipv4_devconf *devconf_dflt; struct fib_rules_ops *rules_ops; bool fib_has_custom_rules; struct fib_table *fib_main; struct fib_table *fib_default; int fib_num_tclassid_users; struct hlist_head *fib_table_hash; bool fib_offload_disabled; struct sock *fibnl; struct sock **icmp_sk; struct sock *mc_autojoin_sk; struct inet_peer_base *peers; struct sock **tcp_sk; struct netns_frags frags; struct xt_table *iptable_filter; struct xt_table *iptable_mangle; struct xt_table *iptable_raw; struct xt_table *arptable_filter; struct xt_table *iptable_security; struct xt_table *nat_table; int sysctl_icmp_echo_ignore_all; int sysctl_icmp_echo_ignore_broadcasts; int sysctl_icmp_ignore_bogus_error_responses; int sysctl_icmp_ratelimit; int sysctl_icmp_ratemask; int sysctl_icmp_errors_use_inbound_ifaddr; struct local_ports ip_local_ports; int sysctl_tcp_ecn; int sysctl_tcp_ecn_fallback; int sysctl_ip_default_ttl; int sysctl_ip_no_pmtu_disc; int sysctl_ip_fwd_use_pmtu; int sysctl_ip_nonlocal_bind; int sysctl_ip_dynaddr; int sysctl_ip_early_demux; int sysctl_fwmark_reflect; int sysctl_tcp_fwmark_accept; int sysctl_tcp_l3mdev_accept; int sysctl_tcp_mtu_probing; int sysctl_tcp_base_mss; int sysctl_tcp_probe_threshold; u32 sysctl_tcp_probe_interval; int sysctl_tcp_keepalive_time; int sysctl_tcp_keepalive_probes; int sysctl_tcp_keepalive_intvl; int sysctl_tcp_syn_retries; int sysctl_tcp_synack_retries; int sysctl_tcp_syncookies; int sysctl_tcp_reordering; int sysctl_tcp_retries1; int sysctl_tcp_retries2; int sysctl_tcp_orphan_retries; int sysctl_tcp_fin_timeout; unsigned int sysctl_tcp_notsent_lowat; int sysctl_igmp_max_memberships; int sysctl_igmp_max_msf; int sysctl_igmp_llm_reports; int sysctl_igmp_qrv; struct ping_group_range ping_group_range; 
atomic_t dev_addr_genid; unsigned long *sysctl_local_reserved_ports; struct list_head mr_tables; struct fib_rules_ops *mr_rules_ops; int sysctl_fib_multipath_use_neigh; unsigned int fib_seq; atomic_t rt_genid; } ;
144 struct neighbour ;
144 struct dst_ops { unsigned short family; unsigned int gc_thresh; int (*gc)(struct dst_ops *); struct dst_entry * (*check)(struct dst_entry *, __u32 ); unsigned int (*default_advmss)(const struct dst_entry *); unsigned int (*mtu)(const struct dst_entry *); u32 * (*cow_metrics)(struct dst_entry *, unsigned long); void (*destroy)(struct dst_entry *); void (*ifdown)(struct dst_entry *, struct net_device *, int); struct dst_entry * (*negative_advice)(struct dst_entry *); void (*link_failure)(struct sk_buff *); void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32 ); void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *); int (*local_out)(struct net *, struct sock *, struct sk_buff *); struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *); struct kmem_cache *kmem_cachep; struct percpu_counter pcpuc_entries; } ;
73 struct netns_sysctl_ipv6 { struct ctl_table_header *hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *icmp_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *xfrm6_hdr; int bindv6only; int flush_delay; int ip6_rt_max_size; int ip6_rt_gc_min_interval; int ip6_rt_gc_timeout; int ip6_rt_gc_interval; int ip6_rt_gc_elasticity; int ip6_rt_mtu_expires; int ip6_rt_min_advmss; int flowlabel_consistency; int auto_flowlabels; int icmpv6_time; int anycast_src_echo_reply; int ip_nonlocal_bind; int fwmark_reflect; int idgen_retries; int idgen_delay; int flowlabel_state_ranges; } ;
40 struct ipv6_devconf ;
40 struct rt6_info ;
40 struct rt6_statistics ;
40 struct fib6_table ;
40 struct seg6_pernet_data ;
40 struct netns_ipv6 { struct netns_sysctl_ipv6 sysctl; struct ipv6_devconf *devconf_all; struct ipv6_devconf *devconf_dflt; struct inet_peer_base *peers; struct netns_frags frags; struct xt_table *ip6table_filter; struct xt_table *ip6table_mangle; struct xt_table *ip6table_raw; struct xt_table *ip6table_security; struct xt_table *ip6table_nat; struct rt6_info *ip6_null_entry; struct rt6_statistics *rt6_stats; struct timer_list ip6_fib_timer; struct hlist_head *fib_table_hash; struct fib6_table *fib6_main_tbl; struct list_head fib6_walkers; struct dst_ops ip6_dst_ops; rwlock_t fib6_walker_lock; spinlock_t fib6_gc_lock; unsigned int ip6_rt_gc_expire; unsigned long ip6_rt_last_gc; struct rt6_info *ip6_prohibit_entry; struct rt6_info *ip6_blk_hole_entry; struct fib6_table *fib6_local_tbl; struct fib_rules_ops *fib6_rules_ops; struct sock **icmp_sk; struct sock *ndisc_sk; struct sock *tcp_sk; struct sock *igmp_sk; struct sock *mc_autojoin_sk; struct list_head mr6_tables; struct fib_rules_ops *mr6_rules_ops; atomic_t dev_addr_genid; atomic_t fib6_sernum; struct seg6_pernet_data *seg6_data; } ;
90 struct netns_nf_frag { struct netns_sysctl_ipv6 sysctl; struct netns_frags frags; } ;
96 struct netns_sysctl_lowpan { struct ctl_table_header *frags_hdr; } ;
14 struct netns_ieee802154_lowpan { struct netns_sysctl_lowpan sysctl; struct netns_frags frags; } ;
20 struct sctp_mib ;
21 struct netns_sctp { struct sctp_mib *sctp_statistics; struct proc_dir_entry *proc_net_sctp; struct ctl_table_header *sysctl_header; struct sock *ctl_sock; struct list_head local_addr_list; struct list_head addr_waitq; struct timer_list addr_wq_timer; struct list_head auto_asconf_splist; spinlock_t addr_wq_lock; spinlock_t local_addr_lock; unsigned int rto_initial; unsigned int rto_min; unsigned int rto_max; int rto_alpha; int rto_beta; int max_burst; int cookie_preserve_enable; char *sctp_hmac_alg; unsigned int valid_cookie_life; unsigned int sack_timeout; unsigned int hb_interval; int max_retrans_association; int max_retrans_path; int max_retrans_init; int pf_retrans; int pf_enable; int sndbuf_policy; int rcvbuf_policy; int default_auto_asconf; int addip_enable; int addip_noauth; int prsctp_enable; int auth_enable; int scope_policy; int rwnd_upd_shift; unsigned long max_autoclose; } ;
141 struct netns_dccp { struct sock *v4_ctl_sk; struct sock *v6_ctl_sk; } ;
79 struct nf_logger ;
80 struct nf_queue_handler ;
81 struct nf_hook_entry ;
81 struct netns_nf { struct proc_dir_entry *proc_netfilter; const struct nf_queue_handler *queue_handler; const struct nf_logger *nf_loggers[13U]; struct ctl_table_header *nf_log_dir_header; struct nf_hook_entry *hooks[13U][8U]; bool defrag_ipv4; bool defrag_ipv6; } ;
26 struct ebt_table ;
27 struct netns_xt { struct list_head tables[13U]; bool notrack_deprecated_warning; bool clusterip_deprecated_warning; struct ebt_table *broute_table; struct ebt_table *frame_filter; struct ebt_table *frame_nat; } ;
19 struct hlist_nulls_node ;
19 struct hlist_nulls_head { struct hlist_nulls_node *first; } ;
23 struct hlist_nulls_node { struct hlist_nulls_node *next; struct hlist_nulls_node **pprev; } ;
114 struct ip_conntrack_stat { unsigned int found; unsigned int invalid; unsigned int ignore; unsigned int insert; unsigned int insert_failed; unsigned int drop; unsigned int early_drop; unsigned int error; unsigned int expect_new; unsigned int expect_create; unsigned int expect_delete; unsigned int search_restart; } ;
13 struct nf_proto_net { struct ctl_table_header *ctl_table_header; struct ctl_table *ctl_table; unsigned int users; } ;
27 struct nf_generic_net { struct nf_proto_net pn; unsigned int timeout; } ;
32 struct nf_tcp_net { struct nf_proto_net pn; unsigned int timeouts[14U]; unsigned int tcp_loose; unsigned int tcp_be_liberal; unsigned int tcp_max_retrans; } ;
46 struct nf_udp_net { struct nf_proto_net pn; unsigned int timeouts[2U]; } ;
51 struct nf_icmp_net { struct nf_proto_net pn; unsigned int timeout; } ;
56 struct nf_dccp_net { struct nf_proto_net pn; int dccp_loose; unsigned int dccp_timeout[10U]; } ;
63 struct nf_sctp_net { struct nf_proto_net pn; unsigned int timeouts[10U]; } ;
76 struct nf_udplite_net { struct nf_proto_net pn; unsigned int timeouts[2U]; } ;
83 struct nf_ip_net { struct nf_generic_net generic; struct nf_tcp_net tcp; struct nf_udp_net udp; struct nf_icmp_net icmp; struct nf_icmp_net icmpv6; struct nf_dccp_net dccp; struct nf_sctp_net sctp; struct nf_udplite_net udplite; } ;
100 struct ct_pcpu { spinlock_t lock; struct hlist_nulls_head unconfirmed; struct hlist_nulls_head dying; } ;
107 struct nf_ct_event_notifier ;
107 struct nf_exp_event_notifier ;
107 struct netns_ct { atomic_t count; unsigned int expect_count; struct delayed_work ecache_dwork; bool ecache_dwork_pending; struct ctl_table_header *sysctl_header; struct ctl_table_header *acct_sysctl_header; struct ctl_table_header *tstamp_sysctl_header; struct ctl_table_header *event_sysctl_header; struct ctl_table_header *helper_sysctl_header; unsigned int sysctl_log_invalid; int sysctl_events; int sysctl_acct; int sysctl_auto_assign_helper; bool auto_assign_helper_warned; int sysctl_tstamp; int sysctl_checksum; struct ct_pcpu *pcpu_lists; struct ip_conntrack_stat *stat; struct nf_ct_event_notifier *nf_conntrack_event_cb; struct nf_exp_event_notifier *nf_expect_event_cb; struct nf_ip_net nf_ct_proto; unsigned int labels_used; } ;
138 struct nft_af_info ;
139 struct netns_nftables { struct list_head af_info; struct list_head commit_list; struct nft_af_info *ipv4; struct nft_af_info *ipv6; struct nft_af_info *inet; struct nft_af_info *arp; struct nft_af_info *bridge; struct nft_af_info *netdev; unsigned int base_seq; u8 gencursor; } ;
509 struct flow_cache_percpu { struct hlist_head *hash_table; int hash_count; u32 hash_rnd; int hash_rnd_recalc; struct tasklet_struct flush_tasklet; } ;
16 struct flow_cache { u32 hash_shift; struct flow_cache_percpu *percpu; struct hlist_node node; int low_watermark; int high_watermark; struct timer_list rnd_timer; } ;
25 struct xfrm_policy_hash { struct hlist_head *table; unsigned int hmask; u8 dbits4; u8 sbits4; u8 dbits6; u8 sbits6; } ;
21 struct xfrm_policy_hthresh { struct work_struct work; seqlock_t lock; u8 lbits4; u8 rbits4; u8 lbits6; u8 rbits6; } ;
30 struct netns_xfrm { struct list_head state_all; struct hlist_head *state_bydst; struct hlist_head *state_bysrc; struct hlist_head *state_byspi; unsigned int state_hmask; unsigned int state_num; struct work_struct state_hash_work; struct list_head policy_all; struct hlist_head *policy_byidx; unsigned int policy_idx_hmask; struct hlist_head policy_inexact[3U]; struct xfrm_policy_hash policy_bydst[3U]; unsigned int policy_count[6U]; struct work_struct policy_hash_work; struct xfrm_policy_hthresh policy_hthresh; struct sock *nlsk; struct sock *nlsk_stash; u32 sysctl_aevent_etime; u32 sysctl_aevent_rseqth; int sysctl_larval_drop; u32 sysctl_acq_expires; struct ctl_table_header *sysctl_hdr; struct dst_ops xfrm4_dst_ops; struct dst_ops xfrm6_dst_ops; spinlock_t xfrm_state_lock; spinlock_t xfrm_policy_lock; struct mutex xfrm_cfg_mutex; struct flow_cache flow_cache_global; atomic_t flow_cache_genid; struct list_head flow_cache_gc_list; atomic_t flow_cache_gc_count; spinlock_t flow_cache_gc_lock; struct work_struct flow_cache_gc_work; struct work_struct flow_cache_flush_work; struct mutex flow_flush_sem; } ;
87 struct mpls_route ;
88 struct netns_mpls { size_t platform_labels; struct mpls_route **platform_label; struct ctl_table_header *ctl; } ;
16 struct proc_ns_operations ;
17 struct ns_common { atomic_long_t stashed; const struct proc_ns_operations *ops; unsigned int inum; } ;
11 struct net_generic ;
12 struct netns_ipvs ;
13 struct ucounts ;
13 struct net { atomic_t passive; atomic_t count; spinlock_t rules_mod_lock; atomic64_t cookie_gen; struct list_head list; struct list_head cleanup_list; struct list_head exit_list; struct user_namespace *user_ns; struct ucounts *ucounts; spinlock_t nsid_lock; struct idr netns_ids; struct ns_common ns; struct proc_dir_entry *proc_net; struct proc_dir_entry *proc_net_stat; struct ctl_table_set sysctls; struct sock *rtnl; struct sock *genl_sock; struct list_head dev_base_head; struct hlist_head *dev_name_head; struct hlist_head *dev_index_head; unsigned int dev_base_seq; int ifindex; unsigned int dev_unreg_count; struct list_head rules_ops; struct net_device *loopback_dev; struct netns_core core; struct netns_mib mib; struct netns_packet packet; struct netns_unix unx; struct netns_ipv4 ipv4; struct netns_ipv6 ipv6; struct netns_ieee802154_lowpan ieee802154_lowpan; struct netns_sctp sctp; struct netns_dccp dccp; struct netns_nf nf; struct netns_xt xt; struct netns_ct ct; struct netns_nftables nft; struct netns_nf_frag nf_frag; struct sock *nfnl; struct sock *nfnl_stash; struct list_head nfnl_acct_list; struct list_head nfct_timeout_list; struct sk_buff_head wext_nlevents; struct net_generic *gen; struct netns_xfrm xfrm; struct netns_ipvs *ipvs; struct netns_mpls mpls; struct sock *diag_nlsk; atomic_t fnhe_genid; } ;
248 struct __anonstruct_possible_net_t_479 { struct net *net; } ;
248 typedef struct __anonstruct_possible_net_t_479 possible_net_t;
383 enum fwnode_type { FWNODE_INVALID = 0, FWNODE_OF = 1, FWNODE_ACPI = 2, FWNODE_ACPI_DATA = 3, FWNODE_ACPI_STATIC = 4, FWNODE_PDATA = 5, FWNODE_IRQCHIP = 6 } ;
393 struct fwnode_handle { enum fwnode_type type; struct fwnode_handle *secondary; } ;
32 typedef u32 phandle;
34 struct property { char *name; int length; void *value; struct property *next; unsigned long _flags; unsigned int unique_id; struct bin_attribute attr; } ;
44 struct device_node { const char *name; const char *type; phandle phandle; const char *full_name; struct fwnode_handle fwnode; struct property *properties; struct property *deadprops; struct device_node *parent; struct device_node *child; struct device_node *sibling; struct kobject kobj; unsigned long _flags; void *data; } ;
1290 struct mii_ioctl_data { __u16 phy_id; __u16 reg_num; __u16 val_in; __u16 val_out; } ;
161 struct mii_if_info { int phy_id; int advertising; int phy_id_mask; int reg_num_mask; unsigned char full_duplex; unsigned char force_media; unsigned char supports_gmii; struct net_device *dev; int (*mdio_read)(struct net_device *, int, int); void (*mdio_write)(struct net_device *, int, int, int); } ;
296 struct mii_bus ;
303 struct mdio_device { struct device dev; const struct dev_pm_ops *pm_ops; struct mii_bus *bus; int (*bus_match)(struct device *, struct device_driver *); void (*device_free)(struct mdio_device *); void (*device_remove)(struct mdio_device *); int addr; int flags; } ;
41 struct mdio_driver_common { struct device_driver driver; int flags; } ;
244 struct phy_device ;
245 enum led_brightness { LED_OFF = 0, LED_HALF = 127, LED_FULL = 255 } ;
251 struct led_trigger ;
251 struct led_classdev { const char *name; enum led_brightness brightness; enum led_brightness max_brightness; int flags; unsigned long work_flags; void (*brightness_set)(struct led_classdev *, enum led_brightness ); int (*brightness_set_blocking)(struct led_classdev *, enum led_brightness ); enum led_brightness (*brightness_get)(struct led_classdev *); int (*blink_set)(struct led_classdev *, unsigned long *, unsigned long *); struct device *dev; const struct attribute_group **groups; struct list_head node; const char *default_trigger; unsigned long blink_delay_on; unsigned long blink_delay_off; struct timer_list blink_timer; int blink_brightness; int new_blink_brightness; void (*flash_resume)(struct led_classdev *); struct work_struct set_brightness_work; int delayed_set_value; struct rw_semaphore trigger_lock; struct led_trigger *trigger; struct list_head trig_list; void *trigger_data; bool activated; struct mutex led_access; } ;
226 struct led_trigger { const char *name; void (*activate)(struct led_classdev *); void (*deactivate)(struct led_classdev *); rwlock_t leddev_list_lock; struct list_head led_cdevs; struct list_head next_trig; } ;
418 struct phy_led_trigger { struct led_trigger trigger; char name[31U]; unsigned int speed; } ;
39 enum ldv_32668 { PHY_INTERFACE_MODE_NA = 0, PHY_INTERFACE_MODE_MII = 1, PHY_INTERFACE_MODE_GMII = 2, PHY_INTERFACE_MODE_SGMII = 3, PHY_INTERFACE_MODE_TBI = 4, PHY_INTERFACE_MODE_REVMII = 5, PHY_INTERFACE_MODE_RMII = 6, PHY_INTERFACE_MODE_RGMII = 7, PHY_INTERFACE_MODE_RGMII_ID = 8, PHY_INTERFACE_MODE_RGMII_RXID = 9, PHY_INTERFACE_MODE_RGMII_TXID = 10, PHY_INTERFACE_MODE_RTBI = 11, PHY_INTERFACE_MODE_SMII = 12, PHY_INTERFACE_MODE_XGMII = 13, PHY_INTERFACE_MODE_MOCA = 14, PHY_INTERFACE_MODE_QSGMII = 15, PHY_INTERFACE_MODE_TRGMII = 16, PHY_INTERFACE_MODE_MAX = 17 } ;
86 typedef enum ldv_32668 phy_interface_t;
149 enum ldv_32721 { MDIOBUS_ALLOCATED = 1, MDIOBUS_REGISTERED = 2, MDIOBUS_UNREGISTERED = 3, MDIOBUS_RELEASED = 4 } ;
156 struct mii_bus { struct module *owner; const char *name; char id[17U]; void *priv; int (*read)(struct mii_bus *, int, int); int (*write)(struct mii_bus *, int, int, u16 ); int (*reset)(struct mii_bus *); struct mutex mdio_lock; struct device *parent; enum ldv_32721 state; struct device dev; struct mdio_device *mdio_map[32U]; u32 phy_mask; u32 phy_ignore_ta_mask; int irq[32U]; } ;
237 enum phy_state { PHY_DOWN = 0, PHY_STARTING = 1, PHY_READY = 2, PHY_PENDING = 3, PHY_UP = 4, PHY_AN = 5, PHY_RUNNING = 6, PHY_NOLINK = 7, PHY_FORCING = 8, PHY_CHANGELINK = 9, PHY_HALTED = 10, PHY_RESUMING = 11 } ;
252 struct phy_c45_device_ids { u32 devices_in_package; u32 device_ids[8U]; } ;
345 struct phy_driver ;
345 struct phy_device { struct mdio_device mdio; struct phy_driver *drv; u32 phy_id; struct phy_c45_device_ids c45_ids; bool is_c45; bool is_internal; bool is_pseudo_fixed_link; bool has_fixups; bool suspended; enum phy_state state; u32 dev_flags; phy_interface_t interface; int speed; int duplex; int pause; int asym_pause; int link; u32 interrupts; u32 supported; u32 advertising; u32 lp_advertising; u32 eee_broken_modes; int autoneg; int link_timeout; struct phy_led_trigger *phy_led_triggers; unsigned int phy_num_led_triggers; struct phy_led_trigger *last_triggered; int irq; void *priv; struct work_struct phy_queue; struct delayed_work state_queue; atomic_t irq_disable; struct mutex lock; struct net_device *attached_dev; u8 mdix; u8 mdix_ctrl; void (*adjust_link)(struct net_device *); } ;
457 struct phy_driver { struct mdio_driver_common mdiodrv; u32 phy_id; char *name; unsigned int phy_id_mask; u32 features; u32 flags; const void *driver_data; int (*soft_reset)(struct phy_device *); int (*config_init)(struct phy_device *); int (*probe)(struct phy_device *); int (*suspend)(struct phy_device *); int (*resume)(struct phy_device *); int (*config_aneg)(struct phy_device *); int (*aneg_done)(struct phy_device *); int (*read_status)(struct phy_device *); int (*ack_interrupt)(struct phy_device *); int (*config_intr)(struct phy_device *); int (*did_interrupt)(struct phy_device *); void (*remove)(struct phy_device *); int (*match_phy_device)(struct phy_device *); int (*ts_info)(struct phy_device *, struct ethtool_ts_info *); int (*hwtstamp)(struct phy_device *, struct ifreq *); bool (*rxtstamp)(struct phy_device *, struct sk_buff *, int); void (*txtstamp)(struct phy_device *, struct sk_buff *, int); int (*set_wol)(struct phy_device *, struct ethtool_wolinfo *); void (*get_wol)(struct phy_device *, struct ethtool_wolinfo *); void (*link_change_notify)(struct phy_device *); int (*read_mmd_indirect)(struct phy_device *, int, int, int); void (*write_mmd_indirect)(struct phy_device *, int, int, int, u32 ); int (*module_info)(struct phy_device *, struct ethtool_modinfo *); int (*module_eeprom)(struct phy_device *, struct ethtool_eeprom *, u8 *); int (*get_sset_count)(struct phy_device *); void (*get_strings)(struct phy_device *, u8 *); void (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); int (*get_tunable)(struct phy_device *, struct ethtool_tunable *, void *); int (*set_tunable)(struct phy_device *, struct ethtool_tunable *, const void *); } ;
884 struct fixed_phy_status { int link; int speed; int duplex; int pause; int asym_pause; } ;
27 enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = 0, DSA_TAG_PROTO_DSA = 1, DSA_TAG_PROTO_TRAILER = 2, DSA_TAG_PROTO_EDSA = 3, DSA_TAG_PROTO_BRCM = 4, DSA_TAG_PROTO_QCA = 5, DSA_TAG_LAST = 6 } ;
37 struct dsa_chip_data { struct device *host_dev; int sw_addr; int eeprom_len; struct device_node *of_node; char *port_names[12U]; struct device_node *port_dn[12U]; s8 rtable[4U]; } ;
71 struct dsa_platform_data { struct device *netdev; struct net_device *of_netdev; int nr_chips; struct dsa_chip_data *chip; } ;
87 struct packet_type ;
88 struct dsa_switch ;
88 struct dsa_device_ops ;
88 struct dsa_switch_tree { struct list_head list; u32 tree; struct kref refcount; bool applied; struct dsa_platform_data *pd; struct net_device *master_netdev; int (*rcv)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); struct ethtool_ops master_ethtool_ops; const struct ethtool_ops *master_orig_ethtool_ops; s8 cpu_switch; s8 cpu_port; struct dsa_switch *ds[4U]; const struct dsa_device_ops *tag_ops; } ;
141 struct dsa_port { struct net_device *netdev; struct device_node *dn; unsigned int ageing_time; u8 stp_state; } ;
148 struct dsa_switch_ops ;
148 struct dsa_switch { struct device *dev; struct dsa_switch_tree *dst; int index; void *priv; struct dsa_chip_data *cd; struct dsa_switch_ops *ops; s8 rtable[4U]; char hwmon_name[24U]; struct device *hwmon_dev; struct net_device *master_netdev; u32 dsa_port_mask; u32 cpu_port_mask; u32 enabled_port_mask; u32 phys_mii_mask; struct dsa_port ports[12U]; struct mii_bus *slave_mii_bus; } ;
235 struct switchdev_trans ;
236 struct switchdev_obj ;
237 struct switchdev_obj_port_fdb ;
238 struct switchdev_obj_port_mdb ;
239 struct switchdev_obj_port_vlan ;
240 struct dsa_switch_ops { struct list_head list; const char * (*probe)(struct device *, struct device *, int, void **); enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *); int (*setup)(struct dsa_switch *); int (*set_addr)(struct dsa_switch *, u8 *); u32 (*get_phy_flags)(struct dsa_switch *, int); int (*phy_read)(struct dsa_switch *, int, int); int (*phy_write)(struct dsa_switch *, int, int, u16 ); void (*adjust_link)(struct dsa_switch *, int, struct phy_device *); void (*fixed_link_update)(struct dsa_switch *, int, struct fixed_phy_status *); void (*get_strings)(struct dsa_switch *, int, uint8_t *); void (*get_ethtool_stats)(struct dsa_switch *, int, uint64_t *); int (*get_sset_count)(struct dsa_switch *); void (*get_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *); int (*set_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *); int (*suspend)(struct dsa_switch *); int (*resume)(struct dsa_switch *); int (*port_enable)(struct dsa_switch *, int, struct phy_device *); void (*port_disable)(struct dsa_switch *, int, struct phy_device *); int (*set_eee)(struct dsa_switch *, int, struct phy_device *, struct ethtool_eee *); int (*get_eee)(struct dsa_switch *, int, struct ethtool_eee *); int (*get_temp)(struct dsa_switch *, int *); int (*get_temp_limit)(struct dsa_switch *, int *); int (*set_temp_limit)(struct dsa_switch *, int); int (*get_temp_alarm)(struct dsa_switch *, bool *); int (*get_eeprom_len)(struct dsa_switch *); int (*get_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *); int (*get_regs_len)(struct dsa_switch *, int); void (*get_regs)(struct dsa_switch *, int, struct ethtool_regs *, void *); int (*set_ageing_time)(struct dsa_switch *, unsigned int); int (*port_bridge_join)(struct dsa_switch *, int, struct net_device *); void (*port_bridge_leave)(struct dsa_switch *, int); void (*port_stp_state_set)(struct dsa_switch *, int, u8 ); void 
(*port_fast_age)(struct dsa_switch *, int); int (*port_vlan_filtering)(struct dsa_switch *, int, bool ); int (*port_vlan_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *); void (*port_vlan_add)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *); int (*port_vlan_del)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *); int (*port_vlan_dump)(struct dsa_switch *, int, struct switchdev_obj_port_vlan *, int (*)(struct switchdev_obj *)); int (*port_fdb_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *); void (*port_fdb_add)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *); int (*port_fdb_del)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *); int (*port_fdb_dump)(struct dsa_switch *, int, struct switchdev_obj_port_fdb *, int (*)(struct switchdev_obj *)); int (*port_mdb_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *, struct switchdev_trans *); void (*port_mdb_add)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *, struct switchdev_trans *); int (*port_mdb_del)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *); int (*port_mdb_dump)(struct dsa_switch *, int, struct switchdev_obj_port_mdb *, int (*)(struct switchdev_obj *)); } ;
407 struct ieee_ets { __u8 willing; __u8 ets_cap; __u8 cbs; __u8 tc_tx_bw[8U]; __u8 tc_rx_bw[8U]; __u8 tc_tsa[8U]; __u8 prio_tc[8U]; __u8 tc_reco_bw[8U]; __u8 tc_reco_tsa[8U]; __u8 reco_prio_tc[8U]; } ;
69 struct ieee_maxrate { __u64 tc_maxrate[8U]; } ;
87 struct ieee_qcn { __u8 rpg_enable[8U]; __u32 rppp_max_rps[8U]; __u32 rpg_time_reset[8U]; __u32 rpg_byte_reset[8U]; __u32 rpg_threshold[8U]; __u32 rpg_max_rate[8U]; __u32 rpg_ai_rate[8U]; __u32 rpg_hai_rate[8U]; __u32 rpg_gd[8U]; __u32 rpg_min_dec_fac[8U]; __u32 rpg_min_rate[8U]; __u32 cndd_state_machine[8U]; } ;
132 struct ieee_qcn_stats { __u64 rppp_rp_centiseconds[8U]; __u32 rppp_created_rps[8U]; } ;
144 struct ieee_pfc { __u8 pfc_cap; __u8 pfc_en; __u8 mbc; __u16 delay; __u64 requests[8U]; __u64 indications[8U]; } ;
164 struct cee_pg { __u8 willing; __u8 error; __u8 pg_en; __u8 tcs_supported; __u8 pg_bw[8U]; __u8 prio_pg[8U]; } ;
187 struct cee_pfc { __u8 willing; __u8 error; __u8 pfc_en; __u8 tcs_supported; } ;
202 struct dcb_app { __u8 selector; __u8 priority; __u16 protocol; } ;
236 struct dcb_peer_app_info { __u8 willing; __u8 error; } ;
40 struct dcbnl_rtnl_ops { int (*ieee_getets)(struct net_device *, struct ieee_ets *); int (*ieee_setets)(struct net_device *, struct ieee_ets *); int (*ieee_getmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_setmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_getqcn)(struct net_device *, struct ieee_qcn *); int (*ieee_setqcn)(struct net_device *, struct ieee_qcn *); int (*ieee_getqcnstats)(struct net_device *, struct ieee_qcn_stats *); int (*ieee_getpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_setpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_getapp)(struct net_device *, struct dcb_app *); int (*ieee_setapp)(struct net_device *, struct dcb_app *); int (*ieee_delapp)(struct net_device *, struct dcb_app *); int (*ieee_peer_getets)(struct net_device *, struct ieee_ets *); int (*ieee_peer_getpfc)(struct net_device *, struct ieee_pfc *); u8 (*getstate)(struct net_device *); u8 (*setstate)(struct net_device *, u8 ); void (*getpermhwaddr)(struct net_device *, u8 *); void (*setpgtccfgtx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgtx)(struct net_device *, int, u8 ); void (*setpgtccfgrx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgrx)(struct net_device *, int, u8 ); void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgtx)(struct net_device *, int, u8 *); void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgrx)(struct net_device *, int, u8 *); void (*setpfccfg)(struct net_device *, int, u8 ); void (*getpfccfg)(struct net_device *, int, u8 *); u8 (*setall)(struct net_device *); u8 (*getcap)(struct net_device *, int, u8 *); int (*getnumtcs)(struct net_device *, int, u8 *); int (*setnumtcs)(struct net_device *, int, u8 ); u8 (*getpfcstate)(struct net_device *); void (*setpfcstate)(struct net_device *, u8 ); void (*getbcncfg)(struct net_device *, int, u32 *); void (*setbcncfg)(struct net_device *, int, u32 ); 
void (*getbcnrp)(struct net_device *, int, u8 *); void (*setbcnrp)(struct net_device *, int, u8 ); int (*setapp)(struct net_device *, u8 , u16 , u8 ); int (*getapp)(struct net_device *, u8 , u16 ); u8 (*getfeatcfg)(struct net_device *, int, u8 *); u8 (*setfeatcfg)(struct net_device *, int, u8 ); u8 (*getdcbx)(struct net_device *); u8 (*setdcbx)(struct net_device *, u8 ); int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, u16 *); int (*peer_getapptable)(struct net_device *, struct dcb_app *); int (*cee_peer_getpg)(struct net_device *, struct cee_pg *); int (*cee_peer_getpfc)(struct net_device *, struct cee_pfc *); } ;
105 struct taskstats { __u16 version; __u32 ac_exitcode; __u8 ac_flag; __u8 ac_nice; __u64 cpu_count; __u64 cpu_delay_total; __u64 blkio_count; __u64 blkio_delay_total; __u64 swapin_count; __u64 swapin_delay_total; __u64 cpu_run_real_total; __u64 cpu_run_virtual_total; char ac_comm[32U]; __u8 ac_sched; __u8 ac_pad[3U]; __u32 ac_uid; __u32 ac_gid; __u32 ac_pid; __u32 ac_ppid; __u32 ac_btime; __u64 ac_etime; __u64 ac_utime; __u64 ac_stime; __u64 ac_minflt; __u64 ac_majflt; __u64 coremem; __u64 virtmem; __u64 hiwater_rss; __u64 hiwater_vm; __u64 read_char; __u64 write_char; __u64 read_syscalls; __u64 write_syscalls; __u64 read_bytes; __u64 write_bytes; __u64 cancelled_write_bytes; __u64 nvcsw; __u64 nivcsw; __u64 ac_utimescaled; __u64 ac_stimescaled; __u64 cpu_scaled_run_real_total; __u64 freepages_count; __u64 freepages_delay_total; } ;
58 struct mnt_namespace ;
59 struct ipc_namespace ;
60 struct cgroup_namespace ;
61 struct nsproxy { atomic_t count; struct uts_namespace *uts_ns; struct ipc_namespace *ipc_ns; struct mnt_namespace *mnt_ns; struct pid_namespace *pid_ns_for_children; struct net *net_ns; struct cgroup_namespace *cgroup_ns; } ;
86 struct uid_gid_extent { u32 first; u32 lower_first; u32 count; } ;
19 struct uid_gid_map { u32 nr_extents; struct uid_gid_extent extent[5U]; } ;
31 struct user_namespace { struct uid_gid_map uid_map; struct uid_gid_map gid_map; struct uid_gid_map projid_map; atomic_t count; struct user_namespace *parent; int level; kuid_t owner; kgid_t group; struct ns_common ns; unsigned long flags; struct key *persistent_keyring_register; struct rw_semaphore persistent_keyring_register_sem; struct work_struct work; struct ctl_table_set set; struct ctl_table_header *sysctls; struct ucounts *ucounts; int ucount_max[7U]; } ;
63 struct ucounts { struct hlist_node node; struct user_namespace *ns; kuid_t uid; atomic_t count; atomic_t ucount[7U]; } ;
631 struct cgroup_namespace { atomic_t count; struct ns_common ns; struct user_namespace *user_ns; struct ucounts *ucounts; struct css_set *root_cset; } ;
686 struct netprio_map { struct callback_head rcu; u32 priomap_len; u32 priomap[]; } ;
41 struct nlmsghdr { __u32 nlmsg_len; __u16 nlmsg_type; __u16 nlmsg_flags; __u32 nlmsg_seq; __u32 nlmsg_pid; } ;
143 struct nlattr { __u16 nla_len; __u16 nla_type; } ;
105 struct netlink_callback { struct sk_buff *skb; const struct nlmsghdr *nlh; int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); void *data; struct module *module; u16 family; u16 min_dump_alloc; unsigned int prev_seq; unsigned int seq; long args[6U]; } ;
183 struct ndmsg { __u8 ndm_family; __u8 ndm_pad1; __u16 ndm_pad2; __s32 ndm_ifindex; __u16 ndm_state; __u8 ndm_flags; __u8 ndm_type; } ;
41 struct rtnl_link_stats64 { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 rx_errors; __u64 tx_errors; __u64 rx_dropped; __u64 tx_dropped; __u64 multicast; __u64 collisions; __u64 rx_length_errors; __u64 rx_over_errors; __u64 rx_crc_errors; __u64 rx_frame_errors; __u64 rx_fifo_errors; __u64 rx_missed_errors; __u64 tx_aborted_errors; __u64 tx_carrier_errors; __u64 tx_fifo_errors; __u64 tx_heartbeat_errors; __u64 tx_window_errors; __u64 rx_compressed; __u64 tx_compressed; __u64 rx_nohandler; } ;
869 struct ifla_vf_stats { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 broadcast; __u64 multicast; } ;
16 struct ifla_vf_info { __u32 vf; __u8 mac[32U]; __u32 vlan; __u32 qos; __u32 spoofchk; __u32 linkstate; __u32 min_tx_rate; __u32 max_tx_rate; __u32 rss_query_en; __u32 trusted; __be16 vlan_proto; } ;
117 struct netpoll_info ;
118 struct wireless_dev ;
119 struct wpan_dev ;
120 struct mpls_dev ;
121 struct udp_tunnel_info ;
70 enum netdev_tx { __NETDEV_TX_MIN = -2147483648, NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 16 } ;
113 typedef enum netdev_tx netdev_tx_t;
132 struct net_device_stats { unsigned long rx_packets; unsigned long tx_packets; unsigned long rx_bytes; unsigned long tx_bytes; unsigned long rx_errors; unsigned long tx_errors; unsigned long rx_dropped; unsigned long tx_dropped; unsigned long multicast; unsigned long collisions; unsigned long rx_length_errors; unsigned long rx_over_errors; unsigned long rx_crc_errors; unsigned long rx_frame_errors; unsigned long rx_fifo_errors; unsigned long rx_missed_errors; unsigned long tx_aborted_errors; unsigned long tx_carrier_errors; unsigned long tx_fifo_errors; unsigned long tx_heartbeat_errors; unsigned long tx_window_errors; unsigned long rx_compressed; unsigned long tx_compressed; } ;
196 struct neigh_parms ;
197 struct netdev_hw_addr { struct list_head list; unsigned char addr[32U]; unsigned char type; bool global_use; int sync_cnt; int refcount; int synced; struct callback_head callback_head; } ;
217 struct netdev_hw_addr_list { struct list_head list; int count; } ;
222 struct hh_cache { u16 hh_len; u16 __pad; seqlock_t hh_lock; unsigned long hh_data[16U]; } ;
251 struct header_ops { int (*create)(struct sk_buff *, struct net_device *, unsigned short, const void *, const void *, unsigned int); int (*parse)(const struct sk_buff *, unsigned char *); int (*cache)(const struct neighbour *, struct hh_cache *, __be16 ); void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *); bool (*validate)(const char *, unsigned int); } ;
302 struct napi_struct { struct list_head poll_list; unsigned long state; int weight; unsigned int gro_count; int (*poll)(struct napi_struct *, int); int poll_owner; struct net_device *dev; struct sk_buff *gro_list; struct sk_buff *skb; struct hrtimer timer; struct list_head dev_list; struct hlist_node napi_hash_node; unsigned int napi_id; } ;
357 enum rx_handler_result { RX_HANDLER_CONSUMED = 0, RX_HANDLER_ANOTHER = 1, RX_HANDLER_EXACT = 2, RX_HANDLER_PASS = 3 } ;
405 typedef enum rx_handler_result rx_handler_result_t;
406 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **);
541 struct Qdisc ;
541 struct netdev_queue { struct net_device *dev; struct Qdisc *qdisc; struct Qdisc *qdisc_sleeping; struct kobject kobj; int numa_node; unsigned long tx_maxrate; unsigned long trans_timeout; spinlock_t _xmit_lock; int xmit_lock_owner; unsigned long trans_start; unsigned long state; struct dql dql; } ;
612 struct rps_map { unsigned int len; struct callback_head rcu; u16 cpus[0U]; } ;
624 struct rps_dev_flow { u16 cpu; u16 filter; unsigned int last_qtail; } ;
636 struct rps_dev_flow_table { unsigned int mask; struct callback_head rcu; struct rps_dev_flow flows[0U]; } ;
688 struct netdev_rx_queue { struct rps_map *rps_map; struct rps_dev_flow_table *rps_flow_table; struct kobject kobj; struct net_device *dev; } ;
711 struct xps_map { unsigned int len; unsigned int alloc_len; struct callback_head rcu; u16 queues[0U]; } ;
724 struct xps_dev_maps { struct callback_head rcu; struct xps_map *cpu_map[0U]; } ;
735 struct netdev_tc_txq { u16 count; u16 offset; } ;
746 struct netdev_fcoe_hbainfo { char manufacturer[64U]; char serial_number[64U]; char hardware_version[64U]; char driver_version[64U]; char optionrom_version[64U]; char firmware_version[64U]; char model[256U]; char model_description[256U]; } ;
762 struct netdev_phys_item_id { unsigned char id[32U]; unsigned char id_len; } ;
790 struct tc_cls_u32_offload ;
791 struct tc_cls_flower_offload ;
791 struct tc_cls_matchall_offload ;
791 struct tc_cls_bpf_offload ;
791 union __anonunion____missing_field_name_492 { u8 tc; struct tc_cls_u32_offload *cls_u32; struct tc_cls_flower_offload *cls_flower; struct tc_cls_matchall_offload *cls_mall; struct tc_cls_bpf_offload *cls_bpf; } ;
791 struct tc_to_netdev { unsigned int type; union __anonunion____missing_field_name_492 __annonCompField115; bool egress_dev; } ;
808 enum xdp_netdev_command { XDP_SETUP_PROG = 0, XDP_QUERY_PROG = 1 } ;
813 union __anonunion____missing_field_name_493 { struct bpf_prog *prog; bool prog_attached; } ;
813 struct netdev_xdp { enum xdp_netdev_command command; union __anonunion____missing_field_name_493 __annonCompField116; } ;
836 struct net_device_ops { int (*ndo_init)(struct net_device *); void (*ndo_uninit)(struct net_device *); int (*ndo_open)(struct net_device *); int (*ndo_stop)(struct net_device *); netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *); netdev_features_t (*ndo_features_check)(struct sk_buff *, struct net_device *, netdev_features_t ); u16 (*ndo_select_queue)(struct net_device *, struct sk_buff *, void *, u16 (*)(struct net_device *, struct sk_buff *)); void (*ndo_change_rx_flags)(struct net_device *, int); void (*ndo_set_rx_mode)(struct net_device *); int (*ndo_set_mac_address)(struct net_device *, void *); int (*ndo_validate_addr)(struct net_device *); int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int); int (*ndo_set_config)(struct net_device *, struct ifmap *); int (*ndo_change_mtu)(struct net_device *, int); int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *); void (*ndo_tx_timeout)(struct net_device *); struct rtnl_link_stats64 * (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *); bool (*ndo_has_offload_stats)(const struct net_device *, int); int (*ndo_get_offload_stats)(int, const struct net_device *, void *); struct net_device_stats * (*ndo_get_stats)(struct net_device *); int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16 , u16 ); int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16 , u16 ); void (*ndo_poll_controller)(struct net_device *); int (*ndo_netpoll_setup)(struct net_device *, struct netpoll_info *); void (*ndo_netpoll_cleanup)(struct net_device *); int (*ndo_busy_poll)(struct napi_struct *); int (*ndo_set_vf_mac)(struct net_device *, int, u8 *); int (*ndo_set_vf_vlan)(struct net_device *, int, u16 , u8 , __be16 ); int (*ndo_set_vf_rate)(struct net_device *, int, int, int); int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool ); int (*ndo_set_vf_trust)(struct net_device *, int, bool ); int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *); int 
(*ndo_set_vf_link_state)(struct net_device *, int, int); int (*ndo_get_vf_stats)(struct net_device *, int, struct ifla_vf_stats *); int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **); int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *); int (*ndo_set_vf_guid)(struct net_device *, int, u64 , int); int (*ndo_set_vf_rss_query_en)(struct net_device *, int, bool ); int (*ndo_setup_tc)(struct net_device *, u32 , __be16 , struct tc_to_netdev *); int (*ndo_fcoe_enable)(struct net_device *); int (*ndo_fcoe_disable)(struct net_device *); int (*ndo_fcoe_ddp_setup)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_ddp_done)(struct net_device *, u16 ); int (*ndo_fcoe_ddp_target)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_get_hbainfo)(struct net_device *, struct netdev_fcoe_hbainfo *); int (*ndo_fcoe_get_wwn)(struct net_device *, u64 *, int); int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16 , u32 ); int (*ndo_add_slave)(struct net_device *, struct net_device *); int (*ndo_del_slave)(struct net_device *, struct net_device *); netdev_features_t (*ndo_fix_features)(struct net_device *, netdev_features_t ); int (*ndo_set_features)(struct net_device *, netdev_features_t ); int (*ndo_neigh_construct)(struct net_device *, struct neighbour *); void (*ndo_neigh_destroy)(struct net_device *, struct neighbour *); int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 , u16 ); int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 ); int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, struct net_device *, int *); int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *, u16 ); int (*ndo_bridge_getlink)(struct sk_buff *, u32 , u32 , struct net_device *, u32 , int); int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *, u16 ); int 
(*ndo_change_carrier)(struct net_device *, bool ); int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_item_id *); int (*ndo_get_phys_port_name)(struct net_device *, char *, size_t ); void (*ndo_udp_tunnel_add)(struct net_device *, struct udp_tunnel_info *); void (*ndo_udp_tunnel_del)(struct net_device *, struct udp_tunnel_info *); void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *); void (*ndo_dfwd_del_station)(struct net_device *, void *); netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *, struct net_device *, void *); int (*ndo_get_lock_subclass)(struct net_device *); int (*ndo_set_tx_maxrate)(struct net_device *, int, u32 ); int (*ndo_get_iflink)(const struct net_device *); int (*ndo_change_proto_down)(struct net_device *, bool ); int (*ndo_fill_metadata_dst)(struct net_device *, struct sk_buff *); void (*ndo_set_rx_headroom)(struct net_device *, int); int (*ndo_xdp)(struct net_device *, struct netdev_xdp *); } ;
1372 struct __anonstruct_adj_list_494 { struct list_head upper; struct list_head lower; } ;
1372 struct iw_handler_def ;
1372 struct iw_public_data ;
1372 struct switchdev_ops ;
1372 struct l3mdev_ops ;
1372 struct ndisc_ops ;
1372 struct vlan_info ;
1372 struct tipc_bearer ;
1372 struct in_device ;
1372 struct dn_dev ;
1372 struct inet6_dev ;
1372 struct tcf_proto ;
1372 struct cpu_rmap ;
1372 struct pcpu_lstats ;
1372 struct pcpu_sw_netstats ;
1372 struct pcpu_dstats ;
1372 struct pcpu_vstats ;
1372 union __anonunion____missing_field_name_495 { void *ml_priv; struct pcpu_lstats *lstats; struct pcpu_sw_netstats *tstats; struct pcpu_dstats *dstats; struct pcpu_vstats *vstats; } ;
1372 struct garp_port ;
1372 struct mrp_port ;
1372 struct rtnl_link_ops ;
1372 struct net_device { char name[16U]; struct hlist_node name_hlist; char *ifalias; unsigned long mem_end; unsigned long mem_start; unsigned long base_addr; int irq; atomic_t carrier_changes; unsigned long state; struct list_head dev_list; struct list_head napi_list; struct list_head unreg_list; struct list_head close_list; struct list_head ptype_all; struct list_head ptype_specific; struct __anonstruct_adj_list_494 adj_list; netdev_features_t features; netdev_features_t hw_features; netdev_features_t wanted_features; netdev_features_t vlan_features; netdev_features_t hw_enc_features; netdev_features_t mpls_features; netdev_features_t gso_partial_features; int ifindex; int group; struct net_device_stats stats; atomic_long_t rx_dropped; atomic_long_t tx_dropped; atomic_long_t rx_nohandler; const struct iw_handler_def *wireless_handlers; struct iw_public_data *wireless_data; const struct net_device_ops *netdev_ops; const struct ethtool_ops *ethtool_ops; const struct switchdev_ops *switchdev_ops; const struct l3mdev_ops *l3mdev_ops; const struct ndisc_ops *ndisc_ops; const struct header_ops *header_ops; unsigned int flags; unsigned int priv_flags; unsigned short gflags; unsigned short padded; unsigned char operstate; unsigned char link_mode; unsigned char if_port; unsigned char dma; unsigned int mtu; unsigned int min_mtu; unsigned int max_mtu; unsigned short type; unsigned short hard_header_len; unsigned short needed_headroom; unsigned short needed_tailroom; unsigned char perm_addr[32U]; unsigned char addr_assign_type; unsigned char addr_len; unsigned short neigh_priv_len; unsigned short dev_id; unsigned short dev_port; spinlock_t addr_list_lock; unsigned char name_assign_type; bool uc_promisc; struct netdev_hw_addr_list uc; struct netdev_hw_addr_list mc; struct netdev_hw_addr_list dev_addrs; struct kset *queues_kset; unsigned int promiscuity; unsigned int allmulti; struct vlan_info *vlan_info; struct dsa_switch_tree *dsa_ptr; struct tipc_bearer *tipc_ptr; void 
*atalk_ptr; struct in_device *ip_ptr; struct dn_dev *dn_ptr; struct inet6_dev *ip6_ptr; void *ax25_ptr; struct wireless_dev *ieee80211_ptr; struct wpan_dev *ieee802154_ptr; struct mpls_dev *mpls_ptr; unsigned long last_rx; unsigned char *dev_addr; struct netdev_rx_queue *_rx; unsigned int num_rx_queues; unsigned int real_num_rx_queues; unsigned long gro_flush_timeout; rx_handler_func_t *rx_handler; void *rx_handler_data; struct tcf_proto *ingress_cl_list; struct netdev_queue *ingress_queue; struct nf_hook_entry *nf_hooks_ingress; unsigned char broadcast[32U]; struct cpu_rmap *rx_cpu_rmap; struct hlist_node index_hlist; struct netdev_queue *_tx; unsigned int num_tx_queues; unsigned int real_num_tx_queues; struct Qdisc *qdisc; struct hlist_head qdisc_hash[16U]; unsigned long tx_queue_len; spinlock_t tx_global_lock; int watchdog_timeo; struct xps_dev_maps *xps_maps; struct tcf_proto *egress_cl_list; struct timer_list watchdog_timer; int *pcpu_refcnt; struct list_head todo_list; struct list_head link_watch_list; unsigned char reg_state; bool dismantle; unsigned short rtnl_link_state; void (*destructor)(struct net_device *); struct netpoll_info *npinfo; possible_net_t nd_net; union __anonunion____missing_field_name_495 __annonCompField117; struct garp_port *garp_port; struct mrp_port *mrp_port; struct device dev; const struct attribute_group *sysfs_groups[4U]; const struct attribute_group *sysfs_rx_queue_group; const struct rtnl_link_ops *rtnl_link_ops; unsigned int gso_max_size; u16 gso_max_segs; const struct dcbnl_rtnl_ops *dcbnl_ops; u8 num_tc; struct netdev_tc_txq tc_to_txq[16U]; u8 prio_tc_map[16U]; unsigned int fcoe_ddp_xid; struct netprio_map *priomap; struct phy_device *phydev; struct lock_class_key *qdisc_tx_busylock; struct lock_class_key *qdisc_running_key; bool proto_down; } ;
2194 struct packet_type { __be16 type; struct net_device *dev; int (*func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); bool (*id_match)(struct packet_type *, struct sock *); void *af_packet_priv; struct list_head list; } ;
2222 struct pcpu_sw_netstats { u64 rx_packets; u64 rx_bytes; u64 tx_packets; u64 tx_bytes; struct u64_stats_sync syncp; } ;
3168 enum skb_free_reason { SKB_REASON_CONSUMED = 0, SKB_REASON_DROPPED = 1 } ;
644 struct firmware { size_t size; const u8 *data; struct page **pages; void *priv; } ;
294 struct chip_info { const char *name; int drv_flags; } ;
497 struct starfire_rx_desc { __le64 rxaddr; } ;
466 struct full_rx_done_desc { __le32 status; __le16 status3; __le16 status2; __le16 vlanid; __le16 csum; __le32 timestamp; } ;
476 typedef struct full_rx_done_desc rx_done_desc;
492 struct starfire_tx_desc_2 { __le32 status; __le32 reserved; __le64 addr; } ;
501 typedef struct starfire_tx_desc_2 starfire_tx_desc;
510 struct tx_done_desc { __le32 status; } ;
517 struct rx_ring_info { struct sk_buff *skb; dma_addr_t mapping; } ;
525 struct tx_ring_info { struct sk_buff *skb; dma_addr_t mapping; unsigned int used_slots; } ;
530 struct netdev_private { struct starfire_rx_desc *rx_ring; starfire_tx_desc *tx_ring; dma_addr_t rx_ring_dma; dma_addr_t tx_ring_dma; struct rx_ring_info rx_info[256U]; struct tx_ring_info tx_info[32U]; rx_done_desc *rx_done_q; dma_addr_t rx_done_q_dma; unsigned int rx_done; struct tx_done_desc *tx_done_q; dma_addr_t tx_done_q_dma; unsigned int tx_done; struct napi_struct napi; struct net_device *dev; struct pci_dev *pci_dev; unsigned long active_vlans[64U]; void *queue_mem; dma_addr_t queue_mem_dma; size_t queue_mem_size; spinlock_t lock; unsigned int cur_rx; unsigned int dirty_rx; unsigned int cur_tx; unsigned int dirty_tx; unsigned int reap_tx; unsigned int rx_buf_sz; int speed100; u32 tx_mode; u32 intr_timer_ctrl; u8 tx_threshold; struct mii_if_info mii_if; int phy_cnt; unsigned char phys[2U]; void *base; } ;
1 long int __builtin_expect(long, long);
34 extern struct module __this_module;
72 void set_bit(long nr, volatile unsigned long *addr);
110 void clear_bit(long nr, volatile unsigned long *addr);
204 bool test_and_set_bit(long nr, volatile unsigned long *addr);
308 bool constant_test_bit(long nr, const volatile unsigned long *addr);
14 unsigned long int find_next_bit(const unsigned long *, unsigned long, unsigned long);
42 unsigned long int find_first_bit(const unsigned long *, unsigned long);
7 __u32 __arch_swab32(__u32 val);
46 __u16 __fswab16(__u16 val);
55 __u32 __fswab32(__u32 val);
173 __u32 __swab32p(const __u32 *p);
79 __u32 __be32_to_cpup(const __be32 *p);
178 int printk(const char *, ...);
8 void ldv_dma_map_page();
7 extern unsigned long page_offset_base;
9 extern unsigned long vmemmap_base;
23 unsigned long int __phys_addr(unsigned long);
32 void * __memcpy(void *, const void *, size_t );
57 void * __memset(void *, int, size_t );
27 size_t strlcpy(char *, const char *, size_t );
93 void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *);
22 void _raw_spin_lock(raw_spinlock_t *);
31 void _raw_spin_lock_irq(raw_spinlock_t *);
41 void _raw_spin_unlock(raw_spinlock_t *);
43 void _raw_spin_unlock_irq(raw_spinlock_t *);
289 raw_spinlock_t * spinlock_check(spinlock_t *lock);
300 void spin_lock(spinlock_t *lock);
330 void spin_lock_irq(spinlock_t *lock);
345 void spin_unlock(spinlock_t *lock);
355 void spin_unlock_irq(spinlock_t *lock);
78 extern volatile unsigned long jiffies;
56 unsigned char readb(const volatile void *addr);
57 unsigned short int readw(const volatile void *addr);
58 unsigned int readl(const volatile void *addr);
64 void writeb(unsigned char val, volatile void *addr);
65 void writew(unsigned short val, volatile void *addr);
66 void writel(unsigned int val, volatile void *addr);
181 void * ioremap_nocache(resource_size_t , unsigned long);
192 void * ioremap(resource_size_t offset, unsigned long size);
197 void iounmap(volatile void *);
87 const char * kobject_name(const struct kobject *kobj);
139 int request_threaded_irq(unsigned int, irqreturn_t (*)(int, void *), irqreturn_t (*)(int, void *), unsigned long, const char *, void *);
144 int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *), unsigned long flags, const char *name, void *dev);
158 void free_irq(unsigned int, void *);
974 const char * dev_name(const struct device *dev);
1021 void * dev_get_drvdata(const struct device *dev);
1026 void dev_set_drvdata(struct device *dev, void *data);
1248 void dev_err(const struct device *, const char *, ...);
1015 int pci_enable_device(struct pci_dev *);
1032 void pci_disable_device(struct pci_dev *);
1035 void pci_set_master(struct pci_dev *);
1042 int pci_try_set_mwi(struct pci_dev *);
1088 int pci_save_state(struct pci_dev *);
1089 void pci_restore_state(struct pci_dev *);
1102 int pci_set_power_state(struct pci_dev *, pci_power_t );
1103 pci_power_t pci_choose_state(struct pci_dev *, pm_message_t );
1161 int pci_request_regions(struct pci_dev *, const char *);
1163 void pci_release_regions(struct pci_dev *);
1216 int __pci_register_driver(struct pci_driver *, struct module *, const char *);
1225 void pci_unregister_driver(struct pci_driver *);
992 void * lowmem_page_address(const struct page *page);
1661 void * pci_get_drvdata(struct pci_dev *pdev);
1666 void pci_set_drvdata(struct pci_dev *pdev, void *data);
1674 const char * pci_name(const struct pci_dev *pdev);
37 void debug_dma_map_page(struct device *, struct page *, size_t , size_t , int, dma_addr_t , bool );
44 void debug_dma_unmap_page(struct device *, dma_addr_t , size_t , int, bool );
53 void debug_dma_alloc_coherent(struct device *, size_t , dma_addr_t , void *);
66 void debug_dma_sync_single_for_cpu(struct device *, dma_addr_t , size_t , int);
70 void debug_dma_sync_single_for_device(struct device *, dma_addr_t , size_t , int);
131 void kmemcheck_mark_initialized(void *address, unsigned int n);
136 int valid_dma_direction(int dma_direction);
28 extern struct dma_map_ops *dma_ops;
30 struct dma_map_ops * get_dma_ops(struct device *dev);
42 bool arch_dma_alloc_attrs(struct device **, gfp_t *);
180 dma_addr_t ldv_dma_map_single_attrs_5(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs);
180 dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs);
203 void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs);
315 void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir);
327 void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir);
456 void * dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs);
497 void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag);
503 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle);
16 void * pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);
31 void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
38 dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction);
44 void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);
79 void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
86 void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
10 void __const_udelay(unsigned long);
325 unsigned int skb_frag_size(const skb_frag_t *frag);
911 void consume_skb(struct sk_buff *);
999 int skb_pad(struct sk_buff *, int);
1198 unsigned char * skb_end_pointer(const struct sk_buff *skb);
1804 unsigned int skb_headlen(const struct sk_buff *skb);
1920 unsigned char * skb_put(struct sk_buff *, unsigned int);
2023 void skb_reserve(struct sk_buff *skb, int len);
2429 struct sk_buff * __netdev_alloc_skb(struct net_device *, unsigned int, gfp_t );
2445 struct sk_buff * netdev_alloc_skb(struct net_device *dev, unsigned int length);
2565 struct page * skb_frag_page(const skb_frag_t *frag);
2623 void * skb_frag_address(const skb_frag_t *frag);
2783 int skb_padto(struct sk_buff *skb, unsigned int len);
3175 void skb_copy_to_linear_data(struct sk_buff *skb, const void *from, const unsigned int len);
31 int mii_link_ok(struct mii_if_info *);
32 int mii_nway_restart(struct mii_if_info *);
34 int mii_ethtool_get_link_ksettings(struct mii_if_info *, struct ethtool_link_ksettings *);
37 int mii_ethtool_set_link_ksettings(struct mii_if_info *, const struct ethtool_link_ksettings *);
44 int generic_mii_ioctl(struct mii_if_info *, struct mii_ioctl_data *, int, unsigned int *);
49 struct mii_ioctl_data * if_mii(struct ifreq *rq);
408 void __napi_schedule(struct napi_struct *);
411 bool napi_disable_pending(struct napi_struct *n);
425 bool napi_schedule_prep(struct napi_struct *n);
467 bool napi_complete_done(struct napi_struct *, int);
476 bool napi_complete(struct napi_struct *n);
502 void napi_disable(struct napi_struct *);
511 void napi_enable(struct napi_struct *n);
1936 struct netdev_queue * netdev_get_tx_queue(const struct net_device *dev, unsigned int index);
2031 void * netdev_priv(const struct net_device *dev);
2062 void netif_napi_add(struct net_device *, struct napi_struct *, int (*)(struct napi_struct *, int), int);
2434 void free_netdev(struct net_device *);
2790 void netif_tx_start_queue(struct netdev_queue *dev_queue);
2801 void netif_start_queue(struct net_device *dev);
2816 void netif_tx_wake_queue(struct netdev_queue *);
2825 void netif_wake_queue(struct net_device *dev);
2840 void netif_tx_stop_queue(struct netdev_queue *dev_queue);
2852 void netif_stop_queue(struct net_device *dev);
2859 bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue);
2870 bool netif_queue_stopped(const struct net_device *dev);
3044 bool netif_running(const struct net_device *dev);
3174 void __dev_kfree_skb_irq(struct sk_buff *, enum skb_free_reason );
3196 void dev_kfree_skb_irq(struct sk_buff *skb);
3218 int netif_receive_skb(struct sk_buff *);
3339 void netif_carrier_on(struct net_device *);
3341 void netif_carrier_off(struct net_device *);
3408 void netif_device_detach(struct net_device *);
3410 void netif_device_attach(struct net_device *);
3511 void netif_trans_update(struct net_device *dev);
3667 int register_netdev(struct net_device *);
3668 void unregister_netdev(struct net_device *);
36 __be16 eth_type_trans(struct sk_buff *, struct net_device *);
48 int eth_mac_addr(struct net_device *, void *);
50 int eth_validate_addr(struct net_device *);
52 struct net_device * alloc_etherdev_mqs(int, unsigned int, unsigned int);
11 u32 crc32_le(u32 , const unsigned char *, size_t );
411 void __vlan_hwaccel_put_tag(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
42 int request_firmware(const struct firmware **, const char *, struct device *);
53 void release_firmware(const struct firmware *);
78 int intr_latency = 0;
79 int small_frames = 0;
81 int debug = 1;
82 int max_interrupt_work = 20;
83 int mtu = 0;
86 const int multicast_filter_limit = 512;
88 int enable_hw_cksum = 1;
108 int rx_copybreak = 0;
170 const char version[137U] = { '\xe', 's', 't', 'a', 'r', 'f', 'i', 'r', 'e', '.', 'c', ':', 'v', '1', '.', '0', '3', ' ', '7', '/', '2', '6', '/', '2', '0', '0', '0', ' ', ' ', 'W', 'r', 'i', 't', 't', 'e', 'n', ' ', 'b', 'y', ' ', 'D', 'o', 'n', 'a', 'l', 'd', ' ', 'B', 'e', 'c', 'k', 'e', 'r', ' ', '<', 'b', 'e', 'c', 'k', 'e', 'r', '@', 's', 'c', 'y', 'l', 'd', '.', 'c', 'o', 'm', '>', '\xa', ' ', '(', 'u', 'n', 'o', 'f', 'f', 'i', 'c', 'i', 'a', 'l', ' ', '2', '.', '2', '/', '2', '.', '4', ' ', 'k', 'e', 'r', 'n', 'e', 'l', ' ', 'p', 'o', 'r', 't', ',', ' ', 'v', 'e', 'r', 's', 'i', 'o', 'n', ' ', '2', '.', '1', ',', ' ', 'J', 'u', 'l', 'y', ' ', ' ', '6', ',', ' ', '2', '0', '0', '8', ')', '\xa', '\x0' };
289 const struct pci_device_id starfire_pci_tbl[2U] = { { 36868U, 26901U, 4294967295U, 4294967295U, 0U, 0U, 0UL }, { 0U, 0U, 0U, 0U, 0U, 0U, 0UL } };
293 const struct pci_device_id __mod_pci__starfire_pci_tbl_device_table[2U] = { };
299 const struct chip_info netdrv_tbl[1U] = { { "Adaptec Starfire 6915", 1 } };
576 int mdio_read(struct net_device *dev, int phy_id, int location);
577 void mdio_write(struct net_device *dev, int phy_id, int location, int value);
578 int netdev_open(struct net_device *dev);
579 void check_duplex(struct net_device *dev);
580 void tx_timeout(struct net_device *dev);
581 void init_ring(struct net_device *dev);
582 netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
583 irqreturn_t intr_handler(int irq, void *dev_instance);
584 void netdev_error(struct net_device *dev, int intr_status);
585 int __netdev_rx(struct net_device *dev, int *quota);
586 int netdev_poll(struct napi_struct *napi, int budget);
587 void refill_rx_ring(struct net_device *dev);
589 void set_rx_mode(struct net_device *dev);
590 struct net_device_stats * get_stats(struct net_device *dev);
591 int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
592 int netdev_close(struct net_device *dev);
593 void netdev_media_change(struct net_device *dev);
594 const struct ethtool_ops ethtool_ops;
598 int netdev_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid);
613 int netdev_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid);
630 const struct net_device_ops netdev_ops = { 0, 0, &netdev_open, &netdev_close, &start_tx, 0, 0, 0, &set_rx_mode, ð_mac_addr, ð_validate_addr, &netdev_ioctl, 0, 0, 0, &tx_timeout, 0, 0, 0, &get_stats, &netdev_vlan_rx_add_vid, &netdev_vlan_rx_kill_vid, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
646 int starfire_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
1713 u32 set_vlan_mode(struct netdev_private *np);
1805 int check_if_running(struct net_device *dev);
1812 void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info);
1820 int get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd);
1830 int set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd);
1842 int nway_reset(struct net_device *dev);
1848 u32 get_link(struct net_device *dev);
1854 u32 get_msglevel(struct net_device *dev);
1859 void set_msglevel(struct net_device *dev, u32 val);
1864 const struct ethtool_ops ethtool_ops = { 0, 0, &get_drvinfo, 0, 0, 0, 0, &get_msglevel, &set_msglevel, &nway_reset, &get_link, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, &check_if_running, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, &get_link_ksettings, &set_link_ksettings };
1964 int starfire_suspend(struct pci_dev *pdev, pm_message_t state);
1979 int starfire_resume(struct pci_dev *pdev);
1996 void starfire_remove_one(struct pci_dev *pdev);
2020 struct pci_driver starfire_driver = { { 0, 0 }, "starfire", (const struct pci_device_id *)(&starfire_pci_tbl), &starfire_init_one, &starfire_remove_one, &starfire_suspend, 0, 0, &starfire_resume, 0, 0, 0, { 0, 0, 0, 0, (_Bool)0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { { { { { { 0 } }, 0U, 0U, 0, { 0, { 0, 0 }, 0, 0, 0UL } } } }, { 0, 0 } } };
2032 int starfire_init();
2047 void starfire_cleanup();
2080 void ldv_check_final_state();
2083 void ldv_check_return_value(int);
2086 void ldv_check_return_value_probe(int);
2089 void ldv_initialize();
2092 void ldv_handler_precall();
2095 int nondet_int();
2098 int LDV_IN_INTERRUPT = 0;
2101 void ldv_main0_sequence_infinite_withcheck_stateful();
10 void ldv_error();
7 bool ldv_is_err(const void *ptr);
14 void * ldv_err_ptr(long error);
21 long int ldv_ptr_err(const void *ptr);
28 bool ldv_is_err_or_null(const void *ptr);
5 int LDV_DMA_MAP_CALLS = 0;
16 void ldv_dma_mapping_error();
return ;
}
-entry_point
{
2103 struct net_device *var_group1;
2104 int res_netdev_open_5;
2105 int res_netdev_close_28;
2106 struct sk_buff *var_group2;
2107 struct ifreq *var_group3;
2108 int var_netdev_ioctl_27_p2;
2109 unsigned short var_netdev_vlan_rx_add_vid_0_p1;
2110 unsigned short var_netdev_vlan_rx_add_vid_0_p2;
2111 unsigned short var_netdev_vlan_rx_kill_vid_1_p1;
2112 unsigned short var_netdev_vlan_rx_kill_vid_1_p2;
2113 struct ethtool_drvinfo *var_group4;
2114 unsigned int var_set_msglevel_26_p1;
2115 struct ethtool_link_ksettings *var_group5;
2116 const struct ethtool_link_ksettings *var_set_link_ksettings_22_p1;
2117 struct pci_dev *var_group6;
2118 const struct pci_device_id *var_starfire_init_one_2_p1;
2119 int res_starfire_init_one_2;
2120 struct pm_message var_starfire_suspend_29_p1;
2121 int var_intr_handler_10_p0;
2122 void *var_intr_handler_10_p1;
2123 int ldv_s_netdev_ops_net_device_ops;
2124 int ldv_s_starfire_driver_pci_driver;
2125 int tmp;
2126 int tmp___0;
2127 int tmp___1;
4664 ldv_s_netdev_ops_net_device_ops = 0;
4669 ldv_s_starfire_driver_pci_driver = 0;
4543 LDV_IN_INTERRUPT = 1;
4552 ldv_initialize() { /* Function call is skipped due to function is undefined */}
4661 ldv_handler_precall() { /* Function call is skipped due to function is undefined */}
4662 -starfire_init()
{
2034 int tmp;
2036 printk((const char *)(&version)) { /* Function call is skipped due to function is undefined */}
2038 printk("\016starfire: polling (NAPI) enabled\n") { /* Function call is skipped due to function is undefined */}
2043 tmp = __pci_register_driver(&starfire_driver, &__this_module, "starfire") { /* Function call is skipped due to function is undefined */}
2043 return tmp;;
}
4662 assume(!(tmp != 0));
4675 goto ldv_52857;
4675 tmp___1 = nondet_int() { /* Function call is skipped due to function is undefined */}
4675 assume(tmp___1 != 0);
4679 goto ldv_52856;
4676 ldv_52856:;
4680 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}
4680 switch (tmp___0);
4681 assume(!(tmp___0 == 0));
4807 assume(!(tmp___0 == 1));
4934 assume(!(tmp___0 == 2));
5056 assume(!(tmp___0 == 3));
5180 assume(!(tmp___0 == 4));
5304 assume(!(tmp___0 == 5));
5426 assume(!(tmp___0 == 6));
5550 assume(!(tmp___0 == 7));
5674 assume(!(tmp___0 == 8));
5798 assume(!(tmp___0 == 9));
5922 assume(!(tmp___0 == 10));
6046 assume(!(tmp___0 == 11));
6170 assume(!(tmp___0 == 12));
6294 assume(!(tmp___0 == 13));
6418 assume(!(tmp___0 == 14));
6542 assume(!(tmp___0 == 15));
6666 assume(!(tmp___0 == 16));
6790 assume(!(tmp___0 == 17));
6907 assume(!(tmp___0 == 18));
7031 assume(!(tmp___0 == 19));
7155 assume(tmp___0 == 20);
7263 ldv_handler_precall() { /* Function call is skipped due to function is undefined */}
7264 -starfire_resume(var_group6)
{
1981 struct net_device *dev;
1982 void *tmp;
1983 _Bool tmp___0;
1981 -pci_get_drvdata(pdev)
{
1663 void *tmp;
1663 -dev_get_drvdata((const struct device *)(&(pdev->dev)))
{
1023 void *__CPAchecker_TMP_0 = (void *)(dev->driver_data);
1023 return __CPAchecker_TMP_0;;
}
1663 return tmp;;
}
1981 dev = (struct net_device *)tmp;
1983 pci_set_power_state(pdev, 0) { /* Function call is skipped due to function is undefined */}
1984 pci_restore_state(pdev) { /* Function call is skipped due to function is undefined */}
1986 -netif_running((const struct net_device *)dev)
{
3046 _Bool tmp;
3046 -constant_test_bit(0L, (const volatile unsigned long *)(&(dev->state)))
{
310 return (((int)(((unsigned long)(*(addr + ((unsigned long)(nr >> 6))))) >> (((int)nr) & 63))) & 1) != 0;;
}
3046 return ((int)tmp) != 0;;
}
1986 assume(!(((int)tmp___0) == 0));
1987 -netdev_open(dev)
{
876 const struct firmware *fw_rx;
877 const struct firmware *fw_tx;
878 const __be32 *fw_rx_data;
879 const __be32 *fw_tx_data;
880 struct netdev_private *np;
881 void *tmp;
882 void *ioaddr;
883 int irq;
884 int i;
885 int retval;
886 unsigned long tx_size;
887 unsigned long rx_size;
888 unsigned long tx_done_q_size;
889 unsigned long rx_done_q_size;
890 unsigned long tx_ring_size;
891 unsigned long rx_ring_size;
892 __be16 *eaddrs;
893 void *setup_frm;
894 unsigned short tmp___0;
895 unsigned short tmp___1;
896 unsigned short tmp___2;
897 unsigned int tmp___3;
898 unsigned int tmp___4;
899 unsigned int tmp___5;
878 -netdev_priv((const struct net_device *)dev)
{
2033 return ((void *)dev) + 3200U;;
}
878 np = (struct netdev_private *)tmp;
879 ioaddr = np->base;
880 const int __CPAchecker_TMP_0 = (const int)(np->pci_dev->irq);
880 irq = __CPAchecker_TMP_0;
887 -request_irq((unsigned int)irq, &intr_handler, 128UL, (const char *)(&(dev->name)), (void *)dev)
{
147 int tmp;
147 tmp = request_threaded_irq(irq, handler, (irqreturn_t (*)(int, void *))0, flags, name, dev) { /* Function call is skipped due to function is undefined */}
147 return tmp;;
}
888 assume(!(retval != 0));
892 -writel(0U, ((volatile void *)ioaddr) + 327792U)
{
66 Ignored inline assembler code
67 return ;;
}
893 -writel(1U, ((volatile void *)ioaddr) + 327744U)
{
66 Ignored inline assembler code
67 return ;;
}
894 assume(!(debug > 1));
899 unsigned long __CPAchecker_TMP_1 = (unsigned long)(np->queue_mem);
899 assume(!(__CPAchecker_TMP_1 == ((unsigned long)((void *)0))));
922 netif_carrier_off(dev) { /* Function call is skipped due to function is undefined */}
923 -init_ring(dev)
{
1141 struct netdev_private *np;
1142 void *tmp;
1143 int i;
1144 unsigned int tmp___0;
1145 unsigned int tmp___1;
1146 unsigned int tmp___2;
1147 unsigned int tmp___3;
1148 unsigned int tmp___4;
1149 struct sk_buff *skb;
1150 struct sk_buff *tmp___5;
1141 -netdev_priv((const struct net_device *)dev)
{
2033 return ((void *)dev) + 3200U;;
}
1141 np = (struct netdev_private *)tmp;
1144 tmp___1 = 0U;
1144 np->reap_tx = tmp___1;
1144 tmp___0 = tmp___1;
1144 np->cur_tx = tmp___0;
1144 np->cur_rx = tmp___0;
1145 tmp___4 = 0U;
1145 np->tx_done = tmp___4;
1145 tmp___3 = tmp___4;
1145 np->rx_done = tmp___3;
1145 tmp___2 = tmp___3;
1145 np->dirty_tx = tmp___2;
1145 np->dirty_rx = tmp___2;
1147 unsigned int __CPAchecker_TMP_0;
1147 assume((dev->mtu) > 1500U);
1147 __CPAchecker_TMP_0 = (dev->mtu) + 32U;
1147 np->rx_buf_sz = __CPAchecker_TMP_0;
1150 i = 0;
1150 goto ldv_52538;
1150 assume(i <= 255);
1152 goto ldv_52537;
1151 ldv_52537:;
1151 -netdev_alloc_skb(dev, np->rx_buf_sz)
{
2447 struct sk_buff *tmp;
2448 tmp = __netdev_alloc_skb(dev, length, 34078752U) { /* Function call is skipped due to function is undefined */}
2448 return tmp;;
}
1151 skb = tmp___5;
1152 ((np->rx_info)[i]).skb = skb;
1153 assume(!(((unsigned long)skb) == ((unsigned long)((struct sk_buff *)0))));
1155 void *__CPAchecker_TMP_1 = (void *)(skb->data);
1155 size_t __CPAchecker_TMP_2 = (size_t )(np->rx_buf_sz);
1155 -pci_map_single(np->pci_dev, __CPAchecker_TMP_1, __CPAchecker_TMP_2, 2)
{
41 unsigned long long tmp;
40 struct device *__CPAchecker_TMP_0;
40 assume(((unsigned long)hwdev) != ((unsigned long)((struct pci_dev *)0)));
40 __CPAchecker_TMP_0 = &(hwdev->dev);
40 -dma_map_single_attrs(__CPAchecker_TMP_0, ptr, size, (enum dma_data_direction )direction, 0UL)
{
38 unsigned long long tmp;
38 -ldv_dma_map_page()
{
10 assume(!(LDV_DMA_MAP_CALLS != 0));
12 LDV_DMA_MAP_CALLS = LDV_DMA_MAP_CALLS + 1;
13 return ;;
}
40 -ldv_dma_map_single_attrs_5(dev, ptr, size, dir, attrs)
{
184 struct dma_map_ops *ops;
185 struct dma_map_ops *tmp;
186 unsigned long long addr;
187 int tmp___0;
188 long tmp___1;
189 unsigned long tmp___2;
190 unsigned long tmp___3;
185 -get_dma_ops(dev)
{
32 long tmp;
35 tmp = __builtin_expect(((unsigned long)dev) == ((unsigned long)((struct device *)0)), 0L) { /* Function call is skipped due to function is undefined */}
35 assume(!(tmp != 0L));
35 assume(((unsigned long)(dev->archdata.dma_ops)) == ((unsigned long)((struct dma_map_ops *)0)));
36 return dma_ops;;
}
185 ops = tmp;
188 -kmemcheck_mark_initialized(ptr, (unsigned int)size)
{
133 return ;;
}
189 -valid_dma_direction((int)dir)
{
138 int __CPAchecker_TMP_0;
138 assume(!(dma_direction == 0));
138 assume(!(dma_direction == 1));
138 assume(dma_direction == 2);
__CPAchecker_TMP_0 = 1;
138 return __CPAchecker_TMP_0;;
}
189 tmp___1 = __builtin_expect(tmp___0 == 0, 0L) { /* Function call is skipped due to function is undefined */}
189 assume(!(tmp___1 != 0L));
190 tmp___2 = __phys_addr((unsigned long)ptr) { /* Function call is skipped due to function is undefined */}
190 addr = (*(ops->map_page))(dev, (struct page *)((tmp___2 >> 12) + vmemmap_base), ((unsigned long)ptr) & 4095UL, size, dir, attrs);
193 tmp___3 = __phys_addr((unsigned long)ptr) { /* Function call is skipped due to function is undefined */}
193 debug_dma_map_page(dev, (struct page *)((tmp___3 >> 12) + vmemmap_base), ((unsigned long)ptr) & 4095UL, size, (int)dir, addr, 1) { /* Function call is skipped due to function is undefined */}
196 return addr;;
}
40 return tmp;;
}
40 return tmp;;
}
1157 ((np->rx_ring) + ((unsigned long)i))->rxaddr = (((np->rx_info)[i]).mapping) | 1ULL;
1150 i = i + 1;
1151 ldv_52538:;
1150 assume(i <= 255);
1152 goto ldv_52537;
1151 ldv_52537:;
1151 -netdev_alloc_skb(dev, np->rx_buf_sz)
{
2447 struct sk_buff *tmp;
2448 tmp = __netdev_alloc_skb(dev, length, 34078752U) { /* Function call is skipped due to function is undefined */}
2448 return tmp;;
}
1151 skb = tmp___5;
1152 ((np->rx_info)[i]).skb = skb;
1153 assume(!(((unsigned long)skb) == ((unsigned long)((struct sk_buff *)0))));
1155 void *__CPAchecker_TMP_1 = (void *)(skb->data);
1155 size_t __CPAchecker_TMP_2 = (size_t )(np->rx_buf_sz);
1155 -pci_map_single(np->pci_dev, __CPAchecker_TMP_1, __CPAchecker_TMP_2, 2)
{
41 unsigned long long tmp;
40 struct device *__CPAchecker_TMP_0;
40 assume(((unsigned long)hwdev) != ((unsigned long)((struct pci_dev *)0)));
40 __CPAchecker_TMP_0 = &(hwdev->dev);
40 -dma_map_single_attrs(__CPAchecker_TMP_0, ptr, size, (enum dma_data_direction )direction, 0UL)
{
38 unsigned long long tmp;
38 -ldv_dma_map_page()
{
10 assume(LDV_DMA_MAP_CALLS != 0);
10 -ldv_error()
{
15 LDV_ERROR:;
}
}
}
}
}
}
}
}
Source code
1 #ifndef _ASM_X86_BITOPS_H 2 #define _ASM_X86_BITOPS_H 3 4 /* 5 * Copyright 1992, Linus Torvalds. 6 * 7 * Note: inlines with more than a single statement should be marked 8 * __always_inline to avoid problems with older gcc's inlining heuristics. 9 */ 10 11 #ifndef _LINUX_BITOPS_H 12 #error only <linux/bitops.h> can be included directly 13 #endif 14 15 #include <linux/compiler.h> 16 #include <asm/alternative.h> 17 #include <asm/rmwcc.h> 18 #include <asm/barrier.h> 19 20 #if BITS_PER_LONG == 32 21 # define _BITOPS_LONG_SHIFT 5 22 #elif BITS_PER_LONG == 64 23 # define _BITOPS_LONG_SHIFT 6 24 #else 25 # error "Unexpected BITS_PER_LONG" 26 #endif 27 28 #define BIT_64(n) (U64_C(1) << (n)) 29 30 /* 31 * These have to be done with inline assembly: that way the bit-setting 32 * is guaranteed to be atomic. All bit operations return 0 if the bit 33 * was cleared before the operation and != 0 if it was not. 34 * 35 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). 36 */ 37 38 #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1) 39 /* Technically wrong, but this avoids compilation errors on some gcc 40 versions. */ 41 #define BITOP_ADDR(x) "=m" (*(volatile long *) (x)) 42 #else 43 #define BITOP_ADDR(x) "+m" (*(volatile long *) (x)) 44 #endif 45 46 #define ADDR BITOP_ADDR(addr) 47 48 /* 49 * We do the locked ops that don't return the old value as 50 * a mask operation on a byte. 51 */ 52 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) 53 #define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3)) 54 #define CONST_MASK(nr) (1 << ((nr) & 7)) 55 56 /** 57 * set_bit - Atomically set a bit in memory 58 * @nr: the bit to set 59 * @addr: the address to start counting from 60 * 61 * This function is atomic and may not be reordered. See __set_bit() 62 * if you do not require the atomic guarantees. 
63 * 64 * Note: there are no guarantees that this function will not be reordered 65 * on non x86 architectures, so if you are writing portable code, 66 * make sure not to rely on its reordering guarantees. 67 * 68 * Note that @nr may be almost arbitrarily large; this function is not 69 * restricted to acting on a single-word quantity. 70 */ 71 static __always_inline void 72 set_bit(long nr, volatile unsigned long *addr) 73 { 74 if (IS_IMMEDIATE(nr)) { 75 asm volatile(LOCK_PREFIX "orb %1,%0" 76 : CONST_MASK_ADDR(nr, addr) 77 : "iq" ((u8)CONST_MASK(nr)) 78 : "memory"); 79 } else { 80 asm volatile(LOCK_PREFIX "bts %1,%0" 81 : BITOP_ADDR(addr) : "Ir" (nr) : "memory"); 82 } 83 } 84 85 /** 86 * __set_bit - Set a bit in memory 87 * @nr: the bit to set 88 * @addr: the address to start counting from 89 * 90 * Unlike set_bit(), this function is non-atomic and may be reordered. 91 * If it's called on the same region of memory simultaneously, the effect 92 * may be that only one operation succeeds. 93 */ 94 static __always_inline void __set_bit(long nr, volatile unsigned long *addr) 95 { 96 asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory"); 97 } 98 99 /** 100 * clear_bit - Clears a bit in memory 101 * @nr: Bit to clear 102 * @addr: Address to start counting from 103 * 104 * clear_bit() is atomic and may not be reordered. However, it does 105 * not contain a memory barrier, so if it is used for locking purposes, 106 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic() 107 * in order to ensure changes are visible on other processors. 
108 */ 109 static __always_inline void 110 clear_bit(long nr, volatile unsigned long *addr) 111 { 112 if (IS_IMMEDIATE(nr)) { 113 asm volatile(LOCK_PREFIX "andb %1,%0" 114 : CONST_MASK_ADDR(nr, addr) 115 : "iq" ((u8)~CONST_MASK(nr))); 116 } else { 117 asm volatile(LOCK_PREFIX "btr %1,%0" 118 : BITOP_ADDR(addr) 119 : "Ir" (nr)); 120 } 121 } 122 123 /* 124 * clear_bit_unlock - Clears a bit in memory 125 * @nr: Bit to clear 126 * @addr: Address to start counting from 127 * 128 * clear_bit() is atomic and implies release semantics before the memory 129 * operation. It can be used for an unlock. 130 */ 131 static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr) 132 { 133 barrier(); 134 clear_bit(nr, addr); 135 } 136 137 static __always_inline void __clear_bit(long nr, volatile unsigned long *addr) 138 { 139 asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); 140 } 141 142 /* 143 * __clear_bit_unlock - Clears a bit in memory 144 * @nr: Bit to clear 145 * @addr: Address to start counting from 146 * 147 * __clear_bit() is non-atomic and implies release semantics before the memory 148 * operation. It can be used for an unlock if no other CPUs can concurrently 149 * modify other bits in the word. 150 * 151 * No memory barrier is required here, because x86 cannot reorder stores past 152 * older loads. Same principle as spin_unlock. 153 */ 154 static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr) 155 { 156 barrier(); 157 __clear_bit(nr, addr); 158 } 159 160 /** 161 * __change_bit - Toggle a bit in memory 162 * @nr: the bit to change 163 * @addr: the address to start counting from 164 * 165 * Unlike change_bit(), this function is non-atomic and may be reordered. 166 * If it's called on the same region of memory simultaneously, the effect 167 * may be that only one operation succeeds. 
168 */ 169 static __always_inline void __change_bit(long nr, volatile unsigned long *addr) 170 { 171 asm volatile("btc %1,%0" : ADDR : "Ir" (nr)); 172 } 173 174 /** 175 * change_bit - Toggle a bit in memory 176 * @nr: Bit to change 177 * @addr: Address to start counting from 178 * 179 * change_bit() is atomic and may not be reordered. 180 * Note that @nr may be almost arbitrarily large; this function is not 181 * restricted to acting on a single-word quantity. 182 */ 183 static __always_inline void change_bit(long nr, volatile unsigned long *addr) 184 { 185 if (IS_IMMEDIATE(nr)) { 186 asm volatile(LOCK_PREFIX "xorb %1,%0" 187 : CONST_MASK_ADDR(nr, addr) 188 : "iq" ((u8)CONST_MASK(nr))); 189 } else { 190 asm volatile(LOCK_PREFIX "btc %1,%0" 191 : BITOP_ADDR(addr) 192 : "Ir" (nr)); 193 } 194 } 195 196 /** 197 * test_and_set_bit - Set a bit and return its old value 198 * @nr: Bit to set 199 * @addr: Address to count from 200 * 201 * This operation is atomic and cannot be reordered. 202 * It also implies a memory barrier. 203 */ 204 static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr) 205 { 206 GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c); 207 } 208 209 /** 210 * test_and_set_bit_lock - Set a bit and return its old value for lock 211 * @nr: Bit to set 212 * @addr: Address to count from 213 * 214 * This is the same as test_and_set_bit on x86. 215 */ 216 static __always_inline bool 217 test_and_set_bit_lock(long nr, volatile unsigned long *addr) 218 { 219 return test_and_set_bit(nr, addr); 220 } 221 222 /** 223 * __test_and_set_bit - Set a bit and return its old value 224 * @nr: Bit to set 225 * @addr: Address to count from 226 * 227 * This operation is non-atomic and can be reordered. 228 * If two examples of this operation race, one can appear to succeed 229 * but actually fail. You must protect multiple accesses with a lock. 
230 */ 231 static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr) 232 { 233 bool oldbit; 234 235 asm("bts %2,%1\n\t" 236 CC_SET(c) 237 : CC_OUT(c) (oldbit), ADDR 238 : "Ir" (nr)); 239 return oldbit; 240 } 241 242 /** 243 * test_and_clear_bit - Clear a bit and return its old value 244 * @nr: Bit to clear 245 * @addr: Address to count from 246 * 247 * This operation is atomic and cannot be reordered. 248 * It also implies a memory barrier. 249 */ 250 static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) 251 { 252 GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c); 253 } 254 255 /** 256 * __test_and_clear_bit - Clear a bit and return its old value 257 * @nr: Bit to clear 258 * @addr: Address to count from 259 * 260 * This operation is non-atomic and can be reordered. 261 * If two examples of this operation race, one can appear to succeed 262 * but actually fail. You must protect multiple accesses with a lock. 263 * 264 * Note: the operation is performed atomically with respect to 265 * the local CPU, but not other CPUs. Portable code should not 266 * rely on this behaviour. 267 * KVM relies on this behaviour on x86 for modifying memory that is also 268 * accessed from a hypervisor on the same CPU if running in a VM: don't change 269 * this without also updating arch/x86/kernel/kvm.c 270 */ 271 static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr) 272 { 273 bool oldbit; 274 275 asm volatile("btr %2,%1\n\t" 276 CC_SET(c) 277 : CC_OUT(c) (oldbit), ADDR 278 : "Ir" (nr)); 279 return oldbit; 280 } 281 282 /* WARNING: non atomic and it can be reordered! 
*/ 283 static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr) 284 { 285 bool oldbit; 286 287 asm volatile("btc %2,%1\n\t" 288 CC_SET(c) 289 : CC_OUT(c) (oldbit), ADDR 290 : "Ir" (nr) : "memory"); 291 292 return oldbit; 293 } 294 295 /** 296 * test_and_change_bit - Change a bit and return its old value 297 * @nr: Bit to change 298 * @addr: Address to count from 299 * 300 * This operation is atomic and cannot be reordered. 301 * It also implies a memory barrier. 302 */ 303 static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr) 304 { 305 GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c); 306 } 307 308 static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr) 309 { 310 return ((1UL << (nr & (BITS_PER_LONG-1))) & 311 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; 312 } 313 314 static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr) 315 { 316 bool oldbit; 317 318 asm volatile("bt %2,%1\n\t" 319 CC_SET(c) 320 : CC_OUT(c) (oldbit) 321 : "m" (*(unsigned long *)addr), "Ir" (nr)); 322 323 return oldbit; 324 } 325 326 #if 0 /* Fool kernel-doc since it doesn't do macros yet */ 327 /** 328 * test_bit - Determine whether a bit is set 329 * @nr: bit number to test 330 * @addr: Address to start counting from 331 */ 332 static bool test_bit(int nr, const volatile unsigned long *addr); 333 #endif 334 335 #define test_bit(nr, addr) \ 336 (__builtin_constant_p((nr)) \ 337 ? constant_test_bit((nr), (addr)) \ 338 : variable_test_bit((nr), (addr))) 339 340 /** 341 * __ffs - find first set bit in word 342 * @word: The word to search 343 * 344 * Undefined if no bit exists, so code should check against 0 first. 
345 */ 346 static __always_inline unsigned long __ffs(unsigned long word) 347 { 348 asm("rep; bsf %1,%0" 349 : "=r" (word) 350 : "rm" (word)); 351 return word; 352 } 353 354 /** 355 * ffz - find first zero bit in word 356 * @word: The word to search 357 * 358 * Undefined if no zero exists, so code should check against ~0UL first. 359 */ 360 static __always_inline unsigned long ffz(unsigned long word) 361 { 362 asm("rep; bsf %1,%0" 363 : "=r" (word) 364 : "r" (~word)); 365 return word; 366 } 367 368 /* 369 * __fls: find last set bit in word 370 * @word: The word to search 371 * 372 * Undefined if no set bit exists, so code should check against 0 first. 373 */ 374 static __always_inline unsigned long __fls(unsigned long word) 375 { 376 asm("bsr %1,%0" 377 : "=r" (word) 378 : "rm" (word)); 379 return word; 380 } 381 382 #undef ADDR 383 384 #ifdef __KERNEL__ 385 /** 386 * ffs - find first set bit in word 387 * @x: the word to search 388 * 389 * This is defined the same way as the libc and compiler builtin ffs 390 * routines, therefore differs in spirit from the other bitops. 391 * 392 * ffs(value) returns 0 if value is 0 or the position of the first 393 * set bit if value is nonzero. The first (least significant) bit 394 * is at position 1. 395 */ 396 static __always_inline int ffs(int x) 397 { 398 int r; 399 400 #ifdef CONFIG_X86_64 401 /* 402 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the 403 * dest reg is undefined if x==0, but their CPU architect says its 404 * value is written to set it to the same as before, except that the 405 * top 32 bits will be cleared. 406 * 407 * We cannot do this on 32 bits because at the very least some 408 * 486 CPUs did not behave this way. 
409 */ 410 asm("bsfl %1,%0" 411 : "=r" (r) 412 : "rm" (x), "0" (-1)); 413 #elif defined(CONFIG_X86_CMOV) 414 asm("bsfl %1,%0\n\t" 415 "cmovzl %2,%0" 416 : "=&r" (r) : "rm" (x), "r" (-1)); 417 #else 418 asm("bsfl %1,%0\n\t" 419 "jnz 1f\n\t" 420 "movl $-1,%0\n" 421 "1:" : "=r" (r) : "rm" (x)); 422 #endif 423 return r + 1; 424 } 425 426 /** 427 * fls - find last set bit in word 428 * @x: the word to search 429 * 430 * This is defined in a similar way as the libc and compiler builtin 431 * ffs, but returns the position of the most significant set bit. 432 * 433 * fls(value) returns 0 if value is 0 or the position of the last 434 * set bit if value is nonzero. The last (most significant) bit is 435 * at position 32. 436 */ 437 static __always_inline int fls(int x) 438 { 439 int r; 440 441 #ifdef CONFIG_X86_64 442 /* 443 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the 444 * dest reg is undefined if x==0, but their CPU architect says its 445 * value is written to set it to the same as before, except that the 446 * top 32 bits will be cleared. 447 * 448 * We cannot do this on 32 bits because at the very least some 449 * 486 CPUs did not behave this way. 450 */ 451 asm("bsrl %1,%0" 452 : "=r" (r) 453 : "rm" (x), "0" (-1)); 454 #elif defined(CONFIG_X86_CMOV) 455 asm("bsrl %1,%0\n\t" 456 "cmovzl %2,%0" 457 : "=&r" (r) : "rm" (x), "rm" (-1)); 458 #else 459 asm("bsrl %1,%0\n\t" 460 "jnz 1f\n\t" 461 "movl $-1,%0\n" 462 "1:" : "=r" (r) : "rm" (x)); 463 #endif 464 return r + 1; 465 } 466 467 /** 468 * fls64 - find last set bit in a 64-bit word 469 * @x: the word to search 470 * 471 * This is defined in a similar way as the libc and compiler builtin 472 * ffsll, but returns the position of the most significant set bit. 473 * 474 * fls64(value) returns 0 if value is 0 or the position of the last 475 * set bit if value is nonzero. The last (most significant) bit is 476 * at position 64. 
477 */ 478 #ifdef CONFIG_X86_64 479 static __always_inline int fls64(__u64 x) 480 { 481 int bitpos = -1; 482 /* 483 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the 484 * dest reg is undefined if x==0, but their CPU architect says its 485 * value is written to set it to the same as before. 486 */ 487 asm("bsrq %1,%q0" 488 : "+r" (bitpos) 489 : "rm" (x)); 490 return bitpos + 1; 491 } 492 #else 493 #include <asm-generic/bitops/fls64.h> 494 #endif 495 496 #include <asm-generic/bitops/find.h> 497 498 #include <asm-generic/bitops/sched.h> 499 500 #include <asm/arch_hweight.h> 501 502 #include <asm-generic/bitops/const_hweight.h> 503 504 #include <asm-generic/bitops/le.h> 505 506 #include <asm-generic/bitops/ext2-atomic-setbit.h> 507 508 #endif /* __KERNEL__ */ 509 #endif /* _ASM_X86_BITOPS_H */
1 #ifndef _ASM_X86_DMA_MAPPING_H 2 #define _ASM_X86_DMA_MAPPING_H 3 4 /* 5 * IOMMU interface. See Documentation/DMA-API-HOWTO.txt and 6 * Documentation/DMA-API.txt for documentation. 7 */ 8 9 #include <linux/kmemcheck.h> 10 #include <linux/scatterlist.h> 11 #include <linux/dma-debug.h> 12 #include <asm/io.h> 13 #include <asm/swiotlb.h> 14 #include <linux/dma-contiguous.h> 15 16 #ifdef CONFIG_ISA 17 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(24) 18 #else 19 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(32) 20 #endif 21 22 #define DMA_ERROR_CODE 0 23 24 extern int iommu_merge; 25 extern struct device x86_dma_fallback_dev; 26 extern int panic_on_overflow; 27 28 extern struct dma_map_ops *dma_ops; 29 30 static inline struct dma_map_ops *get_dma_ops(struct device *dev) 31 { 32 #ifndef CONFIG_X86_DEV_DMA_OPS 33 return dma_ops; 34 #else 35 if (unlikely(!dev) || !dev->archdata.dma_ops) 36 return dma_ops; 37 else 38 return dev->archdata.dma_ops; 39 #endif 40 } 41 42 bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp); 43 #define arch_dma_alloc_attrs arch_dma_alloc_attrs 44 45 #define HAVE_ARCH_DMA_SUPPORTED 1 46 extern int dma_supported(struct device *hwdev, u64 mask); 47 48 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, 49 dma_addr_t *dma_addr, gfp_t flag, 50 unsigned long attrs); 51 52 extern void dma_generic_free_coherent(struct device *dev, size_t size, 53 void *vaddr, dma_addr_t dma_addr, 54 unsigned long attrs); 55 56 #ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */ 57 extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size); 58 extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); 59 extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr); 60 #else 61 62 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 63 { 64 if (!dev->dma_mask) 65 return 0; 66 67 return addr + size - 1 <= *dev->dma_mask; 68 } 69 70 static inline dma_addr_t 
phys_to_dma(struct device *dev, phys_addr_t paddr) 71 { 72 return paddr; 73 } 74 75 static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) 76 { 77 return daddr; 78 } 79 #endif /* CONFIG_X86_DMA_REMAP */ 80 81 static inline void 82 dma_cache_sync(struct device *dev, void *vaddr, size_t size, 83 enum dma_data_direction dir) 84 { 85 flush_write_buffers(); 86 } 87 88 static inline unsigned long dma_alloc_coherent_mask(struct device *dev, 89 gfp_t gfp) 90 { 91 unsigned long dma_mask = 0; 92 93 dma_mask = dev->coherent_dma_mask; 94 if (!dma_mask) 95 dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32); 96 97 return dma_mask; 98 } 99 100 static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp) 101 { 102 unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp); 103 104 if (dma_mask <= DMA_BIT_MASK(24)) 105 gfp |= GFP_DMA; 106 #ifdef CONFIG_X86_64 107 if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) 108 gfp |= GFP_DMA32; 109 #endif 110 return gfp; 111 } 112 113 #endif
1 #ifndef _ASM_X86_IO_H 2 #define _ASM_X86_IO_H 3 4 /* 5 * This file contains the definitions for the x86 IO instructions 6 * inb/inw/inl/outb/outw/outl and the "string versions" of the same 7 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing" 8 * versions of the single-IO instructions (inb_p/inw_p/..). 9 * 10 * This file is not meant to be obfuscating: it's just complicated 11 * to (a) handle it all in a way that makes gcc able to optimize it 12 * as well as possible and (b) trying to avoid writing the same thing 13 * over and over again with slight variations and possibly making a 14 * mistake somewhere. 15 */ 16 17 /* 18 * Thanks to James van Artsdalen for a better timing-fix than 19 * the two short jumps: using outb's to a nonexistent port seems 20 * to guarantee better timings even on fast machines. 21 * 22 * On the other hand, I'd like to be sure of a non-existent port: 23 * I feel a bit unsafe about using 0x80 (should be safe, though) 24 * 25 * Linus 26 */ 27 28 /* 29 * Bit simplified and optimized by Jan Hubicka 30 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999. 
31 * 32 * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added, 33 * isa_read[wl] and isa_write[wl] fixed 34 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br> 35 */ 36 37 #define ARCH_HAS_IOREMAP_WC 38 #define ARCH_HAS_IOREMAP_WT 39 40 #include <linux/string.h> 41 #include <linux/compiler.h> 42 #include <asm/page.h> 43 #include <asm/early_ioremap.h> 44 #include <asm/pgtable_types.h> 45 46 #define build_mmio_read(name, size, type, reg, barrier) \ 47 static inline type name(const volatile void __iomem *addr) \ 48 { type ret; asm volatile("mov" size " %1,%0":reg (ret) \ 49 :"m" (*(volatile type __force *)addr) barrier); return ret; } 50 51 #define build_mmio_write(name, size, type, reg, barrier) \ 52 static inline void name(type val, volatile void __iomem *addr) \ 53 { asm volatile("mov" size " %0,%1": :reg (val), \ 54 "m" (*(volatile type __force *)addr) barrier); } 55 56 build_mmio_read(readb, "b", unsigned char, "=q", :"memory") 57 build_mmio_read(readw, "w", unsigned short, "=r", :"memory") 58 build_mmio_read(readl, "l", unsigned int, "=r", :"memory") 59 60 build_mmio_read(__readb, "b", unsigned char, "=q", ) 61 build_mmio_read(__readw, "w", unsigned short, "=r", ) 62 build_mmio_read(__readl, "l", unsigned int, "=r", ) 63 64 build_mmio_write(writeb, "b", unsigned char, "q", :"memory") 65 build_mmio_write(writew, "w", unsigned short, "r", :"memory") 66 build_mmio_write(writel, "l", unsigned int, "r", :"memory") 67 68 build_mmio_write(__writeb, "b", unsigned char, "q", ) 69 build_mmio_write(__writew, "w", unsigned short, "r", ) 70 build_mmio_write(__writel, "l", unsigned int, "r", ) 71 72 #define readb_relaxed(a) __readb(a) 73 #define readw_relaxed(a) __readw(a) 74 #define readl_relaxed(a) __readl(a) 75 #define __raw_readb __readb 76 #define __raw_readw __readw 77 #define __raw_readl __readl 78 79 #define writeb_relaxed(v, a) __writeb(v, a) 80 #define writew_relaxed(v, a) __writew(v, a) 81 #define writel_relaxed(v, a) __writel(v, a) 82 #define __raw_writeb 
__writeb 83 #define __raw_writew __writew 84 #define __raw_writel __writel 85 86 #define mmiowb() barrier() 87 88 #ifdef CONFIG_X86_64 89 90 build_mmio_read(readq, "q", unsigned long, "=r", :"memory") 91 build_mmio_write(writeq, "q", unsigned long, "r", :"memory") 92 93 #define readq_relaxed(a) readq(a) 94 #define writeq_relaxed(v, a) writeq(v, a) 95 96 #define __raw_readq(a) readq(a) 97 #define __raw_writeq(val, addr) writeq(val, addr) 98 99 /* Let people know that we have them */ 100 #define readq readq 101 #define writeq writeq 102 103 #endif 104 105 /** 106 * virt_to_phys - map virtual addresses to physical 107 * @address: address to remap 108 * 109 * The returned physical address is the physical (CPU) mapping for 110 * the memory address given. It is only valid to use this function on 111 * addresses directly mapped or allocated via kmalloc. 112 * 113 * This function does not give bus mappings for DMA transfers. In 114 * almost all conceivable cases a device driver should not be using 115 * this function 116 */ 117 118 static inline phys_addr_t virt_to_phys(volatile void *address) 119 { 120 return __pa(address); 121 } 122 123 /** 124 * phys_to_virt - map physical address to virtual 125 * @address: address to remap 126 * 127 * The returned virtual address is a current CPU mapping for 128 * the memory address given. It is only valid to use this function on 129 * addresses that have a kernel mapping 130 * 131 * This function does not handle bus mappings for DMA transfers. In 132 * almost all conceivable cases a device driver should not be using 133 * this function 134 */ 135 136 static inline void *phys_to_virt(phys_addr_t address) 137 { 138 return __va(address); 139 } 140 141 /* 142 * Change "struct page" to physical address. 143 */ 144 #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) 145 146 /* 147 * ISA I/O bus memory addresses are 1:1 with the physical address. 
148 * However, we truncate the address to unsigned int to avoid undesirable 149 * promitions in legacy drivers. 150 */ 151 static inline unsigned int isa_virt_to_bus(volatile void *address) 152 { 153 return (unsigned int)virt_to_phys(address); 154 } 155 #define isa_page_to_bus(page) ((unsigned int)page_to_phys(page)) 156 #define isa_bus_to_virt phys_to_virt 157 158 /* 159 * However PCI ones are not necessarily 1:1 and therefore these interfaces 160 * are forbidden in portable PCI drivers. 161 * 162 * Allow them on x86 for legacy drivers, though. 163 */ 164 #define virt_to_bus virt_to_phys 165 #define bus_to_virt phys_to_virt 166 167 /** 168 * ioremap - map bus memory into CPU space 169 * @offset: bus address of the memory 170 * @size: size of the resource to map 171 * 172 * ioremap performs a platform specific sequence of operations to 173 * make bus memory CPU accessible via the readb/readw/readl/writeb/ 174 * writew/writel functions and the other mmio helpers. The returned 175 * address is not guaranteed to be usable directly as a virtual 176 * address. 177 * 178 * If the area you are trying to map is a PCI BAR you should have a 179 * look at pci_iomap(). 
180 */ 181 extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size); 182 extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size); 183 #define ioremap_uc ioremap_uc 184 185 extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); 186 extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, 187 unsigned long prot_val); 188 189 /* 190 * The default ioremap() behavior is non-cached: 191 */ 192 static inline void __iomem *ioremap(resource_size_t offset, unsigned long size) 193 { 194 return ioremap_nocache(offset, size); 195 } 196 197 extern void iounmap(volatile void __iomem *addr); 198 199 extern void set_iounmap_nonlazy(void); 200 201 #ifdef __KERNEL__ 202 203 #include <asm-generic/iomap.h> 204 205 /* 206 * Convert a virtual cached pointer to an uncached pointer 207 */ 208 #define xlate_dev_kmem_ptr(p) p 209 210 static inline void 211 memset_io(volatile void __iomem *addr, unsigned char val, size_t count) 212 { 213 memset((void __force *)addr, val, count); 214 } 215 216 static inline void 217 memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count) 218 { 219 memcpy(dst, (const void __force *)src, count); 220 } 221 222 static inline void 223 memcpy_toio(volatile void __iomem *dst, const void *src, size_t count) 224 { 225 memcpy((void __force *)dst, src, count); 226 } 227 228 /* 229 * ISA space is 'always mapped' on a typical x86 system, no need to 230 * explicitly ioremap() it. The fact that the ISA IO space is mapped 231 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values 232 * are physical addresses. The following constant pointer can be 233 * used as the IO-area pointer (it can be iounmapped as well, so the 234 * analogy with PCI is quite large): 235 */ 236 #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) 237 238 /* 239 * Cache management 240 * 241 * This needed for two cases 242 * 1. Out of order aware processors 243 * 2. 
Accidentally out of order processors (PPro errata #51) 244 */ 245 246 static inline void flush_write_buffers(void) 247 { 248 #if defined(CONFIG_X86_PPRO_FENCE) 249 asm volatile("lock; addl $0,0(%%esp)": : :"memory"); 250 #endif 251 } 252 253 #endif /* __KERNEL__ */ 254 255 extern void native_io_delay(void); 256 257 extern int io_delay_type; 258 extern void io_delay_init(void); 259 260 #if defined(CONFIG_PARAVIRT) 261 #include <asm/paravirt.h> 262 #else 263 264 static inline void slow_down_io(void) 265 { 266 native_io_delay(); 267 #ifdef REALLY_SLOW_IO 268 native_io_delay(); 269 native_io_delay(); 270 native_io_delay(); 271 #endif 272 } 273 274 #endif 275 276 #define BUILDIO(bwl, bw, type) \ 277 static inline void out##bwl(unsigned type value, int port) \ 278 { \ 279 asm volatile("out" #bwl " %" #bw "0, %w1" \ 280 : : "a"(value), "Nd"(port)); \ 281 } \ 282 \ 283 static inline unsigned type in##bwl(int port) \ 284 { \ 285 unsigned type value; \ 286 asm volatile("in" #bwl " %w1, %" #bw "0" \ 287 : "=a"(value) : "Nd"(port)); \ 288 return value; \ 289 } \ 290 \ 291 static inline void out##bwl##_p(unsigned type value, int port) \ 292 { \ 293 out##bwl(value, port); \ 294 slow_down_io(); \ 295 } \ 296 \ 297 static inline unsigned type in##bwl##_p(int port) \ 298 { \ 299 unsigned type value = in##bwl(port); \ 300 slow_down_io(); \ 301 return value; \ 302 } \ 303 \ 304 static inline void outs##bwl(int port, const void *addr, unsigned long count) \ 305 { \ 306 asm volatile("rep; outs" #bwl \ 307 : "+S"(addr), "+c"(count) : "d"(port)); \ 308 } \ 309 \ 310 static inline void ins##bwl(int port, void *addr, unsigned long count) \ 311 { \ 312 asm volatile("rep; ins" #bwl \ 313 : "+D"(addr), "+c"(count) : "d"(port)); \ 314 } 315 316 BUILDIO(b, b, char) 317 BUILDIO(w, w, short) 318 BUILDIO(l, , int) 319 320 extern void *xlate_dev_mem_ptr(phys_addr_t phys); 321 extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr); 322 323 extern int ioremap_change_attr(unsigned long vaddr, 
unsigned long size, 324 enum page_cache_mode pcm); 325 extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size); 326 extern void __iomem *ioremap_wt(resource_size_t offset, unsigned long size); 327 328 extern bool is_early_ioremap_ptep(pte_t *ptep); 329 330 #ifdef CONFIG_XEN 331 #include <xen/xen.h> 332 struct bio_vec; 333 334 extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, 335 const struct bio_vec *vec2); 336 337 #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ 338 (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \ 339 (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2))) 340 #endif /* CONFIG_XEN */ 341 342 #define IO_SPACE_LIMIT 0xffff 343 344 #ifdef CONFIG_MTRR 345 extern int __must_check arch_phys_wc_index(int handle); 346 #define arch_phys_wc_index arch_phys_wc_index 347 348 extern int __must_check arch_phys_wc_add(unsigned long base, 349 unsigned long size); 350 extern void arch_phys_wc_del(int handle); 351 #define arch_phys_wc_add arch_phys_wc_add 352 #endif 353 354 #ifdef CONFIG_X86_PAT 355 extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size); 356 extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size); 357 #define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc 358 #endif 359 360 #endif /* _ASM_X86_IO_H */
1 2 /* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */ 3 /* 4 Written 1998-2000 by Donald Becker. 5 6 Current maintainer is Ion Badulescu <ionut ta badula tod org>. Please 7 send all bug reports to me, and not to Donald Becker, as this code 8 has been heavily modified from Donald's original version. 9 10 This software may be used and distributed according to the terms of 11 the GNU General Public License (GPL), incorporated herein by reference. 12 Drivers based on or derived from this code fall under the GPL and must 13 retain the authorship, copyright and license notice. This file is not 14 a complete program and may only be used when the entire operating 15 system is licensed under the GPL. 16 17 The information below comes from Donald Becker's original driver: 18 19 The author may be reached as becker@scyld.com, or C/O 20 Scyld Computing Corporation 21 410 Severn Ave., Suite 210 22 Annapolis MD 21403 23 24 Support and updates available at 25 http://www.scyld.com/network/starfire.html 26 [link no longer provides useful info -jgarzik] 27 28 */ 29 30 #define DRV_NAME "starfire" 31 #define DRV_VERSION "2.1" 32 #define DRV_RELDATE "July 6, 2008" 33 34 #include <linux/interrupt.h> 35 #include <linux/module.h> 36 #include <linux/kernel.h> 37 #include <linux/pci.h> 38 #include <linux/netdevice.h> 39 #include <linux/etherdevice.h> 40 #include <linux/init.h> 41 #include <linux/delay.h> 42 #include <linux/crc32.h> 43 #include <linux/ethtool.h> 44 #include <linux/mii.h> 45 #include <linux/if_vlan.h> 46 #include <linux/mm.h> 47 #include <linux/firmware.h> 48 #include <asm/processor.h> /* Processor type for cache alignment. */ 49 #include <linux/uaccess.h> 50 #include <asm/io.h> 51 52 /* 53 * The current frame processor firmware fails to checksum a fragment 54 * of length 1. If and when this is fixed, the #define below can be removed. 
55 */ 56 #define HAS_BROKEN_FIRMWARE 57 58 /* 59 * If using the broken firmware, data must be padded to the next 32-bit boundary. 60 */ 61 #ifdef HAS_BROKEN_FIRMWARE 62 #define PADDING_MASK 3 63 #endif 64 65 /* 66 * Define this if using the driver with the zero-copy patch 67 */ 68 #define ZEROCOPY 69 70 #if IS_ENABLED(CONFIG_VLAN_8021Q) 71 #define VLAN_SUPPORT 72 #endif 73 74 /* The user-configurable values. 75 These may be modified when a driver module is loaded.*/ 76 77 /* Used for tuning interrupt latency vs. overhead. */ 78 static int intr_latency; 79 static int small_frames; 80 81 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ 82 static int max_interrupt_work = 20; 83 static int mtu; 84 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast). 85 The Starfire has a 512 element hash table based on the Ethernet CRC. */ 86 static const int multicast_filter_limit = 512; 87 /* Whether to do TCP/UDP checksums in hardware */ 88 static int enable_hw_cksum = 1; 89 90 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ 91 /* 92 * Set the copy breakpoint for the copy-only-tiny-frames scheme. 93 * Setting to > 1518 effectively disables this feature. 94 * 95 * NOTE: 96 * The ia64 doesn't allow for unaligned loads even of integers being 97 * misaligned on a 2 byte boundary. Thus always force copying of 98 * packets as the starfire doesn't allow for misaligned DMAs ;-( 99 * 23/10/2000 - Jes 100 * 101 * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64, 102 * at least, having unaligned frames leads to a rather serious performance 103 * penalty. -Ion 104 */ 105 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) 106 static int rx_copybreak = PKT_BUF_SZ; 107 #else 108 static int rx_copybreak /* = 0 */; 109 #endif 110 111 /* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. 
*/ 112 #ifdef __sparc__ 113 #define DMA_BURST_SIZE 64 114 #else 115 #define DMA_BURST_SIZE 128 116 #endif 117 118 /* Operational parameters that are set at compile time. */ 119 120 /* The "native" ring sizes are either 256 or 2048. 121 However in some modes a descriptor may be marked to wrap the ring earlier. 122 */ 123 #define RX_RING_SIZE 256 124 #define TX_RING_SIZE 32 125 /* The completion queues are fixed at 1024 entries i.e. 4K or 8KB. */ 126 #define DONE_Q_SIZE 1024 127 /* All queues must be aligned on a 256-byte boundary */ 128 #define QUEUE_ALIGN 256 129 130 #if RX_RING_SIZE > 256 131 #define RX_Q_ENTRIES Rx2048QEntries 132 #else 133 #define RX_Q_ENTRIES Rx256QEntries 134 #endif 135 136 /* Operational parameters that usually are not changed. */ 137 /* Time in jiffies before concluding the transmitter is hung. */ 138 #define TX_TIMEOUT (2 * HZ) 139 140 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 141 /* 64-bit dma_addr_t */ 142 #define ADDR_64BITS /* This chip uses 64 bit addresses. */ 143 #define netdrv_addr_t __le64 144 #define cpu_to_dma(x) cpu_to_le64(x) 145 #define dma_to_cpu(x) le64_to_cpu(x) 146 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit 147 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit 148 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit 149 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit 150 #define RX_DESC_ADDR_SIZE RxDescAddr64bit 151 #else /* 32-bit dma_addr_t */ 152 #define netdrv_addr_t __le32 153 #define cpu_to_dma(x) cpu_to_le32(x) 154 #define dma_to_cpu(x) le32_to_cpu(x) 155 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit 156 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit 157 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit 158 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit 159 #define RX_DESC_ADDR_SIZE RxDescAddr32bit 160 #endif 161 162 #define skb_first_frag_len(skb) skb_headlen(skb) 163 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) 164 165 /* Firmware names */ 166 #define FIRMWARE_RX "adaptec/starfire_rx.bin" 167 #define FIRMWARE_TX 
"adaptec/starfire_tx.bin" 168 169 /* These identify the driver base version and may not be removed. */ 170 static const char version[] = 171 KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n" 172 " (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n"; 173 174 MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 175 MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver"); 176 MODULE_LICENSE("GPL"); 177 MODULE_VERSION(DRV_VERSION); 178 MODULE_FIRMWARE(FIRMWARE_RX); 179 MODULE_FIRMWARE(FIRMWARE_TX); 180 181 module_param(max_interrupt_work, int, 0); 182 module_param(mtu, int, 0); 183 module_param(debug, int, 0); 184 module_param(rx_copybreak, int, 0); 185 module_param(intr_latency, int, 0); 186 module_param(small_frames, int, 0); 187 module_param(enable_hw_cksum, int, 0); 188 MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt"); 189 MODULE_PARM_DESC(mtu, "MTU (all boards)"); 190 MODULE_PARM_DESC(debug, "Debug level (0-6)"); 191 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); 192 MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds"); 193 MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)"); 194 MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)"); 195 196 /* 197 Theory of Operation 198 199 I. Board Compatibility 200 201 This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter. 202 203 II. Board-specific settings 204 205 III. Driver operation 206 207 IIIa. Ring buffers 208 209 The Starfire hardware uses multiple fixed-size descriptor queues/rings. The 210 ring sizes are set fixed by the hardware, but may optionally be wrapped 211 earlier by the END bit in the descriptor. 212 This driver uses that hardware queue size for the Rx ring, where a large 213 number of entries has no ill effect beyond increases the potential backlog. 
214 The Tx ring is wrapped with the END bit, since a large hardware Tx queue 215 disables the queue layer priority ordering and we have no mechanism to 216 utilize the hardware two-level priority queue. When modifying the 217 RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning 218 levels. 219 220 IIIb/c. Transmit/Receive Structure 221 222 See the Adaptec manual for the many possible structures, and options for 223 each structure. There are far too many to document all of them here. 224 225 For transmit this driver uses type 0/1 transmit descriptors (depending 226 on the 32/64 bitness of the architecture), and relies on automatic 227 minimum-length padding. It does not use the completion queue 228 consumer index, but instead checks for non-zero status entries. 229 230 For receive this driver uses type 2/3 receive descriptors. The driver 231 allocates full frame size skbuffs for the Rx ring buffers, so all frames 232 should fit in a single descriptor. The driver does not use the completion 233 queue consumer index, but instead checks for non-zero status entries. 234 235 When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff 236 is allocated and the frame is copied to the new skbuff. When the incoming 237 frame is larger, the skbuff is passed directly up the protocol stack. 238 Buffers consumed this way are replaced by newly allocated skbuffs in a later 239 phase of receive. 240 241 A notable aspect of operation is that unaligned buffers are not permitted by 242 the Starfire hardware. Thus the IP header at offset 14 in an ethernet frame 243 isn't longword aligned, which may cause problems on some machine 244 e.g. Alphas and IA64. For these architectures, the driver is forced to copy 245 the frame into a new skbuff unconditionally. Copied frames are put into the 246 skbuff at an offset of "+2", thus 16-byte aligning the IP header. 247 248 IIId. 
Synchronization 249 250 The driver runs as two independent, single-threaded flows of control. One 251 is the send-packet routine, which enforces single-threaded use by the 252 dev->tbusy flag. The other thread is the interrupt handler, which is single 253 threaded by the hardware and interrupt handling software. 254 255 The send packet thread has partial control over the Tx ring and the netif_queue 256 status. If the number of free Tx slots in the ring falls below a certain number 257 (currently hardcoded to 4), it signals the upper layer to stop the queue. 258 259 The interrupt handler has exclusive control over the Rx ring and records stats 260 from the Tx ring. After reaping the stats, it marks the Tx queue entry as 261 empty by incrementing the dirty_tx mark. Iff the netif_queue is stopped and the 262 number of free Tx slow is above the threshold, it signals the upper layer to 263 restart the queue. 264 265 IV. Notes 266 267 IVb. References 268 269 The Adaptec Starfire manuals, available only from Adaptec. 270 http://www.scyld.com/expert/100mbps.html 271 http://www.scyld.com/expert/NWay.html 272 273 IVc. Errata 274 275 - StopOnPerr is broken, don't enable 276 - Hardware ethernet padding exposes random data, perform software padding 277 instead (unverified -- works correctly for all the hardware I have) 278 279 */ 280 281 282 283 enum chip_capability_flags {CanHaveMII=1, }; 284 285 enum chipset { 286 CH_6915 = 0, 287 }; 288 289 static const struct pci_device_id starfire_pci_tbl[] = { 290 { PCI_VDEVICE(ADAPTEC, 0x6915), CH_6915 }, 291 { 0, } 292 }; 293 MODULE_DEVICE_TABLE(pci, starfire_pci_tbl); 294 295 /* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */ 296 static const struct chip_info { 297 const char *name; 298 int drv_flags; 299 } netdrv_tbl[] = { 300 { "Adaptec Starfire 6915", CanHaveMII }, 301 }; 302 303 304 /* Offsets to the device registers. 305 Unlike software-only systems, device drivers interact with complex hardware. 
306 It's not useful to define symbolic names for every register bit in the 307 device. The name can only partially document the semantics and make 308 the driver longer and more difficult to read. 309 In general, only the important configuration values or bits changed 310 multiple times should be defined symbolically. 311 */ 312 enum register_offsets { 313 PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074, 314 IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088, 315 MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000, 316 GPIOCtrl=0x5008C, TxDescCtrl=0x50090, 317 TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */ 318 TxRingHiAddr=0x5009C, /* 64 bit address extension. */ 319 TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4, 320 TxThreshold=0x500B0, 321 CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8, 322 RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0, 323 CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0, 324 RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0, 325 RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4, 326 TxMode=0x55000, VlanType=0x55064, 327 PerfFilterTable=0x56000, HashTable=0x56100, 328 TxGfpMem=0x58000, RxGfpMem=0x5a000, 329 }; 330 331 /* 332 * Bits in the interrupt status/mask registers. 333 * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register 334 * enables all the interrupt sources that are or'ed into those status bits. 
335 */ 336 enum intr_status_bits { 337 IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000, 338 IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000, 339 IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000, 340 IntrTxComplQLow=0x200000, IntrPCI=0x100000, 341 IntrDMAErr=0x080000, IntrTxDataLow=0x040000, 342 IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000, 343 IntrNormalSummary=0x8000, IntrTxDone=0x4000, 344 IntrTxDMADone=0x2000, IntrTxEmpty=0x1000, 345 IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400, 346 IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100, 347 IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40, 348 IntrNoTxCsum=0x20, IntrTxBadID=0x10, 349 IntrHiPriTxBadID=0x08, IntrRxGfp=0x04, 350 IntrTxGfp=0x02, IntrPCIPad=0x01, 351 /* not quite bits */ 352 IntrRxDone=IntrRxQ2Done | IntrRxQ1Done, 353 IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low, 354 IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe, 355 }; 356 357 /* Bits in the RxFilterMode register. */ 358 enum rx_mode_bits { 359 AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01, 360 AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30, 361 PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200, 362 WakeupOnGFP=0x0800, 363 }; 364 365 /* Bits in the TxMode register */ 366 enum tx_mode_bits { 367 MiiSoftReset=0x8000, MIILoopback=0x4000, 368 TxFlowEnable=0x0800, RxFlowEnable=0x0400, 369 PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01, 370 }; 371 372 /* Bits in the TxDescCtrl register. */ 373 enum tx_ctrl_bits { 374 TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20, 375 TxDescSpace128=0x30, TxDescSpace256=0x40, 376 TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02, 377 TxDescType3=0x03, TxDescType4=0x04, 378 TxNoDMACompletion=0x08, 379 TxDescQAddr64bit=0x80, TxDescQAddr32bit=0, 380 TxHiPriFIFOThreshShift=24, TxPadLenShift=16, 381 TxDMABurstSizeShift=8, 382 }; 383 384 /* Bits in the RxDescQCtrl register. 
*/ 385 enum rx_ctrl_bits { 386 RxBufferLenShift=16, RxMinDescrThreshShift=0, 387 RxPrefetchMode=0x8000, RxVariableQ=0x2000, 388 Rx2048QEntries=0x4000, Rx256QEntries=0, 389 RxDescAddr64bit=0x1000, RxDescAddr32bit=0, 390 RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0, 391 RxDescSpace4=0x000, RxDescSpace8=0x100, 392 RxDescSpace16=0x200, RxDescSpace32=0x300, 393 RxDescSpace64=0x400, RxDescSpace128=0x500, 394 RxConsumerWrEn=0x80, 395 }; 396 397 /* Bits in the RxDMACtrl register. */ 398 enum rx_dmactrl_bits { 399 RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000, 400 RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000, 401 RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000, 402 RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000, 403 RxChecksumRejectTCPOnly=0x01000000, 404 RxCompletionQ2Enable=0x800000, 405 RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000, 406 RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000, 407 RxDMAQ2NonIP=0x400000, 408 RxUseBackupQueue=0x080000, RxDMACRC=0x040000, 409 RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8, 410 RxBurstSizeShift=0, 411 }; 412 413 /* Bits in the RxCompletionAddr register */ 414 enum rx_compl_bits { 415 RxComplQAddr64bit=0x80, RxComplQAddr32bit=0, 416 RxComplProducerWrEn=0x40, 417 RxComplType0=0x00, RxComplType1=0x10, 418 RxComplType2=0x20, RxComplType3=0x30, 419 RxComplThreshShift=0, 420 }; 421 422 /* Bits in the TxCompletionAddr register */ 423 enum tx_compl_bits { 424 TxComplQAddr64bit=0x80, TxComplQAddr32bit=0, 425 TxComplProducerWrEn=0x40, 426 TxComplIntrStatus=0x20, 427 CommonQueueMode=0x10, 428 TxComplThreshShift=0, 429 }; 430 431 /* Bits in the GenCtrl register */ 432 enum gen_ctrl_bits { 433 RxEnable=0x05, TxEnable=0x0a, 434 RxGFPEnable=0x10, TxGFPEnable=0x20, 435 }; 436 437 /* Bits in the IntrTimerCtrl register */ 438 enum intr_ctrl_bits { 439 Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100, 440 SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600, 441 
IntrLatencyMask=0x1f, 442 }; 443 444 /* The Rx and Tx buffer descriptors. */ 445 struct starfire_rx_desc { 446 netdrv_addr_t rxaddr; 447 }; 448 enum rx_desc_bits { 449 RxDescValid=1, RxDescEndRing=2, 450 }; 451 452 /* Completion queue entry. */ 453 struct short_rx_done_desc { 454 __le32 status; /* Low 16 bits is length. */ 455 }; 456 struct basic_rx_done_desc { 457 __le32 status; /* Low 16 bits is length. */ 458 __le16 vlanid; 459 __le16 status2; 460 }; 461 struct csum_rx_done_desc { 462 __le32 status; /* Low 16 bits is length. */ 463 __le16 csum; /* Partial checksum */ 464 __le16 status2; 465 }; 466 struct full_rx_done_desc { 467 __le32 status; /* Low 16 bits is length. */ 468 __le16 status3; 469 __le16 status2; 470 __le16 vlanid; 471 __le16 csum; /* partial checksum */ 472 __le32 timestamp; 473 }; 474 /* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */ 475 #ifdef VLAN_SUPPORT 476 typedef struct full_rx_done_desc rx_done_desc; 477 #define RxComplType RxComplType3 478 #else /* not VLAN_SUPPORT */ 479 typedef struct csum_rx_done_desc rx_done_desc; 480 #define RxComplType RxComplType2 481 #endif /* not VLAN_SUPPORT */ 482 483 enum rx_done_bits { 484 RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000, 485 }; 486 487 /* Type 1 Tx descriptor. */ 488 struct starfire_tx_desc_1 { 489 __le32 status; /* Upper bits are status, lower 16 length. */ 490 __le32 addr; 491 }; 492 493 /* Type 2 Tx descriptor. */ 494 struct starfire_tx_desc_2 { 495 __le32 status; /* Upper bits are status, lower 16 length. 
*/ 496 __le32 reserved; 497 __le64 addr; 498 }; 499 500 #ifdef ADDR_64BITS 501 typedef struct starfire_tx_desc_2 starfire_tx_desc; 502 #define TX_DESC_TYPE TxDescType2 503 #else /* not ADDR_64BITS */ 504 typedef struct starfire_tx_desc_1 starfire_tx_desc; 505 #define TX_DESC_TYPE TxDescType1 506 #endif /* not ADDR_64BITS */ 507 #define TX_DESC_SPACING TxDescSpaceUnlim 508 509 enum tx_desc_bits { 510 TxDescID=0xB0000000, 511 TxCRCEn=0x01000000, TxDescIntr=0x08000000, 512 TxRingWrap=0x04000000, TxCalTCP=0x02000000, 513 }; 514 struct tx_done_desc { 515 __le32 status; /* timestamp, index. */ 516 #if 0 517 __le32 intrstatus; /* interrupt status */ 518 #endif 519 }; 520 521 struct rx_ring_info { 522 struct sk_buff *skb; 523 dma_addr_t mapping; 524 }; 525 struct tx_ring_info { 526 struct sk_buff *skb; 527 dma_addr_t mapping; 528 unsigned int used_slots; 529 }; 530 531 #define PHY_CNT 2 532 struct netdev_private { 533 /* Descriptor rings first for alignment. */ 534 struct starfire_rx_desc *rx_ring; 535 starfire_tx_desc *tx_ring; 536 dma_addr_t rx_ring_dma; 537 dma_addr_t tx_ring_dma; 538 /* The addresses of rx/tx-in-place skbuffs. */ 539 struct rx_ring_info rx_info[RX_RING_SIZE]; 540 struct tx_ring_info tx_info[TX_RING_SIZE]; 541 /* Pointers to completion queues (full pages). */ 542 rx_done_desc *rx_done_q; 543 dma_addr_t rx_done_q_dma; 544 unsigned int rx_done; 545 struct tx_done_desc *tx_done_q; 546 dma_addr_t tx_done_q_dma; 547 unsigned int tx_done; 548 struct napi_struct napi; 549 struct net_device *dev; 550 struct pci_dev *pci_dev; 551 #ifdef VLAN_SUPPORT 552 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; 553 #endif 554 void *queue_mem; 555 dma_addr_t queue_mem_dma; 556 size_t queue_mem_size; 557 558 /* Frequently used values: keep some adjacent for cache effect. */ 559 spinlock_t lock; 560 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */ 561 unsigned int cur_tx, dirty_tx, reap_tx; 562 unsigned int rx_buf_sz; /* Based on MTU+slack. 
*/ 563 /* These values keep track of the transceiver/media in use. */ 564 int speed100; /* Set if speed == 100MBit. */ 565 u32 tx_mode; 566 u32 intr_timer_ctrl; 567 u8 tx_threshold; 568 /* MII transceiver section. */ 569 struct mii_if_info mii_if; /* MII lib hooks/info */ 570 int phy_cnt; /* MII device addresses. */ 571 unsigned char phys[PHY_CNT]; /* MII device addresses. */ 572 void __iomem *base; 573 }; 574 575 576 static int mdio_read(struct net_device *dev, int phy_id, int location); 577 static void mdio_write(struct net_device *dev, int phy_id, int location, int value); 578 static int netdev_open(struct net_device *dev); 579 static void check_duplex(struct net_device *dev); 580 static void tx_timeout(struct net_device *dev); 581 static void init_ring(struct net_device *dev); 582 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev); 583 static irqreturn_t intr_handler(int irq, void *dev_instance); 584 static void netdev_error(struct net_device *dev, int intr_status); 585 static int __netdev_rx(struct net_device *dev, int *quota); 586 static int netdev_poll(struct napi_struct *napi, int budget); 587 static void refill_rx_ring(struct net_device *dev); 588 static void netdev_error(struct net_device *dev, int intr_status); 589 static void set_rx_mode(struct net_device *dev); 590 static struct net_device_stats *get_stats(struct net_device *dev); 591 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 592 static int netdev_close(struct net_device *dev); 593 static void netdev_media_change(struct net_device *dev); 594 static const struct ethtool_ops ethtool_ops; 595 596 597 #ifdef VLAN_SUPPORT 598 static int netdev_vlan_rx_add_vid(struct net_device *dev, 599 __be16 proto, u16 vid) 600 { 601 struct netdev_private *np = netdev_priv(dev); 602 603 spin_lock(&np->lock); 604 if (debug > 1) 605 printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid); 606 set_bit(vid, np->active_vlans); 607 set_rx_mode(dev); 608 
spin_unlock(&np->lock); 609 610 return 0; 611 } 612 613 static int netdev_vlan_rx_kill_vid(struct net_device *dev, 614 __be16 proto, u16 vid) 615 { 616 struct netdev_private *np = netdev_priv(dev); 617 618 spin_lock(&np->lock); 619 if (debug > 1) 620 printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid); 621 clear_bit(vid, np->active_vlans); 622 set_rx_mode(dev); 623 spin_unlock(&np->lock); 624 625 return 0; 626 } 627 #endif /* VLAN_SUPPORT */ 628 629 630 static const struct net_device_ops netdev_ops = { 631 .ndo_open = netdev_open, 632 .ndo_stop = netdev_close, 633 .ndo_start_xmit = start_tx, 634 .ndo_tx_timeout = tx_timeout, 635 .ndo_get_stats = get_stats, 636 .ndo_set_rx_mode = set_rx_mode, 637 .ndo_do_ioctl = netdev_ioctl, 638 .ndo_set_mac_address = eth_mac_addr, 639 .ndo_validate_addr = eth_validate_addr, 640 #ifdef VLAN_SUPPORT 641 .ndo_vlan_rx_add_vid = netdev_vlan_rx_add_vid, 642 .ndo_vlan_rx_kill_vid = netdev_vlan_rx_kill_vid, 643 #endif 644 }; 645 646 static int starfire_init_one(struct pci_dev *pdev, 647 const struct pci_device_id *ent) 648 { 649 struct device *d = &pdev->dev; 650 struct netdev_private *np; 651 int i, irq, chip_idx = ent->driver_data; 652 struct net_device *dev; 653 long ioaddr; 654 void __iomem *base; 655 int drv_flags, io_size; 656 int boguscnt; 657 658 /* when built into the kernel, we only print version if device is found */ 659 #ifndef MODULE 660 static int printed_version; 661 if (!printed_version++) 662 printk(version); 663 #endif 664 665 if (pci_enable_device (pdev)) 666 return -EIO; 667 668 ioaddr = pci_resource_start(pdev, 0); 669 io_size = pci_resource_len(pdev, 0); 670 if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) { 671 dev_err(d, "no PCI MEM resources, aborting\n"); 672 return -ENODEV; 673 } 674 675 dev = alloc_etherdev(sizeof(*np)); 676 if (!dev) 677 return -ENOMEM; 678 679 SET_NETDEV_DEV(dev, &pdev->dev); 680 681 irq = pdev->irq; 682 683 if (pci_request_regions (pdev, DRV_NAME)) { 684 
dev_err(d, "cannot reserve PCI resources, aborting\n"); 685 goto err_out_free_netdev; 686 } 687 688 base = ioremap(ioaddr, io_size); 689 if (!base) { 690 dev_err(d, "cannot remap %#x @ %#lx, aborting\n", 691 io_size, ioaddr); 692 goto err_out_free_res; 693 } 694 695 pci_set_master(pdev); 696 697 /* enable MWI -- it vastly improves Rx performance on sparc64 */ 698 pci_try_set_mwi(pdev); 699 700 #ifdef ZEROCOPY 701 /* Starfire can do TCP/UDP checksumming */ 702 if (enable_hw_cksum) 703 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 704 #endif /* ZEROCOPY */ 705 706 #ifdef VLAN_SUPPORT 707 dev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; 708 #endif /* VLAN_RX_KILL_VID */ 709 #ifdef ADDR_64BITS 710 dev->features |= NETIF_F_HIGHDMA; 711 #endif /* ADDR_64BITS */ 712 713 /* Serial EEPROM reads are hidden by the hardware. */ 714 for (i = 0; i < 6; i++) 715 dev->dev_addr[i] = readb(base + EEPROMCtrl + 20 - i); 716 717 #if ! defined(final_version) /* Dump the EEPROM contents during development. */ 718 if (debug > 4) 719 for (i = 0; i < 0x20; i++) 720 printk("%2.2x%s", 721 (unsigned int)readb(base + EEPROMCtrl + i), 722 i % 16 != 15 ? " " : "\n"); 723 #endif 724 725 /* Issue soft reset */ 726 writel(MiiSoftReset, base + TxMode); 727 udelay(1000); 728 writel(0, base + TxMode); 729 730 /* Reset the chip to erase previous misconfiguration. 
*/ 731 writel(1, base + PCIDeviceConfig); 732 boguscnt = 1000; 733 while (--boguscnt > 0) { 734 udelay(10); 735 if ((readl(base + PCIDeviceConfig) & 1) == 0) 736 break; 737 } 738 if (boguscnt == 0) 739 printk("%s: chipset reset never completed!\n", dev->name); 740 /* wait a little longer */ 741 udelay(1000); 742 743 np = netdev_priv(dev); 744 np->dev = dev; 745 np->base = base; 746 spin_lock_init(&np->lock); 747 pci_set_drvdata(pdev, dev); 748 749 np->pci_dev = pdev; 750 751 np->mii_if.dev = dev; 752 np->mii_if.mdio_read = mdio_read; 753 np->mii_if.mdio_write = mdio_write; 754 np->mii_if.phy_id_mask = 0x1f; 755 np->mii_if.reg_num_mask = 0x1f; 756 757 drv_flags = netdrv_tbl[chip_idx].drv_flags; 758 759 np->speed100 = 1; 760 761 /* timer resolution is 128 * 0.8us */ 762 np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) | 763 Timer10X | EnableIntrMasking; 764 765 if (small_frames > 0) { 766 np->intr_timer_ctrl |= SmallFrameBypass; 767 switch (small_frames) { 768 case 1 ... 64: 769 np->intr_timer_ctrl |= SmallFrame64; 770 break; 771 case 65 ... 128: 772 np->intr_timer_ctrl |= SmallFrame128; 773 break; 774 case 129 ... 
256: 775 np->intr_timer_ctrl |= SmallFrame256; 776 break; 777 default: 778 np->intr_timer_ctrl |= SmallFrame512; 779 if (small_frames > 512) 780 printk("Adjusting small_frames down to 512\n"); 781 break; 782 } 783 } 784 785 dev->netdev_ops = &netdev_ops; 786 dev->watchdog_timeo = TX_TIMEOUT; 787 dev->ethtool_ops = &ethtool_ops; 788 789 netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work); 790 791 if (mtu) 792 dev->mtu = mtu; 793 794 if (register_netdev(dev)) 795 goto err_out_cleardev; 796 797 printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n", 798 dev->name, netdrv_tbl[chip_idx].name, base, 799 dev->dev_addr, irq); 800 801 if (drv_flags & CanHaveMII) { 802 int phy, phy_idx = 0; 803 int mii_status; 804 for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) { 805 mdio_write(dev, phy, MII_BMCR, BMCR_RESET); 806 mdelay(100); 807 boguscnt = 1000; 808 while (--boguscnt > 0) 809 if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0) 810 break; 811 if (boguscnt == 0) { 812 printk("%s: PHY#%d reset never completed!\n", dev->name, phy); 813 continue; 814 } 815 mii_status = mdio_read(dev, phy, MII_BMSR); 816 if (mii_status != 0) { 817 np->phys[phy_idx++] = phy; 818 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE); 819 printk(KERN_INFO "%s: MII PHY found at address %d, status " 820 "%#4.4x advertising %#4.4x.\n", 821 dev->name, phy, mii_status, np->mii_if.advertising); 822 /* there can be only one PHY on-board */ 823 break; 824 } 825 } 826 np->phy_cnt = phy_idx; 827 if (np->phy_cnt > 0) 828 np->mii_if.phy_id = np->phys[0]; 829 else 830 memset(&np->mii_if, 0, sizeof(np->mii_if)); 831 } 832 833 printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n", 834 dev->name, enable_hw_cksum ? 
"enabled" : "disabled"); 835 return 0; 836 837 err_out_cleardev: 838 iounmap(base); 839 err_out_free_res: 840 pci_release_regions (pdev); 841 err_out_free_netdev: 842 free_netdev(dev); 843 return -ENODEV; 844 } 845 846 847 /* Read the MII Management Data I/O (MDIO) interfaces. */ 848 static int mdio_read(struct net_device *dev, int phy_id, int location) 849 { 850 struct netdev_private *np = netdev_priv(dev); 851 void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2); 852 int result, boguscnt=1000; 853 /* ??? Should we add a busy-wait here? */ 854 do { 855 result = readl(mdio_addr); 856 } while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0); 857 if (boguscnt == 0) 858 return 0; 859 if ((result & 0xffff) == 0xffff) 860 return 0; 861 return result & 0xffff; 862 } 863 864 865 static void mdio_write(struct net_device *dev, int phy_id, int location, int value) 866 { 867 struct netdev_private *np = netdev_priv(dev); 868 void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2); 869 writel(value, mdio_addr); 870 /* The busy-wait will occur before a read. */ 871 } 872 873 874 static int netdev_open(struct net_device *dev) 875 { 876 const struct firmware *fw_rx, *fw_tx; 877 const __be32 *fw_rx_data, *fw_tx_data; 878 struct netdev_private *np = netdev_priv(dev); 879 void __iomem *ioaddr = np->base; 880 const int irq = np->pci_dev->irq; 881 int i, retval; 882 size_t tx_size, rx_size; 883 size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size; 884 885 /* Do we ever need to reset the chip??? */ 886 887 retval = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev); 888 if (retval) 889 return retval; 890 891 /* Disable the Rx and Tx, and reset the chip. */ 892 writel(0, ioaddr + GenCtrl); 893 writel(1, ioaddr + PCIDeviceConfig); 894 if (debug > 1) 895 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n", 896 dev->name, irq); 897 898 /* Allocate the various queues. 
*/ 899 if (!np->queue_mem) { 900 tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN; 901 rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN; 902 tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN; 903 rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE; 904 np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size; 905 np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma); 906 if (np->queue_mem == NULL) { 907 free_irq(irq, dev); 908 return -ENOMEM; 909 } 910 911 np->tx_done_q = np->queue_mem; 912 np->tx_done_q_dma = np->queue_mem_dma; 913 np->rx_done_q = (void *) np->tx_done_q + tx_done_q_size; 914 np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size; 915 np->tx_ring = (void *) np->rx_done_q + rx_done_q_size; 916 np->tx_ring_dma = np->rx_done_q_dma + rx_done_q_size; 917 np->rx_ring = (void *) np->tx_ring + tx_ring_size; 918 np->rx_ring_dma = np->tx_ring_dma + tx_ring_size; 919 } 920 921 /* Start with no carrier, it gets adjusted later */ 922 netif_carrier_off(dev); 923 init_ring(dev); 924 /* Set the size of the Rx buffers. */ 925 writel((np->rx_buf_sz << RxBufferLenShift) | 926 (0 << RxMinDescrThreshShift) | 927 RxPrefetchMode | RxVariableQ | 928 RX_Q_ENTRIES | 929 RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE | 930 RxDescSpace4, 931 ioaddr + RxDescQCtrl); 932 933 /* Set up the Rx DMA controller. 
*/ 934 writel(RxChecksumIgnore | 935 (0 << RxEarlyIntThreshShift) | 936 (6 << RxHighPrioThreshShift) | 937 ((DMA_BURST_SIZE / 32) << RxBurstSizeShift), 938 ioaddr + RxDMACtrl); 939 940 /* Set Tx descriptor */ 941 writel((2 << TxHiPriFIFOThreshShift) | 942 (0 << TxPadLenShift) | 943 ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) | 944 TX_DESC_Q_ADDR_SIZE | 945 TX_DESC_SPACING | TX_DESC_TYPE, 946 ioaddr + TxDescCtrl); 947 948 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr); 949 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr); 950 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr); 951 writel(np->rx_ring_dma, ioaddr + RxDescQAddr); 952 writel(np->tx_ring_dma, ioaddr + TxRingPtr); 953 954 writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr); 955 writel(np->rx_done_q_dma | 956 RxComplType | 957 (0 << RxComplThreshShift), 958 ioaddr + RxCompletionAddr); 959 960 if (debug > 1) 961 printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name); 962 963 /* Fill both the Tx SA register and the Rx perfect filter. */ 964 for (i = 0; i < 6; i++) 965 writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i); 966 /* The first entry is special because it bypasses the VLAN filter. 967 Don't use it. */ 968 writew(0, ioaddr + PerfFilterTable); 969 writew(0, ioaddr + PerfFilterTable + 4); 970 writew(0, ioaddr + PerfFilterTable + 8); 971 for (i = 1; i < 16; i++) { 972 __be16 *eaddrs = (__be16 *)dev->dev_addr; 973 void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16; 974 writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4; 975 writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4; 976 writew(be16_to_cpu(eaddrs[0]), setup_frm); setup_frm += 8; 977 } 978 979 /* Initialize other registers. */ 980 /* Configure the PCI bus bursts and FIFO thresholds. */ 981 np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable; /* modified when link is up. 
*/ 982 writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode); 983 udelay(1000); 984 writel(np->tx_mode, ioaddr + TxMode); 985 np->tx_threshold = 4; 986 writel(np->tx_threshold, ioaddr + TxThreshold); 987 988 writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl); 989 990 napi_enable(&np->napi); 991 992 netif_start_queue(dev); 993 994 if (debug > 1) 995 printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name); 996 set_rx_mode(dev); 997 998 np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE); 999 check_duplex(dev); 1000 1001 /* Enable GPIO interrupts on link change */ 1002 writel(0x0f00ff00, ioaddr + GPIOCtrl); 1003 1004 /* Set the interrupt mask */ 1005 writel(IntrRxDone | IntrRxEmpty | IntrDMAErr | 1006 IntrTxDMADone | IntrStatsMax | IntrLinkChange | 1007 IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID, 1008 ioaddr + IntrEnable); 1009 /* Enable PCI interrupts. */ 1010 writel(0x00800000 | readl(ioaddr + PCIDeviceConfig), 1011 ioaddr + PCIDeviceConfig); 1012 1013 #ifdef VLAN_SUPPORT 1014 /* Set VLAN type to 802.1q */ 1015 writel(ETH_P_8021Q, ioaddr + VlanType); 1016 #endif /* VLAN_SUPPORT */ 1017 1018 retval = request_firmware(&fw_rx, FIRMWARE_RX, &np->pci_dev->dev); 1019 if (retval) { 1020 printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n", 1021 FIRMWARE_RX); 1022 goto out_init; 1023 } 1024 if (fw_rx->size % 4) { 1025 printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n", 1026 fw_rx->size, FIRMWARE_RX); 1027 retval = -EINVAL; 1028 goto out_rx; 1029 } 1030 retval = request_firmware(&fw_tx, FIRMWARE_TX, &np->pci_dev->dev); 1031 if (retval) { 1032 printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n", 1033 FIRMWARE_TX); 1034 goto out_rx; 1035 } 1036 if (fw_tx->size % 4) { 1037 printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n", 1038 fw_tx->size, FIRMWARE_TX); 1039 retval = -EINVAL; 1040 goto out_tx; 1041 } 1042 fw_rx_data = (const __be32 *)&fw_rx->data[0]; 1043 fw_tx_data = (const __be32 *)&fw_tx->data[0]; 1044 rx_size = 
fw_rx->size / 4; 1045 tx_size = fw_tx->size / 4; 1046 1047 /* Load Rx/Tx firmware into the frame processors */ 1048 for (i = 0; i < rx_size; i++) 1049 writel(be32_to_cpup(&fw_rx_data[i]), ioaddr + RxGfpMem + i * 4); 1050 for (i = 0; i < tx_size; i++) 1051 writel(be32_to_cpup(&fw_tx_data[i]), ioaddr + TxGfpMem + i * 4); 1052 if (enable_hw_cksum) 1053 /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */ 1054 writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl); 1055 else 1056 /* Enable the Rx and Tx units only. */ 1057 writel(TxEnable|RxEnable, ioaddr + GenCtrl); 1058 1059 if (debug > 1) 1060 printk(KERN_DEBUG "%s: Done netdev_open().\n", 1061 dev->name); 1062 1063 out_tx: 1064 release_firmware(fw_tx); 1065 out_rx: 1066 release_firmware(fw_rx); 1067 out_init: 1068 if (retval) 1069 netdev_close(dev); 1070 return retval; 1071 } 1072 1073 1074 static void check_duplex(struct net_device *dev) 1075 { 1076 struct netdev_private *np = netdev_priv(dev); 1077 u16 reg0; 1078 int silly_count = 1000; 1079 1080 mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising); 1081 mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET); 1082 udelay(500); 1083 while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET) 1084 /* do nothing */; 1085 if (!silly_count) { 1086 printk("%s: MII reset failed!\n", dev->name); 1087 return; 1088 } 1089 1090 reg0 = mdio_read(dev, np->phys[0], MII_BMCR); 1091 1092 if (!np->mii_if.force_media) { 1093 reg0 |= BMCR_ANENABLE | BMCR_ANRESTART; 1094 } else { 1095 reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART); 1096 if (np->speed100) 1097 reg0 |= BMCR_SPEED100; 1098 if (np->mii_if.full_duplex) 1099 reg0 |= BMCR_FULLDPLX; 1100 printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n", 1101 dev->name, 1102 np->speed100 ? "100" : "10", 1103 np->mii_if.full_duplex ? 
"full" : "half"); 1104 } 1105 mdio_write(dev, np->phys[0], MII_BMCR, reg0); 1106 } 1107 1108 1109 static void tx_timeout(struct net_device *dev) 1110 { 1111 struct netdev_private *np = netdev_priv(dev); 1112 void __iomem *ioaddr = np->base; 1113 int old_debug; 1114 1115 printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, " 1116 "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus)); 1117 1118 /* Perhaps we should reinitialize the hardware here. */ 1119 1120 /* 1121 * Stop and restart the interface. 1122 * Cheat and increase the debug level temporarily. 1123 */ 1124 old_debug = debug; 1125 debug = 2; 1126 netdev_close(dev); 1127 netdev_open(dev); 1128 debug = old_debug; 1129 1130 /* Trigger an immediate transmit demand. */ 1131 1132 netif_trans_update(dev); /* prevent tx timeout */ 1133 dev->stats.tx_errors++; 1134 netif_wake_queue(dev); 1135 } 1136 1137 1138 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */ 1139 static void init_ring(struct net_device *dev) 1140 { 1141 struct netdev_private *np = netdev_priv(dev); 1142 int i; 1143 1144 np->cur_rx = np->cur_tx = np->reap_tx = 0; 1145 np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0; 1146 1147 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); 1148 1149 /* Fill in the Rx buffers. Handle allocation failure gracefully. */ 1150 for (i = 0; i < RX_RING_SIZE; i++) { 1151 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz); 1152 np->rx_info[i].skb = skb; 1153 if (skb == NULL) 1154 break; 1155 np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); 1156 /* Grrr, we cannot offset to correctly align the IP header. */ 1157 np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid); 1158 } 1159 writew(i - 1, np->base + RxDescQIdx); 1160 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE); 1161 1162 /* Clear the remainder of the Rx buffer ring. 
*/ 1163 for ( ; i < RX_RING_SIZE; i++) { 1164 np->rx_ring[i].rxaddr = 0; 1165 np->rx_info[i].skb = NULL; 1166 np->rx_info[i].mapping = 0; 1167 } 1168 /* Mark the last entry as wrapping the ring. */ 1169 np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing); 1170 1171 /* Clear the completion rings. */ 1172 for (i = 0; i < DONE_Q_SIZE; i++) { 1173 np->rx_done_q[i].status = 0; 1174 np->tx_done_q[i].status = 0; 1175 } 1176 1177 for (i = 0; i < TX_RING_SIZE; i++) 1178 memset(&np->tx_info[i], 0, sizeof(np->tx_info[i])); 1179 } 1180 1181 1182 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) 1183 { 1184 struct netdev_private *np = netdev_priv(dev); 1185 unsigned int entry; 1186 u32 status; 1187 int i; 1188 1189 /* 1190 * be cautious here, wrapping the queue has weird semantics 1191 * and we may not have enough slots even when it seems we do. 1192 */ 1193 if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) { 1194 netif_stop_queue(dev); 1195 return NETDEV_TX_BUSY; 1196 } 1197 1198 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) 1199 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1200 if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK)) 1201 return NETDEV_TX_OK; 1202 } 1203 #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */ 1204 1205 entry = np->cur_tx % TX_RING_SIZE; 1206 for (i = 0; i < skb_num_frags(skb); i++) { 1207 int wrap_ring = 0; 1208 status = TxDescID; 1209 1210 if (i == 0) { 1211 np->tx_info[entry].skb = skb; 1212 status |= TxCRCEn; 1213 if (entry >= TX_RING_SIZE - skb_num_frags(skb)) { 1214 status |= TxRingWrap; 1215 wrap_ring = 1; 1216 } 1217 if (np->reap_tx) { 1218 status |= TxDescIntr; 1219 np->reap_tx = 0; 1220 } 1221 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1222 status |= TxCalTCP; 1223 dev->stats.tx_compressed++; 1224 } 1225 status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16); 1226 1227 np->tx_info[entry].mapping = 1228 pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), 
PCI_DMA_TODEVICE); 1229 } else { 1230 const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1]; 1231 status |= skb_frag_size(this_frag); 1232 np->tx_info[entry].mapping = 1233 pci_map_single(np->pci_dev, 1234 skb_frag_address(this_frag), 1235 skb_frag_size(this_frag), 1236 PCI_DMA_TODEVICE); 1237 } 1238 1239 np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping); 1240 np->tx_ring[entry].status = cpu_to_le32(status); 1241 if (debug > 3) 1242 printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n", 1243 dev->name, np->cur_tx, np->dirty_tx, 1244 entry, status); 1245 if (wrap_ring) { 1246 np->tx_info[entry].used_slots = TX_RING_SIZE - entry; 1247 np->cur_tx += np->tx_info[entry].used_slots; 1248 entry = 0; 1249 } else { 1250 np->tx_info[entry].used_slots = 1; 1251 np->cur_tx += np->tx_info[entry].used_slots; 1252 entry++; 1253 } 1254 /* scavenge the tx descriptors twice per TX_RING_SIZE */ 1255 if (np->cur_tx % (TX_RING_SIZE / 2) == 0) 1256 np->reap_tx = 1; 1257 } 1258 1259 /* Non-x86: explicitly flush descriptor cache lines here. */ 1260 /* Ensure all descriptors are written back before the transmit is 1261 initiated. - Jes */ 1262 wmb(); 1263 1264 /* Update the producer index. */ 1265 writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx); 1266 1267 /* 4 is arbitrary, but should be ok */ 1268 if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE) 1269 netif_stop_queue(dev); 1270 1271 return NETDEV_TX_OK; 1272 } 1273 1274 1275 /* The interrupt handler does all of the Rx thread work and cleans up 1276 after the Tx thread. 
*/ 1277 static irqreturn_t intr_handler(int irq, void *dev_instance) 1278 { 1279 struct net_device *dev = dev_instance; 1280 struct netdev_private *np = netdev_priv(dev); 1281 void __iomem *ioaddr = np->base; 1282 int boguscnt = max_interrupt_work; 1283 int consumer; 1284 int tx_status; 1285 int handled = 0; 1286 1287 do { 1288 u32 intr_status = readl(ioaddr + IntrClear); 1289 1290 if (debug > 4) 1291 printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n", 1292 dev->name, intr_status); 1293 1294 if (intr_status == 0 || intr_status == (u32) -1) 1295 break; 1296 1297 handled = 1; 1298 1299 if (intr_status & (IntrRxDone | IntrRxEmpty)) { 1300 u32 enable; 1301 1302 if (likely(napi_schedule_prep(&np->napi))) { 1303 __napi_schedule(&np->napi); 1304 enable = readl(ioaddr + IntrEnable); 1305 enable &= ~(IntrRxDone | IntrRxEmpty); 1306 writel(enable, ioaddr + IntrEnable); 1307 /* flush PCI posting buffers */ 1308 readl(ioaddr + IntrEnable); 1309 } else { 1310 /* Paranoia check */ 1311 enable = readl(ioaddr + IntrEnable); 1312 if (enable & (IntrRxDone | IntrRxEmpty)) { 1313 printk(KERN_INFO 1314 "%s: interrupt while in poll!\n", 1315 dev->name); 1316 enable &= ~(IntrRxDone | IntrRxEmpty); 1317 writel(enable, ioaddr + IntrEnable); 1318 } 1319 } 1320 } 1321 1322 /* Scavenge the skbuff list based on the Tx-done queue. 1323 There are redundant checks here that may be cleaned up 1324 after the driver has proven to be reliable. 
*/ 1325 consumer = readl(ioaddr + TxConsumerIdx); 1326 if (debug > 3) 1327 printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n", 1328 dev->name, consumer); 1329 1330 while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) { 1331 if (debug > 3) 1332 printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n", 1333 dev->name, np->dirty_tx, np->tx_done, tx_status); 1334 if ((tx_status & 0xe0000000) == 0xa0000000) { 1335 dev->stats.tx_packets++; 1336 } else if ((tx_status & 0xe0000000) == 0x80000000) { 1337 u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc); 1338 struct sk_buff *skb = np->tx_info[entry].skb; 1339 np->tx_info[entry].skb = NULL; 1340 pci_unmap_single(np->pci_dev, 1341 np->tx_info[entry].mapping, 1342 skb_first_frag_len(skb), 1343 PCI_DMA_TODEVICE); 1344 np->tx_info[entry].mapping = 0; 1345 np->dirty_tx += np->tx_info[entry].used_slots; 1346 entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE; 1347 { 1348 int i; 1349 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1350 pci_unmap_single(np->pci_dev, 1351 np->tx_info[entry].mapping, 1352 skb_frag_size(&skb_shinfo(skb)->frags[i]), 1353 PCI_DMA_TODEVICE); 1354 np->dirty_tx++; 1355 entry++; 1356 } 1357 } 1358 1359 dev_kfree_skb_irq(skb); 1360 } 1361 np->tx_done_q[np->tx_done].status = 0; 1362 np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE; 1363 } 1364 writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2); 1365 1366 if (netif_queue_stopped(dev) && 1367 (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) { 1368 /* The ring is no longer full, wake the queue. */ 1369 netif_wake_queue(dev); 1370 } 1371 1372 /* Stats overflow */ 1373 if (intr_status & IntrStatsMax) 1374 get_stats(dev); 1375 1376 /* Media change interrupt. */ 1377 if (intr_status & IntrLinkChange) 1378 netdev_media_change(dev); 1379 1380 /* Abnormal error summary/uncommon events handlers. 
*/ 1381 if (intr_status & IntrAbnormalSummary) 1382 netdev_error(dev, intr_status); 1383 1384 if (--boguscnt < 0) { 1385 if (debug > 1) 1386 printk(KERN_WARNING "%s: Too much work at interrupt, " 1387 "status=%#8.8x.\n", 1388 dev->name, intr_status); 1389 break; 1390 } 1391 } while (1); 1392 1393 if (debug > 4) 1394 printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n", 1395 dev->name, (int) readl(ioaddr + IntrStatus)); 1396 return IRQ_RETVAL(handled); 1397 } 1398 1399 1400 /* 1401 * This routine is logically part of the interrupt/poll handler, but separated 1402 * for clarity and better register allocation. 1403 */ 1404 static int __netdev_rx(struct net_device *dev, int *quota) 1405 { 1406 struct netdev_private *np = netdev_priv(dev); 1407 u32 desc_status; 1408 int retcode = 0; 1409 1410 /* If EOP is set on the next entry, it's a new packet. Send it up. */ 1411 while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) { 1412 struct sk_buff *skb; 1413 u16 pkt_len; 1414 int entry; 1415 rx_done_desc *desc = &np->rx_done_q[np->rx_done]; 1416 1417 if (debug > 4) 1418 printk(KERN_DEBUG " netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status); 1419 if (!(desc_status & RxOK)) { 1420 /* There was an error. */ 1421 if (debug > 2) 1422 printk(KERN_DEBUG " netdev_rx() Rx error was %#8.8x.\n", desc_status); 1423 dev->stats.rx_errors++; 1424 if (desc_status & RxFIFOErr) 1425 dev->stats.rx_fifo_errors++; 1426 goto next_rx; 1427 } 1428 1429 if (*quota <= 0) { /* out of rx quota */ 1430 retcode = 1; 1431 goto out; 1432 } 1433 (*quota)--; 1434 1435 pkt_len = desc_status; /* Implicitly Truncate */ 1436 entry = (desc_status >> 16) & 0x7ff; 1437 1438 if (debug > 4) 1439 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota); 1440 /* Check if the packet is long enough to accept without copying 1441 to a minimally-sized skbuff. 
*/ 1442 if (pkt_len < rx_copybreak && 1443 (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) { 1444 skb_reserve(skb, 2); /* 16 byte align the IP header */ 1445 pci_dma_sync_single_for_cpu(np->pci_dev, 1446 np->rx_info[entry].mapping, 1447 pkt_len, PCI_DMA_FROMDEVICE); 1448 skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len); 1449 pci_dma_sync_single_for_device(np->pci_dev, 1450 np->rx_info[entry].mapping, 1451 pkt_len, PCI_DMA_FROMDEVICE); 1452 skb_put(skb, pkt_len); 1453 } else { 1454 pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE); 1455 skb = np->rx_info[entry].skb; 1456 skb_put(skb, pkt_len); 1457 np->rx_info[entry].skb = NULL; 1458 np->rx_info[entry].mapping = 0; 1459 } 1460 #ifndef final_version /* Remove after testing. */ 1461 /* You will want this info for the initial debug. */ 1462 if (debug > 5) { 1463 printk(KERN_DEBUG " Rx data %pM %pM %2.2x%2.2x.\n", 1464 skb->data, skb->data + 6, 1465 skb->data[12], skb->data[13]); 1466 } 1467 #endif 1468 1469 skb->protocol = eth_type_trans(skb, dev); 1470 #ifdef VLAN_SUPPORT 1471 if (debug > 4) 1472 printk(KERN_DEBUG " netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2)); 1473 #endif 1474 if (le16_to_cpu(desc->status2) & 0x0100) { 1475 skb->ip_summed = CHECKSUM_UNNECESSARY; 1476 dev->stats.rx_compressed++; 1477 } 1478 /* 1479 * This feature doesn't seem to be working, at least 1480 * with the two firmware versions I have. If the GFP sees 1481 * an IP fragment, it either ignores it completely, or reports 1482 * "bad checksum" on it. 1483 * 1484 * Maybe I missed something -- corrections are welcome. 1485 * Until then, the printk stays. 
:-) -Ion 1486 */ 1487 else if (le16_to_cpu(desc->status2) & 0x0040) { 1488 skb->ip_summed = CHECKSUM_COMPLETE; 1489 skb->csum = le16_to_cpu(desc->csum); 1490 printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2)); 1491 } 1492 #ifdef VLAN_SUPPORT 1493 if (le16_to_cpu(desc->status2) & 0x0200) { 1494 u16 vlid = le16_to_cpu(desc->vlanid); 1495 1496 if (debug > 4) { 1497 printk(KERN_DEBUG " netdev_rx() vlanid = %d\n", 1498 vlid); 1499 } 1500 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlid); 1501 } 1502 #endif /* VLAN_SUPPORT */ 1503 netif_receive_skb(skb); 1504 dev->stats.rx_packets++; 1505 1506 next_rx: 1507 np->cur_rx++; 1508 desc->status = 0; 1509 np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE; 1510 } 1511 1512 if (*quota == 0) { /* out of rx quota */ 1513 retcode = 1; 1514 goto out; 1515 } 1516 writew(np->rx_done, np->base + CompletionQConsumerIdx); 1517 1518 out: 1519 refill_rx_ring(dev); 1520 if (debug > 5) 1521 printk(KERN_DEBUG " exiting netdev_rx(): %d, status of %d was %#8.8x.\n", 1522 retcode, np->rx_done, desc_status); 1523 return retcode; 1524 } 1525 1526 static int netdev_poll(struct napi_struct *napi, int budget) 1527 { 1528 struct netdev_private *np = container_of(napi, struct netdev_private, napi); 1529 struct net_device *dev = np->dev; 1530 u32 intr_status; 1531 void __iomem *ioaddr = np->base; 1532 int quota = budget; 1533 1534 do { 1535 writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear); 1536 1537 if (__netdev_rx(dev, &quota)) 1538 goto out; 1539 1540 intr_status = readl(ioaddr + IntrStatus); 1541 } while (intr_status & (IntrRxDone | IntrRxEmpty)); 1542 1543 napi_complete(napi); 1544 intr_status = readl(ioaddr + IntrEnable); 1545 intr_status |= IntrRxDone | IntrRxEmpty; 1546 writel(intr_status, ioaddr + IntrEnable); 1547 1548 out: 1549 if (debug > 5) 1550 printk(KERN_DEBUG " exiting netdev_poll(): %d.\n", 1551 budget - quota); 1552 1553 /* Restart Rx engine if stopped. 
*/ 1554 return budget - quota; 1555 } 1556 1557 static void refill_rx_ring(struct net_device *dev) 1558 { 1559 struct netdev_private *np = netdev_priv(dev); 1560 struct sk_buff *skb; 1561 int entry = -1; 1562 1563 /* Refill the Rx ring buffers. */ 1564 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) { 1565 entry = np->dirty_rx % RX_RING_SIZE; 1566 if (np->rx_info[entry].skb == NULL) { 1567 skb = netdev_alloc_skb(dev, np->rx_buf_sz); 1568 np->rx_info[entry].skb = skb; 1569 if (skb == NULL) 1570 break; /* Better luck next round. */ 1571 np->rx_info[entry].mapping = 1572 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); 1573 np->rx_ring[entry].rxaddr = 1574 cpu_to_dma(np->rx_info[entry].mapping | RxDescValid); 1575 } 1576 if (entry == RX_RING_SIZE - 1) 1577 np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing); 1578 } 1579 if (entry >= 0) 1580 writew(entry, np->base + RxDescQIdx); 1581 } 1582 1583 1584 static void netdev_media_change(struct net_device *dev) 1585 { 1586 struct netdev_private *np = netdev_priv(dev); 1587 void __iomem *ioaddr = np->base; 1588 u16 reg0, reg1, reg4, reg5; 1589 u32 new_tx_mode; 1590 u32 new_intr_timer_ctrl; 1591 1592 /* reset status first */ 1593 mdio_read(dev, np->phys[0], MII_BMCR); 1594 mdio_read(dev, np->phys[0], MII_BMSR); 1595 1596 reg0 = mdio_read(dev, np->phys[0], MII_BMCR); 1597 reg1 = mdio_read(dev, np->phys[0], MII_BMSR); 1598 1599 if (reg1 & BMSR_LSTATUS) { 1600 /* link is up */ 1601 if (reg0 & BMCR_ANENABLE) { 1602 /* autonegotiation is enabled */ 1603 reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE); 1604 reg5 = mdio_read(dev, np->phys[0], MII_LPA); 1605 if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) { 1606 np->speed100 = 1; 1607 np->mii_if.full_duplex = 1; 1608 } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) { 1609 np->speed100 = 1; 1610 np->mii_if.full_duplex = 0; 1611 } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) { 1612 np->speed100 = 0; 1613 np->mii_if.full_duplex 
= 1; 1614 } else { 1615 np->speed100 = 0; 1616 np->mii_if.full_duplex = 0; 1617 } 1618 } else { 1619 /* autonegotiation is disabled */ 1620 if (reg0 & BMCR_SPEED100) 1621 np->speed100 = 1; 1622 else 1623 np->speed100 = 0; 1624 if (reg0 & BMCR_FULLDPLX) 1625 np->mii_if.full_duplex = 1; 1626 else 1627 np->mii_if.full_duplex = 0; 1628 } 1629 netif_carrier_on(dev); 1630 printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n", 1631 dev->name, 1632 np->speed100 ? "100" : "10", 1633 np->mii_if.full_duplex ? "full" : "half"); 1634 1635 new_tx_mode = np->tx_mode & ~FullDuplex; /* duplex setting */ 1636 if (np->mii_if.full_duplex) 1637 new_tx_mode |= FullDuplex; 1638 if (np->tx_mode != new_tx_mode) { 1639 np->tx_mode = new_tx_mode; 1640 writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode); 1641 udelay(1000); 1642 writel(np->tx_mode, ioaddr + TxMode); 1643 } 1644 1645 new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X; 1646 if (np->speed100) 1647 new_intr_timer_ctrl |= Timer10X; 1648 if (np->intr_timer_ctrl != new_intr_timer_ctrl) { 1649 np->intr_timer_ctrl = new_intr_timer_ctrl; 1650 writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl); 1651 } 1652 } else { 1653 netif_carrier_off(dev); 1654 printk(KERN_DEBUG "%s: Link is down\n", dev->name); 1655 } 1656 } 1657 1658 1659 static void netdev_error(struct net_device *dev, int intr_status) 1660 { 1661 struct netdev_private *np = netdev_priv(dev); 1662 1663 /* Came close to underrunning the Tx FIFO, increase threshold. 
*/ 1664 if (intr_status & IntrTxDataLow) { 1665 if (np->tx_threshold <= PKT_BUF_SZ / 16) { 1666 writel(++np->tx_threshold, np->base + TxThreshold); 1667 printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n", 1668 dev->name, np->tx_threshold * 16); 1669 } else 1670 printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name); 1671 } 1672 if (intr_status & IntrRxGFPDead) { 1673 dev->stats.rx_fifo_errors++; 1674 dev->stats.rx_errors++; 1675 } 1676 if (intr_status & (IntrNoTxCsum | IntrDMAErr)) { 1677 dev->stats.tx_fifo_errors++; 1678 dev->stats.tx_errors++; 1679 } 1680 if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug) 1681 printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n", 1682 dev->name, intr_status); 1683 } 1684 1685 1686 static struct net_device_stats *get_stats(struct net_device *dev) 1687 { 1688 struct netdev_private *np = netdev_priv(dev); 1689 void __iomem *ioaddr = np->base; 1690 1691 /* This adapter architecture needs no SMP locks. */ 1692 dev->stats.tx_bytes = readl(ioaddr + 0x57010); 1693 dev->stats.rx_bytes = readl(ioaddr + 0x57044); 1694 dev->stats.tx_packets = readl(ioaddr + 0x57000); 1695 dev->stats.tx_aborted_errors = 1696 readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028); 1697 dev->stats.tx_window_errors = readl(ioaddr + 0x57018); 1698 dev->stats.collisions = 1699 readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008); 1700 1701 /* The chip only need report frame silently dropped. 
*/ 1702 dev->stats.rx_dropped += readw(ioaddr + RxDMAStatus); 1703 writew(0, ioaddr + RxDMAStatus); 1704 dev->stats.rx_crc_errors = readl(ioaddr + 0x5703C); 1705 dev->stats.rx_frame_errors = readl(ioaddr + 0x57040); 1706 dev->stats.rx_length_errors = readl(ioaddr + 0x57058); 1707 dev->stats.rx_missed_errors = readl(ioaddr + 0x5707C); 1708 1709 return &dev->stats; 1710 } 1711 1712 #ifdef VLAN_SUPPORT 1713 static u32 set_vlan_mode(struct netdev_private *np) 1714 { 1715 u32 ret = VlanMode; 1716 u16 vid; 1717 void __iomem *filter_addr = np->base + HashTable + 8; 1718 int vlan_count = 0; 1719 1720 for_each_set_bit(vid, np->active_vlans, VLAN_N_VID) { 1721 if (vlan_count == 32) 1722 break; 1723 writew(vid, filter_addr); 1724 filter_addr += 16; 1725 vlan_count++; 1726 } 1727 if (vlan_count == 32) { 1728 ret |= PerfectFilterVlan; 1729 while (vlan_count < 32) { 1730 writew(0, filter_addr); 1731 filter_addr += 16; 1732 vlan_count++; 1733 } 1734 } 1735 return ret; 1736 } 1737 #endif /* VLAN_SUPPORT */ 1738 1739 static void set_rx_mode(struct net_device *dev) 1740 { 1741 struct netdev_private *np = netdev_priv(dev); 1742 void __iomem *ioaddr = np->base; 1743 u32 rx_mode = MinVLANPrio; 1744 struct netdev_hw_addr *ha; 1745 int i; 1746 1747 #ifdef VLAN_SUPPORT 1748 rx_mode |= set_vlan_mode(np); 1749 #endif /* VLAN_SUPPORT */ 1750 1751 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1752 rx_mode |= AcceptAll; 1753 } else if ((netdev_mc_count(dev) > multicast_filter_limit) || 1754 (dev->flags & IFF_ALLMULTI)) { 1755 /* Too many to match, or accept all multicasts. */ 1756 rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter; 1757 } else if (netdev_mc_count(dev) <= 14) { 1758 /* Use the 16 element perfect filter, skip first two entries. 
*/ 1759 void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16; 1760 __be16 *eaddrs; 1761 netdev_for_each_mc_addr(ha, dev) { 1762 eaddrs = (__be16 *) ha->addr; 1763 writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4; 1764 writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4; 1765 writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8; 1766 } 1767 eaddrs = (__be16 *)dev->dev_addr; 1768 i = netdev_mc_count(dev) + 2; 1769 while (i++ < 16) { 1770 writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4; 1771 writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4; 1772 writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8; 1773 } 1774 rx_mode |= AcceptBroadcast|PerfectFilter; 1775 } else { 1776 /* Must use a multicast hash table. */ 1777 void __iomem *filter_addr; 1778 __be16 *eaddrs; 1779 __le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */ 1780 1781 memset(mc_filter, 0, sizeof(mc_filter)); 1782 netdev_for_each_mc_addr(ha, dev) { 1783 /* The chip uses the upper 9 CRC bits 1784 as index into the hash table */ 1785 int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23; 1786 __le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1]; 1787 1788 *fptr |= cpu_to_le32(1 << (bit_nr & 31)); 1789 } 1790 /* Clear the perfect filter list, skip first two entries. 
*/ 1791 filter_addr = ioaddr + PerfFilterTable + 2 * 16; 1792 eaddrs = (__be16 *)dev->dev_addr; 1793 for (i = 2; i < 16; i++) { 1794 writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4; 1795 writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4; 1796 writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8; 1797 } 1798 for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr+= 16, i++) 1799 writew(mc_filter[i], filter_addr); 1800 rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter; 1801 } 1802 writel(rx_mode, ioaddr + RxFilterMode); 1803 } 1804 1805 static int check_if_running(struct net_device *dev) 1806 { 1807 if (!netif_running(dev)) 1808 return -EINVAL; 1809 return 0; 1810 } 1811 1812 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 1813 { 1814 struct netdev_private *np = netdev_priv(dev); 1815 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 1816 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 1817 strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); 1818 } 1819 1820 static int get_link_ksettings(struct net_device *dev, 1821 struct ethtool_link_ksettings *cmd) 1822 { 1823 struct netdev_private *np = netdev_priv(dev); 1824 spin_lock_irq(&np->lock); 1825 mii_ethtool_get_link_ksettings(&np->mii_if, cmd); 1826 spin_unlock_irq(&np->lock); 1827 return 0; 1828 } 1829 1830 static int set_link_ksettings(struct net_device *dev, 1831 const struct ethtool_link_ksettings *cmd) 1832 { 1833 struct netdev_private *np = netdev_priv(dev); 1834 int res; 1835 spin_lock_irq(&np->lock); 1836 res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd); 1837 spin_unlock_irq(&np->lock); 1838 check_duplex(dev); 1839 return res; 1840 } 1841 1842 static int nway_reset(struct net_device *dev) 1843 { 1844 struct netdev_private *np = netdev_priv(dev); 1845 return mii_nway_restart(&np->mii_if); 1846 } 1847 1848 static u32 get_link(struct net_device *dev) 1849 { 1850 struct netdev_private *np = 
netdev_priv(dev); 1851 return mii_link_ok(&np->mii_if); 1852 } 1853 1854 static u32 get_msglevel(struct net_device *dev) 1855 { 1856 return debug; 1857 } 1858 1859 static void set_msglevel(struct net_device *dev, u32 val) 1860 { 1861 debug = val; 1862 } 1863 1864 static const struct ethtool_ops ethtool_ops = { 1865 .begin = check_if_running, 1866 .get_drvinfo = get_drvinfo, 1867 .nway_reset = nway_reset, 1868 .get_link = get_link, 1869 .get_msglevel = get_msglevel, 1870 .set_msglevel = set_msglevel, 1871 .get_link_ksettings = get_link_ksettings, 1872 .set_link_ksettings = set_link_ksettings, 1873 }; 1874 1875 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1876 { 1877 struct netdev_private *np = netdev_priv(dev); 1878 struct mii_ioctl_data *data = if_mii(rq); 1879 int rc; 1880 1881 if (!netif_running(dev)) 1882 return -EINVAL; 1883 1884 spin_lock_irq(&np->lock); 1885 rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL); 1886 spin_unlock_irq(&np->lock); 1887 1888 if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0])) 1889 check_duplex(dev); 1890 1891 return rc; 1892 } 1893 1894 static int netdev_close(struct net_device *dev) 1895 { 1896 struct netdev_private *np = netdev_priv(dev); 1897 void __iomem *ioaddr = np->base; 1898 int i; 1899 1900 netif_stop_queue(dev); 1901 1902 napi_disable(&np->napi); 1903 1904 if (debug > 1) { 1905 printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n", 1906 dev->name, (int) readl(ioaddr + IntrStatus)); 1907 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n", 1908 dev->name, np->cur_tx, np->dirty_tx, 1909 np->cur_rx, np->dirty_rx); 1910 } 1911 1912 /* Disable interrupts by clearing the interrupt mask. */ 1913 writel(0, ioaddr + IntrEnable); 1914 1915 /* Stop the chip's Tx and Rx processes. 
*/ 1916 writel(0, ioaddr + GenCtrl); 1917 readl(ioaddr + GenCtrl); 1918 1919 if (debug > 5) { 1920 printk(KERN_DEBUG" Tx ring at %#llx:\n", 1921 (long long) np->tx_ring_dma); 1922 for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++) 1923 printk(KERN_DEBUG " #%d desc. %#8.8x %#llx -> %#8.8x.\n", 1924 i, le32_to_cpu(np->tx_ring[i].status), 1925 (long long) dma_to_cpu(np->tx_ring[i].addr), 1926 le32_to_cpu(np->tx_done_q[i].status)); 1927 printk(KERN_DEBUG " Rx ring at %#llx -> %p:\n", 1928 (long long) np->rx_ring_dma, np->rx_done_q); 1929 if (np->rx_done_q) 1930 for (i = 0; i < 8 /* RX_RING_SIZE */; i++) { 1931 printk(KERN_DEBUG " #%d desc. %#llx -> %#8.8x\n", 1932 i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status)); 1933 } 1934 } 1935 1936 free_irq(np->pci_dev->irq, dev); 1937 1938 /* Free all the skbuffs in the Rx queue. */ 1939 for (i = 0; i < RX_RING_SIZE; i++) { 1940 np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */ 1941 if (np->rx_info[i].skb != NULL) { 1942 pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE); 1943 dev_kfree_skb(np->rx_info[i].skb); 1944 } 1945 np->rx_info[i].skb = NULL; 1946 np->rx_info[i].mapping = 0; 1947 } 1948 for (i = 0; i < TX_RING_SIZE; i++) { 1949 struct sk_buff *skb = np->tx_info[i].skb; 1950 if (skb == NULL) 1951 continue; 1952 pci_unmap_single(np->pci_dev, 1953 np->tx_info[i].mapping, 1954 skb_first_frag_len(skb), PCI_DMA_TODEVICE); 1955 np->tx_info[i].mapping = 0; 1956 dev_kfree_skb(skb); 1957 np->tx_info[i].skb = NULL; 1958 } 1959 1960 return 0; 1961 } 1962 1963 #ifdef CONFIG_PM 1964 static int starfire_suspend(struct pci_dev *pdev, pm_message_t state) 1965 { 1966 struct net_device *dev = pci_get_drvdata(pdev); 1967 1968 if (netif_running(dev)) { 1969 netif_device_detach(dev); 1970 netdev_close(dev); 1971 } 1972 1973 pci_save_state(pdev); 1974 pci_set_power_state(pdev, pci_choose_state(pdev,state)); 1975 1976 return 0; 1977 } 1978 1979 
static int starfire_resume(struct pci_dev *pdev) 1980 { 1981 struct net_device *dev = pci_get_drvdata(pdev); 1982 1983 pci_set_power_state(pdev, PCI_D0); 1984 pci_restore_state(pdev); 1985 1986 if (netif_running(dev)) { 1987 netdev_open(dev); 1988 netif_device_attach(dev); 1989 } 1990 1991 return 0; 1992 } 1993 #endif /* CONFIG_PM */ 1994 1995 1996 static void starfire_remove_one(struct pci_dev *pdev) 1997 { 1998 struct net_device *dev = pci_get_drvdata(pdev); 1999 struct netdev_private *np = netdev_priv(dev); 2000 2001 BUG_ON(!dev); 2002 2003 unregister_netdev(dev); 2004 2005 if (np->queue_mem) 2006 pci_free_consistent(pdev, np->queue_mem_size, np->queue_mem, np->queue_mem_dma); 2007 2008 2009 /* XXX: add wakeup code -- requires firmware for MagicPacket */ 2010 pci_set_power_state(pdev, PCI_D3hot); /* go to sleep in D3 mode */ 2011 pci_disable_device(pdev); 2012 2013 iounmap(np->base); 2014 pci_release_regions(pdev); 2015 2016 free_netdev(dev); /* Will also free np!! */ 2017 } 2018 2019 2020 static struct pci_driver starfire_driver = { 2021 .name = DRV_NAME, 2022 .probe = starfire_init_one, 2023 .remove = starfire_remove_one, 2024 #ifdef CONFIG_PM 2025 .suspend = starfire_suspend, 2026 .resume = starfire_resume, 2027 #endif /* CONFIG_PM */ 2028 .id_table = starfire_pci_tbl, 2029 }; 2030 2031 2032 static int __init starfire_init (void) 2033 { 2034 /* when a module, this is printed whether or not devices are found in probe */ 2035 #ifdef MODULE 2036 printk(version); 2037 2038 printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n"); 2039 #endif 2040 2041 BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(netdrv_addr_t)); 2042 2043 return pci_register_driver(&starfire_driver); 2044 } 2045 2046 2047 static void __exit starfire_cleanup (void) 2048 { 2049 pci_unregister_driver (&starfire_driver); 2050 } 2051 2052 2053 module_init(starfire_init); 2054 module_exit(starfire_cleanup); 2055 2056 2057 /* 2058 * Local variables: 2059 * c-basic-offset: 8 2060 * tab-width: 8 2061 * End: 
2062 */ 2063 2064 2065 2066 2067 2068 /* LDV_COMMENT_BEGIN_MAIN */ 2069 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful 2070 2071 /*###########################################################################*/ 2072 2073 /*############## Driver Environment Generator 0.2 output ####################*/ 2074 2075 /*###########################################################################*/ 2076 2077 2078 2079 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */ 2080 void ldv_check_final_state(void); 2081 2082 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */ 2083 void ldv_check_return_value(int res); 2084 2085 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */ 2086 void ldv_check_return_value_probe(int res); 2087 2088 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */ 2089 void ldv_initialize(void); 2090 2091 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */ 2092 void ldv_handler_precall(void); 2093 2094 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */ 2095 int nondet_int(void); 2096 2097 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */ 2098 int LDV_IN_INTERRUPT; 2099 2100 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. 
*/ 2101 void ldv_main0_sequence_infinite_withcheck_stateful(void) { 2102 2103 2104 2105 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */ 2106 /*============================= VARIABLE DECLARATION PART =============================*/ 2107 /** STRUCT: struct type: net_device_ops, struct name: netdev_ops **/ 2108 /* content: static int netdev_open(struct net_device *dev)*/ 2109 /* LDV_COMMENT_BEGIN_PREP */ 2110 #define DRV_NAME "starfire" 2111 #define DRV_VERSION "2.1" 2112 #define DRV_RELDATE "July 6, 2008" 2113 #define HAS_BROKEN_FIRMWARE 2114 #ifdef HAS_BROKEN_FIRMWARE 2115 #define PADDING_MASK 3 2116 #endif 2117 #define ZEROCOPY 2118 #if IS_ENABLED(CONFIG_VLAN_8021Q) 2119 #define VLAN_SUPPORT 2120 #endif 2121 #define PKT_BUF_SZ 1536 2122 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) 2123 #else 2124 #endif 2125 #ifdef __sparc__ 2126 #define DMA_BURST_SIZE 64 2127 #else 2128 #define DMA_BURST_SIZE 128 2129 #endif 2130 #define RX_RING_SIZE 256 2131 #define TX_RING_SIZE 32 2132 #define DONE_Q_SIZE 1024 2133 #define QUEUE_ALIGN 256 2134 #if RX_RING_SIZE > 256 2135 #define RX_Q_ENTRIES Rx2048QEntries 2136 #else 2137 #define RX_Q_ENTRIES Rx256QEntries 2138 #endif 2139 #define TX_TIMEOUT (2 * HZ) 2140 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2141 #define ADDR_64BITS 2142 #define netdrv_addr_t __le64 2143 #define cpu_to_dma(x) cpu_to_le64(x) 2144 #define dma_to_cpu(x) le64_to_cpu(x) 2145 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit 2146 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit 2147 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit 2148 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit 2149 #define RX_DESC_ADDR_SIZE RxDescAddr64bit 2150 #else 2151 #define netdrv_addr_t __le32 2152 #define cpu_to_dma(x) cpu_to_le32(x) 2153 #define dma_to_cpu(x) le32_to_cpu(x) 2154 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit 2155 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit 2156 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit 2157 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit 
2158 #define RX_DESC_ADDR_SIZE RxDescAddr32bit 2159 #endif 2160 #define skb_first_frag_len(skb) skb_headlen(skb) 2161 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) 2162 #define FIRMWARE_RX "adaptec/starfire_rx.bin" 2163 #define FIRMWARE_TX "adaptec/starfire_tx.bin" 2164 #ifdef VLAN_SUPPORT 2165 #define RxComplType RxComplType3 2166 #else 2167 #define RxComplType RxComplType2 2168 #endif 2169 #ifdef ADDR_64BITS 2170 #define TX_DESC_TYPE TxDescType2 2171 #else 2172 #define TX_DESC_TYPE TxDescType1 2173 #endif 2174 #define TX_DESC_SPACING TxDescSpaceUnlim 2175 #if 0 2176 #endif 2177 #define PHY_CNT 2 2178 #ifdef VLAN_SUPPORT 2179 #endif 2180 #ifdef VLAN_SUPPORT 2181 #endif 2182 #ifdef VLAN_SUPPORT 2183 #endif 2184 #ifndef MODULE 2185 #endif 2186 #ifdef ZEROCOPY 2187 #endif 2188 #ifdef VLAN_SUPPORT 2189 #endif 2190 #ifdef ADDR_64BITS 2191 #endif 2192 #if ! defined(final_version) 2193 #endif 2194 /* LDV_COMMENT_END_PREP */ 2195 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "netdev_open" */ 2196 struct net_device * var_group1; 2197 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "netdev_open" */ 2198 static int res_netdev_open_5; 2199 /* LDV_COMMENT_BEGIN_PREP */ 2200 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) 2201 #endif 2202 #ifndef final_version 2203 #endif 2204 #ifdef VLAN_SUPPORT 2205 #endif 2206 #ifdef VLAN_SUPPORT 2207 #endif 2208 #ifdef VLAN_SUPPORT 2209 #endif 2210 #ifdef VLAN_SUPPORT 2211 #endif 2212 #ifdef CONFIG_PM 2213 #endif 2214 #ifdef CONFIG_PM 2215 #endif 2216 #ifdef MODULE 2217 #endif 2218 /* LDV_COMMENT_END_PREP */ 2219 /* content: static int netdev_close(struct net_device *dev)*/ 2220 /* LDV_COMMENT_BEGIN_PREP */ 2221 #define DRV_NAME "starfire" 2222 #define DRV_VERSION "2.1" 2223 #define DRV_RELDATE "July 6, 2008" 2224 #define HAS_BROKEN_FIRMWARE 2225 #ifdef HAS_BROKEN_FIRMWARE 2226 #define PADDING_MASK 3 2227 #endif 2228 #define ZEROCOPY 2229 #if 
IS_ENABLED(CONFIG_VLAN_8021Q) 2230 #define VLAN_SUPPORT 2231 #endif 2232 #define PKT_BUF_SZ 1536 2233 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) 2234 #else 2235 #endif 2236 #ifdef __sparc__ 2237 #define DMA_BURST_SIZE 64 2238 #else 2239 #define DMA_BURST_SIZE 128 2240 #endif 2241 #define RX_RING_SIZE 256 2242 #define TX_RING_SIZE 32 2243 #define DONE_Q_SIZE 1024 2244 #define QUEUE_ALIGN 256 2245 #if RX_RING_SIZE > 256 2246 #define RX_Q_ENTRIES Rx2048QEntries 2247 #else 2248 #define RX_Q_ENTRIES Rx256QEntries 2249 #endif 2250 #define TX_TIMEOUT (2 * HZ) 2251 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2252 #define ADDR_64BITS 2253 #define netdrv_addr_t __le64 2254 #define cpu_to_dma(x) cpu_to_le64(x) 2255 #define dma_to_cpu(x) le64_to_cpu(x) 2256 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit 2257 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit 2258 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit 2259 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit 2260 #define RX_DESC_ADDR_SIZE RxDescAddr64bit 2261 #else 2262 #define netdrv_addr_t __le32 2263 #define cpu_to_dma(x) cpu_to_le32(x) 2264 #define dma_to_cpu(x) le32_to_cpu(x) 2265 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit 2266 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit 2267 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit 2268 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit 2269 #define RX_DESC_ADDR_SIZE RxDescAddr32bit 2270 #endif 2271 #define skb_first_frag_len(skb) skb_headlen(skb) 2272 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) 2273 #define FIRMWARE_RX "adaptec/starfire_rx.bin" 2274 #define FIRMWARE_TX "adaptec/starfire_tx.bin" 2275 #ifdef VLAN_SUPPORT 2276 #define RxComplType RxComplType3 2277 #else 2278 #define RxComplType RxComplType2 2279 #endif 2280 #ifdef ADDR_64BITS 2281 #define TX_DESC_TYPE TxDescType2 2282 #else 2283 #define TX_DESC_TYPE TxDescType1 2284 #endif 2285 #define TX_DESC_SPACING TxDescSpaceUnlim 2286 #if 0 2287 #endif 2288 #define PHY_CNT 2 2289 #ifdef VLAN_SUPPORT 2290 
#endif 2291 #ifdef VLAN_SUPPORT 2292 #endif 2293 #ifdef VLAN_SUPPORT 2294 #endif 2295 #ifndef MODULE 2296 #endif 2297 #ifdef ZEROCOPY 2298 #endif 2299 #ifdef VLAN_SUPPORT 2300 #endif 2301 #ifdef ADDR_64BITS 2302 #endif 2303 #if ! defined(final_version) 2304 #endif 2305 #ifdef VLAN_SUPPORT 2306 #endif 2307 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) 2308 #endif 2309 #ifndef final_version 2310 #endif 2311 #ifdef VLAN_SUPPORT 2312 #endif 2313 #ifdef VLAN_SUPPORT 2314 #endif 2315 #ifdef VLAN_SUPPORT 2316 #endif 2317 #ifdef VLAN_SUPPORT 2318 #endif 2319 /* LDV_COMMENT_END_PREP */ 2320 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "netdev_close" */ 2321 static int res_netdev_close_28; 2322 /* LDV_COMMENT_BEGIN_PREP */ 2323 #ifdef CONFIG_PM 2324 #endif 2325 #ifdef CONFIG_PM 2326 #endif 2327 #ifdef MODULE 2328 #endif 2329 /* LDV_COMMENT_END_PREP */ 2330 /* content: static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)*/ 2331 /* LDV_COMMENT_BEGIN_PREP */ 2332 #define DRV_NAME "starfire" 2333 #define DRV_VERSION "2.1" 2334 #define DRV_RELDATE "July 6, 2008" 2335 #define HAS_BROKEN_FIRMWARE 2336 #ifdef HAS_BROKEN_FIRMWARE 2337 #define PADDING_MASK 3 2338 #endif 2339 #define ZEROCOPY 2340 #if IS_ENABLED(CONFIG_VLAN_8021Q) 2341 #define VLAN_SUPPORT 2342 #endif 2343 #define PKT_BUF_SZ 1536 2344 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) 2345 #else 2346 #endif 2347 #ifdef __sparc__ 2348 #define DMA_BURST_SIZE 64 2349 #else 2350 #define DMA_BURST_SIZE 128 2351 #endif 2352 #define RX_RING_SIZE 256 2353 #define TX_RING_SIZE 32 2354 #define DONE_Q_SIZE 1024 2355 #define QUEUE_ALIGN 256 2356 #if RX_RING_SIZE > 256 2357 #define RX_Q_ENTRIES Rx2048QEntries 2358 #else 2359 #define RX_Q_ENTRIES Rx256QEntries 2360 #endif 2361 #define TX_TIMEOUT (2 * HZ) 2362 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2363 #define ADDR_64BITS 2364 #define netdrv_addr_t __le64 2365 #define cpu_to_dma(x) cpu_to_le64(x) 
2366 #define dma_to_cpu(x) le64_to_cpu(x) 2367 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit 2368 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit 2369 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit 2370 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit 2371 #define RX_DESC_ADDR_SIZE RxDescAddr64bit 2372 #else 2373 #define netdrv_addr_t __le32 2374 #define cpu_to_dma(x) cpu_to_le32(x) 2375 #define dma_to_cpu(x) le32_to_cpu(x) 2376 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit 2377 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit 2378 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit 2379 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit 2380 #define RX_DESC_ADDR_SIZE RxDescAddr32bit 2381 #endif 2382 #define skb_first_frag_len(skb) skb_headlen(skb) 2383 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) 2384 #define FIRMWARE_RX "adaptec/starfire_rx.bin" 2385 #define FIRMWARE_TX "adaptec/starfire_tx.bin" 2386 #ifdef VLAN_SUPPORT 2387 #define RxComplType RxComplType3 2388 #else 2389 #define RxComplType RxComplType2 2390 #endif 2391 #ifdef ADDR_64BITS 2392 #define TX_DESC_TYPE TxDescType2 2393 #else 2394 #define TX_DESC_TYPE TxDescType1 2395 #endif 2396 #define TX_DESC_SPACING TxDescSpaceUnlim 2397 #if 0 2398 #endif 2399 #define PHY_CNT 2 2400 #ifdef VLAN_SUPPORT 2401 #endif 2402 #ifdef VLAN_SUPPORT 2403 #endif 2404 #ifdef VLAN_SUPPORT 2405 #endif 2406 #ifndef MODULE 2407 #endif 2408 #ifdef ZEROCOPY 2409 #endif 2410 #ifdef VLAN_SUPPORT 2411 #endif 2412 #ifdef ADDR_64BITS 2413 #endif 2414 #if ! 
defined(final_version) 2415 #endif 2416 #ifdef VLAN_SUPPORT 2417 #endif 2418 /* LDV_COMMENT_END_PREP */ 2419 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "start_tx" */ 2420 struct sk_buff * var_group2; 2421 /* LDV_COMMENT_BEGIN_PREP */ 2422 #ifndef final_version 2423 #endif 2424 #ifdef VLAN_SUPPORT 2425 #endif 2426 #ifdef VLAN_SUPPORT 2427 #endif 2428 #ifdef VLAN_SUPPORT 2429 #endif 2430 #ifdef VLAN_SUPPORT 2431 #endif 2432 #ifdef CONFIG_PM 2433 #endif 2434 #ifdef CONFIG_PM 2435 #endif 2436 #ifdef MODULE 2437 #endif 2438 /* LDV_COMMENT_END_PREP */ 2439 /* content: static void tx_timeout(struct net_device *dev)*/ 2440 /* LDV_COMMENT_BEGIN_PREP */ 2441 #define DRV_NAME "starfire" 2442 #define DRV_VERSION "2.1" 2443 #define DRV_RELDATE "July 6, 2008" 2444 #define HAS_BROKEN_FIRMWARE 2445 #ifdef HAS_BROKEN_FIRMWARE 2446 #define PADDING_MASK 3 2447 #endif 2448 #define ZEROCOPY 2449 #if IS_ENABLED(CONFIG_VLAN_8021Q) 2450 #define VLAN_SUPPORT 2451 #endif 2452 #define PKT_BUF_SZ 1536 2453 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) 2454 #else 2455 #endif 2456 #ifdef __sparc__ 2457 #define DMA_BURST_SIZE 64 2458 #else 2459 #define DMA_BURST_SIZE 128 2460 #endif 2461 #define RX_RING_SIZE 256 2462 #define TX_RING_SIZE 32 2463 #define DONE_Q_SIZE 1024 2464 #define QUEUE_ALIGN 256 2465 #if RX_RING_SIZE > 256 2466 #define RX_Q_ENTRIES Rx2048QEntries 2467 #else 2468 #define RX_Q_ENTRIES Rx256QEntries 2469 #endif 2470 #define TX_TIMEOUT (2 * HZ) 2471 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2472 #define ADDR_64BITS 2473 #define netdrv_addr_t __le64 2474 #define cpu_to_dma(x) cpu_to_le64(x) 2475 #define dma_to_cpu(x) le64_to_cpu(x) 2476 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit 2477 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit 2478 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit 2479 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit 2480 #define RX_DESC_ADDR_SIZE RxDescAddr64bit 2481 #else 2482 #define netdrv_addr_t __le32 2483 #define cpu_to_dma(x) 
cpu_to_le32(x) 2484 #define dma_to_cpu(x) le32_to_cpu(x) 2485 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit 2486 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit 2487 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit 2488 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit 2489 #define RX_DESC_ADDR_SIZE RxDescAddr32bit 2490 #endif 2491 #define skb_first_frag_len(skb) skb_headlen(skb) 2492 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) 2493 #define FIRMWARE_RX "adaptec/starfire_rx.bin" 2494 #define FIRMWARE_TX "adaptec/starfire_tx.bin" 2495 #ifdef VLAN_SUPPORT 2496 #define RxComplType RxComplType3 2497 #else 2498 #define RxComplType RxComplType2 2499 #endif 2500 #ifdef ADDR_64BITS 2501 #define TX_DESC_TYPE TxDescType2 2502 #else 2503 #define TX_DESC_TYPE TxDescType1 2504 #endif 2505 #define TX_DESC_SPACING TxDescSpaceUnlim 2506 #if 0 2507 #endif 2508 #define PHY_CNT 2 2509 #ifdef VLAN_SUPPORT 2510 #endif 2511 #ifdef VLAN_SUPPORT 2512 #endif 2513 #ifdef VLAN_SUPPORT 2514 #endif 2515 #ifndef MODULE 2516 #endif 2517 #ifdef ZEROCOPY 2518 #endif 2519 #ifdef VLAN_SUPPORT 2520 #endif 2521 #ifdef ADDR_64BITS 2522 #endif 2523 #if ! 
defined(final_version) 2524 #endif 2525 #ifdef VLAN_SUPPORT 2526 #endif 2527 /* LDV_COMMENT_END_PREP */ 2528 /* LDV_COMMENT_BEGIN_PREP */ 2529 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) 2530 #endif 2531 #ifndef final_version 2532 #endif 2533 #ifdef VLAN_SUPPORT 2534 #endif 2535 #ifdef VLAN_SUPPORT 2536 #endif 2537 #ifdef VLAN_SUPPORT 2538 #endif 2539 #ifdef VLAN_SUPPORT 2540 #endif 2541 #ifdef CONFIG_PM 2542 #endif 2543 #ifdef CONFIG_PM 2544 #endif 2545 #ifdef MODULE 2546 #endif 2547 /* LDV_COMMENT_END_PREP */ 2548 /* content: static struct net_device_stats *get_stats(struct net_device *dev)*/ 2549 /* LDV_COMMENT_BEGIN_PREP */ 2550 #define DRV_NAME "starfire" 2551 #define DRV_VERSION "2.1" 2552 #define DRV_RELDATE "July 6, 2008" 2553 #define HAS_BROKEN_FIRMWARE 2554 #ifdef HAS_BROKEN_FIRMWARE 2555 #define PADDING_MASK 3 2556 #endif 2557 #define ZEROCOPY 2558 #if IS_ENABLED(CONFIG_VLAN_8021Q) 2559 #define VLAN_SUPPORT 2560 #endif 2561 #define PKT_BUF_SZ 1536 2562 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) 2563 #else 2564 #endif 2565 #ifdef __sparc__ 2566 #define DMA_BURST_SIZE 64 2567 #else 2568 #define DMA_BURST_SIZE 128 2569 #endif 2570 #define RX_RING_SIZE 256 2571 #define TX_RING_SIZE 32 2572 #define DONE_Q_SIZE 1024 2573 #define QUEUE_ALIGN 256 2574 #if RX_RING_SIZE > 256 2575 #define RX_Q_ENTRIES Rx2048QEntries 2576 #else 2577 #define RX_Q_ENTRIES Rx256QEntries 2578 #endif 2579 #define TX_TIMEOUT (2 * HZ) 2580 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2581 #define ADDR_64BITS 2582 #define netdrv_addr_t __le64 2583 #define cpu_to_dma(x) cpu_to_le64(x) 2584 #define dma_to_cpu(x) le64_to_cpu(x) 2585 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit 2586 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit 2587 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit 2588 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit 2589 #define RX_DESC_ADDR_SIZE RxDescAddr64bit 2590 #else 2591 #define netdrv_addr_t __le32 2592 #define cpu_to_dma(x) cpu_to_le32(x) 2593 
#define dma_to_cpu(x) le32_to_cpu(x) 2594 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit 2595 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit 2596 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit 2597 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit 2598 #define RX_DESC_ADDR_SIZE RxDescAddr32bit 2599 #endif 2600 #define skb_first_frag_len(skb) skb_headlen(skb) 2601 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) 2602 #define FIRMWARE_RX "adaptec/starfire_rx.bin" 2603 #define FIRMWARE_TX "adaptec/starfire_tx.bin" 2604 #ifdef VLAN_SUPPORT 2605 #define RxComplType RxComplType3 2606 #else 2607 #define RxComplType RxComplType2 2608 #endif 2609 #ifdef ADDR_64BITS 2610 #define TX_DESC_TYPE TxDescType2 2611 #else 2612 #define TX_DESC_TYPE TxDescType1 2613 #endif 2614 #define TX_DESC_SPACING TxDescSpaceUnlim 2615 #if 0 2616 #endif 2617 #define PHY_CNT 2 2618 #ifdef VLAN_SUPPORT 2619 #endif 2620 #ifdef VLAN_SUPPORT 2621 #endif 2622 #ifdef VLAN_SUPPORT 2623 #endif 2624 #ifndef MODULE 2625 #endif 2626 #ifdef ZEROCOPY 2627 #endif 2628 #ifdef VLAN_SUPPORT 2629 #endif 2630 #ifdef ADDR_64BITS 2631 #endif 2632 #if ! 
defined(final_version) 2633 #endif 2634 #ifdef VLAN_SUPPORT 2635 #endif 2636 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) 2637 #endif 2638 #ifndef final_version 2639 #endif 2640 #ifdef VLAN_SUPPORT 2641 #endif 2642 #ifdef VLAN_SUPPORT 2643 #endif 2644 /* LDV_COMMENT_END_PREP */ 2645 /* LDV_COMMENT_BEGIN_PREP */ 2646 #ifdef VLAN_SUPPORT 2647 #endif 2648 #ifdef VLAN_SUPPORT 2649 #endif 2650 #ifdef CONFIG_PM 2651 #endif 2652 #ifdef CONFIG_PM 2653 #endif 2654 #ifdef MODULE 2655 #endif 2656 /* LDV_COMMENT_END_PREP */ 2657 /* content: static void set_rx_mode(struct net_device *dev)*/ 2658 /* LDV_COMMENT_BEGIN_PREP */ 2659 #define DRV_NAME "starfire" 2660 #define DRV_VERSION "2.1" 2661 #define DRV_RELDATE "July 6, 2008" 2662 #define HAS_BROKEN_FIRMWARE 2663 #ifdef HAS_BROKEN_FIRMWARE 2664 #define PADDING_MASK 3 2665 #endif 2666 #define ZEROCOPY 2667 #if IS_ENABLED(CONFIG_VLAN_8021Q) 2668 #define VLAN_SUPPORT 2669 #endif 2670 #define PKT_BUF_SZ 1536 2671 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) 2672 #else 2673 #endif 2674 #ifdef __sparc__ 2675 #define DMA_BURST_SIZE 64 2676 #else 2677 #define DMA_BURST_SIZE 128 2678 #endif 2679 #define RX_RING_SIZE 256 2680 #define TX_RING_SIZE 32 2681 #define DONE_Q_SIZE 1024 2682 #define QUEUE_ALIGN 256 2683 #if RX_RING_SIZE > 256 2684 #define RX_Q_ENTRIES Rx2048QEntries 2685 #else 2686 #define RX_Q_ENTRIES Rx256QEntries 2687 #endif 2688 #define TX_TIMEOUT (2 * HZ) 2689 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2690 #define ADDR_64BITS 2691 #define netdrv_addr_t __le64 2692 #define cpu_to_dma(x) cpu_to_le64(x) 2693 #define dma_to_cpu(x) le64_to_cpu(x) 2694 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit 2695 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit 2696 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit 2697 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit 2698 #define RX_DESC_ADDR_SIZE RxDescAddr64bit 2699 #else 2700 #define netdrv_addr_t __le32 2701 #define cpu_to_dma(x) cpu_to_le32(x) 2702 #define dma_to_cpu(x) 
le32_to_cpu(x) 2703 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit 2704 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit 2705 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit 2706 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit 2707 #define RX_DESC_ADDR_SIZE RxDescAddr32bit 2708 #endif 2709 #define skb_first_frag_len(skb) skb_headlen(skb) 2710 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) 2711 #define FIRMWARE_RX "adaptec/starfire_rx.bin" 2712 #define FIRMWARE_TX "adaptec/starfire_tx.bin" 2713 #ifdef VLAN_SUPPORT 2714 #define RxComplType RxComplType3 2715 #else 2716 #define RxComplType RxComplType2 2717 #endif 2718 #ifdef ADDR_64BITS 2719 #define TX_DESC_TYPE TxDescType2 2720 #else 2721 #define TX_DESC_TYPE TxDescType1 2722 #endif 2723 #define TX_DESC_SPACING TxDescSpaceUnlim 2724 #if 0 2725 #endif 2726 #define PHY_CNT 2 2727 #ifdef VLAN_SUPPORT 2728 #endif 2729 #ifdef VLAN_SUPPORT 2730 #endif 2731 #ifdef VLAN_SUPPORT 2732 #endif 2733 #ifndef MODULE 2734 #endif 2735 #ifdef ZEROCOPY 2736 #endif 2737 #ifdef VLAN_SUPPORT 2738 #endif 2739 #ifdef ADDR_64BITS 2740 #endif 2741 #if ! 
defined(final_version) 2742 #endif 2743 #ifdef VLAN_SUPPORT 2744 #endif 2745 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) 2746 #endif 2747 #ifndef final_version 2748 #endif 2749 #ifdef VLAN_SUPPORT 2750 #endif 2751 #ifdef VLAN_SUPPORT 2752 #endif 2753 #ifdef VLAN_SUPPORT 2754 #endif 2755 /* LDV_COMMENT_END_PREP */ 2756 /* LDV_COMMENT_BEGIN_PREP */ 2757 #ifdef CONFIG_PM 2758 #endif 2759 #ifdef CONFIG_PM 2760 #endif 2761 #ifdef MODULE 2762 #endif 2763 /* LDV_COMMENT_END_PREP */ 2764 /* content: static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)*/ 2765 /* LDV_COMMENT_BEGIN_PREP */ 2766 #define DRV_NAME "starfire" 2767 #define DRV_VERSION "2.1" 2768 #define DRV_RELDATE "July 6, 2008" 2769 #define HAS_BROKEN_FIRMWARE 2770 #ifdef HAS_BROKEN_FIRMWARE 2771 #define PADDING_MASK 3 2772 #endif 2773 #define ZEROCOPY 2774 #if IS_ENABLED(CONFIG_VLAN_8021Q) 2775 #define VLAN_SUPPORT 2776 #endif 2777 #define PKT_BUF_SZ 1536 2778 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) 2779 #else 2780 #endif 2781 #ifdef __sparc__ 2782 #define DMA_BURST_SIZE 64 2783 #else 2784 #define DMA_BURST_SIZE 128 2785 #endif 2786 #define RX_RING_SIZE 256 2787 #define TX_RING_SIZE 32 2788 #define DONE_Q_SIZE 1024 2789 #define QUEUE_ALIGN 256 2790 #if RX_RING_SIZE > 256 2791 #define RX_Q_ENTRIES Rx2048QEntries 2792 #else 2793 #define RX_Q_ENTRIES Rx256QEntries 2794 #endif 2795 #define TX_TIMEOUT (2 * HZ) 2796 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2797 #define ADDR_64BITS 2798 #define netdrv_addr_t __le64 2799 #define cpu_to_dma(x) cpu_to_le64(x) 2800 #define dma_to_cpu(x) le64_to_cpu(x) 2801 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit 2802 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit 2803 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit 2804 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit 2805 #define RX_DESC_ADDR_SIZE RxDescAddr64bit 2806 #else 2807 #define netdrv_addr_t __le32 2808 #define cpu_to_dma(x) cpu_to_le32(x) 2809 #define dma_to_cpu(x) 
le32_to_cpu(x) 2810 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit 2811 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit 2812 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit 2813 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit 2814 #define RX_DESC_ADDR_SIZE RxDescAddr32bit 2815 #endif 2816 #define skb_first_frag_len(skb) skb_headlen(skb) 2817 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) 2818 #define FIRMWARE_RX "adaptec/starfire_rx.bin" 2819 #define FIRMWARE_TX "adaptec/starfire_tx.bin" 2820 #ifdef VLAN_SUPPORT 2821 #define RxComplType RxComplType3 2822 #else 2823 #define RxComplType RxComplType2 2824 #endif 2825 #ifdef ADDR_64BITS 2826 #define TX_DESC_TYPE TxDescType2 2827 #else 2828 #define TX_DESC_TYPE TxDescType1 2829 #endif 2830 #define TX_DESC_SPACING TxDescSpaceUnlim 2831 #if 0 2832 #endif 2833 #define PHY_CNT 2 2834 #ifdef VLAN_SUPPORT 2835 #endif 2836 #ifdef VLAN_SUPPORT 2837 #endif 2838 #ifdef VLAN_SUPPORT 2839 #endif 2840 #ifndef MODULE 2841 #endif 2842 #ifdef ZEROCOPY 2843 #endif 2844 #ifdef VLAN_SUPPORT 2845 #endif 2846 #ifdef ADDR_64BITS 2847 #endif 2848 #if ! 
defined(final_version) 2849 #endif 2850 #ifdef VLAN_SUPPORT 2851 #endif 2852 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) 2853 #endif 2854 #ifndef final_version 2855 #endif 2856 #ifdef VLAN_SUPPORT 2857 #endif 2858 #ifdef VLAN_SUPPORT 2859 #endif 2860 #ifdef VLAN_SUPPORT 2861 #endif 2862 #ifdef VLAN_SUPPORT 2863 #endif 2864 /* LDV_COMMENT_END_PREP */ 2865 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "netdev_ioctl" */ 2866 struct ifreq * var_group3; 2867 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "netdev_ioctl" */ 2868 int var_netdev_ioctl_27_p2; 2869 /* LDV_COMMENT_BEGIN_PREP */ 2870 #ifdef CONFIG_PM 2871 #endif 2872 #ifdef CONFIG_PM 2873 #endif 2874 #ifdef MODULE 2875 #endif 2876 /* LDV_COMMENT_END_PREP */ 2877 /* content: static int netdev_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)*/ 2878 /* LDV_COMMENT_BEGIN_PREP */ 2879 #define DRV_NAME "starfire" 2880 #define DRV_VERSION "2.1" 2881 #define DRV_RELDATE "July 6, 2008" 2882 #define HAS_BROKEN_FIRMWARE 2883 #ifdef HAS_BROKEN_FIRMWARE 2884 #define PADDING_MASK 3 2885 #endif 2886 #define ZEROCOPY 2887 #if IS_ENABLED(CONFIG_VLAN_8021Q) 2888 #define VLAN_SUPPORT 2889 #endif 2890 #define PKT_BUF_SZ 1536 2891 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) 2892 #else 2893 #endif 2894 #ifdef __sparc__ 2895 #define DMA_BURST_SIZE 64 2896 #else 2897 #define DMA_BURST_SIZE 128 2898 #endif 2899 #define RX_RING_SIZE 256 2900 #define TX_RING_SIZE 32 2901 #define DONE_Q_SIZE 1024 2902 #define QUEUE_ALIGN 256 2903 #if RX_RING_SIZE > 256 2904 #define RX_Q_ENTRIES Rx2048QEntries 2905 #else 2906 #define RX_Q_ENTRIES Rx256QEntries 2907 #endif 2908 #define TX_TIMEOUT (2 * HZ) 2909 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2910 #define ADDR_64BITS 2911 #define netdrv_addr_t __le64 2912 #define cpu_to_dma(x) cpu_to_le64(x) 2913 #define dma_to_cpu(x) le64_to_cpu(x) 2914 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit 2915 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit 
2916 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit 2917 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit 2918 #define RX_DESC_ADDR_SIZE RxDescAddr64bit 2919 #else 2920 #define netdrv_addr_t __le32 2921 #define cpu_to_dma(x) cpu_to_le32(x) 2922 #define dma_to_cpu(x) le32_to_cpu(x) 2923 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit 2924 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit 2925 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit 2926 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit 2927 #define RX_DESC_ADDR_SIZE RxDescAddr32bit 2928 #endif 2929 #define skb_first_frag_len(skb) skb_headlen(skb) 2930 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) 2931 #define FIRMWARE_RX "adaptec/starfire_rx.bin" 2932 #define FIRMWARE_TX "adaptec/starfire_tx.bin" 2933 #ifdef VLAN_SUPPORT 2934 #define RxComplType RxComplType3 2935 #else 2936 #define RxComplType RxComplType2 2937 #endif 2938 #ifdef ADDR_64BITS 2939 #define TX_DESC_TYPE TxDescType2 2940 #else 2941 #define TX_DESC_TYPE TxDescType1 2942 #endif 2943 #define TX_DESC_SPACING TxDescSpaceUnlim 2944 #if 0 2945 #endif 2946 #define PHY_CNT 2 2947 #ifdef VLAN_SUPPORT 2948 #endif 2949 #ifdef VLAN_SUPPORT 2950 /* LDV_COMMENT_END_PREP */ 2951 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "netdev_vlan_rx_add_vid" */ 2952 __be16 var_netdev_vlan_rx_add_vid_0_p1; 2953 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "netdev_vlan_rx_add_vid" */ 2954 u16 var_netdev_vlan_rx_add_vid_0_p2; 2955 /* LDV_COMMENT_BEGIN_PREP */ 2956 #endif 2957 #ifdef VLAN_SUPPORT 2958 #endif 2959 #ifndef MODULE 2960 #endif 2961 #ifdef ZEROCOPY 2962 #endif 2963 #ifdef VLAN_SUPPORT 2964 #endif 2965 #ifdef ADDR_64BITS 2966 #endif 2967 #if ! 
defined(final_version) 2968 #endif 2969 #ifdef VLAN_SUPPORT 2970 #endif 2971 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) 2972 #endif 2973 #ifndef final_version 2974 #endif 2975 #ifdef VLAN_SUPPORT 2976 #endif 2977 #ifdef VLAN_SUPPORT 2978 #endif 2979 #ifdef VLAN_SUPPORT 2980 #endif 2981 #ifdef VLAN_SUPPORT 2982 #endif 2983 #ifdef CONFIG_PM 2984 #endif 2985 #ifdef CONFIG_PM 2986 #endif 2987 #ifdef MODULE 2988 #endif 2989 /* LDV_COMMENT_END_PREP */ 2990 /* content: static int netdev_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)*/ 2991 /* LDV_COMMENT_BEGIN_PREP */ 2992 #define DRV_NAME "starfire" 2993 #define DRV_VERSION "2.1" 2994 #define DRV_RELDATE "July 6, 2008" 2995 #define HAS_BROKEN_FIRMWARE 2996 #ifdef HAS_BROKEN_FIRMWARE 2997 #define PADDING_MASK 3 2998 #endif 2999 #define ZEROCOPY 3000 #if IS_ENABLED(CONFIG_VLAN_8021Q) 3001 #define VLAN_SUPPORT 3002 #endif 3003 #define PKT_BUF_SZ 1536 3004 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) 3005 #else 3006 #endif 3007 #ifdef __sparc__ 3008 #define DMA_BURST_SIZE 64 3009 #else 3010 #define DMA_BURST_SIZE 128 3011 #endif 3012 #define RX_RING_SIZE 256 3013 #define TX_RING_SIZE 32 3014 #define DONE_Q_SIZE 1024 3015 #define QUEUE_ALIGN 256 3016 #if RX_RING_SIZE > 256 3017 #define RX_Q_ENTRIES Rx2048QEntries 3018 #else 3019 #define RX_Q_ENTRIES Rx256QEntries 3020 #endif 3021 #define TX_TIMEOUT (2 * HZ) 3022 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3023 #define ADDR_64BITS 3024 #define netdrv_addr_t __le64 3025 #define cpu_to_dma(x) cpu_to_le64(x) 3026 #define dma_to_cpu(x) le64_to_cpu(x) 3027 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit 3028 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit 3029 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit 3030 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit 3031 #define RX_DESC_ADDR_SIZE RxDescAddr64bit 3032 #else 3033 #define netdrv_addr_t __le32 3034 #define cpu_to_dma(x) cpu_to_le32(x) 3035 #define dma_to_cpu(x) le32_to_cpu(x) 3036 #define 
RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit 3037 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit 3038 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit 3039 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit 3040 #define RX_DESC_ADDR_SIZE RxDescAddr32bit 3041 #endif 3042 #define skb_first_frag_len(skb) skb_headlen(skb) 3043 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) 3044 #define FIRMWARE_RX "adaptec/starfire_rx.bin" 3045 #define FIRMWARE_TX "adaptec/starfire_tx.bin" 3046 #ifdef VLAN_SUPPORT 3047 #define RxComplType RxComplType3 3048 #else 3049 #define RxComplType RxComplType2 3050 #endif 3051 #ifdef ADDR_64BITS 3052 #define TX_DESC_TYPE TxDescType2 3053 #else 3054 #define TX_DESC_TYPE TxDescType1 3055 #endif 3056 #define TX_DESC_SPACING TxDescSpaceUnlim 3057 #if 0 3058 #endif 3059 #define PHY_CNT 2 3060 #ifdef VLAN_SUPPORT 3061 #endif 3062 #ifdef VLAN_SUPPORT 3063 /* LDV_COMMENT_END_PREP */ 3064 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "netdev_vlan_rx_kill_vid" */ 3065 __be16 var_netdev_vlan_rx_kill_vid_1_p1; 3066 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "netdev_vlan_rx_kill_vid" */ 3067 u16 var_netdev_vlan_rx_kill_vid_1_p2; 3068 /* LDV_COMMENT_BEGIN_PREP */ 3069 #endif 3070 #ifdef VLAN_SUPPORT 3071 #endif 3072 #ifndef MODULE 3073 #endif 3074 #ifdef ZEROCOPY 3075 #endif 3076 #ifdef VLAN_SUPPORT 3077 #endif 3078 #ifdef ADDR_64BITS 3079 #endif 3080 #if ! 
defined(final_version) 3081 #endif 3082 #ifdef VLAN_SUPPORT 3083 #endif 3084 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) 3085 #endif 3086 #ifndef final_version 3087 #endif 3088 #ifdef VLAN_SUPPORT 3089 #endif 3090 #ifdef VLAN_SUPPORT 3091 #endif 3092 #ifdef VLAN_SUPPORT 3093 #endif 3094 #ifdef VLAN_SUPPORT 3095 #endif 3096 #ifdef CONFIG_PM 3097 #endif 3098 #ifdef CONFIG_PM 3099 #endif 3100 #ifdef MODULE 3101 #endif 3102 /* LDV_COMMENT_END_PREP */ 3103 3104 /** STRUCT: struct type: ethtool_ops, struct name: ethtool_ops **/ 3105 /* content: static int check_if_running(struct net_device *dev)*/ 3106 /* LDV_COMMENT_BEGIN_PREP */ 3107 #define DRV_NAME "starfire" 3108 #define DRV_VERSION "2.1" 3109 #define DRV_RELDATE "July 6, 2008" 3110 #define HAS_BROKEN_FIRMWARE 3111 #ifdef HAS_BROKEN_FIRMWARE 3112 #define PADDING_MASK 3 3113 #endif 3114 #define ZEROCOPY 3115 #if IS_ENABLED(CONFIG_VLAN_8021Q) 3116 #define VLAN_SUPPORT 3117 #endif 3118 #define PKT_BUF_SZ 1536 3119 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) 3120 #else 3121 #endif 3122 #ifdef __sparc__ 3123 #define DMA_BURST_SIZE 64 3124 #else 3125 #define DMA_BURST_SIZE 128 3126 #endif 3127 #define RX_RING_SIZE 256 3128 #define TX_RING_SIZE 32 3129 #define DONE_Q_SIZE 1024 3130 #define QUEUE_ALIGN 256 3131 #if RX_RING_SIZE > 256 3132 #define RX_Q_ENTRIES Rx2048QEntries 3133 #else 3134 #define RX_Q_ENTRIES Rx256QEntries 3135 #endif 3136 #define TX_TIMEOUT (2 * HZ) 3137 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3138 #define ADDR_64BITS 3139 #define netdrv_addr_t __le64 3140 #define cpu_to_dma(x) cpu_to_le64(x) 3141 #define dma_to_cpu(x) le64_to_cpu(x) 3142 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit 3143 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit 3144 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit 3145 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit 3146 #define RX_DESC_ADDR_SIZE RxDescAddr64bit 3147 #else 3148 #define netdrv_addr_t __le32 3149 #define cpu_to_dma(x) cpu_to_le32(x) 3150 #define 
dma_to_cpu(x) le32_to_cpu(x) 3151 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit 3152 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit 3153 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit 3154 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit 3155 #define RX_DESC_ADDR_SIZE RxDescAddr32bit 3156 #endif 3157 #define skb_first_frag_len(skb) skb_headlen(skb) 3158 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) 3159 #define FIRMWARE_RX "adaptec/starfire_rx.bin" 3160 #define FIRMWARE_TX "adaptec/starfire_tx.bin" 3161 #ifdef VLAN_SUPPORT 3162 #define RxComplType RxComplType3 3163 #else 3164 #define RxComplType RxComplType2 3165 #endif 3166 #ifdef ADDR_64BITS 3167 #define TX_DESC_TYPE TxDescType2 3168 #else 3169 #define TX_DESC_TYPE TxDescType1 3170 #endif 3171 #define TX_DESC_SPACING TxDescSpaceUnlim 3172 #if 0 3173 #endif 3174 #define PHY_CNT 2 3175 #ifdef VLAN_SUPPORT 3176 #endif 3177 #ifdef VLAN_SUPPORT 3178 #endif 3179 #ifdef VLAN_SUPPORT 3180 #endif 3181 #ifndef MODULE 3182 #endif 3183 #ifdef ZEROCOPY 3184 #endif 3185 #ifdef VLAN_SUPPORT 3186 #endif 3187 #ifdef ADDR_64BITS 3188 #endif 3189 #if ! 
defined(final_version) 3190 #endif 3191 #ifdef VLAN_SUPPORT 3192 #endif 3193 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) 3194 #endif 3195 #ifndef final_version 3196 #endif 3197 #ifdef VLAN_SUPPORT 3198 #endif 3199 #ifdef VLAN_SUPPORT 3200 #endif 3201 #ifdef VLAN_SUPPORT 3202 #endif 3203 #ifdef VLAN_SUPPORT 3204 #endif 3205 /* LDV_COMMENT_END_PREP */ 3206 /* LDV_COMMENT_BEGIN_PREP */ 3207 #ifdef CONFIG_PM 3208 #endif 3209 #ifdef CONFIG_PM 3210 #endif 3211 #ifdef MODULE 3212 #endif 3213 /* LDV_COMMENT_END_PREP */ 3214 /* content: static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)*/ 3215 /* LDV_COMMENT_BEGIN_PREP */ 3216 #define DRV_NAME "starfire" 3217 #define DRV_VERSION "2.1" 3218 #define DRV_RELDATE "July 6, 2008" 3219 #define HAS_BROKEN_FIRMWARE 3220 #ifdef HAS_BROKEN_FIRMWARE 3221 #define PADDING_MASK 3 3222 #endif 3223 #define ZEROCOPY 3224 #if IS_ENABLED(CONFIG_VLAN_8021Q) 3225 #define VLAN_SUPPORT 3226 #endif 3227 #define PKT_BUF_SZ 1536 3228 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) 3229 #else 3230 #endif 3231 #ifdef __sparc__ 3232 #define DMA_BURST_SIZE 64 3233 #else 3234 #define DMA_BURST_SIZE 128 3235 #endif 3236 #define RX_RING_SIZE 256 3237 #define TX_RING_SIZE 32 3238 #define DONE_Q_SIZE 1024 3239 #define QUEUE_ALIGN 256 3240 #if RX_RING_SIZE > 256 3241 #define RX_Q_ENTRIES Rx2048QEntries 3242 #else 3243 #define RX_Q_ENTRIES Rx256QEntries 3244 #endif 3245 #define TX_TIMEOUT (2 * HZ) 3246 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3247 #define ADDR_64BITS 3248 #define netdrv_addr_t __le64 3249 #define cpu_to_dma(x) cpu_to_le64(x) 3250 #define dma_to_cpu(x) le64_to_cpu(x) 3251 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit 3252 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit 3253 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit 3254 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit 3255 #define RX_DESC_ADDR_SIZE RxDescAddr64bit 3256 #else 3257 #define netdrv_addr_t __le32 3258 #define cpu_to_dma(x) 
cpu_to_le32(x) 3259 #define dma_to_cpu(x) le32_to_cpu(x) 3260 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit 3261 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit 3262 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit 3263 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit 3264 #define RX_DESC_ADDR_SIZE RxDescAddr32bit 3265 #endif 3266 #define skb_first_frag_len(skb) skb_headlen(skb) 3267 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) 3268 #define FIRMWARE_RX "adaptec/starfire_rx.bin" 3269 #define FIRMWARE_TX "adaptec/starfire_tx.bin" 3270 #ifdef VLAN_SUPPORT 3271 #define RxComplType RxComplType3 3272 #else 3273 #define RxComplType RxComplType2 3274 #endif 3275 #ifdef ADDR_64BITS 3276 #define TX_DESC_TYPE TxDescType2 3277 #else 3278 #define TX_DESC_TYPE TxDescType1 3279 #endif 3280 #define TX_DESC_SPACING TxDescSpaceUnlim 3281 #if 0 3282 #endif 3283 #define PHY_CNT 2 3284 #ifdef VLAN_SUPPORT 3285 #endif 3286 #ifdef VLAN_SUPPORT 3287 #endif 3288 #ifdef VLAN_SUPPORT 3289 #endif 3290 #ifndef MODULE 3291 #endif 3292 #ifdef ZEROCOPY 3293 #endif 3294 #ifdef VLAN_SUPPORT 3295 #endif 3296 #ifdef ADDR_64BITS 3297 #endif 3298 #if ! 
defined(final_version) 3299 #endif 3300 #ifdef VLAN_SUPPORT 3301 #endif 3302 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) 3303 #endif 3304 #ifndef final_version 3305 #endif 3306 #ifdef VLAN_SUPPORT 3307 #endif 3308 #ifdef VLAN_SUPPORT 3309 #endif 3310 #ifdef VLAN_SUPPORT 3311 #endif 3312 #ifdef VLAN_SUPPORT 3313 #endif 3314 /* LDV_COMMENT_END_PREP */ 3315 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "get_drvinfo" */ 3316 struct ethtool_drvinfo * var_group4; 3317 /* LDV_COMMENT_BEGIN_PREP */ 3318 #ifdef CONFIG_PM 3319 #endif 3320 #ifdef CONFIG_PM 3321 #endif 3322 #ifdef MODULE 3323 #endif 3324 /* LDV_COMMENT_END_PREP */ 3325 /* content: static int nway_reset(struct net_device *dev)*/ 3326 /* LDV_COMMENT_BEGIN_PREP */ 3327 #define DRV_NAME "starfire" 3328 #define DRV_VERSION "2.1" 3329 #define DRV_RELDATE "July 6, 2008" 3330 #define HAS_BROKEN_FIRMWARE 3331 #ifdef HAS_BROKEN_FIRMWARE 3332 #define PADDING_MASK 3 3333 #endif 3334 #define ZEROCOPY 3335 #if IS_ENABLED(CONFIG_VLAN_8021Q) 3336 #define VLAN_SUPPORT 3337 #endif 3338 #define PKT_BUF_SZ 1536 3339 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) 3340 #else 3341 #endif 3342 #ifdef __sparc__ 3343 #define DMA_BURST_SIZE 64 3344 #else 3345 #define DMA_BURST_SIZE 128 3346 #endif 3347 #define RX_RING_SIZE 256 3348 #define TX_RING_SIZE 32 3349 #define DONE_Q_SIZE 1024 3350 #define QUEUE_ALIGN 256 3351 #if RX_RING_SIZE > 256 3352 #define RX_Q_ENTRIES Rx2048QEntries 3353 #else 3354 #define RX_Q_ENTRIES Rx256QEntries 3355 #endif 3356 #define TX_TIMEOUT (2 * HZ) 3357 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3358 #define ADDR_64BITS 3359 #define netdrv_addr_t __le64 3360 #define cpu_to_dma(x) cpu_to_le64(x) 3361 #define dma_to_cpu(x) le64_to_cpu(x) 3362 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit 3363 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit 3364 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit 3365 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit 3366 #define RX_DESC_ADDR_SIZE 
RxDescAddr64bit 3367 #else 3368 #define netdrv_addr_t __le32 3369 #define cpu_to_dma(x) cpu_to_le32(x) 3370 #define dma_to_cpu(x) le32_to_cpu(x) 3371 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit 3372 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit 3373 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit 3374 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit 3375 #define RX_DESC_ADDR_SIZE RxDescAddr32bit 3376 #endif 3377 #define skb_first_frag_len(skb) skb_headlen(skb) 3378 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) 3379 #define FIRMWARE_RX "adaptec/starfire_rx.bin" 3380 #define FIRMWARE_TX "adaptec/starfire_tx.bin" 3381 #ifdef VLAN_SUPPORT 3382 #define RxComplType RxComplType3 3383 #else 3384 #define RxComplType RxComplType2 3385 #endif 3386 #ifdef ADDR_64BITS 3387 #define TX_DESC_TYPE TxDescType2 3388 #else 3389 #define TX_DESC_TYPE TxDescType1 3390 #endif 3391 #define TX_DESC_SPACING TxDescSpaceUnlim 3392 #if 0 3393 #endif 3394 #define PHY_CNT 2 3395 #ifdef VLAN_SUPPORT 3396 #endif 3397 #ifdef VLAN_SUPPORT 3398 #endif 3399 #ifdef VLAN_SUPPORT 3400 #endif 3401 #ifndef MODULE 3402 #endif 3403 #ifdef ZEROCOPY 3404 #endif 3405 #ifdef VLAN_SUPPORT 3406 #endif 3407 #ifdef ADDR_64BITS 3408 #endif 3409 #if ! 
defined(final_version) 3410 #endif 3411 #ifdef VLAN_SUPPORT 3412 #endif 3413 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) 3414 #endif 3415 #ifndef final_version 3416 #endif 3417 #ifdef VLAN_SUPPORT 3418 #endif 3419 #ifdef VLAN_SUPPORT 3420 #endif 3421 #ifdef VLAN_SUPPORT 3422 #endif 3423 #ifdef VLAN_SUPPORT 3424 #endif 3425 /* LDV_COMMENT_END_PREP */ 3426 /* LDV_COMMENT_BEGIN_PREP */ 3427 #ifdef CONFIG_PM 3428 #endif 3429 #ifdef CONFIG_PM 3430 #endif 3431 #ifdef MODULE 3432 #endif 3433 /* LDV_COMMENT_END_PREP */ 3434 /* content: static u32 get_link(struct net_device *dev)*/ 3435 /* LDV_COMMENT_BEGIN_PREP */ 3436 #define DRV_NAME "starfire" 3437 #define DRV_VERSION "2.1" 3438 #define DRV_RELDATE "July 6, 2008" 3439 #define HAS_BROKEN_FIRMWARE 3440 #ifdef HAS_BROKEN_FIRMWARE 3441 #define PADDING_MASK 3 3442 #endif 3443 #define ZEROCOPY 3444 #if IS_ENABLED(CONFIG_VLAN_8021Q) 3445 #define VLAN_SUPPORT 3446 #endif 3447 #define PKT_BUF_SZ 1536 3448 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) 3449 #else 3450 #endif 3451 #ifdef __sparc__ 3452 #define DMA_BURST_SIZE 64 3453 #else 3454 #define DMA_BURST_SIZE 128 3455 #endif 3456 #define RX_RING_SIZE 256 3457 #define TX_RING_SIZE 32 3458 #define DONE_Q_SIZE 1024 3459 #define QUEUE_ALIGN 256 3460 #if RX_RING_SIZE > 256 3461 #define RX_Q_ENTRIES Rx2048QEntries 3462 #else 3463 #define RX_Q_ENTRIES Rx256QEntries 3464 #endif 3465 #define TX_TIMEOUT (2 * HZ) 3466 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3467 #define ADDR_64BITS 3468 #define netdrv_addr_t __le64 3469 #define cpu_to_dma(x) cpu_to_le64(x) 3470 #define dma_to_cpu(x) le64_to_cpu(x) 3471 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit 3472 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit 3473 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit 3474 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit 3475 #define RX_DESC_ADDR_SIZE RxDescAddr64bit 3476 #else 3477 #define netdrv_addr_t __le32 3478 #define cpu_to_dma(x) cpu_to_le32(x) 3479 #define dma_to_cpu(x) 
le32_to_cpu(x) 3480 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit 3481 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit 3482 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit 3483 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit 3484 #define RX_DESC_ADDR_SIZE RxDescAddr32bit 3485 #endif 3486 #define skb_first_frag_len(skb) skb_headlen(skb) 3487 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) 3488 #define FIRMWARE_RX "adaptec/starfire_rx.bin" 3489 #define FIRMWARE_TX "adaptec/starfire_tx.bin" 3490 #ifdef VLAN_SUPPORT 3491 #define RxComplType RxComplType3 3492 #else 3493 #define RxComplType RxComplType2 3494 #endif 3495 #ifdef ADDR_64BITS 3496 #define TX_DESC_TYPE TxDescType2 3497 #else 3498 #define TX_DESC_TYPE TxDescType1 3499 #endif 3500 #define TX_DESC_SPACING TxDescSpaceUnlim 3501 #if 0 3502 #endif 3503 #define PHY_CNT 2 3504 #ifdef VLAN_SUPPORT 3505 #endif 3506 #ifdef VLAN_SUPPORT 3507 #endif 3508 #ifdef VLAN_SUPPORT 3509 #endif 3510 #ifndef MODULE 3511 #endif 3512 #ifdef ZEROCOPY 3513 #endif 3514 #ifdef VLAN_SUPPORT 3515 #endif 3516 #ifdef ADDR_64BITS 3517 #endif 3518 #if ! 
defined(final_version) 3519 #endif 3520 #ifdef VLAN_SUPPORT 3521 #endif 3522 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) 3523 #endif 3524 #ifndef final_version 3525 #endif 3526 #ifdef VLAN_SUPPORT 3527 #endif 3528 #ifdef VLAN_SUPPORT 3529 #endif 3530 #ifdef VLAN_SUPPORT 3531 #endif 3532 #ifdef VLAN_SUPPORT 3533 #endif 3534 /* LDV_COMMENT_END_PREP */ 3535 /* LDV_COMMENT_BEGIN_PREP */ 3536 #ifdef CONFIG_PM 3537 #endif 3538 #ifdef CONFIG_PM 3539 #endif 3540 #ifdef MODULE 3541 #endif 3542 /* LDV_COMMENT_END_PREP */ 3543 /* content: static u32 get_msglevel(struct net_device *dev)*/ 3544 /* LDV_COMMENT_BEGIN_PREP */ 3545 #define DRV_NAME "starfire" 3546 #define DRV_VERSION "2.1" 3547 #define DRV_RELDATE "July 6, 2008" 3548 #define HAS_BROKEN_FIRMWARE 3549 #ifdef HAS_BROKEN_FIRMWARE 3550 #define PADDING_MASK 3 3551 #endif 3552 #define ZEROCOPY 3553 #if IS_ENABLED(CONFIG_VLAN_8021Q) 3554 #define VLAN_SUPPORT 3555 #endif 3556 #define PKT_BUF_SZ 1536 3557 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) 3558 #else 3559 #endif 3560 #ifdef __sparc__ 3561 #define DMA_BURST_SIZE 64 3562 #else 3563 #define DMA_BURST_SIZE 128 3564 #endif 3565 #define RX_RING_SIZE 256 3566 #define TX_RING_SIZE 32 3567 #define DONE_Q_SIZE 1024 3568 #define QUEUE_ALIGN 256 3569 #if RX_RING_SIZE > 256 3570 #define RX_Q_ENTRIES Rx2048QEntries 3571 #else 3572 #define RX_Q_ENTRIES Rx256QEntries 3573 #endif 3574 #define TX_TIMEOUT (2 * HZ) 3575 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3576 #define ADDR_64BITS 3577 #define netdrv_addr_t __le64 3578 #define cpu_to_dma(x) cpu_to_le64(x) 3579 #define dma_to_cpu(x) le64_to_cpu(x) 3580 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit 3581 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit 3582 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit 3583 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit 3584 #define RX_DESC_ADDR_SIZE RxDescAddr64bit 3585 #else 3586 #define netdrv_addr_t __le32 3587 #define cpu_to_dma(x) cpu_to_le32(x) 3588 #define dma_to_cpu(x) 
le32_to_cpu(x) 3589 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit 3590 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit 3591 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit 3592 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit 3593 #define RX_DESC_ADDR_SIZE RxDescAddr32bit 3594 #endif 3595 #define skb_first_frag_len(skb) skb_headlen(skb) 3596 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) 3597 #define FIRMWARE_RX "adaptec/starfire_rx.bin" 3598 #define FIRMWARE_TX "adaptec/starfire_tx.bin" 3599 #ifdef VLAN_SUPPORT 3600 #define RxComplType RxComplType3 3601 #else 3602 #define RxComplType RxComplType2 3603 #endif 3604 #ifdef ADDR_64BITS 3605 #define TX_DESC_TYPE TxDescType2 3606 #else 3607 #define TX_DESC_TYPE TxDescType1 3608 #endif 3609 #define TX_DESC_SPACING TxDescSpaceUnlim 3610 #if 0 3611 #endif 3612 #define PHY_CNT 2 3613 #ifdef VLAN_SUPPORT 3614 #endif 3615 #ifdef VLAN_SUPPORT 3616 #endif 3617 #ifdef VLAN_SUPPORT 3618 #endif 3619 #ifndef MODULE 3620 #endif 3621 #ifdef ZEROCOPY 3622 #endif 3623 #ifdef VLAN_SUPPORT 3624 #endif 3625 #ifdef ADDR_64BITS 3626 #endif 3627 #if ! 
defined(final_version) 3628 #endif 3629 #ifdef VLAN_SUPPORT 3630 #endif 3631 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) 3632 #endif 3633 #ifndef final_version 3634 #endif 3635 #ifdef VLAN_SUPPORT 3636 #endif 3637 #ifdef VLAN_SUPPORT 3638 #endif 3639 #ifdef VLAN_SUPPORT 3640 #endif 3641 #ifdef VLAN_SUPPORT 3642 #endif 3643 /* LDV_COMMENT_END_PREP */ 3644 /* LDV_COMMENT_BEGIN_PREP */ 3645 #ifdef CONFIG_PM 3646 #endif 3647 #ifdef CONFIG_PM 3648 #endif 3649 #ifdef MODULE 3650 #endif 3651 /* LDV_COMMENT_END_PREP */ 3652 /* content: static void set_msglevel(struct net_device *dev, u32 val)*/ 3653 /* LDV_COMMENT_BEGIN_PREP */ 3654 #define DRV_NAME "starfire" 3655 #define DRV_VERSION "2.1" 3656 #define DRV_RELDATE "July 6, 2008" 3657 #define HAS_BROKEN_FIRMWARE 3658 #ifdef HAS_BROKEN_FIRMWARE 3659 #define PADDING_MASK 3 3660 #endif 3661 #define ZEROCOPY 3662 #if IS_ENABLED(CONFIG_VLAN_8021Q) 3663 #define VLAN_SUPPORT 3664 #endif 3665 #define PKT_BUF_SZ 1536 3666 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) 3667 #else 3668 #endif 3669 #ifdef __sparc__ 3670 #define DMA_BURST_SIZE 64 3671 #else 3672 #define DMA_BURST_SIZE 128 3673 #endif 3674 #define RX_RING_SIZE 256 3675 #define TX_RING_SIZE 32 3676 #define DONE_Q_SIZE 1024 3677 #define QUEUE_ALIGN 256 3678 #if RX_RING_SIZE > 256 3679 #define RX_Q_ENTRIES Rx2048QEntries 3680 #else 3681 #define RX_Q_ENTRIES Rx256QEntries 3682 #endif 3683 #define TX_TIMEOUT (2 * HZ) 3684 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3685 #define ADDR_64BITS 3686 #define netdrv_addr_t __le64 3687 #define cpu_to_dma(x) cpu_to_le64(x) 3688 #define dma_to_cpu(x) le64_to_cpu(x) 3689 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit 3690 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit 3691 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit 3692 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit 3693 #define RX_DESC_ADDR_SIZE RxDescAddr64bit 3694 #else 3695 #define netdrv_addr_t __le32 3696 #define cpu_to_dma(x) cpu_to_le32(x) 3697 #define 
dma_to_cpu(x) le32_to_cpu(x) 3698 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit 3699 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit 3700 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit 3701 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit 3702 #define RX_DESC_ADDR_SIZE RxDescAddr32bit 3703 #endif 3704 #define skb_first_frag_len(skb) skb_headlen(skb) 3705 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) 3706 #define FIRMWARE_RX "adaptec/starfire_rx.bin" 3707 #define FIRMWARE_TX "adaptec/starfire_tx.bin" 3708 #ifdef VLAN_SUPPORT 3709 #define RxComplType RxComplType3 3710 #else 3711 #define RxComplType RxComplType2 3712 #endif 3713 #ifdef ADDR_64BITS 3714 #define TX_DESC_TYPE TxDescType2 3715 #else 3716 #define TX_DESC_TYPE TxDescType1 3717 #endif 3718 #define TX_DESC_SPACING TxDescSpaceUnlim 3719 #if 0 3720 #endif 3721 #define PHY_CNT 2 3722 #ifdef VLAN_SUPPORT 3723 #endif 3724 #ifdef VLAN_SUPPORT 3725 #endif 3726 #ifdef VLAN_SUPPORT 3727 #endif 3728 #ifndef MODULE 3729 #endif 3730 #ifdef ZEROCOPY 3731 #endif 3732 #ifdef VLAN_SUPPORT 3733 #endif 3734 #ifdef ADDR_64BITS 3735 #endif 3736 #if ! 
defined(final_version) 3737 #endif 3738 #ifdef VLAN_SUPPORT 3739 #endif 3740 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) 3741 #endif 3742 #ifndef final_version 3743 #endif 3744 #ifdef VLAN_SUPPORT 3745 #endif 3746 #ifdef VLAN_SUPPORT 3747 #endif 3748 #ifdef VLAN_SUPPORT 3749 #endif 3750 #ifdef VLAN_SUPPORT 3751 #endif 3752 /* LDV_COMMENT_END_PREP */ 3753 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "set_msglevel" */ 3754 u32 var_set_msglevel_26_p1; 3755 /* LDV_COMMENT_BEGIN_PREP */ 3756 #ifdef CONFIG_PM 3757 #endif 3758 #ifdef CONFIG_PM 3759 #endif 3760 #ifdef MODULE 3761 #endif 3762 /* LDV_COMMENT_END_PREP */ 3763 /* content: static int get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd)*/ 3764 /* LDV_COMMENT_BEGIN_PREP */ 3765 #define DRV_NAME "starfire" 3766 #define DRV_VERSION "2.1" 3767 #define DRV_RELDATE "July 6, 2008" 3768 #define HAS_BROKEN_FIRMWARE 3769 #ifdef HAS_BROKEN_FIRMWARE 3770 #define PADDING_MASK 3 3771 #endif 3772 #define ZEROCOPY 3773 #if IS_ENABLED(CONFIG_VLAN_8021Q) 3774 #define VLAN_SUPPORT 3775 #endif 3776 #define PKT_BUF_SZ 1536 3777 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) 3778 #else 3779 #endif 3780 #ifdef __sparc__ 3781 #define DMA_BURST_SIZE 64 3782 #else 3783 #define DMA_BURST_SIZE 128 3784 #endif 3785 #define RX_RING_SIZE 256 3786 #define TX_RING_SIZE 32 3787 #define DONE_Q_SIZE 1024 3788 #define QUEUE_ALIGN 256 3789 #if RX_RING_SIZE > 256 3790 #define RX_Q_ENTRIES Rx2048QEntries 3791 #else 3792 #define RX_Q_ENTRIES Rx256QEntries 3793 #endif 3794 #define TX_TIMEOUT (2 * HZ) 3795 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3796 #define ADDR_64BITS 3797 #define netdrv_addr_t __le64 3798 #define cpu_to_dma(x) cpu_to_le64(x) 3799 #define dma_to_cpu(x) le64_to_cpu(x) 3800 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit 3801 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit 3802 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit 3803 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit 
3804 #define RX_DESC_ADDR_SIZE RxDescAddr64bit 3805 #else 3806 #define netdrv_addr_t __le32 3807 #define cpu_to_dma(x) cpu_to_le32(x) 3808 #define dma_to_cpu(x) le32_to_cpu(x) 3809 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit 3810 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit 3811 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit 3812 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit 3813 #define RX_DESC_ADDR_SIZE RxDescAddr32bit 3814 #endif 3815 #define skb_first_frag_len(skb) skb_headlen(skb) 3816 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) 3817 #define FIRMWARE_RX "adaptec/starfire_rx.bin" 3818 #define FIRMWARE_TX "adaptec/starfire_tx.bin" 3819 #ifdef VLAN_SUPPORT 3820 #define RxComplType RxComplType3 3821 #else 3822 #define RxComplType RxComplType2 3823 #endif 3824 #ifdef ADDR_64BITS 3825 #define TX_DESC_TYPE TxDescType2 3826 #else 3827 #define TX_DESC_TYPE TxDescType1 3828 #endif 3829 #define TX_DESC_SPACING TxDescSpaceUnlim 3830 #if 0 3831 #endif 3832 #define PHY_CNT 2 3833 #ifdef VLAN_SUPPORT 3834 #endif 3835 #ifdef VLAN_SUPPORT 3836 #endif 3837 #ifdef VLAN_SUPPORT 3838 #endif 3839 #ifndef MODULE 3840 #endif 3841 #ifdef ZEROCOPY 3842 #endif 3843 #ifdef VLAN_SUPPORT 3844 #endif 3845 #ifdef ADDR_64BITS 3846 #endif 3847 #if ! 
defined(final_version) 3848 #endif 3849 #ifdef VLAN_SUPPORT 3850 #endif 3851 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) 3852 #endif 3853 #ifndef final_version 3854 #endif 3855 #ifdef VLAN_SUPPORT 3856 #endif 3857 #ifdef VLAN_SUPPORT 3858 #endif 3859 #ifdef VLAN_SUPPORT 3860 #endif 3861 #ifdef VLAN_SUPPORT 3862 #endif 3863 /* LDV_COMMENT_END_PREP */ 3864 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "get_link_ksettings" */ 3865 struct ethtool_link_ksettings * var_group5; 3866 /* LDV_COMMENT_BEGIN_PREP */ 3867 #ifdef CONFIG_PM 3868 #endif 3869 #ifdef CONFIG_PM 3870 #endif 3871 #ifdef MODULE 3872 #endif 3873 /* LDV_COMMENT_END_PREP */ 3874 /* content: static int set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd)*/ 3875 /* LDV_COMMENT_BEGIN_PREP */ 3876 #define DRV_NAME "starfire" 3877 #define DRV_VERSION "2.1" 3878 #define DRV_RELDATE "July 6, 2008" 3879 #define HAS_BROKEN_FIRMWARE 3880 #ifdef HAS_BROKEN_FIRMWARE 3881 #define PADDING_MASK 3 3882 #endif 3883 #define ZEROCOPY 3884 #if IS_ENABLED(CONFIG_VLAN_8021Q) 3885 #define VLAN_SUPPORT 3886 #endif 3887 #define PKT_BUF_SZ 1536 3888 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) 3889 #else 3890 #endif 3891 #ifdef __sparc__ 3892 #define DMA_BURST_SIZE 64 3893 #else 3894 #define DMA_BURST_SIZE 128 3895 #endif 3896 #define RX_RING_SIZE 256 3897 #define TX_RING_SIZE 32 3898 #define DONE_Q_SIZE 1024 3899 #define QUEUE_ALIGN 256 3900 #if RX_RING_SIZE > 256 3901 #define RX_Q_ENTRIES Rx2048QEntries 3902 #else 3903 #define RX_Q_ENTRIES Rx256QEntries 3904 #endif 3905 #define TX_TIMEOUT (2 * HZ) 3906 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3907 #define ADDR_64BITS 3908 #define netdrv_addr_t __le64 3909 #define cpu_to_dma(x) cpu_to_le64(x) 3910 #define dma_to_cpu(x) le64_to_cpu(x) 3911 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit 3912 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit 3913 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit 3914 #define 
TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit 3915 #define RX_DESC_ADDR_SIZE RxDescAddr64bit 3916 #else 3917 #define netdrv_addr_t __le32 3918 #define cpu_to_dma(x) cpu_to_le32(x) 3919 #define dma_to_cpu(x) le32_to_cpu(x) 3920 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit 3921 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit 3922 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit 3923 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit 3924 #define RX_DESC_ADDR_SIZE RxDescAddr32bit 3925 #endif 3926 #define skb_first_frag_len(skb) skb_headlen(skb) 3927 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) 3928 #define FIRMWARE_RX "adaptec/starfire_rx.bin" 3929 #define FIRMWARE_TX "adaptec/starfire_tx.bin" 3930 #ifdef VLAN_SUPPORT 3931 #define RxComplType RxComplType3 3932 #else 3933 #define RxComplType RxComplType2 3934 #endif 3935 #ifdef ADDR_64BITS 3936 #define TX_DESC_TYPE TxDescType2 3937 #else 3938 #define TX_DESC_TYPE TxDescType1 3939 #endif 3940 #define TX_DESC_SPACING TxDescSpaceUnlim 3941 #if 0 3942 #endif 3943 #define PHY_CNT 2 3944 #ifdef VLAN_SUPPORT 3945 #endif 3946 #ifdef VLAN_SUPPORT 3947 #endif 3948 #ifdef VLAN_SUPPORT 3949 #endif 3950 #ifndef MODULE 3951 #endif 3952 #ifdef ZEROCOPY 3953 #endif 3954 #ifdef VLAN_SUPPORT 3955 #endif 3956 #ifdef ADDR_64BITS 3957 #endif 3958 #if ! 
defined(final_version) 3959 #endif 3960 #ifdef VLAN_SUPPORT 3961 #endif 3962 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) 3963 #endif 3964 #ifndef final_version 3965 #endif 3966 #ifdef VLAN_SUPPORT 3967 #endif 3968 #ifdef VLAN_SUPPORT 3969 #endif 3970 #ifdef VLAN_SUPPORT 3971 #endif 3972 #ifdef VLAN_SUPPORT 3973 #endif 3974 /* LDV_COMMENT_END_PREP */ 3975 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "set_link_ksettings" */ 3976 const struct ethtool_link_ksettings * var_set_link_ksettings_22_p1; 3977 /* LDV_COMMENT_BEGIN_PREP */ 3978 #ifdef CONFIG_PM 3979 #endif 3980 #ifdef CONFIG_PM 3981 #endif 3982 #ifdef MODULE 3983 #endif 3984 /* LDV_COMMENT_END_PREP */ 3985 3986 /** STRUCT: struct type: pci_driver, struct name: starfire_driver **/ 3987 /* content: static int starfire_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)*/ 3988 /* LDV_COMMENT_BEGIN_PREP */ 3989 #define DRV_NAME "starfire" 3990 #define DRV_VERSION "2.1" 3991 #define DRV_RELDATE "July 6, 2008" 3992 #define HAS_BROKEN_FIRMWARE 3993 #ifdef HAS_BROKEN_FIRMWARE 3994 #define PADDING_MASK 3 3995 #endif 3996 #define ZEROCOPY 3997 #if IS_ENABLED(CONFIG_VLAN_8021Q) 3998 #define VLAN_SUPPORT 3999 #endif 4000 #define PKT_BUF_SZ 1536 4001 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) 4002 #else 4003 #endif 4004 #ifdef __sparc__ 4005 #define DMA_BURST_SIZE 64 4006 #else 4007 #define DMA_BURST_SIZE 128 4008 #endif 4009 #define RX_RING_SIZE 256 4010 #define TX_RING_SIZE 32 4011 #define DONE_Q_SIZE 1024 4012 #define QUEUE_ALIGN 256 4013 #if RX_RING_SIZE > 256 4014 #define RX_Q_ENTRIES Rx2048QEntries 4015 #else 4016 #define RX_Q_ENTRIES Rx256QEntries 4017 #endif 4018 #define TX_TIMEOUT (2 * HZ) 4019 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4020 #define ADDR_64BITS 4021 #define netdrv_addr_t __le64 4022 #define cpu_to_dma(x) cpu_to_le64(x) 4023 #define dma_to_cpu(x) le64_to_cpu(x) 4024 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit 4025 #define TX_DESC_Q_ADDR_SIZE 
TxDescQAddr64bit 4026 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit 4027 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit 4028 #define RX_DESC_ADDR_SIZE RxDescAddr64bit 4029 #else 4030 #define netdrv_addr_t __le32 4031 #define cpu_to_dma(x) cpu_to_le32(x) 4032 #define dma_to_cpu(x) le32_to_cpu(x) 4033 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit 4034 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit 4035 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit 4036 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit 4037 #define RX_DESC_ADDR_SIZE RxDescAddr32bit 4038 #endif 4039 #define skb_first_frag_len(skb) skb_headlen(skb) 4040 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) 4041 #define FIRMWARE_RX "adaptec/starfire_rx.bin" 4042 #define FIRMWARE_TX "adaptec/starfire_tx.bin" 4043 #ifdef VLAN_SUPPORT 4044 #define RxComplType RxComplType3 4045 #else 4046 #define RxComplType RxComplType2 4047 #endif 4048 #ifdef ADDR_64BITS 4049 #define TX_DESC_TYPE TxDescType2 4050 #else 4051 #define TX_DESC_TYPE TxDescType1 4052 #endif 4053 #define TX_DESC_SPACING TxDescSpaceUnlim 4054 #if 0 4055 #endif 4056 #define PHY_CNT 2 4057 #ifdef VLAN_SUPPORT 4058 #endif 4059 #ifdef VLAN_SUPPORT 4060 #endif 4061 #ifdef VLAN_SUPPORT 4062 #endif 4063 /* LDV_COMMENT_END_PREP */ 4064 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "starfire_init_one" */ 4065 struct pci_dev * var_group6; 4066 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "starfire_init_one" */ 4067 const struct pci_device_id * var_starfire_init_one_2_p1; 4068 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "starfire_init_one" */ 4069 static int res_starfire_init_one_2; 4070 /* LDV_COMMENT_BEGIN_PREP */ 4071 #ifdef VLAN_SUPPORT 4072 #endif 4073 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) 4074 #endif 4075 #ifndef final_version 4076 #endif 4077 #ifdef VLAN_SUPPORT 4078 #endif 4079 #ifdef VLAN_SUPPORT 4080 #endif 4081 #ifdef VLAN_SUPPORT 4082 #endif 4083 #ifdef 
VLAN_SUPPORT 4084 #endif 4085 #ifdef CONFIG_PM 4086 #endif 4087 #ifdef CONFIG_PM 4088 #endif 4089 #ifdef MODULE 4090 #endif 4091 /* LDV_COMMENT_END_PREP */ 4092 /* content: static void starfire_remove_one(struct pci_dev *pdev)*/ 4093 /* LDV_COMMENT_BEGIN_PREP */ 4094 #define DRV_NAME "starfire" 4095 #define DRV_VERSION "2.1" 4096 #define DRV_RELDATE "July 6, 2008" 4097 #define HAS_BROKEN_FIRMWARE 4098 #ifdef HAS_BROKEN_FIRMWARE 4099 #define PADDING_MASK 3 4100 #endif 4101 #define ZEROCOPY 4102 #if IS_ENABLED(CONFIG_VLAN_8021Q) 4103 #define VLAN_SUPPORT 4104 #endif 4105 #define PKT_BUF_SZ 1536 4106 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) 4107 #else 4108 #endif 4109 #ifdef __sparc__ 4110 #define DMA_BURST_SIZE 64 4111 #else 4112 #define DMA_BURST_SIZE 128 4113 #endif 4114 #define RX_RING_SIZE 256 4115 #define TX_RING_SIZE 32 4116 #define DONE_Q_SIZE 1024 4117 #define QUEUE_ALIGN 256 4118 #if RX_RING_SIZE > 256 4119 #define RX_Q_ENTRIES Rx2048QEntries 4120 #else 4121 #define RX_Q_ENTRIES Rx256QEntries 4122 #endif 4123 #define TX_TIMEOUT (2 * HZ) 4124 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4125 #define ADDR_64BITS 4126 #define netdrv_addr_t __le64 4127 #define cpu_to_dma(x) cpu_to_le64(x) 4128 #define dma_to_cpu(x) le64_to_cpu(x) 4129 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit 4130 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit 4131 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit 4132 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit 4133 #define RX_DESC_ADDR_SIZE RxDescAddr64bit 4134 #else 4135 #define netdrv_addr_t __le32 4136 #define cpu_to_dma(x) cpu_to_le32(x) 4137 #define dma_to_cpu(x) le32_to_cpu(x) 4138 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit 4139 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit 4140 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit 4141 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit 4142 #define RX_DESC_ADDR_SIZE RxDescAddr32bit 4143 #endif 4144 #define skb_first_frag_len(skb) skb_headlen(skb) 4145 #define skb_num_frags(skb) 
(skb_shinfo(skb)->nr_frags + 1) 4146 #define FIRMWARE_RX "adaptec/starfire_rx.bin" 4147 #define FIRMWARE_TX "adaptec/starfire_tx.bin" 4148 #ifdef VLAN_SUPPORT 4149 #define RxComplType RxComplType3 4150 #else 4151 #define RxComplType RxComplType2 4152 #endif 4153 #ifdef ADDR_64BITS 4154 #define TX_DESC_TYPE TxDescType2 4155 #else 4156 #define TX_DESC_TYPE TxDescType1 4157 #endif 4158 #define TX_DESC_SPACING TxDescSpaceUnlim 4159 #if 0 4160 #endif 4161 #define PHY_CNT 2 4162 #ifdef VLAN_SUPPORT 4163 #endif 4164 #ifdef VLAN_SUPPORT 4165 #endif 4166 #ifdef VLAN_SUPPORT 4167 #endif 4168 #ifndef MODULE 4169 #endif 4170 #ifdef ZEROCOPY 4171 #endif 4172 #ifdef VLAN_SUPPORT 4173 #endif 4174 #ifdef ADDR_64BITS 4175 #endif 4176 #if ! defined(final_version) 4177 #endif 4178 #ifdef VLAN_SUPPORT 4179 #endif 4180 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) 4181 #endif 4182 #ifndef final_version 4183 #endif 4184 #ifdef VLAN_SUPPORT 4185 #endif 4186 #ifdef VLAN_SUPPORT 4187 #endif 4188 #ifdef VLAN_SUPPORT 4189 #endif 4190 #ifdef VLAN_SUPPORT 4191 #endif 4192 #ifdef CONFIG_PM 4193 #endif 4194 /* LDV_COMMENT_END_PREP */ 4195 /* LDV_COMMENT_BEGIN_PREP */ 4196 #ifdef CONFIG_PM 4197 #endif 4198 #ifdef MODULE 4199 #endif 4200 /* LDV_COMMENT_END_PREP */ 4201 /* content: static int starfire_suspend(struct pci_dev *pdev, pm_message_t state)*/ 4202 /* LDV_COMMENT_BEGIN_PREP */ 4203 #define DRV_NAME "starfire" 4204 #define DRV_VERSION "2.1" 4205 #define DRV_RELDATE "July 6, 2008" 4206 #define HAS_BROKEN_FIRMWARE 4207 #ifdef HAS_BROKEN_FIRMWARE 4208 #define PADDING_MASK 3 4209 #endif 4210 #define ZEROCOPY 4211 #if IS_ENABLED(CONFIG_VLAN_8021Q) 4212 #define VLAN_SUPPORT 4213 #endif 4214 #define PKT_BUF_SZ 1536 4215 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) 4216 #else 4217 #endif 4218 #ifdef __sparc__ 4219 #define DMA_BURST_SIZE 64 4220 #else 4221 #define DMA_BURST_SIZE 128 4222 #endif 4223 #define RX_RING_SIZE 256 4224 #define TX_RING_SIZE 32 4225 #define 
DONE_Q_SIZE 1024 4226 #define QUEUE_ALIGN 256 4227 #if RX_RING_SIZE > 256 4228 #define RX_Q_ENTRIES Rx2048QEntries 4229 #else 4230 #define RX_Q_ENTRIES Rx256QEntries 4231 #endif 4232 #define TX_TIMEOUT (2 * HZ) 4233 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4234 #define ADDR_64BITS 4235 #define netdrv_addr_t __le64 4236 #define cpu_to_dma(x) cpu_to_le64(x) 4237 #define dma_to_cpu(x) le64_to_cpu(x) 4238 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit 4239 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit 4240 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit 4241 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit 4242 #define RX_DESC_ADDR_SIZE RxDescAddr64bit 4243 #else 4244 #define netdrv_addr_t __le32 4245 #define cpu_to_dma(x) cpu_to_le32(x) 4246 #define dma_to_cpu(x) le32_to_cpu(x) 4247 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit 4248 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit 4249 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit 4250 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit 4251 #define RX_DESC_ADDR_SIZE RxDescAddr32bit 4252 #endif 4253 #define skb_first_frag_len(skb) skb_headlen(skb) 4254 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) 4255 #define FIRMWARE_RX "adaptec/starfire_rx.bin" 4256 #define FIRMWARE_TX "adaptec/starfire_tx.bin" 4257 #ifdef VLAN_SUPPORT 4258 #define RxComplType RxComplType3 4259 #else 4260 #define RxComplType RxComplType2 4261 #endif 4262 #ifdef ADDR_64BITS 4263 #define TX_DESC_TYPE TxDescType2 4264 #else 4265 #define TX_DESC_TYPE TxDescType1 4266 #endif 4267 #define TX_DESC_SPACING TxDescSpaceUnlim 4268 #if 0 4269 #endif 4270 #define PHY_CNT 2 4271 #ifdef VLAN_SUPPORT 4272 #endif 4273 #ifdef VLAN_SUPPORT 4274 #endif 4275 #ifdef VLAN_SUPPORT 4276 #endif 4277 #ifndef MODULE 4278 #endif 4279 #ifdef ZEROCOPY 4280 #endif 4281 #ifdef VLAN_SUPPORT 4282 #endif 4283 #ifdef ADDR_64BITS 4284 #endif 4285 #if ! 
defined(final_version) 4286 #endif 4287 #ifdef VLAN_SUPPORT 4288 #endif 4289 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) 4290 #endif 4291 #ifndef final_version 4292 #endif 4293 #ifdef VLAN_SUPPORT 4294 #endif 4295 #ifdef VLAN_SUPPORT 4296 #endif 4297 #ifdef VLAN_SUPPORT 4298 #endif 4299 #ifdef VLAN_SUPPORT 4300 #endif 4301 #ifdef CONFIG_PM 4302 /* LDV_COMMENT_END_PREP */ 4303 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "starfire_suspend" */ 4304 pm_message_t var_starfire_suspend_29_p1; 4305 /* LDV_COMMENT_BEGIN_PREP */ 4306 #endif 4307 #ifdef CONFIG_PM 4308 #endif 4309 #ifdef MODULE 4310 #endif 4311 /* LDV_COMMENT_END_PREP */ 4312 /* content: static int starfire_resume(struct pci_dev *pdev)*/ 4313 /* LDV_COMMENT_BEGIN_PREP */ 4314 #define DRV_NAME "starfire" 4315 #define DRV_VERSION "2.1" 4316 #define DRV_RELDATE "July 6, 2008" 4317 #define HAS_BROKEN_FIRMWARE 4318 #ifdef HAS_BROKEN_FIRMWARE 4319 #define PADDING_MASK 3 4320 #endif 4321 #define ZEROCOPY 4322 #if IS_ENABLED(CONFIG_VLAN_8021Q) 4323 #define VLAN_SUPPORT 4324 #endif 4325 #define PKT_BUF_SZ 1536 4326 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) 4327 #else 4328 #endif 4329 #ifdef __sparc__ 4330 #define DMA_BURST_SIZE 64 4331 #else 4332 #define DMA_BURST_SIZE 128 4333 #endif 4334 #define RX_RING_SIZE 256 4335 #define TX_RING_SIZE 32 4336 #define DONE_Q_SIZE 1024 4337 #define QUEUE_ALIGN 256 4338 #if RX_RING_SIZE > 256 4339 #define RX_Q_ENTRIES Rx2048QEntries 4340 #else 4341 #define RX_Q_ENTRIES Rx256QEntries 4342 #endif 4343 #define TX_TIMEOUT (2 * HZ) 4344 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4345 #define ADDR_64BITS 4346 #define netdrv_addr_t __le64 4347 #define cpu_to_dma(x) cpu_to_le64(x) 4348 #define dma_to_cpu(x) le64_to_cpu(x) 4349 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit 4350 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit 4351 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit 4352 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit 4353 #define 
RX_DESC_ADDR_SIZE RxDescAddr64bit 4354 #else 4355 #define netdrv_addr_t __le32 4356 #define cpu_to_dma(x) cpu_to_le32(x) 4357 #define dma_to_cpu(x) le32_to_cpu(x) 4358 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit 4359 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit 4360 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit 4361 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit 4362 #define RX_DESC_ADDR_SIZE RxDescAddr32bit 4363 #endif 4364 #define skb_first_frag_len(skb) skb_headlen(skb) 4365 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) 4366 #define FIRMWARE_RX "adaptec/starfire_rx.bin" 4367 #define FIRMWARE_TX "adaptec/starfire_tx.bin" 4368 #ifdef VLAN_SUPPORT 4369 #define RxComplType RxComplType3 4370 #else 4371 #define RxComplType RxComplType2 4372 #endif 4373 #ifdef ADDR_64BITS 4374 #define TX_DESC_TYPE TxDescType2 4375 #else 4376 #define TX_DESC_TYPE TxDescType1 4377 #endif 4378 #define TX_DESC_SPACING TxDescSpaceUnlim 4379 #if 0 4380 #endif 4381 #define PHY_CNT 2 4382 #ifdef VLAN_SUPPORT 4383 #endif 4384 #ifdef VLAN_SUPPORT 4385 #endif 4386 #ifdef VLAN_SUPPORT 4387 #endif 4388 #ifndef MODULE 4389 #endif 4390 #ifdef ZEROCOPY 4391 #endif 4392 #ifdef VLAN_SUPPORT 4393 #endif 4394 #ifdef ADDR_64BITS 4395 #endif 4396 #if ! 
defined(final_version) 4397 #endif 4398 #ifdef VLAN_SUPPORT 4399 #endif 4400 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) 4401 #endif 4402 #ifndef final_version 4403 #endif 4404 #ifdef VLAN_SUPPORT 4405 #endif 4406 #ifdef VLAN_SUPPORT 4407 #endif 4408 #ifdef VLAN_SUPPORT 4409 #endif 4410 #ifdef VLAN_SUPPORT 4411 #endif 4412 #ifdef CONFIG_PM 4413 /* LDV_COMMENT_END_PREP */ 4414 /* LDV_COMMENT_BEGIN_PREP */ 4415 #endif 4416 #ifdef CONFIG_PM 4417 #endif 4418 #ifdef MODULE 4419 #endif 4420 /* LDV_COMMENT_END_PREP */ 4421 4422 /** CALLBACK SECTION request_irq **/ 4423 /* content: static irqreturn_t intr_handler(int irq, void *dev_instance)*/ 4424 /* LDV_COMMENT_BEGIN_PREP */ 4425 #define DRV_NAME "starfire" 4426 #define DRV_VERSION "2.1" 4427 #define DRV_RELDATE "July 6, 2008" 4428 #define HAS_BROKEN_FIRMWARE 4429 #ifdef HAS_BROKEN_FIRMWARE 4430 #define PADDING_MASK 3 4431 #endif 4432 #define ZEROCOPY 4433 #if IS_ENABLED(CONFIG_VLAN_8021Q) 4434 #define VLAN_SUPPORT 4435 #endif 4436 #define PKT_BUF_SZ 1536 4437 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) 4438 #else 4439 #endif 4440 #ifdef __sparc__ 4441 #define DMA_BURST_SIZE 64 4442 #else 4443 #define DMA_BURST_SIZE 128 4444 #endif 4445 #define RX_RING_SIZE 256 4446 #define TX_RING_SIZE 32 4447 #define DONE_Q_SIZE 1024 4448 #define QUEUE_ALIGN 256 4449 #if RX_RING_SIZE > 256 4450 #define RX_Q_ENTRIES Rx2048QEntries 4451 #else 4452 #define RX_Q_ENTRIES Rx256QEntries 4453 #endif 4454 #define TX_TIMEOUT (2 * HZ) 4455 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4456 #define ADDR_64BITS 4457 #define netdrv_addr_t __le64 4458 #define cpu_to_dma(x) cpu_to_le64(x) 4459 #define dma_to_cpu(x) le64_to_cpu(x) 4460 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit 4461 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit 4462 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit 4463 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit 4464 #define RX_DESC_ADDR_SIZE RxDescAddr64bit 4465 #else 4466 #define netdrv_addr_t __le32 4467 
#define cpu_to_dma(x) cpu_to_le32(x) 4468 #define dma_to_cpu(x) le32_to_cpu(x) 4469 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit 4470 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit 4471 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit 4472 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit 4473 #define RX_DESC_ADDR_SIZE RxDescAddr32bit 4474 #endif 4475 #define skb_first_frag_len(skb) skb_headlen(skb) 4476 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) 4477 #define FIRMWARE_RX "adaptec/starfire_rx.bin" 4478 #define FIRMWARE_TX "adaptec/starfire_tx.bin" 4479 #ifdef VLAN_SUPPORT 4480 #define RxComplType RxComplType3 4481 #else 4482 #define RxComplType RxComplType2 4483 #endif 4484 #ifdef ADDR_64BITS 4485 #define TX_DESC_TYPE TxDescType2 4486 #else 4487 #define TX_DESC_TYPE TxDescType1 4488 #endif 4489 #define TX_DESC_SPACING TxDescSpaceUnlim 4490 #if 0 4491 #endif 4492 #define PHY_CNT 2 4493 #ifdef VLAN_SUPPORT 4494 #endif 4495 #ifdef VLAN_SUPPORT 4496 #endif 4497 #ifdef VLAN_SUPPORT 4498 #endif 4499 #ifndef MODULE 4500 #endif 4501 #ifdef ZEROCOPY 4502 #endif 4503 #ifdef VLAN_SUPPORT 4504 #endif 4505 #ifdef ADDR_64BITS 4506 #endif 4507 #if ! 
defined(final_version) 4508 #endif 4509 #ifdef VLAN_SUPPORT 4510 #endif 4511 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) 4512 #endif 4513 /* LDV_COMMENT_END_PREP */ 4514 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "intr_handler" */ 4515 int var_intr_handler_10_p0; 4516 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "intr_handler" */ 4517 void * var_intr_handler_10_p1; 4518 /* LDV_COMMENT_BEGIN_PREP */ 4519 #ifndef final_version 4520 #endif 4521 #ifdef VLAN_SUPPORT 4522 #endif 4523 #ifdef VLAN_SUPPORT 4524 #endif 4525 #ifdef VLAN_SUPPORT 4526 #endif 4527 #ifdef VLAN_SUPPORT 4528 #endif 4529 #ifdef CONFIG_PM 4530 #endif 4531 #ifdef CONFIG_PM 4532 #endif 4533 #ifdef MODULE 4534 #endif 4535 /* LDV_COMMENT_END_PREP */ 4536 4537 4538 4539 4540 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */ 4541 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */ 4542 /*============================= VARIABLE INITIALIZING PART =============================*/ 4543 LDV_IN_INTERRUPT=1;