Error Trace
Bug #168
(The number before each declaration below refers to its line in the original kernel source file.)
{ 19 typedef signed char __s8; 20 typedef unsigned char __u8; 23 typedef unsigned short __u16; 25 typedef int __s32; 26 typedef unsigned int __u32; 29 typedef long long __s64; 30 typedef unsigned long long __u64; 15 typedef signed char s8; 16 typedef unsigned char u8; 18 typedef short s16; 19 typedef unsigned short u16; 21 typedef int s32; 22 typedef unsigned int u32; 24 typedef long long s64; 25 typedef unsigned long long u64; 14 typedef long __kernel_long_t; 15 typedef unsigned long __kernel_ulong_t; 27 typedef int __kernel_pid_t; 48 typedef unsigned int __kernel_uid32_t; 49 typedef unsigned int __kernel_gid32_t; 71 typedef __kernel_ulong_t __kernel_size_t; 72 typedef __kernel_long_t __kernel_ssize_t; 87 typedef long long __kernel_loff_t; 88 typedef __kernel_long_t __kernel_time_t; 89 typedef __kernel_long_t __kernel_clock_t; 90 typedef int __kernel_timer_t; 91 typedef int __kernel_clockid_t; 28 typedef __u16 __le16; 29 typedef __u16 __be16; 30 typedef __u32 __le32; 31 typedef __u32 __be32; 32 typedef __u64 __le64; 36 typedef __u32 __wsum; 280 struct kernel_symbol { unsigned long value; const char *name; } ; 34 struct module ; 12 typedef __u32 __kernel_dev_t; 15 typedef __kernel_dev_t dev_t; 18 typedef unsigned short umode_t; 21 typedef __kernel_pid_t pid_t; 26 typedef __kernel_clockid_t clockid_t; 29 typedef _Bool bool; 31 typedef __kernel_uid32_t uid_t; 32 typedef __kernel_gid32_t gid_t; 45 typedef __kernel_loff_t loff_t; 54 typedef __kernel_size_t size_t; 59 typedef __kernel_ssize_t ssize_t; 69 typedef __kernel_time_t time_t; 102 typedef __s32 int32_t; 106 typedef __u8 uint8_t; 108 typedef __u32 uint32_t; 111 typedef __u64 uint64_t; 133 typedef unsigned long sector_t; 134 typedef unsigned long blkcnt_t; 152 typedef u64 dma_addr_t; 157 typedef unsigned int gfp_t; 158 typedef unsigned int fmode_t; 161 typedef u64 phys_addr_t; 166 typedef phys_addr_t resource_size_t; 176 struct __anonstruct_atomic_t_6 { int counter; } ; 176 typedef struct __anonstruct_atomic_t_6 atomic_t; 181 struct __anonstruct_atomic64_t_7 { long counter; } ; 181 typedef struct __anonstruct_atomic64_t_7 atomic64_t; 182 struct list_head { struct list_head *next; struct list_head *prev; } ; 187 struct hlist_node ; 187 struct hlist_head { struct hlist_node *first; } ; 191 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ; 202 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ; 115 typedef void (*ctor_fn_t)(); 83 struct ctl_table ; 58 struct device ; 64 struct net_device ; 465 struct file_operations ; 477 struct completion ; 478 struct pt_regs ; 546 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ; 114 struct timespec ; 115 struct compat_timespec ; 116 struct pollfd ; 117 struct __anonstruct_futex_27 { u32 *uaddr; u32 val; u32 flags; u32 bitset; u64 time; u32 *uaddr2; } ; 117 struct __anonstruct_nanosleep_28 { clockid_t clockid; struct timespec *rmtp; struct compat_timespec *compat_rmtp; u64 expires; } ; 117 struct __anonstruct_poll_29 { struct pollfd *ufds; int nfds; int has_timeout; unsigned long tv_sec; unsigned long tv_nsec; } ; 117 union __anonunion____missing_field_name_26 { struct __anonstruct_futex_27 futex; struct __anonstruct_nanosleep_28 nanosleep; struct __anonstruct_poll_29 poll; } ; 117 struct restart_block { long int (*fn)(struct restart_block *); union __anonunion____missing_field_name_26 __annonCompField4; } ; 50 struct task_struct ; 39 struct page ; 26 struct mm_struct ; 288 struct pt_regs { 
unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ; 66 struct __anonstruct____missing_field_name_32 { unsigned int a; unsigned int b; } ; 66 struct __anonstruct____missing_field_name_33 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ; 66 union __anonunion____missing_field_name_31 { struct __anonstruct____missing_field_name_32 __annonCompField5; struct __anonstruct____missing_field_name_33 __annonCompField6; } ; 66 struct desc_struct { union __anonunion____missing_field_name_31 __annonCompField7; } ; 13 typedef unsigned long pteval_t; 14 typedef unsigned long pmdval_t; 16 typedef unsigned long pgdval_t; 17 typedef unsigned long pgprotval_t; 19 struct __anonstruct_pte_t_34 { pteval_t pte; } ; 19 typedef struct __anonstruct_pte_t_34 pte_t; 21 struct pgprot { pgprotval_t pgprot; } ; 256 typedef struct pgprot pgprot_t; 258 struct __anonstruct_pgd_t_35 { pgdval_t pgd; } ; 258 typedef struct __anonstruct_pgd_t_35 pgd_t; 297 struct __anonstruct_pmd_t_37 { pmdval_t pmd; } ; 297 typedef struct __anonstruct_pmd_t_37 pmd_t; 423 typedef struct page *pgtable_t; 434 struct file ; 445 struct seq_file ; 481 struct thread_struct ; 483 struct cpumask ; 20 struct qspinlock { atomic_t val; } ; 33 typedef struct qspinlock arch_spinlock_t; 34 struct qrwlock { atomic_t cnts; arch_spinlock_t wait_lock; } ; 14 typedef struct qrwlock arch_rwlock_t; 247 struct math_emu_info { long ___orig_eip; struct pt_regs *regs; } ; 83 struct static_key { atomic_t enabled; } ; 23 typedef atomic64_t atomic_long_t; 359 struct cpumask { unsigned long bits[128U]; } ; 15 typedef struct cpumask cpumask_t; 654 typedef struct cpumask *cpumask_var_t; 22 struct tracepoint_func { void *func; void *data; int prio; } ; 28 struct tracepoint { const char *name; struct static_key key; int (*regfunc)(); void (*unregfunc)(); struct tracepoint_func *funcs; } ; 233 struct fregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ; 26 struct __anonstruct____missing_field_name_61 { u64 rip; u64 rdp; } ; 26 struct __anonstruct____missing_field_name_62 { u32 fip; u32 fcs; u32 foo; u32 fos; } ; 26 union __anonunion____missing_field_name_60 { struct __anonstruct____missing_field_name_61 __annonCompField13; struct __anonstruct____missing_field_name_62 __annonCompField14; } ; 26 union __anonunion____missing_field_name_63 { u32 padding1[12U]; u32 sw_reserved[12U]; } ; 26 struct fxregs_state { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion____missing_field_name_60 __annonCompField15; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion____missing_field_name_63 __annonCompField16; } ; 66 struct swregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ; 227 struct xstate_header { u64 xfeatures; u64 xcomp_bv; u64 reserved[6U]; } ; 233 struct xregs_state { struct fxregs_state i387; struct 
xstate_header header; u8 extended_state_area[0U]; } ; 254 union fpregs_state { struct fregs_state fsave; struct fxregs_state fxsave; struct swregs_state soft; struct xregs_state xsave; u8 __padding[4096U]; } ; 271 struct fpu { unsigned int last_cpu; unsigned char fpstate_active; unsigned char fpregs_active; union fpregs_state state; } ; 180 struct seq_operations ; 386 struct perf_event ; 391 struct __anonstruct_mm_segment_t_75 { unsigned long seg; } ; 391 typedef struct __anonstruct_mm_segment_t_75 mm_segment_t; 392 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; u32 status; unsigned long fsbase; unsigned long gsbase; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; mm_segment_t addr_limit; unsigned char sig_on_uaccess_err; unsigned char uaccess_err; struct fpu fpu; } ; 48 struct thread_info { unsigned long flags; } ; 303 enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 } ; 16 typedef enum irqreturn irqreturn_t; 10 struct lockdep_map ; 55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ; 28 struct lockdep_subclass_key { char __one_byte; } ; 53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ; 59 struct lock_class { struct hlist_node hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ; 144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ; 207 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; unsigned int pin_count; } ; 34 struct vm_area_struct ; 15 struct llist_node ; 64 struct llist_node { struct llist_node *next; } ; 70 struct resource { resource_size_t start; resource_size_t end; const char *name; unsigned long flags; unsigned long desc; struct resource *parent; struct resource *sibling; struct resource *child; } ; 236 struct pci_dev ; 33 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 32 typedef struct raw_spinlock raw_spinlock_t; 33 struct __anonstruct____missing_field_name_141 { u8 __padding[24U]; struct lockdep_map dep_map; } ; 33 union __anonunion____missing_field_name_140 { struct raw_spinlock rlock; struct __anonstruct____missing_field_name_141 __annonCompField19; } ; 33 struct spinlock { union __anonunion____missing_field_name_140 __annonCompField20; } ; 76 typedef struct spinlock spinlock_t; 23 struct __anonstruct_rwlock_t_142 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 23 typedef struct 
__anonstruct_rwlock_t_142 rwlock_t; 416 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ; 52 typedef struct seqcount seqcount_t; 407 struct __anonstruct_seqlock_t_157 { struct seqcount seqcount; spinlock_t lock; } ; 407 typedef struct __anonstruct_seqlock_t_157 seqlock_t; 601 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ; 7 typedef __s64 time64_t; 28 typedef s64 ktime_t; 109 struct timer_list { struct hlist_node entry; unsigned long expires; void (*function)(unsigned long); unsigned long data; u32 flags; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ; 254 struct hrtimer ; 255 enum hrtimer_restart ; 38 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ; 43 typedef struct __wait_queue_head wait_queue_head_t; 1225 struct completion { unsigned int done; wait_queue_head_t wait; } ; 1144 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ; 41 struct rb_root { struct rb_node *rb_node; } ; 83 struct user_namespace ; 22 struct __anonstruct_kuid_t_162 { uid_t val; } ; 22 typedef struct __anonstruct_kuid_t_162 kuid_t; 27 struct __anonstruct_kgid_t_163 { gid_t val; } ; 27 typedef struct __anonstruct_kgid_t_163 kgid_t; 835 struct nsproxy ; 836 struct ctl_table_root ; 837 struct ctl_table_header ; 838 struct ctl_dir ; 39 typedef int proc_handler(struct ctl_table *, int, void *, size_t *, loff_t *); 61 struct ctl_table_poll { atomic_t event; wait_queue_head_t wait; } ; 100 struct ctl_table { const char *procname; void *data; int maxlen; umode_t mode; struct ctl_table *child; proc_handler *proc_handler; struct ctl_table_poll *poll; void *extra1; void *extra2; } ; 121 struct ctl_node { struct rb_node node; struct ctl_table_header *header; } ; 126 struct __anonstruct____missing_field_name_165 { struct ctl_table *ctl_table; int used; int count; int nreg; } ; 126 union __anonunion____missing_field_name_164 { struct __anonstruct____missing_field_name_165 __annonCompField21; struct callback_head rcu; } ; 126 struct ctl_table_set ; 126 struct ctl_table_header { union __anonunion____missing_field_name_164 __annonCompField22; struct completion *unregistering; struct ctl_table *ctl_table_arg; struct ctl_table_root *root; struct ctl_table_set *set; struct ctl_dir *parent; struct ctl_node *node; } ; 147 struct ctl_dir { struct ctl_table_header header; struct rb_root root; } ; 153 struct ctl_table_set { int (*is_seen)(struct ctl_table_set *); struct ctl_dir dir; } ; 158 struct ctl_table_root { struct ctl_table_set default_set; struct ctl_table_set * (*lookup)(struct ctl_table_root *); void (*set_ownership)(struct ctl_table_header *, struct ctl_table *, kuid_t *, kgid_t *); int (*permissions)(struct ctl_table_header *, struct ctl_table *); } ; 278 struct workqueue_struct ; 279 struct work_struct ; 54 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ; 107 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ; 58 struct pm_message { int event; } ; 64 typedef struct pm_message pm_message_t; 65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int 
(*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ; 320 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ; 327 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ; 335 struct wakeup_source ; 336 struct wake_irq ; 337 struct pm_domain_data ; 338 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; struct pm_domain_data *domain_data; } ; 556 struct dev_pm_qos ; 556 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool in_dpm_list; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; bool no_pm_callbacks; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; bool ignore_children; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; unsigned int links_count; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ; 618 struct dev_pm_domain { struct dev_pm_ops ops; void (*detach)(struct device *, bool ); int (*activate)(struct device *); void (*sync)(struct device *); void (*dismiss)(struct device *); } ; 97 struct __anonstruct_nodemask_t_166 { unsigned long bits[16U]; } ; 97 typedef struct __anonstruct_nodemask_t_166 nodemask_t; 144 struct pci_bus ; 13 struct optimistic_spin_queue { atomic_t tail; } ; 39 struct mutex { atomic_long_t owner; spinlock_t wait_lock; struct optimistic_spin_queue osq; struct list_head wait_list; void *magic; struct lockdep_map dep_map; } ; 70 struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; } ; 38 struct ldt_struct ; 38 struct vdso_image ; 38 struct __anonstruct_mm_context_t_167 { struct ldt_struct *ldt; unsigned short ia32_compat; struct mutex lock; void *vdso; const struct vdso_image *vdso_image; atomic_t perf_rdpmc_allowed; u16 pkey_allocation_map; s16 execute_only_pkey; void *bd_addr; } ; 38 typedef struct __anonstruct_mm_context_t_167 mm_context_t; 22 struct bio_vec ; 249 typedef unsigned int isolate_mode_t; 744 struct rw_semaphore ; 745 struct rw_semaphore { atomic_long_t count; struct list_head wait_list; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct 
task_struct *owner; struct lockdep_map dep_map; } ; 64 struct irq_domain ; 422 union __anonunion____missing_field_name_209 { unsigned long bitmap[1U]; struct callback_head callback_head; } ; 422 struct idr_layer { int prefix; int layer; struct idr_layer *ary[64U]; int count; union __anonunion____missing_field_name_209 __annonCompField33; } ; 40 struct idr { struct idr_layer *hint; struct idr_layer *top; int layers; int cur; spinlock_t lock; int id_free_cnt; struct idr_layer *id_free; } ; 149 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ; 192 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ; 229 struct dentry ; 230 struct iattr ; 231 struct super_block ; 232 struct file_system_type ; 233 struct kernfs_open_node ; 234 struct kernfs_iattrs ; 257 struct kernfs_root ; 257 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ; 85 struct kernfs_node ; 85 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ; 89 struct kernfs_ops ; 89 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; struct kernfs_node *notify_next; } ; 96 union __anonunion____missing_field_name_218 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ; 96 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion____missing_field_name_218 __annonCompField34; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ; 138 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); } ; 157 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ; 173 struct vm_operations_struct ; 173 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; void *priv; struct mutex mutex; struct mutex prealloc_mutex; int event; struct list_head list; char *prealloc_buf; size_t atomic_write_len; bool mmapped; const struct vm_operations_struct *vm_ops; } ; 191 struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; bool prealloc; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ; 286 struct inode ; 511 struct sock ; 512 struct kobject ; 513 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ; 519 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ; 135 struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; 
loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; } ; 36 struct bin_attribute ; 37 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ; 37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ; 92 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ; 165 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ; 530 struct kref { atomic_t refcount; } ; 52 struct kset ; 52 struct kobj_type ; 52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ; 115 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ; 123 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ; 131 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ; 148 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ; 224 struct proc_dir_entry ; 133 struct exception_table_entry { int insn; int fixup; int handler; } ; 61 struct timerqueue_node { struct rb_node node; ktime_t expires; } ; 12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ; 50 struct hrtimer_clock_base ; 51 struct hrtimer_cpu_base ; 60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; 65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; u8 state; u8 is_rel; int start_pid; void *start_site; char start_comm[16U]; } ; 125 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t (*get_time)(); ktime_t offset; } ; 158 struct hrtimer_cpu_base { raw_spinlock_t lock; seqcount_t seq; struct hrtimer *running; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; bool migration_enabled; bool nohz_active; unsigned char in_hrtirq; unsigned char hres_active; unsigned char hang_detected; ktime_t expires_next; struct hrtimer *next_timer; unsigned int nr_events; unsigned int nr_retries; unsigned int nr_hangs; unsigned int max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ; 506 
struct tasklet_struct { struct tasklet_struct *next; unsigned long state; atomic_t count; void (*func)(unsigned long); unsigned long data; } ; 37 struct cred ; 58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ; 66 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ; 73 struct __anonstruct____missing_field_name_236 { struct arch_uprobe_task autask; unsigned long vaddr; } ; 73 struct __anonstruct____missing_field_name_237 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ; 73 union __anonunion____missing_field_name_235 { struct __anonstruct____missing_field_name_236 __annonCompField45; struct __anonstruct____missing_field_name_237 __annonCompField46; } ; 73 struct uprobe ; 73 struct return_instance ; 73 struct uprobe_task { enum uprobe_task_state state; union __anonunion____missing_field_name_235 __annonCompField47; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ; 95 struct return_instance { struct uprobe *uprobe; unsigned long func; unsigned long stack; unsigned long orig_ret_vaddr; bool chained; struct return_instance *next; } ; 111 struct xol_area ; 112 struct uprobes_state { struct xol_area *xol_area; } ; 151 struct address_space ; 152 struct mem_cgroup ; 153 union __anonunion____missing_field_name_238 { struct address_space *mapping; void *s_mem; atomic_t compound_mapcount; } ; 153 union __anonunion____missing_field_name_239 { unsigned long index; void *freelist; } ; 153 struct __anonstruct____missing_field_name_243 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ; 153 union __anonunion____missing_field_name_242 { atomic_t _mapcount; unsigned int active; struct __anonstruct____missing_field_name_243 __annonCompField50; int units; } ; 153 struct __anonstruct____missing_field_name_241 { union __anonunion____missing_field_name_242 __annonCompField51; atomic_t _refcount; } ; 153 union __anonunion____missing_field_name_240 { unsigned long counters; struct __anonstruct____missing_field_name_241 __annonCompField52; } ; 153 struct dev_pagemap ; 153 struct __anonstruct____missing_field_name_245 { struct page *next; int pages; int pobjects; } ; 153 struct __anonstruct____missing_field_name_246 { unsigned long compound_head; unsigned int compound_dtor; unsigned int compound_order; } ; 153 struct __anonstruct____missing_field_name_247 { unsigned long __pad; pgtable_t pmd_huge_pte; } ; 153 union __anonunion____missing_field_name_244 { struct list_head lru; struct dev_pagemap *pgmap; struct __anonstruct____missing_field_name_245 __annonCompField54; struct callback_head callback_head; struct __anonstruct____missing_field_name_246 __annonCompField55; struct __anonstruct____missing_field_name_247 __annonCompField56; } ; 153 struct kmem_cache ; 153 union __anonunion____missing_field_name_248 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; } ; 153 struct page { unsigned long flags; union __anonunion____missing_field_name_238 __annonCompField48; union __anonunion____missing_field_name_239 __annonCompField49; union __anonunion____missing_field_name_240 __annonCompField53; union __anonunion____missing_field_name_244 __annonCompField57; union __anonunion____missing_field_name_248 __annonCompField58; struct mem_cgroup *mem_cgroup; } ; 197 struct page_frag { struct page *page; __u32 offset; __u32 size; } ; 282 struct userfaultfd_ctx ; 282 struct 
vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; } ; 289 struct __anonstruct_shared_249 { struct rb_node rb; unsigned long rb_subtree_last; } ; 289 struct anon_vma ; 289 struct mempolicy ; 289 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; struct __anonstruct_shared_249 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ; 362 struct core_thread { struct task_struct *task; struct core_thread *next; } ; 367 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ; 381 struct task_rss_stat { int events; int count[4U]; } ; 389 struct mm_rss_stat { atomic_long_t count[4U]; } ; 394 struct kioctx_table ; 395 struct linux_binfmt ; 395 struct mmu_notifier_mm ; 395 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; atomic_long_t nr_pmds; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long data_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct user_namespace *user_ns; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; atomic_long_t hugetlb_usage; struct work_struct async_put_work; } ; 560 struct vm_fault ; 614 struct vdso_image { void *data; unsigned long size; unsigned long alt; unsigned long alt_len; long sym_vvar_start; long sym_vvar_page; long sym_hpet_page; long sym_pvclock_page; long sym_VDSO32_NOTE_MASK; long sym___kernel_sigreturn; long sym___kernel_rt_sigreturn; long sym___kernel_vsyscall; long sym_int80_landing_pad; } ; 15 typedef __u64 Elf64_Addr; 16 typedef __u16 Elf64_Half; 18 typedef __u64 Elf64_Off; 20 typedef __u32 Elf64_Word; 21 typedef __u64 Elf64_Xword; 190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ; 198 typedef struct elf64_sym Elf64_Sym; 219 struct elf64_hdr { unsigned char e_ident[16U]; Elf64_Half e_type; Elf64_Half e_machine; Elf64_Word e_version; 
Elf64_Addr e_entry; Elf64_Off e_phoff; Elf64_Off e_shoff; Elf64_Word e_flags; Elf64_Half e_ehsize; Elf64_Half e_phentsize; Elf64_Half e_phnum; Elf64_Half e_shentsize; Elf64_Half e_shnum; Elf64_Half e_shstrndx; } ; 235 typedef struct elf64_hdr Elf64_Ehdr; 314 struct elf64_shdr { Elf64_Word sh_name; Elf64_Word sh_type; Elf64_Xword sh_flags; Elf64_Addr sh_addr; Elf64_Off sh_offset; Elf64_Xword sh_size; Elf64_Word sh_link; Elf64_Word sh_info; Elf64_Xword sh_addralign; Elf64_Xword sh_entsize; } ; 326 typedef struct elf64_shdr Elf64_Shdr; 53 struct kernel_param ; 58 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ; 62 struct kparam_string ; 62 struct kparam_array ; 62 union __anonunion____missing_field_name_254 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ; 62 struct kernel_param { const char *name; struct module *mod; const struct kernel_param_ops *ops; const u16 perm; s8 level; u8 flags; union __anonunion____missing_field_name_254 __annonCompField59; } ; 83 struct kparam_string { unsigned int maxlen; char *string; } ; 89 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ; 24 struct latch_tree_node { struct rb_node node[2U]; } ; 211 struct mod_arch_specific { } ; 39 struct module_param_attrs ; 39 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ; 50 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ; 277 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ; 284 struct mod_tree_node { struct module *mod; struct latch_tree_node node; } ; 291 struct module_layout { void *base; unsigned int size; unsigned int text_size; unsigned int ro_size; unsigned int ro_after_init_size; struct mod_tree_node mtn; } ; 307 struct mod_kallsyms { Elf64_Sym *symtab; unsigned int num_symtab; char *strtab; } ; 321 struct klp_modinfo { Elf64_Ehdr hdr; Elf64_Shdr *sechdrs; char *secstrings; unsigned int symndx; } ; 329 struct module_sect_attrs ; 329 struct module_notes_attrs ; 329 struct trace_event_call ; 329 struct trace_enum_map ; 329 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct mutex param_lock; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; bool async_probe_requested; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry 
*extable; int (*init)(); struct module_layout core_layout; struct module_layout init_layout; struct mod_arch_specific arch; unsigned long taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; struct mod_kallsyms *kallsyms; struct mod_kallsyms core_kallsyms; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct trace_event_call **trace_events; unsigned int num_trace_events; struct trace_enum_map **trace_enums; unsigned int num_trace_enums; bool klp; bool klp_alive; struct klp_modinfo *klp_info; struct list_head source_list; struct list_head target_list; void (*exit)(); atomic_t refcnt; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ; 13 typedef unsigned long kernel_ulong_t; 14 struct pci_device_id { __u32 vendor; __u32 device; __u32 subvendor; __u32 subdevice; __u32 class; __u32 class_mask; kernel_ulong_t driver_data; } ; 187 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; __u32 cls; __u32 cls_msk; } ; 230 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ; 675 struct klist_node ; 37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ; 93 struct hlist_bl_node ; 93 struct hlist_bl_head { struct hlist_bl_node *first; } ; 36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ; 114 struct __anonstruct____missing_field_name_311 { spinlock_t lock; int count; } ; 114 union __anonunion____missing_field_name_310 { struct __anonstruct____missing_field_name_311 __annonCompField60; } ; 114 struct lockref { union __anonunion____missing_field_name_310 __annonCompField61; } ; 77 struct path ; 78 struct vfsmount ; 79 struct __anonstruct____missing_field_name_313 { u32 hash; u32 len; } ; 79 union __anonunion____missing_field_name_312 { struct __anonstruct____missing_field_name_313 __annonCompField62; u64 hash_len; } ; 79 struct qstr { union __anonunion____missing_field_name_312 __annonCompField63; const unsigned char *name; } ; 65 struct dentry_operations ; 65 union __anonunion____missing_field_name_314 { struct list_head d_lru; wait_queue_head_t *d_wait; } ; 65 union __anonunion_d_u_315 { struct hlist_node d_alias; struct hlist_bl_node d_in_lookup_hash; struct callback_head d_rcu; } ; 65 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; union __anonunion____missing_field_name_314 __annonCompField64; struct list_head d_child; struct list_head d_subdirs; union __anonunion_d_u_315 d_u; } ; 121 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); int (*d_init)(struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int 
(*d_manage)(const struct path *, bool ); struct dentry * (*d_real)(struct dentry *, const struct inode *, unsigned int); } ; 592 struct path { struct vfsmount *mnt; struct dentry *dentry; } ; 19 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; int nid; struct mem_cgroup *memcg; } ; 27 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ; 80 struct list_lru_one { struct list_head list; long nr_items; } ; 32 struct list_lru_memcg { struct list_lru_one *lru[0U]; } ; 37 struct list_lru_node { spinlock_t lock; struct list_lru_one lru; struct list_lru_memcg *memcg_lrus; } ; 47 struct list_lru { struct list_lru_node *node; struct list_head list; } ; 63 union __anonunion____missing_field_name_316 { struct list_head private_list; struct callback_head callback_head; } ; 63 struct radix_tree_node { unsigned char shift; unsigned char offset; unsigned char count; unsigned char exceptional; struct radix_tree_node *parent; void *private_data; union __anonunion____missing_field_name_316 __annonCompField65; void *slots[64U]; unsigned long tags[3U][1U]; } ; 105 struct radix_tree_root { gfp_t gfp_mask; struct radix_tree_node *rnode; } ; 519 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ; 526 struct pid_namespace ; 526 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ; 56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ; 68 struct pid_link { struct hlist_node node; struct pid *pid; } ; 22 struct kernel_cap_struct { __u32 cap[2U]; } ; 25 typedef struct kernel_cap_struct kernel_cap_t; 45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ; 38 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ; 44 enum rcu_sync_type { RCU_SYNC = 0, RCU_SCHED_SYNC = 1, RCU_BH_SYNC = 2 } ; 50 struct rcu_sync { int gp_state; int gp_count; wait_queue_head_t gp_wait; int cb_state; struct callback_head cb_head; enum rcu_sync_type gp_type; } ; 66 struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int *read_count; struct rw_semaphore rw_sem; wait_queue_head_t writer; int readers_block; } ; 144 struct delayed_call { void (*fn)(void *); void *arg; } ; 282 struct backing_dev_info ; 283 struct bdi_writeback ; 285 struct export_operations ; 287 struct iovec ; 288 struct kiocb ; 289 struct pipe_inode_info ; 290 struct poll_table_struct ; 291 struct kstatfs ; 292 struct swap_info_struct ; 293 struct iov_iter ; 294 struct fscrypt_info ; 295 struct fscrypt_operations ; 76 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ; 210 struct percpu_counter { raw_spinlock_t lock; s64 count; struct list_head list; s32 *counters; } ; 213 struct dquot ; 214 struct kqid ; 19 typedef __kernel_uid32_t projid_t; 23 struct __anonstruct_kprojid_t_320 { projid_t val; } ; 23 typedef struct __anonstruct_kprojid_t_320 kprojid_t; 181 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ; 66 typedef long long qsize_t; 67 union __anonunion____missing_field_name_321 { kuid_t uid; kgid_t gid; 
kprojid_t projid; } ; 67 struct kqid { union __anonunion____missing_field_name_321 __annonCompField66; enum quota_type type; } ; 194 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time64_t dqb_btime; time64_t dqb_itime; } ; 216 struct quota_format_type ; 217 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_max_spc_limit; qsize_t dqi_max_ino_limit; void *dqi_priv; } ; 282 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ; 309 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); int (*get_next_id)(struct super_block *, struct kqid *); } ; 321 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); int (*get_projid)(struct inode *, kprojid_t *); int (*get_next_id)(struct super_block *, struct kqid *); } ; 338 struct qc_dqblk { int d_fieldmask; u64 d_spc_hardlimit; u64 d_spc_softlimit; u64 d_ino_hardlimit; u64 d_ino_softlimit; u64 d_space; u64 d_ino_count; s64 d_ino_timer; s64 d_spc_timer; int d_ino_warns; int d_spc_warns; u64 d_rt_spc_hardlimit; u64 d_rt_spc_softlimit; u64 d_rt_space; s64 d_rt_spc_timer; int d_rt_spc_warns; } ; 361 struct qc_type_state { unsigned int flags; unsigned int spc_timelimit; unsigned int ino_timelimit; unsigned int rt_spc_timelimit; unsigned int spc_warnlimit; unsigned int ino_warnlimit; unsigned int rt_spc_warnlimit; unsigned long long ino; blkcnt_t blocks; blkcnt_t nextents; } ; 407 struct qc_state { unsigned int s_incoredqs; struct qc_type_state s_state[3U]; } ; 418 struct qc_info { int i_fieldmask; unsigned int i_flags; unsigned int i_spc_timelimit; unsigned int i_ino_timelimit; unsigned int i_rt_spc_timelimit; unsigned int i_spc_warnlimit; unsigned int i_ino_warnlimit; unsigned int i_rt_spc_warnlimit; } ; 431 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, const struct path *); int (*quota_off)(struct super_block *, int); int (*quota_enable)(struct super_block *, unsigned int); int (*quota_disable)(struct super_block *, unsigned int); int (*quota_sync)(struct super_block *, int); int (*set_info)(struct super_block *, int, struct qc_info *); int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); } ; 447 struct quota_format_type { int qf_fmt_id; const 
struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ; 511 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct inode *files[3U]; struct mem_dqinfo info[3U]; const struct quota_format_ops *ops[3U]; } ; 540 struct writeback_control ; 541 struct kiocb { struct file *ki_filp; loff_t ki_pos; void (*ki_complete)(struct kiocb *, long, long); void *private; int ki_flags; } ; 317 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); bool (*isolate_page)(struct page *, isolate_mode_t ); void (*putback_page)(struct page *); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ; 376 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; atomic_t i_mmap_writable; struct rb_root i_mmap; struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; unsigned long nrexceptional; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; spinlock_t private_lock; gfp_t gfp_mask; struct list_head private_list; void *private_data; } ; 398 struct request_queue ; 399 struct hd_struct ; 399 struct gendisk ; 399 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct list_head bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; } ; 514 struct posix_acl ; 541 struct inode_operations ; 541 union __anonunion____missing_field_name_326 { const unsigned int i_nlink; unsigned int __i_nlink; } ; 541 union __anonunion____missing_field_name_327 { struct hlist_head i_dentry; struct callback_head i_rcu; } ; 541 struct file_lock_context ; 541 struct cdev ; 541 union __anonunion____missing_field_name_328 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; unsigned int i_dir_seq; } ; 541 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations 
*i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion____missing_field_name_326 __annonCompField67; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct rw_semaphore i_rwsem; unsigned long dirtied_when; unsigned long dirtied_time_when; struct hlist_node i_hash; struct list_head i_io_list; struct bdi_writeback *i_wb; int i_wb_frn_winner; u16 i_wb_frn_avg_time; u16 i_wb_frn_history; struct list_head i_lru; struct list_head i_sb_list; struct list_head i_wb_list; union __anonunion____missing_field_name_327 __annonCompField68; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; const struct file_operations *i_fop; struct file_lock_context *i_flctx; struct address_space i_data; struct list_head i_devices; union __anonunion____missing_field_name_328 __annonCompField69; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; struct fscrypt_info *i_crypt_info; void *i_private; } ; 797 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ; 805 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ; 828 union __anonunion_f_u_329 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ; 828 struct file { union __anonunion_f_u_329 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; } ; 913 typedef void *fl_owner_t; 914 struct file_lock ; 915 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ; 921 struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); fl_owner_t (*lm_get_owner)(fl_owner_t ); void (*lm_put_owner)(fl_owner_t ); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, int); bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); } ; 942 struct net ; 948 struct nlm_lockowner ; 949 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ; 14 struct nfs4_lock_state ; 15 struct nfs4_lock_info { struct nfs4_lock_state *owner; } ; 19 struct fasync_struct ; 19 struct __anonstruct_afs_331 { struct list_head link; int state; } ; 19 union __anonunion_fl_u_330 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_331 afs; } ; 19 struct file_lock { struct file_lock *fl_next; struct list_head fl_list; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned 
long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_330 fl_u; } ; 1001 struct file_lock_context { spinlock_t flc_lock; struct list_head flc_flock; struct list_head flc_posix; struct list_head flc_lease; } ; 1068 struct files_struct ; 1221 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ; 1256 struct sb_writers { int frozen; wait_queue_head_t wait_unfrozen; struct percpu_rw_semaphore rw_sem[3U]; } ; 1286 struct super_operations ; 1286 struct xattr_handler ; 1286 struct mtd_info ; 1286 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_iflags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; const struct fscrypt_operations *s_cop; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; unsigned int s_quota_types; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; struct user_namespace *s_user_ns; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; struct work_struct destroy_work; struct mutex s_sync_lock; int s_stack_depth; spinlock_t s_inode_list_lock; struct list_head s_inodes; spinlock_t s_inode_wblist_lock; struct list_head s_inodes_wb; } ; 1570 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ; 1583 struct dir_context ; 1608 struct dir_context { int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ; 1615 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iterate)(struct file *, struct dir_context *); int (*iterate_shared)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t 
(*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **, void **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); void (*show_fdinfo)(struct seq_file *, struct file *); ssize_t (*copy_file_range)(struct file *, loff_t , struct file *, loff_t , size_t , unsigned int); int (*clone_file_range)(struct file *, loff_t , struct file *, loff_t , u64 ); ssize_t (*dedupe_file_range)(struct file *, u64 , u64 , struct file *, u64 ); } ; 1683 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); int (*permission)(struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ; 1753 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_super)(struct super_block *); int (*freeze_fs)(struct super_block *); int (*thaw_super)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); struct dquot ** (*get_dquots)(struct inode *); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); long int 
1753 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_super)(struct super_block *); int (*freeze_fs)(struct super_block *); int (*thaw_super)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); struct dquot ** (*get_dquots)(struct inode *); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ;
1995 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ;
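A hedged sketch of how the mount/kill_sb pair is normally wired up for a simple in-memory filesystem; demofs and its fill_super stub are invented for illustration:

#include <linux/fs.h>
#include <linux/module.h>

static int demofs_fill_super(struct super_block *sb, void *data, int silent)
{
        sb->s_magic = 0x64656d6f;      /* arbitrary example magic */
        /* a real filesystem would set sb->s_op and allocate sb->s_root here */
        return -ENOSYS;
}

static struct dentry *demofs_mount(struct file_system_type *fs_type,
                                   int flags, const char *dev_name, void *data)
{
        return mount_nodev(fs_type, flags, data, demofs_fill_super);
}

static struct file_system_type demofs_type = {
        .owner   = THIS_MODULE,
        .name    = "demofs",
        .mount   = demofs_mount,
        .kill_sb = kill_anon_super,
};

/* register_filesystem(&demofs_type) in module init,
 * unregister_filesystem(&demofs_type) on exit. */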
3167 struct assoc_array_ptr ;
3167 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ;
31 typedef int32_t key_serial_t;
34 typedef uint32_t key_perm_t;
35 struct key ;
36 struct user_struct ;
37 struct signal_struct ;
38 struct key_type ;
42 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ;
91 union key_payload { void *rcu_data0; void *data[4U]; } ;
128 union __anonunion____missing_field_name_332 { struct list_head graveyard_link; struct rb_node serial_node; } ;
128 struct key_user ;
128 union __anonunion____missing_field_name_333 { time_t expiry; time_t revoked_at; } ;
128 struct __anonstruct____missing_field_name_335 { struct key_type *type; char *description; } ;
128 union __anonunion____missing_field_name_334 { struct keyring_index_key index_key; struct __anonstruct____missing_field_name_335 __annonCompField72; } ;
128 struct __anonstruct____missing_field_name_337 { struct list_head name_link; struct assoc_array keys; } ;
128 union __anonunion____missing_field_name_336 { union key_payload payload; struct __anonstruct____missing_field_name_337 __annonCompField74; int reject_error; } ;
128 struct key { atomic_t usage; key_serial_t serial; union __anonunion____missing_field_name_332 __annonCompField70; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion____missing_field_name_333 __annonCompField71; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion____missing_field_name_334 __annonCompField73; union __anonunion____missing_field_name_336 __annonCompField75; int (*restrict_link)(struct key *, const struct key_type *, const union key_payload *); } ;
377 struct audit_context ;
27 struct group_info { atomic_t usage; int ngroups; kgid_t gid[0U]; } ;
85 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; kernel_cap_t cap_ambient; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ;
368 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; const struct file *file; void *private; } ;
30 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ;
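The seq_file pair above is what backs most /proc text files; a minimal single_open()-style user, with demo_show and the file name invented:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
        seq_printf(m, "example value: %d\n", 42);
        return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
        return single_open(file, demo_show, NULL);
}

static const struct file_operations demo_proc_fops = {
        .owner   = THIS_MODULE,
        .open    = demo_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

/* proc_create("demo", 0444, NULL, &demo_proc_fops) publishes it. */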
222 struct pinctrl ;
223 struct pinctrl_state ;
194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *init_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ;
84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ;
4 typedef unsigned long cputime_t;
26 struct sem_undo_list ;
26 struct sysv_sem { struct sem_undo_list *undo_list; } ;
26 struct sysv_shm { struct list_head shm_clist; } ;
24 struct __anonstruct_sigset_t_338 { unsigned long sig[1U]; } ;
24 typedef struct __anonstruct_sigset_t_338 sigset_t;
25 struct siginfo ;
17 typedef void __signalfn_t(int);
18 typedef __signalfn_t *__sighandler_t;
20 typedef void __restorefn_t();
21 typedef __restorefn_t *__sigrestore_t;
38 union sigval { int sival_int; void *sival_ptr; } ;
10 typedef union sigval sigval_t;
11 struct __anonstruct__kill_340 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ;
11 struct __anonstruct__timer_341 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ;
11 struct __anonstruct__rt_342 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ;
11 struct __anonstruct__sigchld_343 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ;
11 struct __anonstruct__addr_bnd_346 { void *_lower; void *_upper; } ;
11 union __anonunion____missing_field_name_345 { struct __anonstruct__addr_bnd_346 _addr_bnd; __u32 _pkey; } ;
11 struct __anonstruct__sigfault_344 { void *_addr; short _addr_lsb; union __anonunion____missing_field_name_345 __annonCompField76; } ;
11 struct __anonstruct__sigpoll_347 { long _band; int _fd; } ;
11 struct __anonstruct__sigsys_348 { void *_call_addr; int _syscall; unsigned int _arch; } ;
11 union __anonunion__sifields_339 { int _pad[28U]; struct __anonstruct__kill_340 _kill; struct __anonstruct__timer_341 _timer; struct __anonstruct__rt_342 _rt; struct __anonstruct__sigchld_343 _sigchld; struct __anonstruct__sigfault_344 _sigfault; struct __anonstruct__sigpoll_347 _sigpoll; struct __anonstruct__sigsys_348 _sigsys; } ;
11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_339 _sifields; } ;
118 typedef struct siginfo siginfo_t;
22 struct sigpending { struct list_head list; sigset_t signal; } ;
274 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; } ;
288 struct k_sigaction { struct sigaction sa; } ;
43 struct seccomp_filter ;
44 struct seccomp { int mode; struct seccomp_filter *filter; } ;
40 struct rt_mutex_waiter ;
41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ;
12 enum kcov_mode { KCOV_MODE_DISABLED = 0, KCOV_MODE_TRACE = 1 } ;
17 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ;
45 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ;
41 struct percpu_ref ;
55 typedef void percpu_ref_func_t(struct percpu_ref *);
68 struct percpu_ref { atomic_long_t count; unsigned long percpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic; struct callback_head rcu; } ;
607 struct cgroup ;
608 struct sk_buff ;
14 struct bpf_prog ;
14 struct cgroup_bpf { struct bpf_prog *prog[3U]; struct bpf_prog *effective[3U]; } ;
44 struct cgroup_root ;
45 struct cgroup_subsys ;
46 struct cgroup_taskset ;
90 struct cgroup_file { struct kernfs_node *kn; } ;
91 struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; struct percpu_ref refcnt; struct cgroup_subsys_state *parent; struct list_head sibling; struct list_head children; int id; unsigned int flags; u64 serial_nr; atomic_t online_cnt; struct callback_head callback_head; struct work_struct destroy_work; } ;
142 struct css_set { atomic_t refcount; struct hlist_node hlist; struct list_head tasks; struct list_head mg_tasks; struct list_head cgrp_links; struct cgroup *dfl_cgrp; struct cgroup_subsys_state *subsys[13U]; struct list_head mg_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct cgroup *mg_dst_cgrp; struct css_set *mg_dst_cset; struct list_head e_cset_node[13U]; struct list_head task_iters; bool dead; struct callback_head callback_head; } ;
222 struct cgroup { struct cgroup_subsys_state self; unsigned long flags; int id; int level; int populated_cnt; struct kernfs_node *kn; struct cgroup_file procs_file; struct cgroup_file events_file; u16 subtree_control; u16 subtree_ss_mask; u16 old_subtree_control; u16 old_subtree_ss_mask; struct cgroup_subsys_state *subsys[13U]; struct cgroup_root *root; struct list_head cset_links; struct list_head e_csets[13U]; struct list_head pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; struct work_struct release_agent_work; struct cgroup_bpf bpf; int ancestor_ids[]; } ;
310 struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int hierarchy_id; struct cgroup cgrp; int cgrp_ancestor_id_storage; atomic_t nr_cgrps; struct list_head root_list; unsigned int flags; struct idr cgroup_idr; char release_agent_path[4096U]; char name[64U]; } ;
349 struct cftype { char name[64U]; unsigned long private; size_t max_write_len; unsigned int flags; unsigned int file_offset; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 ); int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 ); ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); struct lock_class_key lockdep_key; } ;
434 struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int (*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_released)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); void (*css_reset)(struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_taskset *); void (*attach)(struct cgroup_taskset *); void (*post_attach)(); int (*can_fork)(struct task_struct *); void (*cancel_fork)(struct task_struct *); void (*fork)(struct task_struct *); void (*exit)(struct task_struct *); void (*free)(struct task_struct *); void (*bind)(struct cgroup_subsys_state *); bool early_init; bool implicit_on_dfl; bool broken_hierarchy; bool warned_broken_hierarchy; int id; const char *name; const char *legacy_name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *dfl_cftypes; struct cftype *legacy_cftypes; unsigned int depends_on; } ;
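As a rough sketch only: a controller embeds its per-cgroup state around cgroup_subsys_state and implements the css_alloc/css_free pair with the signatures shown above. demo_cgroup is hypothetical, and the wiring of a real controller into the cgroup core is not shown here:

#include <linux/cgroup.h>
#include <linux/err.h>
#include <linux/slab.h>

struct demo_cgroup {
        struct cgroup_subsys_state css;   /* must embed the css */
        u64 counter;
};

static struct cgroup_subsys_state *
demo_css_alloc(struct cgroup_subsys_state *parent_css)
{
        struct demo_cgroup *dc = kzalloc(sizeof(*dc), GFP_KERNEL);

        if (!dc)
                return ERR_PTR(-ENOMEM);
        return &dc->css;
}

static void demo_css_free(struct cgroup_subsys_state *css)
{
        kfree(container_of(css, struct demo_cgroup, css));
}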
128 struct futex_pi_state ;
129 struct robust_list_head ;
130 struct bio_list ;
131 struct fs_struct ;
132 struct perf_event_context ;
133 struct blk_plug ;
134 struct nameidata ;
188 struct cfs_rq ;
189 struct task_group ;
515 struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ;
563 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ;
571 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ;
578 struct prev_cputime { cputime_t utime; cputime_t stime; raw_spinlock_t lock; } ;
603 struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; } ;
619 struct task_cputime_atomic { atomic64_t utime; atomic64_t stime; atomic64_t sum_exec_runtime; } ;
641 struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; bool running; bool checking_timer; } ;
686 struct autogroup ;
687 struct tty_struct ;
687 struct taskstats ;
687 struct tty_audit_buf ;
687 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; seqlock_t stats_lock; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; struct tty_audit_buf *tty_audit_buf; bool oom_flag_origin; short oom_score_adj; short oom_score_adj_min; struct mm_struct *oom_mm; struct mutex cred_guard_mutex; } ;
863 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; unsigned long unix_inflight; atomic_long_t pipe_bufs; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ;
908 struct reclaim_state ;
909 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ;
924 struct task_delay_info { spinlock_t lock; unsigned int flags; u64 blkio_start; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; u64 freepages_start; u64 freepages_delay; u32 freepages_count; } ;
981 struct wake_q_node { struct wake_q_node *next; } ;
1226 struct io_context ;
1260 struct uts_namespace ;
1261 struct load_weight { unsigned long weight; u32 inv_weight; } ;
1269 struct sched_avg { u64 last_update_time; u64 load_sum; u32 util_sum; u32 period_contrib; unsigned long load_avg; unsigned long util_avg; } ;
1327 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ;
1362 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ;
1399 struct rt_rq ;
1399 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; unsigned short on_rq; unsigned short on_list; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ;
1417 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ;
1481 struct tlbflush_unmap_batch { struct cpumask cpumask; bool flush_required; bool writable; } ;
1500 struct sched_class ;
1500 struct compat_robust_list_head ;
1500 struct numa_group ;
1500 struct kcov ;
1500 struct task_struct { struct thread_info thread_info; volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; unsigned int cpu; unsigned int wakee_flips; unsigned long wakee_flip_decay_ts; struct task_struct *last_wakee; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; unsigned long rcu_tasks_nvcsw; bool rcu_tasks_holdout; struct list_head rcu_tasks_holdout_list; int rcu_tasks_idle_cpu; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; u32 vmacache_seqnum; struct vm_area_struct *vmacache[4U]; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned long jobctl; unsigned int personality; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; unsigned char sched_migrated; unsigned char sched_remote_wakeup; unsigned char; unsigned char in_execve; unsigned char in_iowait; unsigned char restore_sigmask; unsigned char memcg_may_oom; unsigned char memcg_kmem_skip_account; unsigned char brk_randomized; unsigned long atomic_flags; struct restart_block restart_block; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t gtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; u64 start_time; u64 real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *ptracer_cred; const struct cred *real_cred; const struct cred *cred; char comm[16U]; struct nameidata *nameidata; struct sysv_sem sysvsem; struct sysv_shm sysvshm; unsigned long last_switch_count; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; unsigned int sas_ss_flags; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct wake_q_node wake_q; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; unsigned int in_ubsan; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t acct_timexpd; nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; int closid; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults; unsigned long total_numa_faults; unsigned long numa_faults_locality[3U]; unsigned long numa_pages_migrated; struct tlbflush_unmap_batch tlb_ubc; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; u64 timer_slack_ns; u64 default_timer_slack_ns; unsigned int kasan_depth; unsigned long trace; unsigned long trace_recursion; enum kcov_mode kcov_mode; unsigned int kcov_size; void *kcov_area; struct kcov *kcov; struct mem_cgroup *memcg_in_oom; gfp_t memcg_oom_gfp_mask; int memcg_oom_order; unsigned int memcg_nr_pages_over_high; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; unsigned long task_state_change; int pagefault_disabled; struct task_struct *oom_reaper_list; atomic_t stack_refcount; struct thread_struct thread; } ;
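task_struct is the per-thread descriptor that the rest of the trace revolves around; a common, safe way to walk all processes is under RCU (demo_dump_tasks is illustrative):

#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

static void demo_dump_tasks(void)
{
        struct task_struct *p;

        rcu_read_lock();              /* the task list is RCU-protected */
        for_each_process(p)
                pr_info("pid %d comm %s\n", p->pid, p->comm);
        rcu_read_unlock();
}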
76 struct dma_map_ops ;
76 struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; } ;
24 struct device_private ;
25 struct device_driver ;
26 struct driver_private ;
27 struct class ;
28 struct subsys_private ;
29 struct bus_type ;
30 struct device_node ;
31 struct fwnode_handle ;
32 struct iommu_ops ;
33 struct iommu_group ;
34 struct iommu_fwspec ;
62 struct device_attribute ;
62 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ;
143 struct device_type ;
202 enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ;
208 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; enum probe_type probe_type; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ;
358 struct class_attribute ;
358 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **class_groups; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ;
453 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ;
523 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ;
551 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ;
723 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ;
786 enum dl_dev_state { DL_DEV_NO_DRIVER = 0, DL_DEV_PROBING = 1, DL_DEV_DRIVER_BOUND = 2, DL_DEV_UNBINDING = 3 } ;
793 struct dev_links_info { struct list_head suppliers; struct list_head consumers; enum dl_dev_state status; } ;
813 struct dma_coherent_mem ;
813 struct cma ;
813 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_links_info links; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct irq_domain *msi_domain; struct dev_pin_info *pins; struct list_head msi_list; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct fwnode_handle *fwnode; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; struct iommu_fwspec *iommu_fwspec; bool offline_disabled; bool offline; } ;
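The show/store signatures in device_attribute map directly onto sysfs files; a minimal read-only attribute, with the name and value invented:

#include <linux/device.h>

static ssize_t demo_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%d\n", 42);
}
static DEVICE_ATTR(demo, 0444, demo_show, NULL);

/* device_create_file(dev, &dev_attr_demo) once the device is registered. */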
971 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ;
70 struct hotplug_slot ;
70 struct pci_slot { struct pci_bus *bus; struct list_head list; struct hotplug_slot *hotplug; unsigned char number; struct kobject kobj; } ;
108 typedef int pci_power_t;
135 typedef unsigned int pci_channel_state_t;
136 enum pci_channel_state { pci_channel_io_normal = 1, pci_channel_io_frozen = 2, pci_channel_io_perm_failure = 3 } ;
161 typedef unsigned short pci_dev_flags_t;
188 typedef unsigned short pci_bus_flags_t;
246 struct pcie_link_state ;
247 struct pci_vpd ;
248 struct pci_sriov ;
250 struct pci_driver ;
250 union __anonunion____missing_field_name_388 { struct pci_sriov *sriov; struct pci_dev *physfn; } ;
250 struct pci_dev { struct list_head bus_list; struct pci_bus *bus; struct pci_bus *subordinate; void *sysdata; struct proc_dir_entry *procent; struct pci_slot *slot; unsigned int devfn; unsigned short vendor; unsigned short device; unsigned short subsystem_vendor; unsigned short subsystem_device; unsigned int class; u8 revision; u8 hdr_type; u16 aer_cap; u8 pcie_cap; u8 msi_cap; u8 msix_cap; unsigned char pcie_mpss; u8 rom_base_reg; u8 pin; u16 pcie_flags_reg; unsigned long *dma_alias_mask; struct pci_driver *driver; u64 dma_mask; struct device_dma_parameters dma_parms; pci_power_t current_state; u8 pm_cap; unsigned char pme_support; unsigned char pme_interrupt; unsigned char pme_poll; unsigned char d1_support; unsigned char d2_support; unsigned char no_d1d2; unsigned char no_d3cold; unsigned char bridge_d3; unsigned char d3cold_allowed; unsigned char mmio_always_on; unsigned char wakeup_prepared; unsigned char runtime_d3cold; unsigned char ignore_hotplug; unsigned char hotplug_user_indicators; unsigned int d3_delay; unsigned int d3cold_delay; struct pcie_link_state *link_state; pci_channel_state_t error_state; struct device dev; int cfg_size; unsigned int irq; struct resource resource[17U]; bool match_driver; unsigned char transparent; unsigned char multifunction; unsigned char is_added; unsigned char is_busmaster; unsigned char no_msi; unsigned char no_64bit_msi; unsigned char block_cfg_access; unsigned char broken_parity_status; unsigned char irq_reroute_variant; unsigned char msi_enabled; unsigned char msix_enabled; unsigned char ari_enabled; unsigned char ats_enabled; unsigned char is_managed; unsigned char needs_freset; unsigned char state_saved; unsigned char is_physfn; unsigned char is_virtfn; unsigned char reset_fn; unsigned char is_hotplug_bridge; unsigned char __aer_firmware_first_valid; unsigned char __aer_firmware_first; unsigned char broken_intx_masking; unsigned char io_window_1k; unsigned char irq_managed; unsigned char has_secondary_link; unsigned char non_compliant_bars; pci_dev_flags_t dev_flags; atomic_t enable_cnt; u32 saved_config_space[16U]; struct hlist_head saved_cap_space; struct bin_attribute *rom_attr; int rom_attr_enabled; struct bin_attribute *res_attr[17U]; struct bin_attribute *res_attr_wc[17U]; unsigned char ptm_root; unsigned char ptm_enabled; u8 ptm_granularity; const struct attribute_group **msi_irq_groups; struct pci_vpd *vpd; union __anonunion____missing_field_name_388 __annonCompField89; u16 ats_cap; u8 ats_stu; atomic_t ats_ref_cnt; phys_addr_t rom; size_t romlen; char *driver_override; } ;
419 struct pci_ops ;
419 struct msi_controller ;
482 struct pci_bus { struct list_head node; struct pci_bus *parent; struct list_head children; struct list_head devices; struct pci_dev *self; struct list_head slots; struct resource *resource[4U]; struct list_head resources; struct resource busn_res; struct pci_ops *ops; struct msi_controller *msi; void *sysdata; struct proc_dir_entry *procdir; unsigned char number; unsigned char primary; unsigned char max_bus_speed; unsigned char cur_bus_speed; char name[48U]; unsigned short bridge_ctl; pci_bus_flags_t bus_flags; struct device *bridge; struct device dev; struct bin_attribute *legacy_io; struct bin_attribute *legacy_mem; unsigned char is_added; } ;
606 struct pci_ops { int (*add_bus)(struct pci_bus *); void (*remove_bus)(struct pci_bus *); void * (*map_bus)(struct pci_bus *, unsigned int, int); int (*read)(struct pci_bus *, unsigned int, int, int, u32 *); int (*write)(struct pci_bus *, unsigned int, int, int, u32 ); } ;
636 struct pci_dynids { spinlock_t lock; struct list_head list; } ;
650 typedef unsigned int pci_ers_result_t;
660 struct pci_error_handlers { pci_ers_result_t (*error_detected)(struct pci_dev *, enum pci_channel_state ); pci_ers_result_t (*mmio_enabled)(struct pci_dev *); pci_ers_result_t (*link_reset)(struct pci_dev *); pci_ers_result_t (*slot_reset)(struct pci_dev *); void (*reset_notify)(struct pci_dev *, bool ); void (*resume)(struct pci_dev *); } ;
693 struct pci_driver { struct list_head node; const char *name; const struct pci_device_id *id_table; int (*probe)(struct pci_dev *, const struct pci_device_id *); void (*remove)(struct pci_dev *); int (*suspend)(struct pci_dev *, pm_message_t ); int (*suspend_late)(struct pci_dev *, pm_message_t ); int (*resume_early)(struct pci_dev *); int (*resume)(struct pci_dev *); void (*shutdown)(struct pci_dev *); int (*sriov_configure)(struct pci_dev *, int); const struct pci_error_handlers *err_handler; struct device_driver driver; struct pci_dynids dynids; } ;
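A skeletal pci_driver of the usual shape; the vendor/device IDs are placeholders:

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id demo_ids[] = {
        { PCI_DEVICE(0x1234, 0x5678) },   /* hypothetical IDs */
        { 0, }
};
MODULE_DEVICE_TABLE(pci, demo_ids);

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        return pci_enable_device(pdev);
}

static void demo_remove(struct pci_dev *pdev)
{
        pci_disable_device(pdev);
}

static struct pci_driver demo_pci_driver = {
        .name     = "demo_pci",
        .id_table = demo_ids,
        .probe    = demo_probe,
        .remove   = demo_remove,
};
module_pci_driver(demo_pci_driver);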
273 struct vm_fault { struct vm_area_struct *vma; unsigned int flags; gfp_t gfp_mask; unsigned long pgoff; unsigned long address; pmd_t *pmd; pte_t orig_pte; struct page *cow_page; struct mem_cgroup *memcg; struct page *page; pte_t *pte; spinlock_t *ptl; pgtable_t prealloc_pte; } ;
322 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*mremap)(struct vm_area_struct *); int (*fault)(struct vm_area_struct *, struct vm_fault *); int (*pmd_fault)(struct vm_area_struct *, unsigned long, pmd_t *, unsigned int); void (*map_pages)(struct vm_fault *, unsigned long, unsigned long); int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ;
1322 struct kvec ;
2439 struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; } ;
21 struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; } ;
96 enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ;
158 struct dma_map_ops { void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , unsigned long); void (*free)(struct device *, size_t , void *, dma_addr_t , unsigned long); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , unsigned long); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , unsigned long); dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , unsigned long); void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); dma_addr_t (*map_resource)(struct device *, phys_addr_t , size_t , enum dma_data_direction , unsigned long); void (*unmap_resource)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long); void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction ); int (*mapping_error)(struct device *, dma_addr_t ); int (*dma_supported)(struct device *, u64 ); int (*set_dma_mask)(struct device *, u64 ); int is_phys; } ;
56 struct iovec { void *iov_base; __kernel_size_t iov_len; } ;
21 struct kvec { void *iov_base; size_t iov_len; } ;
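Drivers rarely implement dma_map_ops themselves; they go through the generic wrappers, roughly like this (demo_map and its parameters are illustrative):

#include <linux/dma-mapping.h>

static int demo_map(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, handle))
                return -ENOMEM;
        /* ... program `handle` into the hardware, wait for completion ... */
        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        return 0;
}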
29 union __anonunion____missing_field_name_399 { const struct iovec *iov; const struct kvec *kvec; const struct bio_vec *bvec; struct pipe_inode_info *pipe; } ;
29 union __anonunion____missing_field_name_400 { unsigned long nr_segs; int idx; } ;
29 struct iov_iter { int type; size_t iov_offset; size_t count; union __anonunion____missing_field_name_399 __annonCompField90; union __anonunion____missing_field_name_400 __annonCompField91; } ;
1426 struct dql { unsigned int num_queued; unsigned int adj_limit; unsigned int last_obj_cnt; unsigned int limit; unsigned int num_completed; unsigned int prev_ovlimit; unsigned int prev_num_queued; unsigned int prev_last_obj_cnt; unsigned int lowest_slack; unsigned long slack_start_time; unsigned int max_limit; unsigned int min_limit; unsigned int slack_hold_time; } ;
11 typedef unsigned short __kernel_sa_family_t;
23 typedef __kernel_sa_family_t sa_family_t;
24 struct sockaddr { sa_family_t sa_family; char sa_data[14U]; } ;
43 struct __anonstruct_sync_serial_settings_402 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; } ;
43 typedef struct __anonstruct_sync_serial_settings_402 sync_serial_settings;
50 struct __anonstruct_te1_settings_403 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; unsigned int slot_map; } ;
50 typedef struct __anonstruct_te1_settings_403 te1_settings;
55 struct __anonstruct_raw_hdlc_proto_404 { unsigned short encoding; unsigned short parity; } ;
55 typedef struct __anonstruct_raw_hdlc_proto_404 raw_hdlc_proto;
65 struct __anonstruct_fr_proto_405 { unsigned int t391; unsigned int t392; unsigned int n391; unsigned int n392; unsigned int n393; unsigned short lmi; unsigned short dce; } ;
65 typedef struct __anonstruct_fr_proto_405 fr_proto;
69 struct __anonstruct_fr_proto_pvc_406 { unsigned int dlci; } ;
69 typedef struct __anonstruct_fr_proto_pvc_406 fr_proto_pvc;
74 struct __anonstruct_fr_proto_pvc_info_407 { unsigned int dlci; char master[16U]; } ;
74 typedef struct __anonstruct_fr_proto_pvc_info_407 fr_proto_pvc_info;
79 struct __anonstruct_cisco_proto_408 { unsigned int interval; unsigned int timeout; } ;
79 typedef struct __anonstruct_cisco_proto_408 cisco_proto;
117 struct ifmap { unsigned long mem_start; unsigned long mem_end; unsigned short base_addr; unsigned char irq; unsigned char dma; unsigned char port; } ;
197 union __anonunion_ifs_ifsu_409 { raw_hdlc_proto *raw_hdlc; cisco_proto *cisco; fr_proto *fr; fr_proto_pvc *fr_pvc; fr_proto_pvc_info *fr_pvc_info; sync_serial_settings *sync; te1_settings *te1; } ;
197 struct if_settings { unsigned int type; unsigned int size; union __anonunion_ifs_ifsu_409 ifs_ifsu; } ;
216 union __anonunion_ifr_ifrn_410 { char ifrn_name[16U]; } ;
216 union __anonunion_ifr_ifru_411 { struct sockaddr ifru_addr; struct sockaddr ifru_dstaddr; struct sockaddr ifru_broadaddr; struct sockaddr ifru_netmask; struct sockaddr ifru_hwaddr; short ifru_flags; int ifru_ivalue; int ifru_mtu; struct ifmap ifru_map; char ifru_slave[16U]; char ifru_newname[16U]; void *ifru_data; struct if_settings ifru_settings; } ;
216 struct ifreq { union __anonunion_ifr_ifrn_410 ifr_ifrn; union __anonunion_ifr_ifru_411 ifr_ifru; } ;
18 typedef s32 compat_time_t;
39 typedef s32 compat_long_t;
45 typedef u32 compat_uptr_t;
46 struct compat_timespec { compat_time_t tv_sec; s32 tv_nsec; } ;
278 struct compat_robust_list { compat_uptr_t next; } ;
282 struct compat_robust_list_head { struct compat_robust_list list; compat_long_t futex_offset; compat_uptr_t list_op_pending; } ;
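iov_iter lets one read_iter/write_iter method serve iovec, kvec and bvec callers alike; a toy read_iter, with the message and the skipped ki_pos update noted as simplifications:

#include <linux/fs.h>
#include <linux/uio.h>

static const char demo_msg[] = "hello from read_iter\n";

static ssize_t demo_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        if (iocb->ki_pos >= sizeof(demo_msg))
                return 0;
        /* copy_to_iter() walks whatever segments back the iterator;
         * a real method would also advance iocb->ki_pos. */
        return copy_to_iter(demo_msg + iocb->ki_pos,
                            sizeof(demo_msg) - iocb->ki_pos, to);
}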
161 struct in6_addr ;
15 typedef u64 netdev_features_t;
70 union __anonunion_in6_u_437 { __u8 u6_addr8[16U]; __be16 u6_addr16[8U]; __be32 u6_addr32[4U]; } ;
70 struct in6_addr { union __anonunion_in6_u_437 in6_u; } ;
46 struct ethhdr { unsigned char h_dest[6U]; unsigned char h_source[6U]; __be16 h_proto; } ;
227 struct pipe_buf_operations ;
227 struct pipe_buffer { struct page *page; unsigned int offset; unsigned int len; const struct pipe_buf_operations *ops; unsigned int flags; unsigned long private; } ;
27 struct pipe_inode_info { struct mutex mutex; wait_queue_head_t wait; unsigned int nrbufs; unsigned int curbuf; unsigned int buffers; unsigned int readers; unsigned int writers; unsigned int files; unsigned int waiting_writers; unsigned int r_counter; unsigned int w_counter; struct page *tmp_page; struct fasync_struct *fasync_readers; struct fasync_struct *fasync_writers; struct pipe_buffer *bufs; struct user_struct *user; } ;
63 struct pipe_buf_operations { int can_merge; int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); void (*release)(struct pipe_inode_info *, struct pipe_buffer *); int (*steal)(struct pipe_inode_info *, struct pipe_buffer *); void (*get)(struct pipe_inode_info *, struct pipe_buffer *); } ;
272 struct napi_struct ;
273 struct nf_conntrack { atomic_t use; } ;
254 union __anonunion____missing_field_name_451 { __be32 ipv4_daddr; struct in6_addr ipv6_daddr; char neigh_header[8U]; } ;
254 struct nf_bridge_info { atomic_t use; unsigned char orig_proto; unsigned char pkt_otherhost; unsigned char in_prerouting; unsigned char bridged_dnat; __u16 frag_max_size; struct net_device *physindev; struct net_device *physoutdev; union __anonunion____missing_field_name_451 __annonCompField100; } ;
278 struct sk_buff_head { struct sk_buff *next; struct sk_buff *prev; __u32 qlen; spinlock_t lock; } ;
310 struct skb_frag_struct ;
310 typedef struct skb_frag_struct skb_frag_t;
311 struct __anonstruct_page_452 { struct page *p; } ;
311 struct skb_frag_struct { struct __anonstruct_page_452 page; __u32 page_offset; __u32 size; } ;
344 struct skb_shared_hwtstamps { ktime_t hwtstamp; } ;
410 struct skb_shared_info { unsigned char nr_frags; __u8 tx_flags; unsigned short gso_size; unsigned short gso_segs; unsigned short gso_type; struct sk_buff *frag_list; struct skb_shared_hwtstamps hwtstamps; u32 tskey; __be32 ip6_frag_id; atomic_t dataref; void *destructor_arg; skb_frag_t frags[17U]; } ;
500 typedef unsigned int sk_buff_data_t;
501 struct __anonstruct____missing_field_name_454 { u32 stamp_us; u32 stamp_jiffies; } ;
501 union __anonunion____missing_field_name_453 { u64 v64; struct __anonstruct____missing_field_name_454 __annonCompField101; } ;
501 struct skb_mstamp { union __anonunion____missing_field_name_453 __annonCompField102; } ;
564 union __anonunion____missing_field_name_457 { ktime_t tstamp; struct skb_mstamp skb_mstamp; } ;
564 struct __anonstruct____missing_field_name_456 { struct sk_buff *next; struct sk_buff *prev; union __anonunion____missing_field_name_457 __annonCompField103; } ;
564 union __anonunion____missing_field_name_455 { struct __anonstruct____missing_field_name_456 __annonCompField104; struct rb_node rbnode; } ;
564 union __anonunion____missing_field_name_458 { struct net_device *dev; unsigned long dev_scratch; } ;
564 struct sec_path ;
564 struct __anonstruct____missing_field_name_460 { __u16 csum_start; __u16 csum_offset; } ;
564 union __anonunion____missing_field_name_459 { __wsum csum; struct __anonstruct____missing_field_name_460 __annonCompField107; } ;
564 union __anonunion____missing_field_name_461 { unsigned int napi_id; unsigned int sender_cpu; } ;
564 union __anonunion____missing_field_name_462 { __u32 mark; __u32 reserved_tailroom; } ;
564 union __anonunion____missing_field_name_463 { __be16 inner_protocol; __u8 inner_ipproto; } ;
564 struct sk_buff { union __anonunion____missing_field_name_455 __annonCompField105; struct sock *sk; union __anonunion____missing_field_name_458 __annonCompField106; char cb[48U]; unsigned long _skb_refdst; void (*destructor)(struct sk_buff *); struct sec_path *sp; struct nf_conntrack *nfct; struct nf_bridge_info *nf_bridge; unsigned int len; unsigned int data_len; __u16 mac_len; __u16 hdr_len; __u16 queue_mapping; __u8 __cloned_offset[0U]; unsigned char cloned; unsigned char nohdr; unsigned char fclone; unsigned char peeked; unsigned char head_frag; unsigned char xmit_more; unsigned char __unused; __u32 headers_start[0U]; __u8 __pkt_type_offset[0U]; unsigned char pkt_type; unsigned char pfmemalloc; unsigned char ignore_df; unsigned char nfctinfo; unsigned char nf_trace; unsigned char ip_summed; unsigned char ooo_okay; unsigned char l4_hash; unsigned char sw_hash; unsigned char wifi_acked_valid; unsigned char wifi_acked; unsigned char no_fcs; unsigned char encapsulation; unsigned char encap_hdr_csum; unsigned char csum_valid; unsigned char csum_complete_sw; unsigned char csum_level; unsigned char csum_bad; unsigned char ndisc_nodetype; unsigned char ipvs_property; unsigned char inner_protocol_type; unsigned char remcsum_offload; unsigned char offload_fwd_mark; __u16 tc_index; __u16 tc_verd; union __anonunion____missing_field_name_459 __annonCompField108; __u32 priority; int skb_iif; __u32 hash; __be16 vlan_proto; __u16 vlan_tci; union __anonunion____missing_field_name_461 __annonCompField109; __u32 secmark; union __anonunion____missing_field_name_462 __annonCompField110; union __anonunion____missing_field_name_463 __annonCompField111; __u16 inner_transport_header; __u16 inner_network_header; __u16 inner_mac_header; __be16 protocol; __u16 transport_header; __u16 network_header; __u16 mac_header; __u32 headers_end[0U]; sk_buff_data_t tail; sk_buff_data_t end; unsigned char *head; unsigned char *data; unsigned int truesize; atomic_t users; } ;
845 struct dst_entry ;
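The usual sk_buff lifecycle in a driver or protocol: allocate, reserve headroom, append payload, hand off or free (the sizes here are arbitrary):

#include <linux/skbuff.h>
#include <linux/string.h>

static struct sk_buff *demo_build_skb(void)
{
        struct sk_buff *skb = alloc_skb(128, GFP_KERNEL);
        unsigned char *p;

        if (!skb)
                return NULL;
        skb_reserve(skb, 64);     /* headroom for protocol headers */
        p = skb_put(skb, 4);      /* append 4 bytes of payload */
        memset(p, 0, 4);
        return skb;               /* consumer frees it with kfree_skb() */
}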
39 struct ethtool_cmd { __u32 cmd; __u32 supported; __u32 advertising; __u16 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 transceiver; __u8 autoneg; __u8 mdio_support; __u32 maxtxpkt; __u32 maxrxpkt; __u16 speed_hi; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __u32 lp_advertising; __u32 reserved[2U]; } ;
130 struct ethtool_drvinfo { __u32 cmd; char driver[32U]; char version[32U]; char fw_version[32U]; char bus_info[32U]; char erom_version[32U]; char reserved2[12U]; __u32 n_priv_flags; __u32 n_stats; __u32 testinfo_len; __u32 eedump_len; __u32 regdump_len; } ;
194 struct ethtool_wolinfo { __u32 cmd; __u32 supported; __u32 wolopts; __u8 sopass[6U]; } ;
238 struct ethtool_tunable { __u32 cmd; __u32 id; __u32 type_id; __u32 len; void *data[0U]; } ;
256 struct ethtool_regs { __u32 cmd; __u32 version; __u32 len; __u8 data[0U]; } ;
285 struct ethtool_eeprom { __u32 cmd; __u32 magic; __u32 offset; __u32 len; __u8 data[0U]; } ;
311 struct ethtool_eee { __u32 cmd; __u32 supported; __u32 advertised; __u32 lp_advertised; __u32 eee_active; __u32 eee_enabled; __u32 tx_lpi_enabled; __u32 tx_lpi_timer; __u32 reserved[2U]; } ;
340 struct ethtool_modinfo { __u32 cmd; __u32 type; __u32 eeprom_len; __u32 reserved[8U]; } ;
357 struct ethtool_coalesce { __u32 cmd; __u32 rx_coalesce_usecs; __u32 rx_max_coalesced_frames; __u32 rx_coalesce_usecs_irq; __u32 rx_max_coalesced_frames_irq; __u32 tx_coalesce_usecs; __u32 tx_max_coalesced_frames; __u32 tx_coalesce_usecs_irq; __u32 tx_max_coalesced_frames_irq; __u32 stats_block_coalesce_usecs; __u32 use_adaptive_rx_coalesce; __u32 use_adaptive_tx_coalesce; __u32 pkt_rate_low; __u32 rx_coalesce_usecs_low; __u32 rx_max_coalesced_frames_low; __u32 tx_coalesce_usecs_low; __u32 tx_max_coalesced_frames_low; __u32 pkt_rate_high; __u32 rx_coalesce_usecs_high; __u32 rx_max_coalesced_frames_high; __u32 tx_coalesce_usecs_high; __u32 tx_max_coalesced_frames_high; __u32 rate_sample_interval; } ;
456 struct ethtool_ringparam { __u32 cmd; __u32 rx_max_pending; __u32 rx_mini_max_pending; __u32 rx_jumbo_max_pending; __u32 tx_max_pending; __u32 rx_pending; __u32 rx_mini_pending; __u32 rx_jumbo_pending; __u32 tx_pending; } ;
493 struct ethtool_channels { __u32 cmd; __u32 max_rx; __u32 max_tx; __u32 max_other; __u32 max_combined; __u32 rx_count; __u32 tx_count; __u32 other_count; __u32 combined_count; } ;
521 struct ethtool_pauseparam { __u32 cmd; __u32 autoneg; __u32 rx_pause; __u32 tx_pause; } ;
627 struct ethtool_test { __u32 cmd; __u32 flags; __u32 reserved; __u32 len; __u64 data[0U]; } ;
659 struct ethtool_stats { __u32 cmd; __u32 n_stats; __u64 data[0U]; } ;
701 struct ethtool_tcpip4_spec { __be32 ip4src; __be32 ip4dst; __be16 psrc; __be16 pdst; __u8 tos; } ;
734 struct ethtool_ah_espip4_spec { __be32 ip4src; __be32 ip4dst; __be32 spi; __u8 tos; } ;
750 struct ethtool_usrip4_spec { __be32 ip4src; __be32 ip4dst; __be32 l4_4_bytes; __u8 tos; __u8 ip_ver; __u8 proto; } ;
770 struct ethtool_tcpip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be16 psrc; __be16 pdst; __u8 tclass; } ;
788 struct ethtool_ah_espip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be32 spi; __u8 tclass; } ;
804 struct ethtool_usrip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be32 l4_4_bytes; __u8 tclass; __u8 l4_proto; } ;
820 union ethtool_flow_union { struct ethtool_tcpip4_spec tcp_ip4_spec; struct ethtool_tcpip4_spec udp_ip4_spec; struct ethtool_tcpip4_spec sctp_ip4_spec; struct ethtool_ah_espip4_spec ah_ip4_spec; struct ethtool_ah_espip4_spec esp_ip4_spec; struct ethtool_usrip4_spec usr_ip4_spec; struct ethtool_tcpip6_spec tcp_ip6_spec; struct ethtool_tcpip6_spec udp_ip6_spec; struct ethtool_tcpip6_spec sctp_ip6_spec; struct ethtool_ah_espip6_spec ah_ip6_spec; struct ethtool_ah_espip6_spec esp_ip6_spec; struct ethtool_usrip6_spec usr_ip6_spec; struct ethhdr ether_spec; __u8 hdata[52U]; } ;
837 struct ethtool_flow_ext { __u8 padding[2U]; unsigned char h_dest[6U]; __be16 vlan_etype; __be16 vlan_tci; __be32 data[2U]; } ;
856 struct ethtool_rx_flow_spec { __u32 flow_type; union ethtool_flow_union h_u; struct ethtool_flow_ext h_ext; union ethtool_flow_union m_u; struct ethtool_flow_ext m_ext; __u64 ring_cookie; __u32 location; } ;
906 struct ethtool_rxnfc { __u32 cmd; __u32 flow_type; __u64 data; struct ethtool_rx_flow_spec fs; __u32 rule_cnt; __u32 rule_locs[0U]; } ;
1077 struct ethtool_flash { __u32 cmd; __u32 region; char data[128U]; } ;
1085 struct ethtool_dump { __u32 cmd; __u32 version; __u32 flag; __u32 len; __u8 data[0U]; } ;
1161 struct ethtool_ts_info { __u32 cmd; __u32 so_timestamping; __s32 phc_index; __u32 tx_types; __u32 tx_reserved[3U]; __u32 rx_filters; __u32 rx_reserved[3U]; } ;
1537 struct ethtool_link_settings { __u32 cmd; __u32 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 autoneg; __u8 mdio_support; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __s8 link_mode_masks_nwords; __u32 reserved[8U]; __u32 link_mode_masks[0U]; } ;
39 enum ethtool_phys_id_state { ETHTOOL_ID_INACTIVE = 0, ETHTOOL_ID_ACTIVE = 1, ETHTOOL_ID_ON = 2, ETHTOOL_ID_OFF = 3 } ;
97 struct __anonstruct_link_modes_467 { unsigned long supported[1U]; unsigned long advertising[1U]; unsigned long lp_advertising[1U]; } ;
97 struct ethtool_link_ksettings { struct ethtool_link_settings base; struct __anonstruct_link_modes_467 link_modes; } ;
158 struct ethtool_ops { int (*get_settings)(struct net_device *, struct ethtool_cmd *); int (*set_settings)(struct net_device *, struct ethtool_cmd *); void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); int (*get_regs_len)(struct net_device *); void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); u32 (*get_msglevel)(struct net_device *); void (*set_msglevel)(struct net_device *, u32 ); int (*nway_reset)(struct net_device *); u32 (*get_link)(struct net_device *); int (*get_eeprom_len)(struct net_device *); int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *); int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *); void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); void (*get_strings)(struct net_device *, u32 , u8 *); int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state ); void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); int (*begin)(struct net_device *); void (*complete)(struct net_device *); u32 (*get_priv_flags)(struct net_device *); int (*set_priv_flags)(struct net_device *, u32 ); int (*get_sset_count)(struct net_device *, int); int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *); int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); int (*flash_device)(struct net_device *, struct ethtool_flash *); int (*reset)(struct net_device *, u32 *); u32 (*get_rxfh_key_size)(struct net_device *); u32 (*get_rxfh_indir_size)(struct net_device *); int (*get_rxfh)(struct net_device *, u32 *, u8 *, u8 *); int (*set_rxfh)(struct net_device *, const u32 *, const u8 *, const u8 ); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); int (*set_dump)(struct net_device *, struct ethtool_dump *); int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *); int (*get_module_info)(struct net_device *, struct ethtool_modinfo *); int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_eee)(struct net_device *, struct ethtool_eee *); int (*set_eee)(struct net_device *, struct ethtool_eee *); int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *); int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); int (*get_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *); int (*set_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *); int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *); int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); } ;
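A driver usually fills in only a few ethtool_ops slots and reuses generic helpers for the rest; demo_get_drvinfo and its strings are illustrative:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static void demo_get_drvinfo(struct net_device *dev,
                             struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, "demo", sizeof(info->driver));
        strlcpy(info->version, "1.0", sizeof(info->version));
}

static const struct ethtool_ops demo_ethtool_ops = {
        .get_drvinfo = demo_get_drvinfo,
        .get_link    = ethtool_op_get_link,   /* generic helper */
};

/* in the driver's setup path: netdev->ethtool_ops = &demo_ethtool_ops; */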
375 struct prot_inuse ;
376 struct netns_core { struct ctl_table_header *sysctl_hdr; int sysctl_somaxconn; struct prot_inuse *inuse; } ;
38 struct u64_stats_sync { } ;
164 struct ipstats_mib { u64 mibs[36U]; struct u64_stats_sync syncp; } ;
61 struct icmp_mib { unsigned long mibs[28U]; } ;
67 struct icmpmsg_mib { atomic_long_t mibs[512U]; } ;
72 struct icmpv6_mib { unsigned long mibs[6U]; } ;
83 struct icmpv6msg_mib { atomic_long_t mibs[512U]; } ;
93 struct tcp_mib { unsigned long mibs[16U]; } ;
100 struct udp_mib { unsigned long mibs[9U]; } ;
106 struct linux_mib { unsigned long mibs[118U]; } ;
112 struct linux_xfrm_mib { unsigned long mibs[29U]; } ;
118 struct netns_mib { struct tcp_mib *tcp_statistics; struct ipstats_mib *ip_statistics; struct linux_mib *net_statistics; struct udp_mib *udp_statistics; struct udp_mib *udplite_statistics; struct icmp_mib *icmp_statistics; struct icmpmsg_mib *icmpmsg_statistics; struct proc_dir_entry *proc_net_devsnmp6; struct udp_mib *udp_stats_in6; struct udp_mib *udplite_stats_in6; struct ipstats_mib *ipv6_statistics; struct icmpv6_mib *icmpv6_statistics; struct icmpv6msg_mib *icmpv6msg_statistics; struct linux_xfrm_mib *xfrm_statistics; } ;
26 struct netns_unix { int sysctl_max_dgram_qlen; struct ctl_table_header *ctl; } ;
12 struct netns_packet { struct mutex sklist_lock; struct hlist_head sklist; } ;
14 struct netns_frags { struct percpu_counter mem; int timeout; int high_thresh; int low_thresh; int max_dist; } ;
187 struct ipv4_devconf ;
188 struct fib_rules_ops ;
189 struct fib_table ;
190 struct local_ports { seqlock_t lock; int range[2U]; bool warned; } ;
24 struct ping_group_range { seqlock_t lock; kgid_t range[2U]; } ;
29 struct inet_peer_base ;
29 struct xt_table ;
29 struct netns_ipv4 { struct ctl_table_header *forw_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *ipv4_hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *xfrm4_hdr; struct ipv4_devconf *devconf_all; struct ipv4_devconf *devconf_dflt; struct fib_rules_ops *rules_ops; bool fib_has_custom_rules; struct fib_table *fib_main; struct fib_table *fib_default; int fib_num_tclassid_users; struct hlist_head *fib_table_hash; bool fib_offload_disabled; struct sock *fibnl; struct sock **icmp_sk; struct sock *mc_autojoin_sk; struct inet_peer_base *peers; struct sock **tcp_sk; struct netns_frags frags; struct xt_table *iptable_filter; struct xt_table *iptable_mangle; struct xt_table *iptable_raw; struct xt_table *arptable_filter; struct xt_table *iptable_security; struct xt_table *nat_table; int sysctl_icmp_echo_ignore_all; int sysctl_icmp_echo_ignore_broadcasts; int sysctl_icmp_ignore_bogus_error_responses; int sysctl_icmp_ratelimit; int sysctl_icmp_ratemask; int sysctl_icmp_errors_use_inbound_ifaddr; struct local_ports ip_local_ports; int sysctl_tcp_ecn; int sysctl_tcp_ecn_fallback; int sysctl_ip_default_ttl; int sysctl_ip_no_pmtu_disc; int sysctl_ip_fwd_use_pmtu; int sysctl_ip_nonlocal_bind; int sysctl_ip_dynaddr; int sysctl_ip_early_demux; int sysctl_fwmark_reflect; int sysctl_tcp_fwmark_accept; int sysctl_tcp_l3mdev_accept; int sysctl_tcp_mtu_probing; int sysctl_tcp_base_mss; int sysctl_tcp_probe_threshold; u32 sysctl_tcp_probe_interval; int sysctl_tcp_keepalive_time; int sysctl_tcp_keepalive_probes; int sysctl_tcp_keepalive_intvl; int sysctl_tcp_syn_retries; int sysctl_tcp_synack_retries; int sysctl_tcp_syncookies; int sysctl_tcp_reordering; int sysctl_tcp_retries1; int sysctl_tcp_retries2; int sysctl_tcp_orphan_retries; int sysctl_tcp_fin_timeout; unsigned int sysctl_tcp_notsent_lowat; int sysctl_igmp_max_memberships; int sysctl_igmp_max_msf; int sysctl_igmp_llm_reports; int sysctl_igmp_qrv; struct ping_group_range ping_group_range; atomic_t dev_addr_genid; unsigned long *sysctl_local_reserved_ports; struct list_head mr_tables; struct fib_rules_ops *mr_rules_ops; int sysctl_fib_multipath_use_neigh; unsigned int fib_seq; atomic_t rt_genid; } ;
144 struct neighbour ;
144 struct dst_ops { unsigned short family; unsigned int gc_thresh; int (*gc)(struct dst_ops *); struct dst_entry * (*check)(struct dst_entry *, __u32 ); unsigned int (*default_advmss)(const struct dst_entry *); unsigned int (*mtu)(const struct dst_entry *); u32 * (*cow_metrics)(struct dst_entry *, unsigned long); void (*destroy)(struct dst_entry *); void (*ifdown)(struct dst_entry *, struct net_device *, int); struct dst_entry * (*negative_advice)(struct dst_entry *); void (*link_failure)(struct sk_buff *); void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32 ); void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *); int (*local_out)(struct net *, struct sock *, struct sk_buff *); struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *); struct kmem_cache *kmem_cachep; struct percpu_counter pcpuc_entries; } ;
73 struct netns_sysctl_ipv6 { struct ctl_table_header *hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *icmp_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *xfrm6_hdr; int bindv6only; int flush_delay; int ip6_rt_max_size; int ip6_rt_gc_min_interval; int ip6_rt_gc_timeout; int ip6_rt_gc_interval; int ip6_rt_gc_elasticity; int ip6_rt_mtu_expires; int ip6_rt_min_advmss; int flowlabel_consistency; int auto_flowlabels; int icmpv6_time; int anycast_src_echo_reply; int ip_nonlocal_bind; int fwmark_reflect; int idgen_retries; int idgen_delay; int flowlabel_state_ranges; } ;
40 struct ipv6_devconf ;
40 struct rt6_info ;
40 struct rt6_statistics ;
40 struct fib6_table ;
40 struct seg6_pernet_data ;
40 struct netns_ipv6 { struct netns_sysctl_ipv6 sysctl; struct ipv6_devconf *devconf_all; struct ipv6_devconf *devconf_dflt; struct inet_peer_base *peers; struct netns_frags frags; struct xt_table *ip6table_filter; struct xt_table *ip6table_mangle; struct xt_table *ip6table_raw; struct xt_table *ip6table_security; struct xt_table *ip6table_nat; struct rt6_info *ip6_null_entry; struct rt6_statistics *rt6_stats; struct timer_list ip6_fib_timer; struct hlist_head *fib_table_hash; struct fib6_table *fib6_main_tbl; struct list_head fib6_walkers; struct dst_ops ip6_dst_ops; rwlock_t fib6_walker_lock; spinlock_t fib6_gc_lock; unsigned int ip6_rt_gc_expire; unsigned long ip6_rt_last_gc; struct rt6_info *ip6_prohibit_entry; struct rt6_info *ip6_blk_hole_entry; struct fib6_table *fib6_local_tbl; struct fib_rules_ops *fib6_rules_ops; struct sock **icmp_sk; struct sock *ndisc_sk; struct sock *tcp_sk; struct sock *igmp_sk; struct sock *mc_autojoin_sk; struct list_head mr6_tables; struct fib_rules_ops *mr6_rules_ops; atomic_t dev_addr_genid; atomic_t fib6_sernum; struct seg6_pernet_data *seg6_data; } ;
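Per-namespace state of the kind collected in these netns structs is normally managed through pernet init/exit hooks; struct pernet_operations itself is not part of this trace, so the sketch leans on the standard kernel API, with the demo names invented:

#include <linux/module.h>
#include <net/net_namespace.h>

static int __net_init demo_net_init(struct net *net)
{
        pr_info("netns %u created\n", net->ns.inum);
        return 0;
}

static void __net_exit demo_net_exit(struct net *net)
{
}

static struct pernet_operations demo_net_ops = {
        .init = demo_net_init,
        .exit = demo_net_exit,
};

/* register_pernet_subsys(&demo_net_ops) in module init,
 * unregister_pernet_subsys(&demo_net_ops) on exit. */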
netns_sysctl_ipv6 sysctl; struct netns_frags frags; } ; 96 struct netns_sysctl_lowpan { struct ctl_table_header *frags_hdr; } ; 14 struct netns_ieee802154_lowpan { struct netns_sysctl_lowpan sysctl; struct netns_frags frags; } ; 20 struct sctp_mib ; 21 struct netns_sctp { struct sctp_mib *sctp_statistics; struct proc_dir_entry *proc_net_sctp; struct ctl_table_header *sysctl_header; struct sock *ctl_sock; struct list_head local_addr_list; struct list_head addr_waitq; struct timer_list addr_wq_timer; struct list_head auto_asconf_splist; spinlock_t addr_wq_lock; spinlock_t local_addr_lock; unsigned int rto_initial; unsigned int rto_min; unsigned int rto_max; int rto_alpha; int rto_beta; int max_burst; int cookie_preserve_enable; char *sctp_hmac_alg; unsigned int valid_cookie_life; unsigned int sack_timeout; unsigned int hb_interval; int max_retrans_association; int max_retrans_path; int max_retrans_init; int pf_retrans; int pf_enable; int sndbuf_policy; int rcvbuf_policy; int default_auto_asconf; int addip_enable; int addip_noauth; int prsctp_enable; int auth_enable; int scope_policy; int rwnd_upd_shift; unsigned long max_autoclose; } ; 141 struct netns_dccp { struct sock *v4_ctl_sk; struct sock *v6_ctl_sk; } ; 79 struct nf_logger ; 80 struct nf_queue_handler ; 81 struct nf_hook_entry ; 81 struct netns_nf { struct proc_dir_entry *proc_netfilter; const struct nf_queue_handler *queue_handler; const struct nf_logger *nf_loggers[13U]; struct ctl_table_header *nf_log_dir_header; struct nf_hook_entry *hooks[13U][8U]; bool defrag_ipv4; bool defrag_ipv6; } ; 26 struct ebt_table ; 27 struct netns_xt { struct list_head tables[13U]; bool notrack_deprecated_warning; bool clusterip_deprecated_warning; struct ebt_table *broute_table; struct ebt_table *frame_filter; struct ebt_table *frame_nat; } ; 19 struct hlist_nulls_node ; 19 struct hlist_nulls_head { struct hlist_nulls_node *first; } ; 23 struct hlist_nulls_node { struct hlist_nulls_node *next; struct hlist_nulls_node **pprev; } ; 114 struct ip_conntrack_stat { unsigned int found; unsigned int invalid; unsigned int ignore; unsigned int insert; unsigned int insert_failed; unsigned int drop; unsigned int early_drop; unsigned int error; unsigned int expect_new; unsigned int expect_create; unsigned int expect_delete; unsigned int search_restart; } ; 13 struct nf_proto_net { struct ctl_table_header *ctl_table_header; struct ctl_table *ctl_table; unsigned int users; } ; 27 struct nf_generic_net { struct nf_proto_net pn; unsigned int timeout; } ; 32 struct nf_tcp_net { struct nf_proto_net pn; unsigned int timeouts[14U]; unsigned int tcp_loose; unsigned int tcp_be_liberal; unsigned int tcp_max_retrans; } ; 46 struct nf_udp_net { struct nf_proto_net pn; unsigned int timeouts[2U]; } ; 51 struct nf_icmp_net { struct nf_proto_net pn; unsigned int timeout; } ; 56 struct nf_dccp_net { struct nf_proto_net pn; int dccp_loose; unsigned int dccp_timeout[10U]; } ; 63 struct nf_sctp_net { struct nf_proto_net pn; unsigned int timeouts[10U]; } ; 76 struct nf_udplite_net { struct nf_proto_net pn; unsigned int timeouts[2U]; } ; 83 struct nf_ip_net { struct nf_generic_net generic; struct nf_tcp_net tcp; struct nf_udp_net udp; struct nf_icmp_net icmp; struct nf_icmp_net icmpv6; struct nf_dccp_net dccp; struct nf_sctp_net sctp; struct nf_udplite_net udplite; } ; 100 struct ct_pcpu { spinlock_t lock; struct hlist_nulls_head unconfirmed; struct hlist_nulls_head dying; } ; 107 struct nf_ct_event_notifier ; 107 struct nf_exp_event_notifier ; 107 struct netns_ct { atomic_t count; 
unsigned int expect_count; struct delayed_work ecache_dwork; bool ecache_dwork_pending; struct ctl_table_header *sysctl_header; struct ctl_table_header *acct_sysctl_header; struct ctl_table_header *tstamp_sysctl_header; struct ctl_table_header *event_sysctl_header; struct ctl_table_header *helper_sysctl_header; unsigned int sysctl_log_invalid; int sysctl_events; int sysctl_acct; int sysctl_auto_assign_helper; bool auto_assign_helper_warned; int sysctl_tstamp; int sysctl_checksum; struct ct_pcpu *pcpu_lists; struct ip_conntrack_stat *stat; struct nf_ct_event_notifier *nf_conntrack_event_cb; struct nf_exp_event_notifier *nf_expect_event_cb; struct nf_ip_net nf_ct_proto; unsigned int labels_used; } ; 138 struct nft_af_info ; 139 struct netns_nftables { struct list_head af_info; struct list_head commit_list; struct nft_af_info *ipv4; struct nft_af_info *ipv6; struct nft_af_info *inet; struct nft_af_info *arp; struct nft_af_info *bridge; struct nft_af_info *netdev; unsigned int base_seq; u8 gencursor; } ; 509 struct flow_cache_percpu { struct hlist_head *hash_table; int hash_count; u32 hash_rnd; int hash_rnd_recalc; struct tasklet_struct flush_tasklet; } ; 16 struct flow_cache { u32 hash_shift; struct flow_cache_percpu *percpu; struct hlist_node node; int low_watermark; int high_watermark; struct timer_list rnd_timer; } ; 25 struct xfrm_policy_hash { struct hlist_head *table; unsigned int hmask; u8 dbits4; u8 sbits4; u8 dbits6; u8 sbits6; } ; 21 struct xfrm_policy_hthresh { struct work_struct work; seqlock_t lock; u8 lbits4; u8 rbits4; u8 lbits6; u8 rbits6; } ; 30 struct netns_xfrm { struct list_head state_all; struct hlist_head *state_bydst; struct hlist_head *state_bysrc; struct hlist_head *state_byspi; unsigned int state_hmask; unsigned int state_num; struct work_struct state_hash_work; struct list_head policy_all; struct hlist_head *policy_byidx; unsigned int policy_idx_hmask; struct hlist_head policy_inexact[3U]; struct xfrm_policy_hash policy_bydst[3U]; unsigned int policy_count[6U]; struct work_struct policy_hash_work; struct xfrm_policy_hthresh policy_hthresh; struct sock *nlsk; struct sock *nlsk_stash; u32 sysctl_aevent_etime; u32 sysctl_aevent_rseqth; int sysctl_larval_drop; u32 sysctl_acq_expires; struct ctl_table_header *sysctl_hdr; struct dst_ops xfrm4_dst_ops; struct dst_ops xfrm6_dst_ops; spinlock_t xfrm_state_lock; spinlock_t xfrm_policy_lock; struct mutex xfrm_cfg_mutex; struct flow_cache flow_cache_global; atomic_t flow_cache_genid; struct list_head flow_cache_gc_list; atomic_t flow_cache_gc_count; spinlock_t flow_cache_gc_lock; struct work_struct flow_cache_gc_work; struct work_struct flow_cache_flush_work; struct mutex flow_flush_sem; } ; 87 struct mpls_route ; 88 struct netns_mpls { size_t platform_labels; struct mpls_route **platform_label; struct ctl_table_header *ctl; } ; 16 struct proc_ns_operations ; 17 struct ns_common { atomic_long_t stashed; const struct proc_ns_operations *ops; unsigned int inum; } ; 11 struct net_generic ; 12 struct netns_ipvs ; 13 struct ucounts ; 13 struct net { atomic_t passive; atomic_t count; spinlock_t rules_mod_lock; atomic64_t cookie_gen; struct list_head list; struct list_head cleanup_list; struct list_head exit_list; struct user_namespace *user_ns; struct ucounts *ucounts; spinlock_t nsid_lock; struct idr netns_ids; struct ns_common ns; struct proc_dir_entry *proc_net; struct proc_dir_entry *proc_net_stat; struct ctl_table_set sysctls; struct sock *rtnl; struct sock *genl_sock; struct list_head dev_base_head; struct hlist_head 
*dev_name_head; struct hlist_head *dev_index_head; unsigned int dev_base_seq; int ifindex; unsigned int dev_unreg_count; struct list_head rules_ops; struct net_device *loopback_dev; struct netns_core core; struct netns_mib mib; struct netns_packet packet; struct netns_unix unx; struct netns_ipv4 ipv4; struct netns_ipv6 ipv6; struct netns_ieee802154_lowpan ieee802154_lowpan; struct netns_sctp sctp; struct netns_dccp dccp; struct netns_nf nf; struct netns_xt xt; struct netns_ct ct; struct netns_nftables nft; struct netns_nf_frag nf_frag; struct sock *nfnl; struct sock *nfnl_stash; struct list_head nfnl_acct_list; struct list_head nfct_timeout_list; struct sk_buff_head wext_nlevents; struct net_generic *gen; struct netns_xfrm xfrm; struct netns_ipvs *ipvs; struct netns_mpls mpls; struct sock *diag_nlsk; atomic_t fnhe_genid; } ; 248 struct __anonstruct_possible_net_t_479 { struct net *net; } ; 248 typedef struct __anonstruct_possible_net_t_479 possible_net_t; 383 enum fwnode_type { FWNODE_INVALID = 0, FWNODE_OF = 1, FWNODE_ACPI = 2, FWNODE_ACPI_DATA = 3, FWNODE_ACPI_STATIC = 4, FWNODE_PDATA = 5, FWNODE_IRQCHIP = 6 } ; 393 struct fwnode_handle { enum fwnode_type type; struct fwnode_handle *secondary; } ; 32 typedef u32 phandle; 34 struct property { char *name; int length; void *value; struct property *next; unsigned long _flags; unsigned int unique_id; struct bin_attribute attr; } ; 44 struct device_node { const char *name; const char *type; phandle phandle; const char *full_name; struct fwnode_handle fwnode; struct property *properties; struct property *deadprops; struct device_node *parent; struct device_node *child; struct device_node *sibling; struct kobject kobj; unsigned long _flags; void *data; } ; 1290 struct mii_ioctl_data { __u16 phy_id; __u16 reg_num; __u16 val_in; __u16 val_out; } ; 161 struct mii_if_info { int phy_id; int advertising; int phy_id_mask; int reg_num_mask; unsigned char full_duplex; unsigned char force_media; unsigned char supports_gmii; struct net_device *dev; int (*mdio_read)(struct net_device *, int, int); void (*mdio_write)(struct net_device *, int, int, int); } ; 296 struct mii_bus ; 303 struct mdio_device { struct device dev; const struct dev_pm_ops *pm_ops; struct mii_bus *bus; int (*bus_match)(struct device *, struct device_driver *); void (*device_free)(struct mdio_device *); void (*device_remove)(struct mdio_device *); int addr; int flags; } ; 41 struct mdio_driver_common { struct device_driver driver; int flags; } ; 244 struct phy_device ; 245 enum led_brightness { LED_OFF = 0, LED_HALF = 127, LED_FULL = 255 } ; 251 struct led_trigger ; 251 struct led_classdev { const char *name; enum led_brightness brightness; enum led_brightness max_brightness; int flags; unsigned long work_flags; void (*brightness_set)(struct led_classdev *, enum led_brightness ); int (*brightness_set_blocking)(struct led_classdev *, enum led_brightness ); enum led_brightness (*brightness_get)(struct led_classdev *); int (*blink_set)(struct led_classdev *, unsigned long *, unsigned long *); struct device *dev; const struct attribute_group **groups; struct list_head node; const char *default_trigger; unsigned long blink_delay_on; unsigned long blink_delay_off; struct timer_list blink_timer; int blink_brightness; int new_blink_brightness; void (*flash_resume)(struct led_classdev *); struct work_struct set_brightness_work; int delayed_set_value; struct rw_semaphore trigger_lock; struct led_trigger *trigger; struct list_head trig_list; void *trigger_data; bool activated; struct mutex 
led_access; } ; 226 struct led_trigger { const char *name; void (*activate)(struct led_classdev *); void (*deactivate)(struct led_classdev *); rwlock_t leddev_list_lock; struct list_head led_cdevs; struct list_head next_trig; } ; 418 struct phy_led_trigger { struct led_trigger trigger; char name[31U]; unsigned int speed; } ; 39 enum ldv_32668 { PHY_INTERFACE_MODE_NA = 0, PHY_INTERFACE_MODE_MII = 1, PHY_INTERFACE_MODE_GMII = 2, PHY_INTERFACE_MODE_SGMII = 3, PHY_INTERFACE_MODE_TBI = 4, PHY_INTERFACE_MODE_REVMII = 5, PHY_INTERFACE_MODE_RMII = 6, PHY_INTERFACE_MODE_RGMII = 7, PHY_INTERFACE_MODE_RGMII_ID = 8, PHY_INTERFACE_MODE_RGMII_RXID = 9, PHY_INTERFACE_MODE_RGMII_TXID = 10, PHY_INTERFACE_MODE_RTBI = 11, PHY_INTERFACE_MODE_SMII = 12, PHY_INTERFACE_MODE_XGMII = 13, PHY_INTERFACE_MODE_MOCA = 14, PHY_INTERFACE_MODE_QSGMII = 15, PHY_INTERFACE_MODE_TRGMII = 16, PHY_INTERFACE_MODE_MAX = 17 } ; 86 typedef enum ldv_32668 phy_interface_t; 149 enum ldv_32721 { MDIOBUS_ALLOCATED = 1, MDIOBUS_REGISTERED = 2, MDIOBUS_UNREGISTERED = 3, MDIOBUS_RELEASED = 4 } ; 156 struct mii_bus { struct module *owner; const char *name; char id[17U]; void *priv; int (*read)(struct mii_bus *, int, int); int (*write)(struct mii_bus *, int, int, u16 ); int (*reset)(struct mii_bus *); struct mutex mdio_lock; struct device *parent; enum ldv_32721 state; struct device dev; struct mdio_device *mdio_map[32U]; u32 phy_mask; u32 phy_ignore_ta_mask; int irq[32U]; } ; 237 enum phy_state { PHY_DOWN = 0, PHY_STARTING = 1, PHY_READY = 2, PHY_PENDING = 3, PHY_UP = 4, PHY_AN = 5, PHY_RUNNING = 6, PHY_NOLINK = 7, PHY_FORCING = 8, PHY_CHANGELINK = 9, PHY_HALTED = 10, PHY_RESUMING = 11 } ; 252 struct phy_c45_device_ids { u32 devices_in_package; u32 device_ids[8U]; } ; 345 struct phy_driver ; 345 struct phy_device { struct mdio_device mdio; struct phy_driver *drv; u32 phy_id; struct phy_c45_device_ids c45_ids; bool is_c45; bool is_internal; bool is_pseudo_fixed_link; bool has_fixups; bool suspended; enum phy_state state; u32 dev_flags; phy_interface_t interface; int speed; int duplex; int pause; int asym_pause; int link; u32 interrupts; u32 supported; u32 advertising; u32 lp_advertising; u32 eee_broken_modes; int autoneg; int link_timeout; struct phy_led_trigger *phy_led_triggers; unsigned int phy_num_led_triggers; struct phy_led_trigger *last_triggered; int irq; void *priv; struct work_struct phy_queue; struct delayed_work state_queue; atomic_t irq_disable; struct mutex lock; struct net_device *attached_dev; u8 mdix; u8 mdix_ctrl; void (*adjust_link)(struct net_device *); } ; 457 struct phy_driver { struct mdio_driver_common mdiodrv; u32 phy_id; char *name; unsigned int phy_id_mask; u32 features; u32 flags; const void *driver_data; int (*soft_reset)(struct phy_device *); int (*config_init)(struct phy_device *); int (*probe)(struct phy_device *); int (*suspend)(struct phy_device *); int (*resume)(struct phy_device *); int (*config_aneg)(struct phy_device *); int (*aneg_done)(struct phy_device *); int (*read_status)(struct phy_device *); int (*ack_interrupt)(struct phy_device *); int (*config_intr)(struct phy_device *); int (*did_interrupt)(struct phy_device *); void (*remove)(struct phy_device *); int (*match_phy_device)(struct phy_device *); int (*ts_info)(struct phy_device *, struct ethtool_ts_info *); int (*hwtstamp)(struct phy_device *, struct ifreq *); bool (*rxtstamp)(struct phy_device *, struct sk_buff *, int); void (*txtstamp)(struct phy_device *, struct sk_buff *, int); int (*set_wol)(struct phy_device *, struct ethtool_wolinfo *); 
void (*get_wol)(struct phy_device *, struct ethtool_wolinfo *); void (*link_change_notify)(struct phy_device *); int (*read_mmd_indirect)(struct phy_device *, int, int, int); void (*write_mmd_indirect)(struct phy_device *, int, int, int, u32 ); int (*module_info)(struct phy_device *, struct ethtool_modinfo *); int (*module_eeprom)(struct phy_device *, struct ethtool_eeprom *, u8 *); int (*get_sset_count)(struct phy_device *); void (*get_strings)(struct phy_device *, u8 *); void (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); int (*get_tunable)(struct phy_device *, struct ethtool_tunable *, void *); int (*set_tunable)(struct phy_device *, struct ethtool_tunable *, const void *); } ; 884 struct fixed_phy_status { int link; int speed; int duplex; int pause; int asym_pause; } ; 27 enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = 0, DSA_TAG_PROTO_DSA = 1, DSA_TAG_PROTO_TRAILER = 2, DSA_TAG_PROTO_EDSA = 3, DSA_TAG_PROTO_BRCM = 4, DSA_TAG_PROTO_QCA = 5, DSA_TAG_LAST = 6 } ; 37 struct dsa_chip_data { struct device *host_dev; int sw_addr; int eeprom_len; struct device_node *of_node; char *port_names[12U]; struct device_node *port_dn[12U]; s8 rtable[4U]; } ; 71 struct dsa_platform_data { struct device *netdev; struct net_device *of_netdev; int nr_chips; struct dsa_chip_data *chip; } ; 87 struct packet_type ; 88 struct dsa_switch ; 88 struct dsa_device_ops ; 88 struct dsa_switch_tree { struct list_head list; u32 tree; struct kref refcount; bool applied; struct dsa_platform_data *pd; struct net_device *master_netdev; int (*rcv)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); struct ethtool_ops master_ethtool_ops; const struct ethtool_ops *master_orig_ethtool_ops; s8 cpu_switch; s8 cpu_port; struct dsa_switch *ds[4U]; const struct dsa_device_ops *tag_ops; } ; 141 struct dsa_port { struct net_device *netdev; struct device_node *dn; unsigned int ageing_time; u8 stp_state; } ; 148 struct dsa_switch_ops ; 148 struct dsa_switch { struct device *dev; struct dsa_switch_tree *dst; int index; void *priv; struct dsa_chip_data *cd; struct dsa_switch_ops *ops; s8 rtable[4U]; char hwmon_name[24U]; struct device *hwmon_dev; struct net_device *master_netdev; u32 dsa_port_mask; u32 cpu_port_mask; u32 enabled_port_mask; u32 phys_mii_mask; struct dsa_port ports[12U]; struct mii_bus *slave_mii_bus; } ; 235 struct switchdev_trans ; 236 struct switchdev_obj ; 237 struct switchdev_obj_port_fdb ; 238 struct switchdev_obj_port_mdb ; 239 struct switchdev_obj_port_vlan ; 240 struct dsa_switch_ops { struct list_head list; const char * (*probe)(struct device *, struct device *, int, void **); enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *); int (*setup)(struct dsa_switch *); int (*set_addr)(struct dsa_switch *, u8 *); u32 (*get_phy_flags)(struct dsa_switch *, int); int (*phy_read)(struct dsa_switch *, int, int); int (*phy_write)(struct dsa_switch *, int, int, u16 ); void (*adjust_link)(struct dsa_switch *, int, struct phy_device *); void (*fixed_link_update)(struct dsa_switch *, int, struct fixed_phy_status *); void (*get_strings)(struct dsa_switch *, int, uint8_t *); void (*get_ethtool_stats)(struct dsa_switch *, int, uint64_t *); int (*get_sset_count)(struct dsa_switch *); void (*get_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *); int (*set_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *); int (*suspend)(struct dsa_switch *); int (*resume)(struct dsa_switch *); int (*port_enable)(struct dsa_switch *, int, struct phy_device *); void 
(*port_disable)(struct dsa_switch *, int, struct phy_device *); int (*set_eee)(struct dsa_switch *, int, struct phy_device *, struct ethtool_eee *); int (*get_eee)(struct dsa_switch *, int, struct ethtool_eee *); int (*get_temp)(struct dsa_switch *, int *); int (*get_temp_limit)(struct dsa_switch *, int *); int (*set_temp_limit)(struct dsa_switch *, int); int (*get_temp_alarm)(struct dsa_switch *, bool *); int (*get_eeprom_len)(struct dsa_switch *); int (*get_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *); int (*get_regs_len)(struct dsa_switch *, int); void (*get_regs)(struct dsa_switch *, int, struct ethtool_regs *, void *); int (*set_ageing_time)(struct dsa_switch *, unsigned int); int (*port_bridge_join)(struct dsa_switch *, int, struct net_device *); void (*port_bridge_leave)(struct dsa_switch *, int); void (*port_stp_state_set)(struct dsa_switch *, int, u8 ); void (*port_fast_age)(struct dsa_switch *, int); int (*port_vlan_filtering)(struct dsa_switch *, int, bool ); int (*port_vlan_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *); void (*port_vlan_add)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *); int (*port_vlan_del)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *); int (*port_vlan_dump)(struct dsa_switch *, int, struct switchdev_obj_port_vlan *, int (*)(struct switchdev_obj *)); int (*port_fdb_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *); void (*port_fdb_add)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *); int (*port_fdb_del)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *); int (*port_fdb_dump)(struct dsa_switch *, int, struct switchdev_obj_port_fdb *, int (*)(struct switchdev_obj *)); int (*port_mdb_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *, struct switchdev_trans *); void (*port_mdb_add)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *, struct switchdev_trans *); int (*port_mdb_del)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *); int (*port_mdb_dump)(struct dsa_switch *, int, struct switchdev_obj_port_mdb *, int (*)(struct switchdev_obj *)); } ; 407 struct ieee_ets { __u8 willing; __u8 ets_cap; __u8 cbs; __u8 tc_tx_bw[8U]; __u8 tc_rx_bw[8U]; __u8 tc_tsa[8U]; __u8 prio_tc[8U]; __u8 tc_reco_bw[8U]; __u8 tc_reco_tsa[8U]; __u8 reco_prio_tc[8U]; } ; 69 struct ieee_maxrate { __u64 tc_maxrate[8U]; } ; 87 struct ieee_qcn { __u8 rpg_enable[8U]; __u32 rppp_max_rps[8U]; __u32 rpg_time_reset[8U]; __u32 rpg_byte_reset[8U]; __u32 rpg_threshold[8U]; __u32 rpg_max_rate[8U]; __u32 rpg_ai_rate[8U]; __u32 rpg_hai_rate[8U]; __u32 rpg_gd[8U]; __u32 rpg_min_dec_fac[8U]; __u32 rpg_min_rate[8U]; __u32 cndd_state_machine[8U]; } ; 132 struct ieee_qcn_stats { __u64 rppp_rp_centiseconds[8U]; __u32 rppp_created_rps[8U]; } ; 144 struct ieee_pfc { __u8 pfc_cap; __u8 pfc_en; __u8 mbc; __u16 delay; __u64 requests[8U]; __u64 indications[8U]; } ; 164 struct cee_pg { __u8 willing; __u8 error; __u8 pg_en; __u8 tcs_supported; __u8 pg_bw[8U]; __u8 prio_pg[8U]; } ; 187 struct cee_pfc { __u8 willing; __u8 error; __u8 pfc_en; __u8 tcs_supported; } ; 202 struct dcb_app { __u8 selector; __u8 priority; __u16 protocol; } ; 236 struct dcb_peer_app_info { __u8 willing; __u8 error; } ; 40 struct dcbnl_rtnl_ops { int (*ieee_getets)(struct net_device *, 
struct ieee_ets *); int (*ieee_setets)(struct net_device *, struct ieee_ets *); int (*ieee_getmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_setmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_getqcn)(struct net_device *, struct ieee_qcn *); int (*ieee_setqcn)(struct net_device *, struct ieee_qcn *); int (*ieee_getqcnstats)(struct net_device *, struct ieee_qcn_stats *); int (*ieee_getpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_setpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_getapp)(struct net_device *, struct dcb_app *); int (*ieee_setapp)(struct net_device *, struct dcb_app *); int (*ieee_delapp)(struct net_device *, struct dcb_app *); int (*ieee_peer_getets)(struct net_device *, struct ieee_ets *); int (*ieee_peer_getpfc)(struct net_device *, struct ieee_pfc *); u8 (*getstate)(struct net_device *); u8 (*setstate)(struct net_device *, u8 ); void (*getpermhwaddr)(struct net_device *, u8 *); void (*setpgtccfgtx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgtx)(struct net_device *, int, u8 ); void (*setpgtccfgrx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgrx)(struct net_device *, int, u8 ); void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgtx)(struct net_device *, int, u8 *); void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgrx)(struct net_device *, int, u8 *); void (*setpfccfg)(struct net_device *, int, u8 ); void (*getpfccfg)(struct net_device *, int, u8 *); u8 (*setall)(struct net_device *); u8 (*getcap)(struct net_device *, int, u8 *); int (*getnumtcs)(struct net_device *, int, u8 *); int (*setnumtcs)(struct net_device *, int, u8 ); u8 (*getpfcstate)(struct net_device *); void (*setpfcstate)(struct net_device *, u8 ); void (*getbcncfg)(struct net_device *, int, u32 *); void (*setbcncfg)(struct net_device *, int, u32 ); void (*getbcnrp)(struct net_device *, int, u8 *); void (*setbcnrp)(struct net_device *, int, u8 ); int (*setapp)(struct net_device *, u8 , u16 , u8 ); int (*getapp)(struct net_device *, u8 , u16 ); u8 (*getfeatcfg)(struct net_device *, int, u8 *); u8 (*setfeatcfg)(struct net_device *, int, u8 ); u8 (*getdcbx)(struct net_device *); u8 (*setdcbx)(struct net_device *, u8 ); int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, u16 *); int (*peer_getapptable)(struct net_device *, struct dcb_app *); int (*cee_peer_getpg)(struct net_device *, struct cee_pg *); int (*cee_peer_getpfc)(struct net_device *, struct cee_pfc *); } ; 105 struct taskstats { __u16 version; __u32 ac_exitcode; __u8 ac_flag; __u8 ac_nice; __u64 cpu_count; __u64 cpu_delay_total; __u64 blkio_count; __u64 blkio_delay_total; __u64 swapin_count; __u64 swapin_delay_total; __u64 cpu_run_real_total; __u64 cpu_run_virtual_total; char ac_comm[32U]; __u8 ac_sched; __u8 ac_pad[3U]; __u32 ac_uid; __u32 ac_gid; __u32 ac_pid; __u32 ac_ppid; __u32 ac_btime; __u64 ac_etime; __u64 ac_utime; __u64 ac_stime; __u64 ac_minflt; __u64 ac_majflt; __u64 coremem; __u64 virtmem; __u64 hiwater_rss; __u64 hiwater_vm; __u64 read_char; __u64 write_char; __u64 read_syscalls; __u64 write_syscalls; __u64 read_bytes; __u64 write_bytes; __u64 cancelled_write_bytes; __u64 nvcsw; __u64 nivcsw; __u64 ac_utimescaled; __u64 ac_stimescaled; __u64 cpu_scaled_run_real_total; __u64 freepages_count; __u64 freepages_delay_total; } ; 58 struct mnt_namespace ; 59 struct ipc_namespace ; 60 struct cgroup_namespace ; 61 struct nsproxy { atomic_t count; struct 
uts_namespace *uts_ns; struct ipc_namespace *ipc_ns; struct mnt_namespace *mnt_ns; struct pid_namespace *pid_ns_for_children; struct net *net_ns; struct cgroup_namespace *cgroup_ns; } ; 86 struct uid_gid_extent { u32 first; u32 lower_first; u32 count; } ; 19 struct uid_gid_map { u32 nr_extents; struct uid_gid_extent extent[5U]; } ; 31 struct user_namespace { struct uid_gid_map uid_map; struct uid_gid_map gid_map; struct uid_gid_map projid_map; atomic_t count; struct user_namespace *parent; int level; kuid_t owner; kgid_t group; struct ns_common ns; unsigned long flags; struct key *persistent_keyring_register; struct rw_semaphore persistent_keyring_register_sem; struct work_struct work; struct ctl_table_set set; struct ctl_table_header *sysctls; struct ucounts *ucounts; int ucount_max[7U]; } ; 63 struct ucounts { struct hlist_node node; struct user_namespace *ns; kuid_t uid; atomic_t count; atomic_t ucount[7U]; } ; 631 struct cgroup_namespace { atomic_t count; struct ns_common ns; struct user_namespace *user_ns; struct ucounts *ucounts; struct css_set *root_cset; } ; 686 struct netprio_map { struct callback_head rcu; u32 priomap_len; u32 priomap[]; } ; 41 struct nlmsghdr { __u32 nlmsg_len; __u16 nlmsg_type; __u16 nlmsg_flags; __u32 nlmsg_seq; __u32 nlmsg_pid; } ; 143 struct nlattr { __u16 nla_len; __u16 nla_type; } ; 105 struct netlink_callback { struct sk_buff *skb; const struct nlmsghdr *nlh; int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); void *data; struct module *module; u16 family; u16 min_dump_alloc; unsigned int prev_seq; unsigned int seq; long args[6U]; } ; 183 struct ndmsg { __u8 ndm_family; __u8 ndm_pad1; __u16 ndm_pad2; __s32 ndm_ifindex; __u16 ndm_state; __u8 ndm_flags; __u8 ndm_type; } ; 41 struct rtnl_link_stats64 { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 rx_errors; __u64 tx_errors; __u64 rx_dropped; __u64 tx_dropped; __u64 multicast; __u64 collisions; __u64 rx_length_errors; __u64 rx_over_errors; __u64 rx_crc_errors; __u64 rx_frame_errors; __u64 rx_fifo_errors; __u64 rx_missed_errors; __u64 tx_aborted_errors; __u64 tx_carrier_errors; __u64 tx_fifo_errors; __u64 tx_heartbeat_errors; __u64 tx_window_errors; __u64 rx_compressed; __u64 tx_compressed; __u64 rx_nohandler; } ; 869 struct ifla_vf_stats { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 broadcast; __u64 multicast; } ; 16 struct ifla_vf_info { __u32 vf; __u8 mac[32U]; __u32 vlan; __u32 qos; __u32 spoofchk; __u32 linkstate; __u32 min_tx_rate; __u32 max_tx_rate; __u32 rss_query_en; __u32 trusted; __be16 vlan_proto; } ; 117 struct netpoll_info ; 118 struct wireless_dev ; 119 struct wpan_dev ; 120 struct mpls_dev ; 121 struct udp_tunnel_info ; 70 enum netdev_tx { __NETDEV_TX_MIN = -2147483648, NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 16 } ; 113 typedef enum netdev_tx netdev_tx_t; 132 struct net_device_stats { unsigned long rx_packets; unsigned long tx_packets; unsigned long rx_bytes; unsigned long tx_bytes; unsigned long rx_errors; unsigned long tx_errors; unsigned long rx_dropped; unsigned long tx_dropped; unsigned long multicast; unsigned long collisions; unsigned long rx_length_errors; unsigned long rx_over_errors; unsigned long rx_crc_errors; unsigned long rx_frame_errors; unsigned long rx_fifo_errors; unsigned long rx_missed_errors; unsigned long tx_aborted_errors; unsigned long tx_carrier_errors; unsigned long tx_fifo_errors; unsigned long tx_heartbeat_errors; unsigned long 
tx_window_errors; unsigned long rx_compressed; unsigned long tx_compressed; } ; 196 struct neigh_parms ; 197 struct netdev_hw_addr { struct list_head list; unsigned char addr[32U]; unsigned char type; bool global_use; int sync_cnt; int refcount; int synced; struct callback_head callback_head; } ; 217 struct netdev_hw_addr_list { struct list_head list; int count; } ; 222 struct hh_cache { u16 hh_len; u16 __pad; seqlock_t hh_lock; unsigned long hh_data[16U]; } ; 251 struct header_ops { int (*create)(struct sk_buff *, struct net_device *, unsigned short, const void *, const void *, unsigned int); int (*parse)(const struct sk_buff *, unsigned char *); int (*cache)(const struct neighbour *, struct hh_cache *, __be16 ); void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *); bool (*validate)(const char *, unsigned int); } ; 302 struct napi_struct { struct list_head poll_list; unsigned long state; int weight; unsigned int gro_count; int (*poll)(struct napi_struct *, int); int poll_owner; struct net_device *dev; struct sk_buff *gro_list; struct sk_buff *skb; struct hrtimer timer; struct list_head dev_list; struct hlist_node napi_hash_node; unsigned int napi_id; } ; 357 enum rx_handler_result { RX_HANDLER_CONSUMED = 0, RX_HANDLER_ANOTHER = 1, RX_HANDLER_EXACT = 2, RX_HANDLER_PASS = 3 } ; 405 typedef enum rx_handler_result rx_handler_result_t; 406 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **); 541 struct Qdisc ; 541 struct netdev_queue { struct net_device *dev; struct Qdisc *qdisc; struct Qdisc *qdisc_sleeping; struct kobject kobj; int numa_node; unsigned long tx_maxrate; unsigned long trans_timeout; spinlock_t _xmit_lock; int xmit_lock_owner; unsigned long trans_start; unsigned long state; struct dql dql; } ; 612 struct rps_map { unsigned int len; struct callback_head rcu; u16 cpus[0U]; } ; 624 struct rps_dev_flow { u16 cpu; u16 filter; unsigned int last_qtail; } ; 636 struct rps_dev_flow_table { unsigned int mask; struct callback_head rcu; struct rps_dev_flow flows[0U]; } ; 688 struct netdev_rx_queue { struct rps_map *rps_map; struct rps_dev_flow_table *rps_flow_table; struct kobject kobj; struct net_device *dev; } ; 711 struct xps_map { unsigned int len; unsigned int alloc_len; struct callback_head rcu; u16 queues[0U]; } ; 724 struct xps_dev_maps { struct callback_head rcu; struct xps_map *cpu_map[0U]; } ; 735 struct netdev_tc_txq { u16 count; u16 offset; } ; 746 struct netdev_fcoe_hbainfo { char manufacturer[64U]; char serial_number[64U]; char hardware_version[64U]; char driver_version[64U]; char optionrom_version[64U]; char firmware_version[64U]; char model[256U]; char model_description[256U]; } ; 762 struct netdev_phys_item_id { unsigned char id[32U]; unsigned char id_len; } ; 790 struct tc_cls_u32_offload ; 791 struct tc_cls_flower_offload ; 791 struct tc_cls_matchall_offload ; 791 struct tc_cls_bpf_offload ; 791 union __anonunion____missing_field_name_492 { u8 tc; struct tc_cls_u32_offload *cls_u32; struct tc_cls_flower_offload *cls_flower; struct tc_cls_matchall_offload *cls_mall; struct tc_cls_bpf_offload *cls_bpf; } ; 791 struct tc_to_netdev { unsigned int type; union __anonunion____missing_field_name_492 __annonCompField115; bool egress_dev; } ; 808 enum xdp_netdev_command { XDP_SETUP_PROG = 0, XDP_QUERY_PROG = 1 } ; 813 union __anonunion____missing_field_name_493 { struct bpf_prog *prog; bool prog_attached; } ; 813 struct netdev_xdp { enum xdp_netdev_command command; union __anonunion____missing_field_name_493 __annonCompField116; } 
; 836 struct net_device_ops { int (*ndo_init)(struct net_device *); void (*ndo_uninit)(struct net_device *); int (*ndo_open)(struct net_device *); int (*ndo_stop)(struct net_device *); netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *); netdev_features_t (*ndo_features_check)(struct sk_buff *, struct net_device *, netdev_features_t ); u16 (*ndo_select_queue)(struct net_device *, struct sk_buff *, void *, u16 (*)(struct net_device *, struct sk_buff *)); void (*ndo_change_rx_flags)(struct net_device *, int); void (*ndo_set_rx_mode)(struct net_device *); int (*ndo_set_mac_address)(struct net_device *, void *); int (*ndo_validate_addr)(struct net_device *); int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int); int (*ndo_set_config)(struct net_device *, struct ifmap *); int (*ndo_change_mtu)(struct net_device *, int); int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *); void (*ndo_tx_timeout)(struct net_device *); struct rtnl_link_stats64 * (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *); bool (*ndo_has_offload_stats)(const struct net_device *, int); int (*ndo_get_offload_stats)(int, const struct net_device *, void *); struct net_device_stats * (*ndo_get_stats)(struct net_device *); int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16 , u16 ); int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16 , u16 ); void (*ndo_poll_controller)(struct net_device *); int (*ndo_netpoll_setup)(struct net_device *, struct netpoll_info *); void (*ndo_netpoll_cleanup)(struct net_device *); int (*ndo_busy_poll)(struct napi_struct *); int (*ndo_set_vf_mac)(struct net_device *, int, u8 *); int (*ndo_set_vf_vlan)(struct net_device *, int, u16 , u8 , __be16 ); int (*ndo_set_vf_rate)(struct net_device *, int, int, int); int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool ); int (*ndo_set_vf_trust)(struct net_device *, int, bool ); int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *); int (*ndo_set_vf_link_state)(struct net_device *, int, int); int (*ndo_get_vf_stats)(struct net_device *, int, struct ifla_vf_stats *); int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **); int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *); int (*ndo_set_vf_guid)(struct net_device *, int, u64 , int); int (*ndo_set_vf_rss_query_en)(struct net_device *, int, bool ); int (*ndo_setup_tc)(struct net_device *, u32 , __be16 , struct tc_to_netdev *); int (*ndo_fcoe_enable)(struct net_device *); int (*ndo_fcoe_disable)(struct net_device *); int (*ndo_fcoe_ddp_setup)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_ddp_done)(struct net_device *, u16 ); int (*ndo_fcoe_ddp_target)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_get_hbainfo)(struct net_device *, struct netdev_fcoe_hbainfo *); int (*ndo_fcoe_get_wwn)(struct net_device *, u64 *, int); int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16 , u32 ); int (*ndo_add_slave)(struct net_device *, struct net_device *); int (*ndo_del_slave)(struct net_device *, struct net_device *); netdev_features_t (*ndo_fix_features)(struct net_device *, netdev_features_t ); int (*ndo_set_features)(struct net_device *, netdev_features_t ); int (*ndo_neigh_construct)(struct net_device *, struct neighbour *); void (*ndo_neigh_destroy)(struct net_device *, struct neighbour *); int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 , u16 ); int (*ndo_fdb_del)(struct 
ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 ); int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, struct net_device *, int *); int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *, u16 ); int (*ndo_bridge_getlink)(struct sk_buff *, u32 , u32 , struct net_device *, u32 , int); int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *, u16 ); int (*ndo_change_carrier)(struct net_device *, bool ); int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_item_id *); int (*ndo_get_phys_port_name)(struct net_device *, char *, size_t ); void (*ndo_udp_tunnel_add)(struct net_device *, struct udp_tunnel_info *); void (*ndo_udp_tunnel_del)(struct net_device *, struct udp_tunnel_info *); void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *); void (*ndo_dfwd_del_station)(struct net_device *, void *); netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *, struct net_device *, void *); int (*ndo_get_lock_subclass)(struct net_device *); int (*ndo_set_tx_maxrate)(struct net_device *, int, u32 ); int (*ndo_get_iflink)(const struct net_device *); int (*ndo_change_proto_down)(struct net_device *, bool ); int (*ndo_fill_metadata_dst)(struct net_device *, struct sk_buff *); void (*ndo_set_rx_headroom)(struct net_device *, int); int (*ndo_xdp)(struct net_device *, struct netdev_xdp *); } ; 1372 struct __anonstruct_adj_list_494 { struct list_head upper; struct list_head lower; } ; 1372 struct iw_handler_def ; 1372 struct iw_public_data ; 1372 struct switchdev_ops ; 1372 struct l3mdev_ops ; 1372 struct ndisc_ops ; 1372 struct vlan_info ; 1372 struct tipc_bearer ; 1372 struct in_device ; 1372 struct dn_dev ; 1372 struct inet6_dev ; 1372 struct tcf_proto ; 1372 struct cpu_rmap ; 1372 struct pcpu_lstats ; 1372 struct pcpu_sw_netstats ; 1372 struct pcpu_dstats ; 1372 struct pcpu_vstats ; 1372 union __anonunion____missing_field_name_495 { void *ml_priv; struct pcpu_lstats *lstats; struct pcpu_sw_netstats *tstats; struct pcpu_dstats *dstats; struct pcpu_vstats *vstats; } ; 1372 struct garp_port ; 1372 struct mrp_port ; 1372 struct rtnl_link_ops ; 1372 struct net_device { char name[16U]; struct hlist_node name_hlist; char *ifalias; unsigned long mem_end; unsigned long mem_start; unsigned long base_addr; int irq; atomic_t carrier_changes; unsigned long state; struct list_head dev_list; struct list_head napi_list; struct list_head unreg_list; struct list_head close_list; struct list_head ptype_all; struct list_head ptype_specific; struct __anonstruct_adj_list_494 adj_list; netdev_features_t features; netdev_features_t hw_features; netdev_features_t wanted_features; netdev_features_t vlan_features; netdev_features_t hw_enc_features; netdev_features_t mpls_features; netdev_features_t gso_partial_features; int ifindex; int group; struct net_device_stats stats; atomic_long_t rx_dropped; atomic_long_t tx_dropped; atomic_long_t rx_nohandler; const struct iw_handler_def *wireless_handlers; struct iw_public_data *wireless_data; const struct net_device_ops *netdev_ops; const struct ethtool_ops *ethtool_ops; const struct switchdev_ops *switchdev_ops; const struct l3mdev_ops *l3mdev_ops; const struct ndisc_ops *ndisc_ops; const struct header_ops *header_ops; unsigned int flags; unsigned int priv_flags; unsigned short gflags; unsigned short padded; unsigned char operstate; unsigned char link_mode; unsigned char if_port; unsigned char dma; unsigned int mtu; unsigned int min_mtu; unsigned int max_mtu; unsigned short type; 
unsigned short hard_header_len; unsigned short needed_headroom; unsigned short needed_tailroom; unsigned char perm_addr[32U]; unsigned char addr_assign_type; unsigned char addr_len; unsigned short neigh_priv_len; unsigned short dev_id; unsigned short dev_port; spinlock_t addr_list_lock; unsigned char name_assign_type; bool uc_promisc; struct netdev_hw_addr_list uc; struct netdev_hw_addr_list mc; struct netdev_hw_addr_list dev_addrs; struct kset *queues_kset; unsigned int promiscuity; unsigned int allmulti; struct vlan_info *vlan_info; struct dsa_switch_tree *dsa_ptr; struct tipc_bearer *tipc_ptr; void *atalk_ptr; struct in_device *ip_ptr; struct dn_dev *dn_ptr; struct inet6_dev *ip6_ptr; void *ax25_ptr; struct wireless_dev *ieee80211_ptr; struct wpan_dev *ieee802154_ptr; struct mpls_dev *mpls_ptr; unsigned long last_rx; unsigned char *dev_addr; struct netdev_rx_queue *_rx; unsigned int num_rx_queues; unsigned int real_num_rx_queues; unsigned long gro_flush_timeout; rx_handler_func_t *rx_handler; void *rx_handler_data; struct tcf_proto *ingress_cl_list; struct netdev_queue *ingress_queue; struct nf_hook_entry *nf_hooks_ingress; unsigned char broadcast[32U]; struct cpu_rmap *rx_cpu_rmap; struct hlist_node index_hlist; struct netdev_queue *_tx; unsigned int num_tx_queues; unsigned int real_num_tx_queues; struct Qdisc *qdisc; struct hlist_head qdisc_hash[16U]; unsigned long tx_queue_len; spinlock_t tx_global_lock; int watchdog_timeo; struct xps_dev_maps *xps_maps; struct tcf_proto *egress_cl_list; struct timer_list watchdog_timer; int *pcpu_refcnt; struct list_head todo_list; struct list_head link_watch_list; unsigned char reg_state; bool dismantle; unsigned short rtnl_link_state; void (*destructor)(struct net_device *); struct netpoll_info *npinfo; possible_net_t nd_net; union __anonunion____missing_field_name_495 __annonCompField117; struct garp_port *garp_port; struct mrp_port *mrp_port; struct device dev; const struct attribute_group *sysfs_groups[4U]; const struct attribute_group *sysfs_rx_queue_group; const struct rtnl_link_ops *rtnl_link_ops; unsigned int gso_max_size; u16 gso_max_segs; const struct dcbnl_rtnl_ops *dcbnl_ops; u8 num_tc; struct netdev_tc_txq tc_to_txq[16U]; u8 prio_tc_map[16U]; unsigned int fcoe_ddp_xid; struct netprio_map *priomap; struct phy_device *phydev; struct lock_class_key *qdisc_tx_busylock; struct lock_class_key *qdisc_running_key; bool proto_down; } ; 2194 struct packet_type { __be16 type; struct net_device *dev; int (*func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); bool (*id_match)(struct packet_type *, struct sock *); void *af_packet_priv; struct list_head list; } ; 2222 struct pcpu_sw_netstats { u64 rx_packets; u64 rx_bytes; u64 tx_packets; u64 tx_bytes; struct u64_stats_sync syncp; } ; 3168 enum skb_free_reason { SKB_REASON_CONSUMED = 0, SKB_REASON_DROPPED = 1 } ; 644 struct firmware { size_t size; const u8 *data; struct page **pages; void *priv; } ; 294 struct chip_info { const char *name; int drv_flags; } ; 497 struct starfire_rx_desc { __le64 rxaddr; } ; 466 struct full_rx_done_desc { __le32 status; __le16 status3; __le16 status2; __le16 vlanid; __le16 csum; __le32 timestamp; } ; 476 typedef struct full_rx_done_desc rx_done_desc; 492 struct starfire_tx_desc_2 { __le32 status; __le32 reserved; __le64 addr; } ; 501 typedef struct starfire_tx_desc_2 starfire_tx_desc; 510 struct tx_done_desc { __le32 status; } ; 517 struct rx_ring_info { struct sk_buff *skb; dma_addr_t mapping; } ; 525 struct tx_ring_info { struct 
sk_buff *skb; dma_addr_t mapping; unsigned int used_slots; } ; 530 struct netdev_private { struct starfire_rx_desc *rx_ring; starfire_tx_desc *tx_ring; dma_addr_t rx_ring_dma; dma_addr_t tx_ring_dma; struct rx_ring_info rx_info[256U]; struct tx_ring_info tx_info[32U]; rx_done_desc *rx_done_q; dma_addr_t rx_done_q_dma; unsigned int rx_done; struct tx_done_desc *tx_done_q; dma_addr_t tx_done_q_dma; unsigned int tx_done; struct napi_struct napi; struct net_device *dev; struct pci_dev *pci_dev; unsigned long active_vlans[64U]; void *queue_mem; dma_addr_t queue_mem_dma; size_t queue_mem_size; spinlock_t lock; unsigned int cur_rx; unsigned int dirty_rx; unsigned int cur_tx; unsigned int dirty_tx; unsigned int reap_tx; unsigned int rx_buf_sz; int speed100; u32 tx_mode; u32 intr_timer_ctrl; u8 tx_threshold; struct mii_if_info mii_if; int phy_cnt; unsigned char phys[2U]; void *base; } ; 1 long int __builtin_expect(long, long); 34 extern struct module __this_module; 72 void set_bit(long nr, volatile unsigned long *addr); 110 void clear_bit(long nr, volatile unsigned long *addr); 204 bool test_and_set_bit(long nr, volatile unsigned long *addr); 308 bool constant_test_bit(long nr, const volatile unsigned long *addr); 14 unsigned long int find_next_bit(const unsigned long *, unsigned long, unsigned long); 42 unsigned long int find_first_bit(const unsigned long *, unsigned long); 7 __u32 __arch_swab32(__u32 val); 46 __u16 __fswab16(__u16 val); 55 __u32 __fswab32(__u32 val); 173 __u32 __swab32p(const __u32 *p); 79 __u32 __be32_to_cpup(const __be32 *p); 178 int printk(const char *, ...); 8 void ldv_dma_map_page(); 7 extern unsigned long page_offset_base; 9 extern unsigned long vmemmap_base; 23 unsigned long int __phys_addr(unsigned long); 32 void * __memcpy(void *, const void *, size_t ); 57 void * __memset(void *, int, size_t ); 27 size_t strlcpy(char *, const char *, size_t ); 93 void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *); 22 void _raw_spin_lock(raw_spinlock_t *); 31 void _raw_spin_lock_irq(raw_spinlock_t *); 41 void _raw_spin_unlock(raw_spinlock_t *); 43 void _raw_spin_unlock_irq(raw_spinlock_t *); 289 raw_spinlock_t * spinlock_check(spinlock_t *lock); 300 void spin_lock(spinlock_t *lock); 330 void spin_lock_irq(spinlock_t *lock); 345 void spin_unlock(spinlock_t *lock); 355 void spin_unlock_irq(spinlock_t *lock); 78 extern volatile unsigned long jiffies; 56 unsigned char readb(const volatile void *addr); 57 unsigned short int readw(const volatile void *addr); 58 unsigned int readl(const volatile void *addr); 64 void writeb(unsigned char val, volatile void *addr); 65 void writew(unsigned short val, volatile void *addr); 66 void writel(unsigned int val, volatile void *addr); 181 void * ioremap_nocache(resource_size_t , unsigned long); 192 void * ioremap(resource_size_t offset, unsigned long size); 197 void iounmap(volatile void *); 87 const char * kobject_name(const struct kobject *kobj); 139 int request_threaded_irq(unsigned int, irqreturn_t (*)(int, void *), irqreturn_t (*)(int, void *), unsigned long, const char *, void *); 144 int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *), unsigned long flags, const char *name, void *dev); 158 void free_irq(unsigned int, void *); 974 const char * dev_name(const struct device *dev); 1021 void * dev_get_drvdata(const struct device *dev); 1026 void dev_set_drvdata(struct device *dev, void *data); 1248 void dev_err(const struct device *, const char *, ...); 1015 int pci_enable_device(struct pci_dev *); 1032 
void pci_disable_device(struct pci_dev *); 1035 void pci_set_master(struct pci_dev *); 1042 int pci_try_set_mwi(struct pci_dev *); 1088 int pci_save_state(struct pci_dev *); 1089 void pci_restore_state(struct pci_dev *); 1102 int pci_set_power_state(struct pci_dev *, pci_power_t ); 1103 pci_power_t pci_choose_state(struct pci_dev *, pm_message_t ); 1161 int pci_request_regions(struct pci_dev *, const char *); 1163 void pci_release_regions(struct pci_dev *); 1216 int __pci_register_driver(struct pci_driver *, struct module *, const char *); 1225 void pci_unregister_driver(struct pci_driver *); 992 void * lowmem_page_address(const struct page *page); 1661 void * pci_get_drvdata(struct pci_dev *pdev); 1666 void pci_set_drvdata(struct pci_dev *pdev, void *data); 1674 const char * pci_name(const struct pci_dev *pdev); 37 void debug_dma_map_page(struct device *, struct page *, size_t , size_t , int, dma_addr_t , bool ); 44 void debug_dma_unmap_page(struct device *, dma_addr_t , size_t , int, bool ); 53 void debug_dma_alloc_coherent(struct device *, size_t , dma_addr_t , void *); 66 void debug_dma_sync_single_for_cpu(struct device *, dma_addr_t , size_t , int); 70 void debug_dma_sync_single_for_device(struct device *, dma_addr_t , size_t , int); 131 void kmemcheck_mark_initialized(void *address, unsigned int n); 136 int valid_dma_direction(int dma_direction); 28 extern struct dma_map_ops *dma_ops; 30 struct dma_map_ops * get_dma_ops(struct device *dev); 42 bool arch_dma_alloc_attrs(struct device **, gfp_t *); 180 dma_addr_t ldv_dma_map_single_attrs_5(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs); 180 dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs); 203 void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs); 315 void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir); 327 void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir); 456 void * dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs); 497 void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag); 503 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle); 16 void * pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle); 31 void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle); 38 dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction); 44 void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction); 79 void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction); 86 void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction); 10 void __const_udelay(unsigned long); 325 unsigned int skb_frag_size(const skb_frag_t *frag); 911 void consume_skb(struct sk_buff *); 999 int skb_pad(struct sk_buff *, int); 1198 unsigned char * skb_end_pointer(const struct sk_buff *skb); 1804 unsigned int skb_headlen(const struct sk_buff *skb); 1920 unsigned char * skb_put(struct sk_buff *, unsigned int); 2023 void skb_reserve(struct sk_buff *skb, int len); 2429 struct sk_buff * __netdev_alloc_skb(struct 
net_device *, unsigned int, gfp_t ); 2445 struct sk_buff * netdev_alloc_skb(struct net_device *dev, unsigned int length); 2565 struct page * skb_frag_page(const skb_frag_t *frag); 2623 void * skb_frag_address(const skb_frag_t *frag); 2783 int skb_padto(struct sk_buff *skb, unsigned int len); 3175 void skb_copy_to_linear_data(struct sk_buff *skb, const void *from, const unsigned int len); 31 int mii_link_ok(struct mii_if_info *); 32 int mii_nway_restart(struct mii_if_info *); 34 int mii_ethtool_get_link_ksettings(struct mii_if_info *, struct ethtool_link_ksettings *); 37 int mii_ethtool_set_link_ksettings(struct mii_if_info *, const struct ethtool_link_ksettings *); 44 int generic_mii_ioctl(struct mii_if_info *, struct mii_ioctl_data *, int, unsigned int *); 49 struct mii_ioctl_data * if_mii(struct ifreq *rq); 408 void __napi_schedule(struct napi_struct *); 411 bool napi_disable_pending(struct napi_struct *n); 425 bool napi_schedule_prep(struct napi_struct *n); 467 bool napi_complete_done(struct napi_struct *, int); 476 bool napi_complete(struct napi_struct *n); 502 void napi_disable(struct napi_struct *); 511 void napi_enable(struct napi_struct *n); 1936 struct netdev_queue * netdev_get_tx_queue(const struct net_device *dev, unsigned int index); 2031 void * netdev_priv(const struct net_device *dev); 2062 void netif_napi_add(struct net_device *, struct napi_struct *, int (*)(struct napi_struct *, int), int); 2434 void free_netdev(struct net_device *); 2790 void netif_tx_start_queue(struct netdev_queue *dev_queue); 2801 void netif_start_queue(struct net_device *dev); 2816 void netif_tx_wake_queue(struct netdev_queue *); 2825 void netif_wake_queue(struct net_device *dev); 2840 void netif_tx_stop_queue(struct netdev_queue *dev_queue); 2852 void netif_stop_queue(struct net_device *dev); 2859 bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue); 2870 bool netif_queue_stopped(const struct net_device *dev); 3044 bool netif_running(const struct net_device *dev); 3174 void __dev_kfree_skb_irq(struct sk_buff *, enum skb_free_reason ); 3196 void dev_kfree_skb_irq(struct sk_buff *skb); 3218 int netif_receive_skb(struct sk_buff *); 3339 void netif_carrier_on(struct net_device *); 3341 void netif_carrier_off(struct net_device *); 3408 void netif_device_detach(struct net_device *); 3410 void netif_device_attach(struct net_device *); 3511 void netif_trans_update(struct net_device *dev); 3667 int register_netdev(struct net_device *); 3668 void unregister_netdev(struct net_device *); 36 __be16 eth_type_trans(struct sk_buff *, struct net_device *); 48 int eth_mac_addr(struct net_device *, void *); 50 int eth_validate_addr(struct net_device *); 52 struct net_device * alloc_etherdev_mqs(int, unsigned int, unsigned int); 11 u32 crc32_le(u32 , const unsigned char *, size_t ); 411 void __vlan_hwaccel_put_tag(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); 42 int request_firmware(const struct firmware **, const char *, struct device *); 53 void release_firmware(const struct firmware *); 78 int intr_latency = 0; 79 int small_frames = 0; 81 int debug = 1; 82 int max_interrupt_work = 20; 83 int mtu = 0; 86 const int multicast_filter_limit = 512; 88 int enable_hw_cksum = 1; 108 int rx_copybreak = 0;
170 const char version[137U] = "\016starfire.c:v1.03 7/26/2000  Written by Donald Becker <becker@scyld.com>\n (unofficial 2.2/2.4 kernel port, version 2.1, July  6, 2008)\n"; 289 const struct pci_device_id starfire_pci_tbl[2U] = { { 36868U, 26901U, 4294967295U, 4294967295U, 0U, 0U, 0UL }, { 0U, 0U, 0U, 0U, 0U, 0U, 0UL } }; 293 const struct pci_device_id __mod_pci__starfire_pci_tbl_device_table[2U] = { }; 299 const struct chip_info netdrv_tbl[1U] = { { "Adaptec Starfire 6915", 1 } }; 576 int mdio_read(struct net_device *dev, int phy_id, int location); 577 void mdio_write(struct net_device *dev, int phy_id, int location, int value); 578 int netdev_open(struct net_device *dev); 579 void check_duplex(struct net_device *dev); 580 void tx_timeout(struct net_device *dev); 581 void init_ring(struct net_device *dev); 582 netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev); 583 irqreturn_t intr_handler(int irq, void *dev_instance); 584 void netdev_error(struct net_device *dev, int intr_status); 585 int __netdev_rx(struct net_device *dev, int *quota); 586 int netdev_poll(struct napi_struct *napi, int budget); 587 void refill_rx_ring(struct net_device *dev); 589 void set_rx_mode(struct net_device *dev); 590 struct net_device_stats * get_stats(struct net_device *dev); 591 int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 592 int netdev_close(struct net_device *dev); 593 void netdev_media_change(struct net_device *dev); 594 const struct ethtool_ops ethtool_ops; 598 int netdev_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid); 613 int netdev_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid); 630 const struct net_device_ops netdev_ops = { 0, 0, &netdev_open, &netdev_close, &start_tx, 0, 0, 0, &set_rx_mode, &eth_mac_addr, &eth_validate_addr, &netdev_ioctl, 0, 0, 0, &tx_timeout, 0, 0, 0, &get_stats, &netdev_vlan_rx_add_vid, &netdev_vlan_rx_kill_vid, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; 646 int starfire_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 1713 u32 set_vlan_mode(struct netdev_private *np); 1805 int check_if_running(struct net_device *dev); 1812 void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info); 1820 int get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd); 1830 int set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd); 1842 int nway_reset(struct net_device *dev); 1848 u32 get_link(struct net_device *dev); 1854 u32 get_msglevel(struct net_device *dev); 1859 void set_msglevel(struct net_device *dev, u32 val); 1864 const struct ethtool_ops ethtool_ops = { 0, 0, &get_drvinfo, 0, 0, 0, 0, &get_msglevel, &set_msglevel, &nway_reset, &get_link, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, &check_if_running, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, &get_link_ksettings, &set_link_ksettings }; 1964 int starfire_suspend(struct pci_dev *pdev, pm_message_t state); 1979 int starfire_resume(struct pci_dev *pdev); 1996 void starfire_remove_one(struct pci_dev *pdev); 2020 struct pci_driver starfire_driver = { { 0, 0 }, "starfire", (const struct pci_device_id *)(&starfire_pci_tbl),
&starfire_init_one, &starfire_remove_one, &starfire_suspend, 0, 0, &starfire_resume, 0, 0, 0, { 0, 0, 0, 0, (_Bool)0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { { { { { { 0 } }, 0U, 0U, 0, { 0, { 0, 0 }, 0, 0, 0UL } } } }, { 0, 0 } } }; 2032 int starfire_init(); 2047 void starfire_cleanup(); 2080 void ldv_check_final_state(); 2083 void ldv_check_return_value(int); 2086 void ldv_check_return_value_probe(int); 2089 void ldv_initialize(); 2092 void ldv_handler_precall(); 2095 int nondet_int(); 2098 int LDV_IN_INTERRUPT = 0; 2101 void ldv_main0_sequence_infinite_withcheck_stateful(); 10 void ldv_error(); 7 bool ldv_is_err(const void *ptr); 14 void * ldv_err_ptr(long error); 21 long int ldv_ptr_err(const void *ptr); 28 bool ldv_is_err_or_null(const void *ptr); 5 int LDV_DMA_MAP_CALLS = 0; 16 void ldv_dma_mapping_error(); return ; } { 2103 struct net_device *var_group1; 2104 int res_netdev_open_5; 2105 int res_netdev_close_28; 2106 struct sk_buff *var_group2; 2107 struct ifreq *var_group3; 2108 int var_netdev_ioctl_27_p2; 2109 unsigned short var_netdev_vlan_rx_add_vid_0_p1; 2110 unsigned short var_netdev_vlan_rx_add_vid_0_p2; 2111 unsigned short var_netdev_vlan_rx_kill_vid_1_p1; 2112 unsigned short var_netdev_vlan_rx_kill_vid_1_p2; 2113 struct ethtool_drvinfo *var_group4; 2114 unsigned int var_set_msglevel_26_p1; 2115 struct ethtool_link_ksettings *var_group5; 2116 const struct ethtool_link_ksettings *var_set_link_ksettings_22_p1; 2117 struct pci_dev *var_group6; 2118 const struct pci_device_id *var_starfire_init_one_2_p1; 2119 int res_starfire_init_one_2; 2120 struct pm_message var_starfire_suspend_29_p1; 2121 int var_intr_handler_10_p0; 2122 void *var_intr_handler_10_p1; 2123 int ldv_s_netdev_ops_net_device_ops; 2124 int ldv_s_starfire_driver_pci_driver; 2125 int tmp; 2126 int tmp___0; 2127 int tmp___1; 4664 ldv_s_netdev_ops_net_device_ops = 0; 4669 ldv_s_starfire_driver_pci_driver = 0; 4543 LDV_IN_INTERRUPT = 1; 4552 ldv_initialize() { /* Function call is skipped due to function is undefined */} 4661 ldv_handler_precall() { /* Function call is skipped due to function is undefined */} { 2034 int tmp; 2036 printk((const char *)(&version)) { /* Function call is skipped due to function is undefined */} 2038 printk("\016starfire: polling (NAPI) enabled\n") { /* Function call is skipped due to function is undefined */} 2043 tmp = __pci_register_driver(&starfire_driver, &__this_module, "starfire") { /* Function call is skipped due to function is undefined */} } 4675 goto ldv_52857; 4675 tmp___1 = nondet_int() { /* Function call is skipped due to function is undefined */} 4679 goto ldv_52856; 4676 ldv_52856:; 4680 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */} 4680 switch (tmp___0); 7263 ldv_handler_precall() { /* Function call is skipped due to function is undefined */} { } 1981 struct net_device *dev; 1982 void *tmp; 1983 _Bool tmp___0; { 1663 void *tmp; { 1023 void *__CPAchecker_TMP_0 = (void *)(dev->driver_data); 1023 return __CPAchecker_TMP_0;; } 1663 return tmp;; } 1981 dev = (struct net_device *)tmp; 1983 pci_set_power_state(pdev, 0) { /* Function call is skipped due to function is undefined */} 1984 pci_restore_state(pdev) { /* Function call is skipped due to function is undefined */} { 3046 _Bool tmp; { 310 return (((int)(((unsigned long)(*(addr + ((unsigned long)(nr >> 6))))) >> (((int)nr) & 63))) & 1) != 0;; } 3046 return ((int)tmp) != 0;; } { } 876 const struct firmware *fw_rx; 877 const struct firmware *fw_tx; 878 const __be32 *fw_rx_data; 879 const 
__be32 *fw_tx_data; 880 struct netdev_private *np; 881 void *tmp; 882 void *ioaddr; 883 int irq; 884 int i; 885 int retval; 886 unsigned long tx_size; 887 unsigned long rx_size; 888 unsigned long tx_done_q_size; 889 unsigned long rx_done_q_size; 890 unsigned long tx_ring_size; 891 unsigned long rx_ring_size; 892 __be16 *eaddrs; 893 void *setup_frm; 894 unsigned short tmp___0; 895 unsigned short tmp___1; 896 unsigned short tmp___2; 897 unsigned int tmp___3; 898 unsigned int tmp___4; 899 unsigned int tmp___5; { 2033 return ((void *)dev) + 3200U;; } 878 np = (struct netdev_private *)tmp; 879 ioaddr = np->base; 880 const int __CPAchecker_TMP_0 = (const int)(np->pci_dev->irq); 880 irq = __CPAchecker_TMP_0; 887 -request_irq((unsigned int)irq, &intr_handler, 128UL, (const char *)(&(dev->name)), (void *)dev) { 147 int tmp; 147 tmp = request_threaded_irq(irq, handler, (irqreturn_t (*)(int, void *))0, flags, name, dev) { /* Function call is skipped due to function is undefined */} 147 return tmp;; } { 66 Ignored inline assembler code 67 return ;; } { 66 Ignored inline assembler code 67 return ;; } 899 unsigned long __CPAchecker_TMP_1 = (unsigned long)(np->queue_mem); 922 netif_carrier_off(dev) { /* Function call is skipped due to function is undefined */} { } 1141 struct netdev_private *np; 1142 void *tmp; 1143 int i; 1144 unsigned int tmp___0; 1145 unsigned int tmp___1; 1146 unsigned int tmp___2; 1147 unsigned int tmp___3; 1148 unsigned int tmp___4; 1149 struct sk_buff *skb; 1150 struct sk_buff *tmp___5; { 2033 return ((void *)dev) + 3200U;; } 1141 np = (struct netdev_private *)tmp; 1144 tmp___1 = 0U; 1144 np->reap_tx = tmp___1; 1144 tmp___0 = tmp___1; 1144 np->cur_tx = tmp___0; 1144 np->cur_rx = tmp___0; 1145 tmp___4 = 0U; 1145 np->tx_done = tmp___4; 1145 tmp___3 = tmp___4; 1145 np->rx_done = tmp___3; 1145 tmp___2 = tmp___3; 1145 np->dirty_tx = tmp___2; 1145 np->dirty_rx = tmp___2; 1147 unsigned int __CPAchecker_TMP_0; 1147 __CPAchecker_TMP_0 = (dev->mtu) + 32U; 1147 np->rx_buf_sz = __CPAchecker_TMP_0; 1150 i = 0; 1150 goto ldv_52538; 1152 goto ldv_52537; 1151 ldv_52537:; { 2447 struct sk_buff *tmp; 2448 tmp = __netdev_alloc_skb(dev, length, 34078752U) { /* Function call is skipped due to function is undefined */} 2448 return tmp;; } 1151 skb = tmp___5; 1152 ((np->rx_info)[i]).skb = skb; 1155 void *__CPAchecker_TMP_1 = (void *)(skb->data); 1155 size_t __CPAchecker_TMP_2 = (size_t )(np->rx_buf_sz); { 41 unsigned long long tmp; 40 struct device *__CPAchecker_TMP_0; 40 assume(((unsigned long)hwdev) != ((unsigned long)((struct pci_dev *)0))); 40 __CPAchecker_TMP_0 = &(hwdev->dev); { 38 unsigned long long tmp; { } 184 struct dma_map_ops *ops; 185 struct dma_map_ops *tmp; 186 unsigned long long addr; 187 int tmp___0; 188 long tmp___1; 189 unsigned long tmp___2; 190 unsigned long tmp___3; { 32 long tmp; 35 tmp = __builtin_expect(((unsigned long)dev) == ((unsigned long)((struct device *)0)), 0L) { /* Function call is skipped due to function is undefined */} 35 assume(!(tmp != 0L)); 35 assume(((unsigned long)(dev->archdata.dma_ops)) == ((unsigned long)((struct dma_map_ops *)0))); 36 return dma_ops;; } 185 ops = tmp; { 133 return ;; } { 138 int __CPAchecker_TMP_0; 138 assume(!(dma_direction == 0)); 138 assume(!(dma_direction == 1)); 138 assume(dma_direction == 2); __CPAchecker_TMP_0 = 1; 138 return __CPAchecker_TMP_0;; } 189 tmp___1 = __builtin_expect(tmp___0 == 0, 0L) { /* Function call is skipped due to function is undefined */} 189 assume(!(tmp___1 != 0L)); 190 tmp___2 = __phys_addr((unsigned long)ptr) { 
/* Function call is skipped due to function is undefined */} 190 addr = (*(ops->map_page))(dev, (struct page *)((tmp___2 >> 12) + vmemmap_base), ((unsigned long)ptr) & 4095UL, size, dir, attrs); 193 tmp___3 = __phys_addr((unsigned long)ptr) { /* Function call is skipped due to function is undefined */} 193 debug_dma_map_page(dev, (struct page *)((tmp___3 >> 12) + vmemmap_base), ((unsigned long)ptr) & 4095UL, size, (int)dir, addr, 1) { /* Function call is skipped due to function is undefined */} 196 return addr;; } 40 return tmp;; } 1157 ((np->rx_ring) + ((unsigned long)i))->rxaddr = (((np->rx_info)[i]).mapping) | 1ULL; 1150 i = i + 1; 1151 ldv_52538:; 1152 goto ldv_52537; 1151 ldv_52537:; { 2447 struct sk_buff *tmp; 2448 tmp = __netdev_alloc_skb(dev, length, 34078752U) { /* Function call is skipped due to function is undefined */} 2448 return tmp;; } 1151 skb = tmp___5; 1152 ((np->rx_info)[i]).skb = skb; 1155 void *__CPAchecker_TMP_1 = (void *)(skb->data); 1155 size_t __CPAchecker_TMP_2 = (size_t )(np->rx_buf_sz); { } 41 unsigned long long tmp; 40 struct device *__CPAchecker_TMP_0; 40 assume(((unsigned long)hwdev) != ((unsigned long)((struct pci_dev *)0))); 40 __CPAchecker_TMP_0 = &(hwdev->dev); } | Source code
1 #ifndef _ASM_X86_BITOPS_H
2 #define _ASM_X86_BITOPS_H
3
4 /*
5 * Copyright 1992, Linus Torvalds.
6 *
7 * Note: inlines with more than a single statement should be marked
8 * __always_inline to avoid problems with older gcc's inlining heuristics.
9 */
10
11 #ifndef _LINUX_BITOPS_H
12 #error only <linux/bitops.h> can be included directly
13 #endif
14
15 #include <linux/compiler.h>
16 #include <asm/alternative.h>
17 #include <asm/rmwcc.h>
18 #include <asm/barrier.h>
19
20 #if BITS_PER_LONG == 32
21 # define _BITOPS_LONG_SHIFT 5
22 #elif BITS_PER_LONG == 64
23 # define _BITOPS_LONG_SHIFT 6
24 #else
25 # error "Unexpected BITS_PER_LONG"
26 #endif
27
28 #define BIT_64(n) (U64_C(1) << (n))
29
30 /*
31 * These have to be done with inline assembly: that way the bit-setting
32 * is guaranteed to be atomic. All bit operations return 0 if the bit
33 * was cleared before the operation and != 0 if it was not.
34 *
35 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
36 */
37
38 #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
39 /* Technically wrong, but this avoids compilation errors on some gcc
40 versions. */
41 #define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
42 #else
43 #define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
44 #endif
45
46 #define ADDR BITOP_ADDR(addr)
47
48 /*
49 * We do the locked ops that don't return the old value as
50 * a mask operation on a byte.
51 */
52 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
53 #define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
54 #define CONST_MASK(nr) (1 << ((nr) & 7))
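/*
 * Editorial note (not part of the original header): a worked example of
 * the byte-mask reduction above, for constant nr = 11:
 *
 *   CONST_MASK_ADDR(11, addr)  ->  the byte at (void *)addr + (11 >> 3), i.e. addr + 1
 *   CONST_MASK(11)             ->  1 << (11 & 7)  ==  1 << 3  ==  0x08
 *
 * so a constant-nr set_bit(11, addr) compiles to a single
 * "lock orb $0x08, 1(addr)" instead of a bts on the whole word.
 */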
55
56 /**
57 * set_bit - Atomically set a bit in memory
58 * @nr: the bit to set
59 * @addr: the address to start counting from
60 *
61 * This function is atomic and may not be reordered. See __set_bit()
62 * if you do not require the atomic guarantees.
63 *
64 * Note: there are no guarantees that this function will not be reordered
65 * on non x86 architectures, so if you are writing portable code,
66 * make sure not to rely on its reordering guarantees.
67 *
68 * Note that @nr may be almost arbitrarily large; this function is not
69 * restricted to acting on a single-word quantity.
70 */
71 static __always_inline void
72 set_bit(long nr, volatile unsigned long *addr)
73 {
74 if (IS_IMMEDIATE(nr)) {
75 asm volatile(LOCK_PREFIX "orb %1,%0"
76 : CONST_MASK_ADDR(nr, addr)
77 : "iq" ((u8)CONST_MASK(nr))
78 : "memory");
79 } else {
80 asm volatile(LOCK_PREFIX "bts %1,%0"
81 : BITOP_ADDR(addr) : "Ir" (nr) : "memory");
82 }
83 }
84
85 /**
86 * __set_bit - Set a bit in memory
87 * @nr: the bit to set
88 * @addr: the address to start counting from
89 *
90 * Unlike set_bit(), this function is non-atomic and may be reordered.
91 * If it's called on the same region of memory simultaneously, the effect
92 * may be that only one operation succeeds.
93 */
94 static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
95 {
96 asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
97 }
98
99 /**
100 * clear_bit - Clears a bit in memory
101 * @nr: Bit to clear
102 * @addr: Address to start counting from
103 *
104 * clear_bit() is atomic and may not be reordered. However, it does
105 * not contain a memory barrier, so if it is used for locking purposes,
106 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
107 * in order to ensure changes are visible on other processors.
108 */
109 static __always_inline void
110 clear_bit(long nr, volatile unsigned long *addr)
111 {
112 if (IS_IMMEDIATE(nr)) {
113 asm volatile(LOCK_PREFIX "andb %1,%0"
114 : CONST_MASK_ADDR(nr, addr)
115 : "iq" ((u8)~CONST_MASK(nr)));
116 } else {
117 asm volatile(LOCK_PREFIX "btr %1,%0"
118 : BITOP_ADDR(addr)
119 : "Ir" (nr));
120 }
121 }
122
123 /*
124 * clear_bit_unlock - Clears a bit in memory
125 * @nr: Bit to clear
126 * @addr: Address to start counting from
127 *
128 * clear_bit() is atomic and implies release semantics before the memory
129 * operation. It can be used for an unlock.
130 */
131 static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
132 {
133 barrier();
134 clear_bit(nr, addr);
135 }
136
137 static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
138 {
139 asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
140 }
141
142 /*
143 * __clear_bit_unlock - Clears a bit in memory
144 * @nr: Bit to clear
145 * @addr: Address to start counting from
146 *
147 * __clear_bit() is non-atomic and implies release semantics before the memory
148 * operation. It can be used for an unlock if no other CPUs can concurrently
149 * modify other bits in the word.
150 *
151 * No memory barrier is required here, because x86 cannot reorder stores past
152 * older loads. Same principle as spin_unlock.
153 */
154 static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
155 {
156 barrier();
157 __clear_bit(nr, addr);
158 }
159
160 /**
161 * __change_bit - Toggle a bit in memory
162 * @nr: the bit to change
163 * @addr: the address to start counting from
164 *
165 * Unlike change_bit(), this function is non-atomic and may be reordered.
166 * If it's called on the same region of memory simultaneously, the effect
167 * may be that only one operation succeeds.
168 */
169 static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
170 {
171 asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
172 }
173
174 /**
175 * change_bit - Toggle a bit in memory
176 * @nr: Bit to change
177 * @addr: Address to start counting from
178 *
179 * change_bit() is atomic and may not be reordered.
180 * Note that @nr may be almost arbitrarily large; this function is not
181 * restricted to acting on a single-word quantity.
182 */
183 static __always_inline void change_bit(long nr, volatile unsigned long *addr)
184 {
185 if (IS_IMMEDIATE(nr)) {
186 asm volatile(LOCK_PREFIX "xorb %1,%0"
187 : CONST_MASK_ADDR(nr, addr)
188 : "iq" ((u8)CONST_MASK(nr)));
189 } else {
190 asm volatile(LOCK_PREFIX "btc %1,%0"
191 : BITOP_ADDR(addr)
192 : "Ir" (nr));
193 }
194 }
195
196 /**
197 * test_and_set_bit - Set a bit and return its old value
198 * @nr: Bit to set
199 * @addr: Address to count from
200 *
201 * This operation is atomic and cannot be reordered.
202 * It also implies a memory barrier.
203 */
204 static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
205 {
206 GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c);
207 }
208
209 /**
210 * test_and_set_bit_lock - Set a bit and return its old value for lock
211 * @nr: Bit to set
212 * @addr: Address to count from
213 *
214 * This is the same as test_and_set_bit on x86.
215 */
216 static __always_inline bool
217 test_and_set_bit_lock(long nr, volatile unsigned long *addr)
218 {
219 return test_and_set_bit(nr, addr);
220 }
221
222 /**
223 * __test_and_set_bit - Set a bit and return its old value
224 * @nr: Bit to set
225 * @addr: Address to count from
226 *
227 * This operation is non-atomic and can be reordered.
228 * If two examples of this operation race, one can appear to succeed
229 * but actually fail. You must protect multiple accesses with a lock.
230 */
231 static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
232 {
233 bool oldbit;
234
235 asm("bts %2,%1\n\t"
236 CC_SET(c)
237 : CC_OUT(c) (oldbit), ADDR
238 : "Ir" (nr));
239 return oldbit;
240 }
241
242 /**
243 * test_and_clear_bit - Clear a bit and return its old value
244 * @nr: Bit to clear
245 * @addr: Address to count from
246 *
247 * This operation is atomic and cannot be reordered.
248 * It also implies a memory barrier.
249 */
250 static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
251 {
252 GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c);
253 }
254
255 /**
256 * __test_and_clear_bit - Clear a bit and return its old value
257 * @nr: Bit to clear
258 * @addr: Address to count from
259 *
260 * This operation is non-atomic and can be reordered.
261 * If two examples of this operation race, one can appear to succeed
262 * but actually fail. You must protect multiple accesses with a lock.
263 *
264 * Note: the operation is performed atomically with respect to
265 * the local CPU, but not other CPUs. Portable code should not
266 * rely on this behaviour.
267 * KVM relies on this behaviour on x86 for modifying memory that is also
268 * accessed from a hypervisor on the same CPU if running in a VM: don't change
269 * this without also updating arch/x86/kernel/kvm.c
270 */
271 static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
272 {
273 bool oldbit;
274
275 asm volatile("btr %2,%1\n\t"
276 CC_SET(c)
277 : CC_OUT(c) (oldbit), ADDR
278 : "Ir" (nr));
279 return oldbit;
280 }
281
282 /* WARNING: non atomic and it can be reordered! */
283 static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
284 {
285 bool oldbit;
286
287 asm volatile("btc %2,%1\n\t"
288 CC_SET(c)
289 : CC_OUT(c) (oldbit), ADDR
290 : "Ir" (nr) : "memory");
291
292 return oldbit;
293 }
294
295 /**
296 * test_and_change_bit - Change a bit and return its old value
297 * @nr: Bit to change
298 * @addr: Address to count from
299 *
300 * This operation is atomic and cannot be reordered.
301 * It also implies a memory barrier.
302 */
303 static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
304 {
305 GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c);
306 }
307
308 static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
309 {
310 return ((1UL << (nr & (BITS_PER_LONG-1))) &
311 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
312 }
313
314 static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
315 {
316 bool oldbit;
317
318 asm volatile("bt %2,%1\n\t"
319 CC_SET(c)
320 : CC_OUT(c) (oldbit)
321 : "m" (*(unsigned long *)addr), "Ir" (nr));
322
323 return oldbit;
324 }
325
326 #if 0 /* Fool kernel-doc since it doesn't do macros yet */
327 /**
328 * test_bit - Determine whether a bit is set
329 * @nr: bit number to test
330 * @addr: Address to start counting from
331 */
332 static bool test_bit(int nr, const volatile unsigned long *addr);
333 #endif
334
335 #define test_bit(nr, addr) \
336 (__builtin_constant_p((nr)) \
337 ? constant_test_bit((nr), (addr)) \
338 : variable_test_bit((nr), (addr)))
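/*
 * Editorial sketch (illustrative, not from the original header): typical
 * driver-style use of the atomic bitops together with test_bit(). The
 * bitmap mirrors the starfire driver's active_vlans usage further below.
 */
#if 0
static unsigned long example_vlans[BITS_TO_LONGS(4096)];

static void example_usage(void)
{
	set_bit(100, example_vlans);            /* atomic: LOCK orb/bts */
	if (test_bit(100, example_vlans))       /* constant nr -> constant_test_bit */
		clear_bit(100, example_vlans);  /* atomic: LOCK andb/btr */
}
#endif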
339
340 /**
341 * __ffs - find first set bit in word
342 * @word: The word to search
343 *
344 * Undefined if no bit exists, so code should check against 0 first.
345 */
346 static __always_inline unsigned long __ffs(unsigned long word)
347 {
348 asm("rep; bsf %1,%0"
349 : "=r" (word)
350 : "rm" (word));
351 return word;
352 }
353
354 /**
355 * ffz - find first zero bit in word
356 * @word: The word to search
357 *
358 * Undefined if no zero exists, so code should check against ~0UL first.
359 */
360 static __always_inline unsigned long ffz(unsigned long word)
361 {
362 asm("rep; bsf %1,%0"
363 : "=r" (word)
364 : "r" (~word));
365 return word;
366 }
367
368 /*
369 * __fls: find last set bit in word
370 * @word: The word to search
371 *
372 * Undefined if no set bit exists, so code should check against 0 first.
373 */
374 static __always_inline unsigned long __fls(unsigned long word)
375 {
376 asm("bsr %1,%0"
377 : "=r" (word)
378 : "rm" (word));
379 return word;
380 }
381
382 #undef ADDR
383
384 #ifdef __KERNEL__
385 /**
386 * ffs - find first set bit in word
387 * @x: the word to search
388 *
389 * This is defined the same way as the libc and compiler builtin ffs
390 * routines, therefore differs in spirit from the other bitops.
391 *
392 * ffs(value) returns 0 if value is 0 or the position of the first
393 * set bit if value is nonzero. The first (least significant) bit
394 * is at position 1.
395 */
396 static __always_inline int ffs(int x)
397 {
398 int r;
399
400 #ifdef CONFIG_X86_64
401 /*
402 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
403 * dest reg is undefined if x==0, but their CPU architect says its
404 * value is written to set it to the same as before, except that the
405 * top 32 bits will be cleared.
406 *
407 * We cannot do this on 32 bits because at the very least some
408 * 486 CPUs did not behave this way.
409 */
410 asm("bsfl %1,%0"
411 : "=r" (r)
412 : "rm" (x), "0" (-1));
413 #elif defined(CONFIG_X86_CMOV)
414 asm("bsfl %1,%0\n\t"
415 "cmovzl %2,%0"
416 : "=&r" (r) : "rm" (x), "r" (-1));
417 #else
418 asm("bsfl %1,%0\n\t"
419 "jnz 1f\n\t"
420 "movl $-1,%0\n"
421 "1:" : "=r" (r) : "rm" (x));
422 #endif
423 return r + 1;
424 }
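/*
 * Editorial note: worked examples of the contract documented above.
 *   ffs(0x00000000) == 0   (no bit set)
 *   ffs(0x00000001) == 1   (LSB counts as position 1)
 *   ffs(0x00000010) == 5   (bit 4 set -> position 5)
 *   ffs(0x80000000) == 32  (MSB -> position 32)
 * On x86-64 the "0" (-1) input constraint preloads r with -1, so the
 * x == 0 case falls out of "r + 1" with no branch.
 */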
425
426 /**
427 * fls - find last set bit in word
428 * @x: the word to search
429 *
430 * This is defined in a similar way as the libc and compiler builtin
431 * ffs, but returns the position of the most significant set bit.
432 *
433 * fls(value) returns 0 if value is 0 or the position of the last
434 * set bit if value is nonzero. The last (most significant) bit is
435 * at position 32.
436 */
437 static __always_inline int fls(int x)
438 {
439 int r;
440
441 #ifdef CONFIG_X86_64
442 /*
443 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
444 * dest reg is undefined if x==0, but their CPU architect says its
445 * value is written to set it to the same as before, except that the
446 * top 32 bits will be cleared.
447 *
448 * We cannot do this on 32 bits because at the very least some
449 * 486 CPUs did not behave this way.
450 */
451 asm("bsrl %1,%0"
452 : "=r" (r)
453 : "rm" (x), "0" (-1));
454 #elif defined(CONFIG_X86_CMOV)
455 asm("bsrl %1,%0\n\t"
456 "cmovzl %2,%0"
457 : "=&r" (r) : "rm" (x), "rm" (-1));
458 #else
459 asm("bsrl %1,%0\n\t"
460 "jnz 1f\n\t"
461 "movl $-1,%0\n"
462 "1:" : "=r" (r) : "rm" (x));
463 #endif
464 return r + 1;
465 }
466
467 /**
468 * fls64 - find last set bit in a 64-bit word
469 * @x: the word to search
470 *
471 * This is defined in a similar way as the libc and compiler builtin
472 * ffsll, but returns the position of the most significant set bit.
473 *
474 * fls64(value) returns 0 if value is 0 or the position of the last
475 * set bit if value is nonzero. The last (most significant) bit is
476 * at position 64.
477 */
478 #ifdef CONFIG_X86_64
479 static __always_inline int fls64(__u64 x)
480 {
481 int bitpos = -1;
482 /*
483 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
484 * dest reg is undefined if x==0, but their CPU architect says its
485 * value is written to set it to the same as before.
486 */
487 asm("bsrq %1,%q0"
488 : "+r" (bitpos)
489 : "rm" (x));
490 return bitpos + 1;
491 }
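/*
 * Editorial note: worked examples for fls64() as defined above.
 *   fls64(0)                  == 0
 *   fls64(1)                  == 1
 *   fls64(0x8000000000000000) == 64
 * With x == 0, BSRQ leaves bitpos at its preloaded -1, so the function
 * returns -1 + 1 == 0 without a branch.
 */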
492 #else
493 #include <asm-generic/bitops/fls64.h>
494 #endif
495
496 #include <asm-generic/bitops/find.h>
497
498 #include <asm-generic/bitops/sched.h>
499
500 #include <asm/arch_hweight.h>
501
502 #include <asm-generic/bitops/const_hweight.h>
503
504 #include <asm-generic/bitops/le.h>
505
506 #include <asm-generic/bitops/ext2-atomic-setbit.h>
507
508 #endif /* __KERNEL__ */
509 #endif /* _ASM_X86_BITOPS_H */

1 #ifndef _ASM_X86_DMA_MAPPING_H
2 #define _ASM_X86_DMA_MAPPING_H
3
4 /*
5 * IOMMU interface. See Documentation/DMA-API-HOWTO.txt and
6 * Documentation/DMA-API.txt for documentation.
7 */
8
9 #include <linux/kmemcheck.h>
10 #include <linux/scatterlist.h>
11 #include <linux/dma-debug.h>
12 #include <asm/io.h>
13 #include <asm/swiotlb.h>
14 #include <linux/dma-contiguous.h>
15
16 #ifdef CONFIG_ISA
17 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
18 #else
19 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
20 #endif
21
22 #define DMA_ERROR_CODE 0
23
24 extern int iommu_merge;
25 extern struct device x86_dma_fallback_dev;
26 extern int panic_on_overflow;
27
28 extern struct dma_map_ops *dma_ops;
29
30 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
31 {
32 #ifndef CONFIG_X86_DEV_DMA_OPS
33 return dma_ops;
34 #else
35 if (unlikely(!dev) || !dev->archdata.dma_ops)
36 return dma_ops;
37 else
38 return dev->archdata.dma_ops;
39 #endif
40 }
41
42 bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
43 #define arch_dma_alloc_attrs arch_dma_alloc_attrs
44
45 #define HAVE_ARCH_DMA_SUPPORTED 1
46 extern int dma_supported(struct device *hwdev, u64 mask);
47
48 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
49 dma_addr_t *dma_addr, gfp_t flag,
50 unsigned long attrs);
51
52 extern void dma_generic_free_coherent(struct device *dev, size_t size,
53 void *vaddr, dma_addr_t dma_addr,
54 unsigned long attrs);
55
56 #ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
57 extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
58 extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
59 extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
60 #else
61
62 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
63 {
64 if (!dev->dma_mask)
65 return 0;
66
67 return addr + size - 1 <= *dev->dma_mask;
68 }
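/*
 * Editorial note: with a 32-bit mask (*dev->dma_mask == 0xffffffff), a
 * 0x1000-byte buffer at bus address 0xfffff000 is capable (its last byte
 * is 0xffffffff <= mask), while the same buffer at 0xfffff001 is not.
 * The "- 1" makes the comparison cover the last addressed byte rather
 * than one past the end of the buffer.
 */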
69
70 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
71 {
72 return paddr;
73 }
74
75 static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
76 {
77 return daddr;
78 }
79 #endif /* CONFIG_X86_DMA_REMAP */
80
81 static inline void
82 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
83 enum dma_data_direction dir)
84 {
85 flush_write_buffers();
86 }
87
88 static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
89 gfp_t gfp)
90 {
91 unsigned long dma_mask = 0;
92
93 dma_mask = dev->coherent_dma_mask;
94 if (!dma_mask)
95 dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
96
97 return dma_mask;
98 }
99
100 static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
101 {
102 unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
103
104 if (dma_mask <= DMA_BIT_MASK(24))
105 gfp |= GFP_DMA;
106 #ifdef CONFIG_X86_64
107 if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
108 gfp |= GFP_DMA32;
109 #endif
110 return gfp;
111 }
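/*
 * Editorial note: concrete outcomes of the selection above. A device
 * with coherent_dma_mask == DMA_BIT_MASK(24) (ISA-style) gets GFP_DMA,
 * so the coherent buffer comes from the low 16MB; a 32-bit mask on
 * x86-64 adds GFP_DMA32 (low 4GB); a full 64-bit mask leaves gfp
 * unchanged.
 */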
112
113 #endif

1 #ifndef _ASM_X86_IO_H
2 #define _ASM_X86_IO_H
3
4 /*
5 * This file contains the definitions for the x86 IO instructions
6 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
7 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
8 * versions of the single-IO instructions (inb_p/inw_p/..).
9 *
10 * This file is not meant to be obfuscating: it's just complicated
11 * to (a) handle it all in a way that makes gcc able to optimize it
12 * as well as possible and (b) trying to avoid writing the same thing
13 * over and over again with slight variations and possibly making a
14 * mistake somewhere.
15 */
16
17 /*
18 * Thanks to James van Artsdalen for a better timing-fix than
19 * the two short jumps: using outb's to a nonexistent port seems
20 * to guarantee better timings even on fast machines.
21 *
22 * On the other hand, I'd like to be sure of a non-existent port:
23 * I feel a bit unsafe about using 0x80 (should be safe, though)
24 *
25 * Linus
26 */
27
28 /*
29 * Bit simplified and optimized by Jan Hubicka
30 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
31 *
32 * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
33 * isa_read[wl] and isa_write[wl] fixed
34 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
35 */
36
37 #define ARCH_HAS_IOREMAP_WC
38 #define ARCH_HAS_IOREMAP_WT
39
40 #include <linux/string.h>
41 #include <linux/compiler.h>
42 #include <asm/page.h>
43 #include <asm/early_ioremap.h>
44 #include <asm/pgtable_types.h>
45
46 #define build_mmio_read(name, size, type, reg, barrier) \
47 static inline type name(const volatile void __iomem *addr) \
48 { type ret; asm volatile("mov" size " %1,%0":reg (ret) \
49 :"m" (*(volatile type __force *)addr) barrier); return ret; }
50
51 #define build_mmio_write(name, size, type, reg, barrier) \
52 static inline void name(type val, volatile void __iomem *addr) \
53 { asm volatile("mov" size " %0,%1": :reg (val), \
54 "m" (*(volatile type __force *)addr) barrier); }
55
56 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
57 build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
58 build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
59
60 build_mmio_read(__readb, "b", unsigned char, "=q", )
61 build_mmio_read(__readw, "w", unsigned short, "=r", )
62 build_mmio_read(__readl, "l", unsigned int, "=r", )
63
64 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
65 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
66 build_mmio_write(writel, "l", unsigned int, "r", :"memory")
67
68 build_mmio_write(__writeb, "b", unsigned char, "q", )
69 build_mmio_write(__writew, "w", unsigned short, "r", )
70 build_mmio_write(__writel, "l", unsigned int, "r", )
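/*
 * Editorial note: an illustrative expansion of the macros above.
 * build_mmio_read(readl, "l", unsigned int, "=r", :"memory") generates
 * roughly:
 *
 *   static inline unsigned int readl(const volatile void __iomem *addr)
 *   {
 *           unsigned int ret;
 *           asm volatile("movl %1,%0"
 *                        : "=r" (ret)
 *                        : "m" (*(volatile unsigned int __force *)addr)
 *                        : "memory");
 *           return ret;
 *   }
 *
 * The __readl/__writel variants pass an empty barrier argument and so
 * omit the "memory" clobber; that is what backs the *_relaxed and
 * __raw_* aliases below.
 */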
71
72 #define readb_relaxed(a) __readb(a)
73 #define readw_relaxed(a) __readw(a)
74 #define readl_relaxed(a) __readl(a)
75 #define __raw_readb __readb
76 #define __raw_readw __readw
77 #define __raw_readl __readl
78
79 #define writeb_relaxed(v, a) __writeb(v, a)
80 #define writew_relaxed(v, a) __writew(v, a)
81 #define writel_relaxed(v, a) __writel(v, a)
82 #define __raw_writeb __writeb
83 #define __raw_writew __writew
84 #define __raw_writel __writel
85
86 #define mmiowb() barrier()
87
88 #ifdef CONFIG_X86_64
89
90 build_mmio_read(readq, "q", unsigned long, "=r", :"memory")
91 build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
92
93 #define readq_relaxed(a) readq(a)
94 #define writeq_relaxed(v, a) writeq(v, a)
95
96 #define __raw_readq(a) readq(a)
97 #define __raw_writeq(val, addr) writeq(val, addr)
98
99 /* Let people know that we have them */
100 #define readq readq
101 #define writeq writeq
102
103 #endif
104
105 /**
106 * virt_to_phys - map virtual addresses to physical
107 * @address: address to remap
108 *
109 * The returned physical address is the physical (CPU) mapping for
110 * the memory address given. It is only valid to use this function on
111 * addresses directly mapped or allocated via kmalloc.
112 *
113 * This function does not give bus mappings for DMA transfers. In
114 * almost all conceivable cases a device driver should not be using
115 * this function
116 */
117
118 static inline phys_addr_t virt_to_phys(volatile void *address)
119 {
120 return __pa(address);
121 }
122
123 /**
124 * phys_to_virt - map physical address to virtual
125 * @address: address to remap
126 *
127 * The returned virtual address is a current CPU mapping for
128 * the memory address given. It is only valid to use this function on
129 * addresses that have a kernel mapping
130 *
131 * This function does not handle bus mappings for DMA transfers. In
132 * almost all conceivable cases a device driver should not be using
133 * this function
134 */
135
136 static inline void *phys_to_virt(phys_addr_t address)
137 {
138 return __va(address);
139 }
140
141 /*
142 * Change "struct page" to physical address.
143 */
144 #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
145
146 /*
147 * ISA I/O bus memory addresses are 1:1 with the physical address.
148 * However, we truncate the address to unsigned int to avoid undesirable
149 * promotions in legacy drivers.
150 */
151 static inline unsigned int isa_virt_to_bus(volatile void *address)
152 {
153 return (unsigned int)virt_to_phys(address);
154 }
155 #define isa_page_to_bus(page) ((unsigned int)page_to_phys(page))
156 #define isa_bus_to_virt phys_to_virt
157
158 /*
159 * However PCI ones are not necessarily 1:1 and therefore these interfaces
160 * are forbidden in portable PCI drivers.
161 *
162 * Allow them on x86 for legacy drivers, though.
163 */
164 #define virt_to_bus virt_to_phys
165 #define bus_to_virt phys_to_virt
166
167 /**
168 * ioremap - map bus memory into CPU space
169 * @offset: bus address of the memory
170 * @size: size of the resource to map
171 *
172 * ioremap performs a platform specific sequence of operations to
173 * make bus memory CPU accessible via the readb/readw/readl/writeb/
174 * writew/writel functions and the other mmio helpers. The returned
175 * address is not guaranteed to be usable directly as a virtual
176 * address.
177 *
178 * If the area you are trying to map is a PCI BAR you should have a
179 * look at pci_iomap().
180 */
181 extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
182 extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
183 #define ioremap_uc ioremap_uc
184
185 extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
186 extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
187 unsigned long prot_val);
188
189 /*
190 * The default ioremap() behavior is non-cached:
191 */
192 static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
193 {
194 return ioremap_nocache(offset, size);
195 }
196
197 extern void iounmap(volatile void __iomem *addr);
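/*
 * Editorial sketch: the canonical ioremap()/iounmap() pattern, matching
 * what starfire_init_one() in the driver below actually does with its
 * PCI BAR (names are the driver's; error handling abbreviated).
 */
#if 0
	void __iomem *base = ioremap(pci_resource_start(pdev, 0), io_size);
	if (!base)
		return -ENOMEM;                  /* mapping failed */
	intr_status = readl(base + IntrStatus); /* MMIO via the helpers above */
	iounmap(base);
#endif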
198
199 extern void set_iounmap_nonlazy(void);
200
201 #ifdef __KERNEL__
202
203 #include <asm-generic/iomap.h>
204
205 /*
206 * Convert a virtual cached pointer to an uncached pointer
207 */
208 #define xlate_dev_kmem_ptr(p) p
209
210 static inline void
211 memset_io(volatile void __iomem *addr, unsigned char val, size_t count)
212 {
213 memset((void __force *)addr, val, count);
214 }
215
216 static inline void
217 memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count)
218 {
219 memcpy(dst, (const void __force *)src, count);
220 }
221
222 static inline void
223 memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
224 {
225 memcpy((void __force *)dst, src, count);
226 }
227
228 /*
229 * ISA space is 'always mapped' on a typical x86 system, no need to
230 * explicitly ioremap() it. The fact that the ISA IO space is mapped
231 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
232 * are physical addresses. The following constant pointer can be
233 * used as the IO-area pointer (it can be iounmapped as well, so the
234 * analogy with PCI is quite large):
235 */
236 #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
237
238 /*
239 * Cache management
240 *
241 * This is needed for two cases
242 * 1. Out of order aware processors
243 * 2. Accidentally out of order processors (PPro errata #51)
244 */
245
246 static inline void flush_write_buffers(void)
247 {
248 #if defined(CONFIG_X86_PPRO_FENCE)
249 asm volatile("lock; addl $0,0(%%esp)": : :"memory");
250 #endif
251 }
252
253 #endif /* __KERNEL__ */
254
255 extern void native_io_delay(void);
256
257 extern int io_delay_type;
258 extern void io_delay_init(void);
259
260 #if defined(CONFIG_PARAVIRT)
261 #include <asm/paravirt.h>
262 #else
263
264 static inline void slow_down_io(void)
265 {
266 native_io_delay();
267 #ifdef REALLY_SLOW_IO
268 native_io_delay();
269 native_io_delay();
270 native_io_delay();
271 #endif
272 }
273
274 #endif
275
276 #define BUILDIO(bwl, bw, type) \
277 static inline void out##bwl(unsigned type value, int port) \
278 { \
279 asm volatile("out" #bwl " %" #bw "0, %w1" \
280 : : "a"(value), "Nd"(port)); \
281 } \
282 \
283 static inline unsigned type in##bwl(int port) \
284 { \
285 unsigned type value; \
286 asm volatile("in" #bwl " %w1, %" #bw "0" \
287 : "=a"(value) : "Nd"(port)); \
288 return value; \
289 } \
290 \
291 static inline void out##bwl##_p(unsigned type value, int port) \
292 { \
293 out##bwl(value, port); \
294 slow_down_io(); \
295 } \
296 \
297 static inline unsigned type in##bwl##_p(int port) \
298 { \
299 unsigned type value = in##bwl(port); \
300 slow_down_io(); \
301 return value; \
302 } \
303 \
304 static inline void outs##bwl(int port, const void *addr, unsigned long count) \
305 { \
306 asm volatile("rep; outs" #bwl \
307 : "+S"(addr), "+c"(count) : "d"(port)); \
308 } \
309 \
310 static inline void ins##bwl(int port, void *addr, unsigned long count) \
311 { \
312 asm volatile("rep; ins" #bwl \
313 : "+D"(addr), "+c"(count) : "d"(port)); \
314 }
315
316 BUILDIO(b, b, char)
317 BUILDIO(w, w, short)
318 BUILDIO(l, , int)
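/*
 * Editorial note: BUILDIO(b, b, char) expands to outb()/inb(),
 * outb_p()/inb_p() and outsb()/insb(). For example outb(value, port)
 * becomes
 *
 *   asm volatile("outb %b0, %w1" : : "a"(value), "Nd"(port));
 *
 * i.e. the value travels in %al and the port in %dx, or as an 8-bit
 * immediate when the port is a constant below 256 (the "N" constraint).
 */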
319
320 extern void *xlate_dev_mem_ptr(phys_addr_t phys);
321 extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
322
323 extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
324 enum page_cache_mode pcm);
325 extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
326 extern void __iomem *ioremap_wt(resource_size_t offset, unsigned long size);
327
328 extern bool is_early_ioremap_ptep(pte_t *ptep);
329
330 #ifdef CONFIG_XEN
331 #include <xen/xen.h>
332 struct bio_vec;
333
334 extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
335 const struct bio_vec *vec2);
336
337 #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
338 (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
339 (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
340 #endif /* CONFIG_XEN */
341
342 #define IO_SPACE_LIMIT 0xffff
343
344 #ifdef CONFIG_MTRR
345 extern int __must_check arch_phys_wc_index(int handle);
346 #define arch_phys_wc_index arch_phys_wc_index
347
348 extern int __must_check arch_phys_wc_add(unsigned long base,
349 unsigned long size);
350 extern void arch_phys_wc_del(int handle);
351 #define arch_phys_wc_add arch_phys_wc_add
352 #endif
353
354 #ifdef CONFIG_X86_PAT
355 extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size);
356 extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size);
357 #define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc
358 #endif
359
360 #endif /* _ASM_X86_IO_H */

1
2 /* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
3 /*
4 Written 1998-2000 by Donald Becker.
5
6 Current maintainer is Ion Badulescu <ionut ta badula tod org>. Please
7 send all bug reports to me, and not to Donald Becker, as this code
8 has been heavily modified from Donald's original version.
9
10 This software may be used and distributed according to the terms of
11 the GNU General Public License (GPL), incorporated herein by reference.
12 Drivers based on or derived from this code fall under the GPL and must
13 retain the authorship, copyright and license notice. This file is not
14 a complete program and may only be used when the entire operating
15 system is licensed under the GPL.
16
17 The information below comes from Donald Becker's original driver:
18
19 The author may be reached as becker@scyld.com, or C/O
20 Scyld Computing Corporation
21 410 Severn Ave., Suite 210
22 Annapolis MD 21403
23
24 Support and updates available at
25 http://www.scyld.com/network/starfire.html
26 [link no longer provides useful info -jgarzik]
27
28 */
29
30 #define DRV_NAME "starfire"
31 #define DRV_VERSION "2.1"
32 #define DRV_RELDATE "July 6, 2008"
33
34 #include <linux/interrupt.h>
35 #include <linux/module.h>
36 #include <linux/kernel.h>
37 #include <linux/pci.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/init.h>
41 #include <linux/delay.h>
42 #include <linux/crc32.h>
43 #include <linux/ethtool.h>
44 #include <linux/mii.h>
45 #include <linux/if_vlan.h>
46 #include <linux/mm.h>
47 #include <linux/firmware.h>
48 #include <asm/processor.h> /* Processor type for cache alignment. */
49 #include <linux/uaccess.h>
50 #include <asm/io.h>
51
52 /*
53 * The current frame processor firmware fails to checksum a fragment
54 * of length 1. If and when this is fixed, the #define below can be removed.
55 */
56 #define HAS_BROKEN_FIRMWARE
57
58 /*
59 * If using the broken firmware, data must be padded to the next 32-bit boundary.
60 */
61 #ifdef HAS_BROKEN_FIRMWARE
62 #define PADDING_MASK 3
63 #endif
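/*
 * Editorial sketch (assumed helper, not from this driver verbatim): with
 * PADDING_MASK == 3, the transmit path can round a fragment length up to
 * the next 32-bit boundary to keep length-1 fragments away from the
 * checksum bug described above:
 *
 *   padded_len = (len + PADDING_MASK) & ~PADDING_MASK;   // e.g. 61 -> 64
 */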
64
65 /*
66 * Define this if using the driver with the zero-copy patch
67 */
68 #define ZEROCOPY
69
70 #if IS_ENABLED(CONFIG_VLAN_8021Q)
71 #define VLAN_SUPPORT
72 #endif
73
74 /* The user-configurable values.
75 These may be modified when a driver module is loaded.*/
76
77 /* Used for tuning interrupt latency vs. overhead. */
78 static int intr_latency;
79 static int small_frames;
80
81 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
82 static int max_interrupt_work = 20;
83 static int mtu;
84 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
85 The Starfire has a 512 element hash table based on the Ethernet CRC. */
86 static const int multicast_filter_limit = 512;
87 /* Whether to do TCP/UDP checksums in hardware */
88 static int enable_hw_cksum = 1;
89
90 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
91 /*
92 * Set the copy breakpoint for the copy-only-tiny-frames scheme.
93 * Setting to > 1518 effectively disables this feature.
94 *
95 * NOTE:
96 * The ia64 doesn't allow for unaligned loads even of integers being
97 * misaligned on a 2 byte boundary. Thus always force copying of
98 * packets as the starfire doesn't allow for misaligned DMAs ;-(
99 * 23/10/2000 - Jes
100 *
101 * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64,
102 * at least, having unaligned frames leads to a rather serious performance
103 * penalty. -Ion
104 */
105 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
106 static int rx_copybreak = PKT_BUF_SZ;
107 #else
108 static int rx_copybreak /* = 0 */;
109 #endif
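/*
 * Editorial sketch of the copy-only-tiny-frames scheme that rx_copybreak
 * controls (simplified; see __netdev_rx() below for the real logic):
 *
 *   if (pkt_len < rx_copybreak) {
 *           skb = netdev_alloc_skb(dev, pkt_len + 2);
 *           skb_reserve(skb, 2);  // +2 offset 16-byte aligns the IP header
 *           // copy the frame, leave the original ring skb in place
 *   } else {
 *           // hand the full-size ring skb up and refill the slot
 *   }
 *
 * The ia64/alpha/sparc default of PKT_BUF_SZ above forces the copy
 * branch for every frame, as the comment block explains.
 */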
110
111 /* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. */
112 #ifdef __sparc__
113 #define DMA_BURST_SIZE 64
114 #else
115 #define DMA_BURST_SIZE 128
116 #endif
117
118 /* Operational parameters that are set at compile time. */
119
120 /* The "native" ring sizes are either 256 or 2048.
121 However in some modes a descriptor may be marked to wrap the ring earlier.
122 */
123 #define RX_RING_SIZE 256
124 #define TX_RING_SIZE 32
125 /* The completion queues are fixed at 1024 entries i.e. 4K or 8KB. */
126 #define DONE_Q_SIZE 1024
127 /* All queues must be aligned on a 256-byte boundary */
128 #define QUEUE_ALIGN 256
129
130 #if RX_RING_SIZE > 256
131 #define RX_Q_ENTRIES Rx2048QEntries
132 #else
133 #define RX_Q_ENTRIES Rx256QEntries
134 #endif
135
136 /* Operational parameters that usually are not changed. */
137 /* Time in jiffies before concluding the transmitter is hung. */
138 #define TX_TIMEOUT (2 * HZ)
139
140 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
141 /* 64-bit dma_addr_t */
142 #define ADDR_64BITS /* This chip uses 64 bit addresses. */
143 #define netdrv_addr_t __le64
144 #define cpu_to_dma(x) cpu_to_le64(x)
145 #define dma_to_cpu(x) le64_to_cpu(x)
146 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
147 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
148 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
149 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
150 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
151 #else /* 32-bit dma_addr_t */
152 #define netdrv_addr_t __le32
153 #define cpu_to_dma(x) cpu_to_le32(x)
154 #define dma_to_cpu(x) le32_to_cpu(x)
155 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
156 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
157 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
158 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
159 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
160 #endif
161
162 #define skb_first_frag_len(skb) skb_headlen(skb)
163 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
164
165 /* Firmware names */
166 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
167 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
168
169 /* These identify the driver base version and may not be removed. */
170 static const char version[] =
171 KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n"
172 " (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
173
174 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
175 MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
176 MODULE_LICENSE("GPL");
177 MODULE_VERSION(DRV_VERSION);
178 MODULE_FIRMWARE(FIRMWARE_RX);
179 MODULE_FIRMWARE(FIRMWARE_TX);
180
181 module_param(max_interrupt_work, int, 0);
182 module_param(mtu, int, 0);
183 module_param(debug, int, 0);
184 module_param(rx_copybreak, int, 0);
185 module_param(intr_latency, int, 0);
186 module_param(small_frames, int, 0);
187 module_param(enable_hw_cksum, int, 0);
188 MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
189 MODULE_PARM_DESC(mtu, "MTU (all boards)");
190 MODULE_PARM_DESC(debug, "Debug level (0-6)");
191 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
192 MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
193 MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
194 MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");
195
196 /*
197 Theory of Operation
198
199 I. Board Compatibility
200
201 This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.
202
203 II. Board-specific settings
204
205 III. Driver operation
206
207 IIIa. Ring buffers
208
209 The Starfire hardware uses multiple fixed-size descriptor queues/rings. The
210 ring sizes are set fixed by the hardware, but may optionally be wrapped
211 earlier by the END bit in the descriptor.
212 This driver uses that hardware queue size for the Rx ring, where a large
213 number of entries has no ill effect beyond increasing the potential backlog.
214 The Tx ring is wrapped with the END bit, since a large hardware Tx queue
215 disables the queue layer priority ordering and we have no mechanism to
216 utilize the hardware two-level priority queue. When modifying the
217 RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
218 levels.
219
220 IIIb/c. Transmit/Receive Structure
221
222 See the Adaptec manual for the many possible structures, and options for
223 each structure. There are far too many to document all of them here.
224
225 For transmit this driver uses type 0/1 transmit descriptors (depending
226 on the 32/64 bitness of the architecture), and relies on automatic
227 minimum-length padding. It does not use the completion queue
228 consumer index, but instead checks for non-zero status entries.
229
230 For receive this driver uses type 2/3 receive descriptors. The driver
231 allocates full frame size skbuffs for the Rx ring buffers, so all frames
232 should fit in a single descriptor. The driver does not use the completion
233 queue consumer index, but instead checks for non-zero status entries.
234
235 When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
236 is allocated and the frame is copied to the new skbuff. When the incoming
237 frame is larger, the skbuff is passed directly up the protocol stack.
238 Buffers consumed this way are replaced by newly allocated skbuffs in a later
239 phase of receive.
240
241 A notable aspect of operation is that unaligned buffers are not permitted by
242 the Starfire hardware. Thus the IP header at offset 14 in an ethernet frame
243 isn't longword aligned, which may cause problems on some machines,
244 e.g. Alphas and IA64. For these architectures, the driver is forced to copy
245 the frame into a new skbuff unconditionally. Copied frames are put into the
246 skbuff at an offset of "+2", thus 16-byte aligning the IP header.
247
248 IIId. Synchronization
249
250 The driver runs as two independent, single-threaded flows of control. One
251 is the send-packet routine, which enforces single-threaded use by the
252 dev->tbusy flag. The other thread is the interrupt handler, which is single
253 threaded by the hardware and interrupt handling software.
254
255 The send packet thread has partial control over the Tx ring and the netif_queue
256 status. If the number of free Tx slots in the ring falls below a certain number
257 (currently hardcoded to 4), it signals the upper layer to stop the queue.
258
259 The interrupt handler has exclusive control over the Rx ring and records stats
260 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
261 empty by incrementing the dirty_tx mark. Iff the netif_queue is stopped and the
262 number of free Tx slots is above the threshold, it signals the upper layer to
263 restart the queue.
264
265 IV. Notes
266
267 IVb. References
268
269 The Adaptec Starfire manuals, available only from Adaptec.
270 http://www.scyld.com/expert/100mbps.html
271 http://www.scyld.com/expert/NWay.html
272
273 IVc. Errata
274
275 - StopOnPerr is broken, don't enable
276 - Hardware ethernet padding exposes random data, perform software padding
277 instead (unverified -- works correctly for all the hardware I have)
278
279 */
280
281
282
283 enum chip_capability_flags {CanHaveMII=1, };
284
285 enum chipset {
286 CH_6915 = 0,
287 };
288
289 static const struct pci_device_id starfire_pci_tbl[] = {
290 { PCI_VDEVICE(ADAPTEC, 0x6915), CH_6915 },
291 { 0, }
292 };
293 MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);
294
295 /* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
296 static const struct chip_info {
297 const char *name;
298 int drv_flags;
299 } netdrv_tbl[] = {
300 { "Adaptec Starfire 6915", CanHaveMII },
301 };
302
303
304 /* Offsets to the device registers.
305 Unlike software-only systems, device drivers interact with complex hardware.
306 It's not useful to define symbolic names for every register bit in the
307 device. The name can only partially document the semantics and make
308 the driver longer and more difficult to read.
309 In general, only the important configuration values or bits changed
310 multiple times should be defined symbolically.
311 */
312 enum register_offsets {
313 PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
314 IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
315 MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000,
316 GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
317 TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
318 TxRingHiAddr=0x5009C, /* 64 bit address extension. */
319 TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
320 TxThreshold=0x500B0,
321 CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
322 RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
323 CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
324 RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
325 RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
326 TxMode=0x55000, VlanType=0x55064,
327 PerfFilterTable=0x56000, HashTable=0x56100,
328 TxGfpMem=0x58000, RxGfpMem=0x5a000,
329 };
330
331 /*
332 * Bits in the interrupt status/mask registers.
333 * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register
334 * enables all the interrupt sources that are or'ed into those status bits.
335 */
336 enum intr_status_bits {
337 IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
338 IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
339 IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
340 IntrTxComplQLow=0x200000, IntrPCI=0x100000,
341 IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
342 IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
343 IntrNormalSummary=0x8000, IntrTxDone=0x4000,
344 IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
345 IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
346 IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
347 IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
348 IntrNoTxCsum=0x20, IntrTxBadID=0x10,
349 IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
350 IntrTxGfp=0x02, IntrPCIPad=0x01,
351 /* not quite bits */
352 IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
353 IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
354 IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
355 };
356
357 /* Bits in the RxFilterMode register. */
358 enum rx_mode_bits {
359 AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
360 AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30,
361 PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200,
362 WakeupOnGFP=0x0800,
363 };
364
365 /* Bits in the TxMode register */
366 enum tx_mode_bits {
367 MiiSoftReset=0x8000, MIILoopback=0x4000,
368 TxFlowEnable=0x0800, RxFlowEnable=0x0400,
369 PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01,
370 };
371
372 /* Bits in the TxDescCtrl register. */
373 enum tx_ctrl_bits {
374 TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
375 TxDescSpace128=0x30, TxDescSpace256=0x40,
376 TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
377 TxDescType3=0x03, TxDescType4=0x04,
378 TxNoDMACompletion=0x08,
379 TxDescQAddr64bit=0x80, TxDescQAddr32bit=0,
380 TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
381 TxDMABurstSizeShift=8,
382 };
383
384 /* Bits in the RxDescQCtrl register. */
385 enum rx_ctrl_bits {
386 RxBufferLenShift=16, RxMinDescrThreshShift=0,
387 RxPrefetchMode=0x8000, RxVariableQ=0x2000,
388 Rx2048QEntries=0x4000, Rx256QEntries=0,
389 RxDescAddr64bit=0x1000, RxDescAddr32bit=0,
390 RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0,
391 RxDescSpace4=0x000, RxDescSpace8=0x100,
392 RxDescSpace16=0x200, RxDescSpace32=0x300,
393 RxDescSpace64=0x400, RxDescSpace128=0x500,
394 RxConsumerWrEn=0x80,
395 };
396
397 /* Bits in the RxDMACtrl register. */
398 enum rx_dmactrl_bits {
399 RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000,
400 RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000,
401 RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000,
402 RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000,
403 RxChecksumRejectTCPOnly=0x01000000,
404 RxCompletionQ2Enable=0x800000,
405 RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000,
406 RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000,
407 RxDMAQ2NonIP=0x400000,
408 RxUseBackupQueue=0x080000, RxDMACRC=0x040000,
409 RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8,
410 RxBurstSizeShift=0,
411 };
412
413 /* Bits in the RxCompletionAddr register */
414 enum rx_compl_bits {
415 RxComplQAddr64bit=0x80, RxComplQAddr32bit=0,
416 RxComplProducerWrEn=0x40,
417 RxComplType0=0x00, RxComplType1=0x10,
418 RxComplType2=0x20, RxComplType3=0x30,
419 RxComplThreshShift=0,
420 };
421
422 /* Bits in the TxCompletionAddr register */
423 enum tx_compl_bits {
424 TxComplQAddr64bit=0x80, TxComplQAddr32bit=0,
425 TxComplProducerWrEn=0x40,
426 TxComplIntrStatus=0x20,
427 CommonQueueMode=0x10,
428 TxComplThreshShift=0,
429 };
430
431 /* Bits in the GenCtrl register */
432 enum gen_ctrl_bits {
433 RxEnable=0x05, TxEnable=0x0a,
434 RxGFPEnable=0x10, TxGFPEnable=0x20,
435 };
436
437 /* Bits in the IntrTimerCtrl register */
438 enum intr_ctrl_bits {
439 Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100,
440 SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600,
441 IntrLatencyMask=0x1f,
442 };
443
444 /* The Rx and Tx buffer descriptors. */
445 struct starfire_rx_desc {
446 netdrv_addr_t rxaddr;
447 };
448 enum rx_desc_bits {
449 RxDescValid=1, RxDescEndRing=2,
450 };
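/*
 * Editorial note: the error trace above shows init_ring() publishing
 * each receive buffer by storing its DMA address with RxDescValid
 * (bit 0) set, converted with cpu_to_dma(); restated:
 *
 *   np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
 */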
451
452 /* Completion queue entry. */
453 struct short_rx_done_desc {
454 __le32 status; /* Low 16 bits is length. */
455 };
456 struct basic_rx_done_desc {
457 __le32 status; /* Low 16 bits is length. */
458 __le16 vlanid;
459 __le16 status2;
460 };
461 struct csum_rx_done_desc {
462 __le32 status; /* Low 16 bits is length. */
463 __le16 csum; /* Partial checksum */
464 __le16 status2;
465 };
466 struct full_rx_done_desc {
467 __le32 status; /* Low 16 bits is length. */
468 __le16 status3;
469 __le16 status2;
470 __le16 vlanid;
471 __le16 csum; /* partial checksum */
472 __le32 timestamp;
473 };
474 /* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */
475 #ifdef VLAN_SUPPORT
476 typedef struct full_rx_done_desc rx_done_desc;
477 #define RxComplType RxComplType3
478 #else /* not VLAN_SUPPORT */
479 typedef struct csum_rx_done_desc rx_done_desc;
480 #define RxComplType RxComplType2
481 #endif /* not VLAN_SUPPORT */
482
483 enum rx_done_bits {
484 RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
485 };
486
487 /* Type 1 Tx descriptor. */
488 struct starfire_tx_desc_1 {
489 __le32 status; /* Upper bits are status, lower 16 length. */
490 __le32 addr;
491 };
492
493 /* Type 2 Tx descriptor. */
494 struct starfire_tx_desc_2 {
495 __le32 status; /* Upper bits are status, lower 16 length. */
496 __le32 reserved;
497 __le64 addr;
498 };
499
500 #ifdef ADDR_64BITS
501 typedef struct starfire_tx_desc_2 starfire_tx_desc;
502 #define TX_DESC_TYPE TxDescType2
503 #else /* not ADDR_64BITS */
504 typedef struct starfire_tx_desc_1 starfire_tx_desc;
505 #define TX_DESC_TYPE TxDescType1
506 #endif /* not ADDR_64BITS */
507 #define TX_DESC_SPACING TxDescSpaceUnlim
508
509 enum tx_desc_bits {
510 TxDescID=0xB0000000,
511 TxCRCEn=0x01000000, TxDescIntr=0x08000000,
512 TxRingWrap=0x04000000, TxCalTCP=0x02000000,
513 };
514 struct tx_done_desc {
515 __le32 status; /* timestamp, index. */
516 #if 0
517 __le32 intrstatus; /* interrupt status */
518 #endif
519 };
520
521 struct rx_ring_info {
522 struct sk_buff *skb;
523 dma_addr_t mapping;
524 };
525 struct tx_ring_info {
526 struct sk_buff *skb;
527 dma_addr_t mapping;
528 unsigned int used_slots;
529 };
530
531 #define PHY_CNT 2
532 struct netdev_private {
533 /* Descriptor rings first for alignment. */
534 struct starfire_rx_desc *rx_ring;
535 starfire_tx_desc *tx_ring;
536 dma_addr_t rx_ring_dma;
537 dma_addr_t tx_ring_dma;
538 /* The addresses of rx/tx-in-place skbuffs. */
539 struct rx_ring_info rx_info[RX_RING_SIZE];
540 struct tx_ring_info tx_info[TX_RING_SIZE];
541 /* Pointers to completion queues (full pages). */
542 rx_done_desc *rx_done_q;
543 dma_addr_t rx_done_q_dma;
544 unsigned int rx_done;
545 struct tx_done_desc *tx_done_q;
546 dma_addr_t tx_done_q_dma;
547 unsigned int tx_done;
548 struct napi_struct napi;
549 struct net_device *dev;
550 struct pci_dev *pci_dev;
551 #ifdef VLAN_SUPPORT
552 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
553 #endif
554 void *queue_mem;
555 dma_addr_t queue_mem_dma;
556 size_t queue_mem_size;
557
558 /* Frequently used values: keep some adjacent for cache effect. */
559 spinlock_t lock;
560 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
561 unsigned int cur_tx, dirty_tx, reap_tx;
562 unsigned int rx_buf_sz; /* Based on MTU+slack. */
563 /* These values keep track of the transceiver/media in use. */
564 int speed100; /* Set if speed == 100MBit. */
565 u32 tx_mode;
566 u32 intr_timer_ctrl;
567 u8 tx_threshold;
568 /* MII transceiver section. */
569 struct mii_if_info mii_if; /* MII lib hooks/info */
570 int phy_cnt; /* MII device addresses. */
571 unsigned char phys[PHY_CNT]; /* MII device addresses. */
572 void __iomem *base;
573 };
574
575
576 static int mdio_read(struct net_device *dev, int phy_id, int location);
577 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
578 static int netdev_open(struct net_device *dev);
579 static void check_duplex(struct net_device *dev);
580 static void tx_timeout(struct net_device *dev);
581 static void init_ring(struct net_device *dev);
582 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
583 static irqreturn_t intr_handler(int irq, void *dev_instance);
584 static void netdev_error(struct net_device *dev, int intr_status);
585 static int __netdev_rx(struct net_device *dev, int *quota);
586 static int netdev_poll(struct napi_struct *napi, int budget);
587 static void refill_rx_ring(struct net_device *dev);
588 static void netdev_error(struct net_device *dev, int intr_status);
589 static void set_rx_mode(struct net_device *dev);
590 static struct net_device_stats *get_stats(struct net_device *dev);
591 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
592 static int netdev_close(struct net_device *dev);
593 static void netdev_media_change(struct net_device *dev);
594 static const struct ethtool_ops ethtool_ops;
595
596
597 #ifdef VLAN_SUPPORT
598 static int netdev_vlan_rx_add_vid(struct net_device *dev,
599 __be16 proto, u16 vid)
600 {
601 struct netdev_private *np = netdev_priv(dev);
602
603 spin_lock(&np->lock);
604 if (debug > 1)
605 printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
606 set_bit(vid, np->active_vlans);
607 set_rx_mode(dev);
608 spin_unlock(&np->lock);
609
610 return 0;
611 }
612
613 static int netdev_vlan_rx_kill_vid(struct net_device *dev,
614 __be16 proto, u16 vid)
615 {
616 struct netdev_private *np = netdev_priv(dev);
617
618 spin_lock(&np->lock);
619 if (debug > 1)
620 printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
621 clear_bit(vid, np->active_vlans);
622 set_rx_mode(dev);
623 spin_unlock(&np->lock);
624
625 return 0;
626 }
627 #endif /* VLAN_SUPPORT */
628
629
630 static const struct net_device_ops netdev_ops = {
631 .ndo_open = netdev_open,
632 .ndo_stop = netdev_close,
633 .ndo_start_xmit = start_tx,
634 .ndo_tx_timeout = tx_timeout,
635 .ndo_get_stats = get_stats,
636 .ndo_set_rx_mode = set_rx_mode,
637 .ndo_do_ioctl = netdev_ioctl,
638 .ndo_set_mac_address = eth_mac_addr,
639 .ndo_validate_addr = eth_validate_addr,
640 #ifdef VLAN_SUPPORT
641 .ndo_vlan_rx_add_vid = netdev_vlan_rx_add_vid,
642 .ndo_vlan_rx_kill_vid = netdev_vlan_rx_kill_vid,
643 #endif
644 };
645
646 static int starfire_init_one(struct pci_dev *pdev,
647 const struct pci_device_id *ent)
648 {
649 struct device *d = &pdev->dev;
650 struct netdev_private *np;
651 int i, irq, chip_idx = ent->driver_data;
652 struct net_device *dev;
653 long ioaddr;
654 void __iomem *base;
655 int drv_flags, io_size;
656 int boguscnt;
657
658 /* when built into the kernel, we only print version if device is found */
659 #ifndef MODULE
660 static int printed_version;
661 if (!printed_version++)
662 printk(version);
663 #endif
664
665 if (pci_enable_device (pdev))
666 return -EIO;
667
668 ioaddr = pci_resource_start(pdev, 0);
669 io_size = pci_resource_len(pdev, 0);
670 if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
671 dev_err(d, "no PCI MEM resources, aborting\n");
672 return -ENODEV;
673 }
674
675 dev = alloc_etherdev(sizeof(*np));
676 if (!dev)
677 return -ENOMEM;
678
679 SET_NETDEV_DEV(dev, &pdev->dev);
680
681 irq = pdev->irq;
682
683 if (pci_request_regions (pdev, DRV_NAME)) {
684 dev_err(d, "cannot reserve PCI resources, aborting\n");
685 goto err_out_free_netdev;
686 }
687
688 base = ioremap(ioaddr, io_size);
689 if (!base) {
690 dev_err(d, "cannot remap %#x @ %#lx, aborting\n",
691 io_size, ioaddr);
692 goto err_out_free_res;
693 }
694
695 pci_set_master(pdev);
696
697 /* enable MWI -- it vastly improves Rx performance on sparc64 */
698 pci_try_set_mwi(pdev);
699
700 #ifdef ZEROCOPY
701 /* Starfire can do TCP/UDP checksumming */
702 if (enable_hw_cksum)
703 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
704 #endif /* ZEROCOPY */
705
706 #ifdef VLAN_SUPPORT
707 dev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
708 #endif /* VLAN_SUPPORT */
709 #ifdef ADDR_64BITS
710 dev->features |= NETIF_F_HIGHDMA;
711 #endif /* ADDR_64BITS */
712
713 /* Serial EEPROM reads are hidden by the hardware. */
714 for (i = 0; i < 6; i++)
715 dev->dev_addr[i] = readb(base + EEPROMCtrl + 20 - i);
716
717 #if ! defined(final_version) /* Dump the EEPROM contents during development. */
718 if (debug > 4)
719 for (i = 0; i < 0x20; i++)
720 printk("%2.2x%s",
721 (unsigned int)readb(base + EEPROMCtrl + i),
722 i % 16 != 15 ? " " : "\n");
723 #endif
724
725 /* Issue soft reset */
726 writel(MiiSoftReset, base + TxMode);
727 udelay(1000);
728 writel(0, base + TxMode);
729
730 /* Reset the chip to erase previous misconfiguration. */
731 writel(1, base + PCIDeviceConfig);
732 boguscnt = 1000;
733 while (--boguscnt > 0) {
734 udelay(10);
735 if ((readl(base + PCIDeviceConfig) & 1) == 0)
736 break;
737 }
738 if (boguscnt == 0)
739 printk("%s: chipset reset never completed!\n", dev->name);
740 /* wait a little longer */
741 udelay(1000);
742
743 np = netdev_priv(dev);
744 np->dev = dev;
745 np->base = base;
746 spin_lock_init(&np->lock);
747 pci_set_drvdata(pdev, dev);
748
749 np->pci_dev = pdev;
750
751 np->mii_if.dev = dev;
752 np->mii_if.mdio_read = mdio_read;
753 np->mii_if.mdio_write = mdio_write;
754 np->mii_if.phy_id_mask = 0x1f;
755 np->mii_if.reg_num_mask = 0x1f;
756
757 drv_flags = netdrv_tbl[chip_idx].drv_flags;
758
759 np->speed100 = 1;
760
761 /* timer resolution is 128 * 0.8us */
762 np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
763 Timer10X | EnableIntrMasking;
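/*
 * Unit check for the conversion above: one timer tick is 128 * 0.8us
 * = 102.4us, so ticks = intr_latency / 102.4 = (intr_latency * 10) / 1024,
 * which is exactly what the shift-friendly expression computes
 * (intr_latency is presumably given in microseconds).
 */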
764
765 if (small_frames > 0) {
766 np->intr_timer_ctrl |= SmallFrameBypass;
767 switch (small_frames) {
768 case 1 ... 64:
769 np->intr_timer_ctrl |= SmallFrame64;
770 break;
771 case 65 ... 128:
772 np->intr_timer_ctrl |= SmallFrame128;
773 break;
774 case 129 ... 256:
775 np->intr_timer_ctrl |= SmallFrame256;
776 break;
777 default:
778 np->intr_timer_ctrl |= SmallFrame512;
779 if (small_frames > 512)
780 printk("Adjusting small_frames down to 512\n");
781 break;
782 }
783 }
784
785 dev->netdev_ops = &netdev_ops;
786 dev->watchdog_timeo = TX_TIMEOUT;
787 dev->ethtool_ops = &ethtool_ops;
788
789 netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);
790
791 if (mtu)
792 dev->mtu = mtu;
793
794 if (register_netdev(dev))
795 goto err_out_cleardev;
796
797 printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
798 dev->name, netdrv_tbl[chip_idx].name, base,
799 dev->dev_addr, irq);
800
801 if (drv_flags & CanHaveMII) {
802 int phy, phy_idx = 0;
803 int mii_status;
804 for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
805 mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
806 mdelay(100);
807 boguscnt = 1000;
808 while (--boguscnt > 0)
809 if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
810 break;
811 if (boguscnt == 0) {
812 printk("%s: PHY#%d reset never completed!\n", dev->name, phy);
813 continue;
814 }
815 mii_status = mdio_read(dev, phy, MII_BMSR);
816 if (mii_status != 0) {
817 np->phys[phy_idx++] = phy;
818 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
819 printk(KERN_INFO "%s: MII PHY found at address %d, status "
820 "%#4.4x advertising %#4.4x.\n",
821 dev->name, phy, mii_status, np->mii_if.advertising);
822 /* there can be only one PHY on-board */
823 break;
824 }
825 }
826 np->phy_cnt = phy_idx;
827 if (np->phy_cnt > 0)
828 np->mii_if.phy_id = np->phys[0];
829 else
830 memset(&np->mii_if, 0, sizeof(np->mii_if));
831 }
832
833 printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
834 dev->name, enable_hw_cksum ? "enabled" : "disabled");
835 return 0;
836
837 err_out_cleardev:
838 iounmap(base);
839 err_out_free_res:
840 pci_release_regions (pdev);
841 err_out_free_netdev:
842 free_netdev(dev);
843 return -ENODEV;
844 }
845
846
847 /* Read the MII Management Data I/O (MDIO) interfaces. */
848 static int mdio_read(struct net_device *dev, int phy_id, int location)
849 {
850 struct netdev_private *np = netdev_priv(dev);
851 void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
852 int result, boguscnt=1000;
853 /* ??? Should we add a busy-wait here? */
854 do {
855 result = readl(mdio_addr);
856 } while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
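/*
 * Poll until the two MSBs of the result read back as 10b: judging by the
 * mask, bit 31 appears to mean "register contents valid" and bit 30 "busy",
 * so 0x80000000 indicates the read has completed.
 */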
857 if (boguscnt == 0)
858 return 0;
859 if ((result & 0xffff) == 0xffff)
860 return 0;
861 return result & 0xffff;
862 }
863
864
865 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
866 {
867 struct netdev_private *np = netdev_priv(dev);
868 void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
869 writel(value, mdio_addr);
870 /* The busy-wait will occur before a read. */
871 }
872
873
874 static int netdev_open(struct net_device *dev)
875 {
876 const struct firmware *fw_rx, *fw_tx;
877 const __be32 *fw_rx_data, *fw_tx_data;
878 struct netdev_private *np = netdev_priv(dev);
879 void __iomem *ioaddr = np->base;
880 const int irq = np->pci_dev->irq;
881 int i, retval;
882 size_t tx_size, rx_size;
883 size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;
884
885 /* Do we ever need to reset the chip??? */
886
887 retval = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
888 if (retval)
889 return retval;
890
891 /* Disable the Rx and Tx, and reset the chip. */
892 writel(0, ioaddr + GenCtrl);
893 writel(1, ioaddr + PCIDeviceConfig);
894 if (debug > 1)
895 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
896 dev->name, irq);
897
898 /* Allocate the various queues. */
899 if (!np->queue_mem) {
900 tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
901 rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
902 tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
903 rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
904 np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
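/*
 * Each size above (except the rx ring, which comes last) is rounded up to a
 * multiple of QUEUE_ALIGN with the usual ((x + A - 1) / A) * A idiom, so
 * every region after the first starts on a QUEUE_ALIGN boundary within the
 * single coherent allocation below.
 */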
905 np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
906 if (np->queue_mem == NULL) {
907 free_irq(irq, dev);
908 return -ENOMEM;
909 }
910
911 np->tx_done_q = np->queue_mem;
912 np->tx_done_q_dma = np->queue_mem_dma;
913 np->rx_done_q = (void *) np->tx_done_q + tx_done_q_size;
914 np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
915 np->tx_ring = (void *) np->rx_done_q + rx_done_q_size;
916 np->tx_ring_dma = np->rx_done_q_dma + rx_done_q_size;
917 np->rx_ring = (void *) np->tx_ring + tx_ring_size;
918 np->rx_ring_dma = np->tx_ring_dma + tx_ring_size;
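/*
 * Resulting layout of the single coherent block:
 *   queue_mem: | tx_done_q | rx_done_q | tx_ring | rx_ring |
 * with each boundary aligned thanks to the rounded sizes computed above.
 */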
919 }
920
921 /* Start with no carrier; it gets adjusted later */
922 netif_carrier_off(dev);
923 init_ring(dev);
924 /* Set the size of the Rx buffers. */
925 writel((np->rx_buf_sz << RxBufferLenShift) |
926 (0 << RxMinDescrThreshShift) |
927 RxPrefetchMode | RxVariableQ |
928 RX_Q_ENTRIES |
929 RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE |
930 RxDescSpace4,
931 ioaddr + RxDescQCtrl);
932
933 /* Set up the Rx DMA controller. */
934 writel(RxChecksumIgnore |
935 (0 << RxEarlyIntThreshShift) |
936 (6 << RxHighPrioThreshShift) |
937 ((DMA_BURST_SIZE / 32) << RxBurstSizeShift),
938 ioaddr + RxDMACtrl);
939
940 /* Set Tx descriptor */
941 writel((2 << TxHiPriFIFOThreshShift) |
942 (0 << TxPadLenShift) |
943 ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) |
944 TX_DESC_Q_ADDR_SIZE |
945 TX_DESC_SPACING | TX_DESC_TYPE,
946 ioaddr + TxDescCtrl);
947
948 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
949 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
950 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
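/*
 * The odd "(x >> 16) >> 16" extracts the upper 32 bits of the DMA address
 * without tripping over a "shift >= width of type" when dma_addr_t is only
 * 32 bits wide; in that case the result is simply 0.
 */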
951 writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
952 writel(np->tx_ring_dma, ioaddr + TxRingPtr);
953
954 writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
955 writel(np->rx_done_q_dma |
956 RxComplType |
957 (0 << RxComplThreshShift),
958 ioaddr + RxCompletionAddr);
959
960 if (debug > 1)
961 printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);
962
963 /* Fill both the Tx SA register and the Rx perfect filter. */
964 for (i = 0; i < 6; i++)
965 writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);
966 /* The first entry is special because it bypasses the VLAN filter.
967 Don't use it. */
968 writew(0, ioaddr + PerfFilterTable);
969 writew(0, ioaddr + PerfFilterTable + 4);
970 writew(0, ioaddr + PerfFilterTable + 8);
971 for (i = 1; i < 16; i++) {
972 __be16 *eaddrs = (__be16 *)dev->dev_addr;
973 void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
974 writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4;
975 writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4;
976 writew(be16_to_cpu(eaddrs[0]), setup_frm); setup_frm += 8;
977 }
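/*
 * Each perfect-filter slot is 16 bytes; the station MAC is stored as three
 * 16-bit words written in reverse order (eaddrs[2] first), so the hardware
 * evidently expects the low-order word of the address at the lowest offset.
 */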
978
979 /* Initialize other registers. */
980 /* Configure the PCI bus bursts and FIFO thresholds. */
981 np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable; /* modified when link is up. */
982 writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
983 udelay(1000);
984 writel(np->tx_mode, ioaddr + TxMode);
985 np->tx_threshold = 4;
986 writel(np->tx_threshold, ioaddr + TxThreshold);
987
988 writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);
989
990 napi_enable(&np->napi);
991
992 netif_start_queue(dev);
993
994 if (debug > 1)
995 printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
996 set_rx_mode(dev);
997
998 np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
999 check_duplex(dev);
1000
1001 /* Enable GPIO interrupts on link change */
1002 writel(0x0f00ff00, ioaddr + GPIOCtrl);
1003
1004 /* Set the interrupt mask */
1005 writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
1006 IntrTxDMADone | IntrStatsMax | IntrLinkChange |
1007 IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
1008 ioaddr + IntrEnable);
1009 /* Enable PCI interrupts. */
1010 writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
1011 ioaddr + PCIDeviceConfig);
1012
1013 #ifdef VLAN_SUPPORT
1014 /* Set VLAN type to 802.1q */
1015 writel(ETH_P_8021Q, ioaddr + VlanType);
1016 #endif /* VLAN_SUPPORT */
1017
1018 retval = request_firmware(&fw_rx, FIRMWARE_RX, &np->pci_dev->dev);
1019 if (retval) {
1020 printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
1021 FIRMWARE_RX);
1022 goto out_init;
1023 }
1024 if (fw_rx->size % 4) {
1025 printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
1026 fw_rx->size, FIRMWARE_RX);
1027 retval = -EINVAL;
1028 goto out_rx;
1029 }
1030 retval = request_firmware(&fw_tx, FIRMWARE_TX, &np->pci_dev->dev);
1031 if (retval) {
1032 printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
1033 FIRMWARE_TX);
1034 goto out_rx;
1035 }
1036 if (fw_tx->size % 4) {
1037 printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
1038 fw_tx->size, FIRMWARE_TX);
1039 retval = -EINVAL;
1040 goto out_tx;
1041 }
1042 fw_rx_data = (const __be32 *)&fw_rx->data[0];
1043 fw_tx_data = (const __be32 *)&fw_tx->data[0];
1044 rx_size = fw_rx->size / 4;
1045 tx_size = fw_tx->size / 4;
1046
1047 /* Load Rx/Tx firmware into the frame processors */
1048 for (i = 0; i < rx_size; i++)
1049 writel(be32_to_cpup(&fw_rx_data[i]), ioaddr + RxGfpMem + i * 4);
1050 for (i = 0; i < tx_size; i++)
1051 writel(be32_to_cpup(&fw_tx_data[i]), ioaddr + TxGfpMem + i * 4);
1052 if (enable_hw_cksum)
1053 /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
1054 writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl);
1055 else
1056 /* Enable the Rx and Tx units only. */
1057 writel(TxEnable|RxEnable, ioaddr + GenCtrl);
1058
1059 if (debug > 1)
1060 printk(KERN_DEBUG "%s: Done netdev_open().\n",
1061 dev->name);
1062
1063 out_tx:
1064 release_firmware(fw_tx);
1065 out_rx:
1066 release_firmware(fw_rx);
1067 out_init:
1068 if (retval)
1069 netdev_close(dev);
1070 return retval;
1071 }
1072
1073
1074 static void check_duplex(struct net_device *dev)
1075 {
1076 struct netdev_private *np = netdev_priv(dev);
1077 u16 reg0;
1078 int silly_count = 1000;
1079
1080 mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
1081 mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
1082 udelay(500);
1083 while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
1084 /* do nothing */;
1085 if (!silly_count) {
1086 printk("%s: MII reset failed!\n", dev->name);
1087 return;
1088 }
1089
1090 reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1091
1092 if (!np->mii_if.force_media) {
1093 reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
1094 } else {
1095 reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
1096 if (np->speed100)
1097 reg0 |= BMCR_SPEED100;
1098 if (np->mii_if.full_duplex)
1099 reg0 |= BMCR_FULLDPLX;
1100 printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
1101 dev->name,
1102 np->speed100 ? "100" : "10",
1103 np->mii_if.full_duplex ? "full" : "half");
1104 }
1105 mdio_write(dev, np->phys[0], MII_BMCR, reg0);
1106 }
1107
1108
1109 static void tx_timeout(struct net_device *dev)
1110 {
1111 struct netdev_private *np = netdev_priv(dev);
1112 void __iomem *ioaddr = np->base;
1113 int old_debug;
1114
1115 printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
1116 "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));
1117
1118 /* Perhaps we should reinitialize the hardware here. */
1119
1120 /*
1121 * Stop and restart the interface.
1122 * Cheat and increase the debug level temporarily.
1123 */
1124 old_debug = debug;
1125 debug = 2;
1126 netdev_close(dev);
1127 netdev_open(dev);
1128 debug = old_debug;
1129
1130 /* Trigger an immediate transmit demand. */
1131
1132 netif_trans_update(dev); /* prevent tx timeout */
1133 dev->stats.tx_errors++;
1134 netif_wake_queue(dev);
1135 }
1136
1137
1138 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1139 static void init_ring(struct net_device *dev)
1140 {
1141 struct netdev_private *np = netdev_priv(dev);
1142 int i;
1143
1144 np->cur_rx = np->cur_tx = np->reap_tx = 0;
1145 np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;
1146
1147 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1148
1149 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1150 for (i = 0; i < RX_RING_SIZE; i++) {
1151 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1152 np->rx_info[i].skb = skb;
1153 if (skb == NULL)
1154 break;
1155 np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1156 /* Grrr, we cannot offset to correctly align the IP header. */
1157 np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
1158 }
1159 writew(i - 1, np->base + RxDescQIdx);
1160 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
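/*
 * If any allocation failed above, i < RX_RING_SIZE and dirty_rx wraps to a
 * huge unsigned value; refill_rx_ring() only looks at cur_rx - dirty_rx,
 * so the effect is simply "RX_RING_SIZE - i buffers still owed to the ring".
 */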
1161
1162 /* Clear the remainder of the Rx buffer ring. */
1163 for ( ; i < RX_RING_SIZE; i++) {
1164 np->rx_ring[i].rxaddr = 0;
1165 np->rx_info[i].skb = NULL;
1166 np->rx_info[i].mapping = 0;
1167 }
1168 /* Mark the last entry as wrapping the ring. */
1169 np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);
1170
1171 /* Clear the completion rings. */
1172 for (i = 0; i < DONE_Q_SIZE; i++) {
1173 np->rx_done_q[i].status = 0;
1174 np->tx_done_q[i].status = 0;
1175 }
1176
1177 for (i = 0; i < TX_RING_SIZE; i++)
1178 memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
1179 }
1180
1181
1182 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1183 {
1184 struct netdev_private *np = netdev_priv(dev);
1185 unsigned int entry;
1186 u32 status;
1187 int i;
1188
1189 /*
1190 * be cautious here, wrapping the queue has weird semantics
1191 * and we may not have enough slots even when it seems we do.
1192 */
1193 if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
1194 netif_stop_queue(dev);
1195 return NETDEV_TX_BUSY;
1196 }
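/*
 * The "* 2" slack covers the wrap case below: the descriptor that wraps the
 * ring charges TX_RING_SIZE - entry slots (see used_slots), so a fragmented
 * skb can consume up to 2 * skb_num_frags(skb) - 1 slots in the worst case.
 */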
1197
1198 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
1199 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1200 if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
1201 return NETDEV_TX_OK;
1202 }
1203 #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
1204
1205 entry = np->cur_tx % TX_RING_SIZE;
1206 for (i = 0; i < skb_num_frags(skb); i++) {
1207 int wrap_ring = 0;
1208 status = TxDescID;
1209
1210 if (i == 0) {
1211 np->tx_info[entry].skb = skb;
1212 status |= TxCRCEn;
1213 if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
1214 status |= TxRingWrap;
1215 wrap_ring = 1;
1216 }
1217 if (np->reap_tx) {
1218 status |= TxDescIntr;
1219 np->reap_tx = 0;
1220 }
1221 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1222 status |= TxCalTCP;
1223 dev->stats.tx_compressed++;
1224 }
1225 status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);
1226
1227 np->tx_info[entry].mapping =
1228 pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
1229 } else {
1230 const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
1231 status |= skb_frag_size(this_frag);
1232 np->tx_info[entry].mapping =
1233 pci_map_single(np->pci_dev,
1234 skb_frag_address(this_frag),
1235 skb_frag_size(this_frag),
1236 PCI_DMA_TODEVICE);
1237 }
1238
1239 np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
1240 np->tx_ring[entry].status = cpu_to_le32(status);
1241 if (debug > 3)
1242 printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n",
1243 dev->name, np->cur_tx, np->dirty_tx,
1244 entry, status);
1245 if (wrap_ring) {
1246 np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
1247 np->cur_tx += np->tx_info[entry].used_slots;
1248 entry = 0;
1249 } else {
1250 np->tx_info[entry].used_slots = 1;
1251 np->cur_tx += np->tx_info[entry].used_slots;
1252 entry++;
1253 }
1254 /* scavenge the tx descriptors twice per TX_RING_SIZE */
1255 if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
1256 np->reap_tx = 1;
1257 }
1258
1259 /* Non-x86: explicitly flush descriptor cache lines here. */
1260 /* Ensure all descriptors are written back before the transmit is
1261 initiated. - Jes */
1262 wmb();
1263
1264 /* Update the producer index. */
1265 writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx);
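/*
 * The producer index is apparently kept in units of 8 bytes rather than
 * whole descriptors, hence the sizeof(starfire_tx_desc) / 8 scaling.
 */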
1266
1267 /* 4 is arbitrary, but should be ok */
1268 if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
1269 netif_stop_queue(dev);
1270
1271 return NETDEV_TX_OK;
1272 }
1273
1274
1275 /* The interrupt handler does all of the Rx thread work and cleans up
1276 after the Tx thread. */
1277 static irqreturn_t intr_handler(int irq, void *dev_instance)
1278 {
1279 struct net_device *dev = dev_instance;
1280 struct netdev_private *np = netdev_priv(dev);
1281 void __iomem *ioaddr = np->base;
1282 int boguscnt = max_interrupt_work;
1283 int consumer;
1284 int tx_status;
1285 int handled = 0;
1286
1287 do {
1288 u32 intr_status = readl(ioaddr + IntrClear);
1289
1290 if (debug > 4)
1291 printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n",
1292 dev->name, intr_status);
1293
1294 if (intr_status == 0 || intr_status == (u32) -1)
1295 break;
1296
1297 handled = 1;
1298
1299 if (intr_status & (IntrRxDone | IntrRxEmpty)) {
1300 u32 enable;
1301
1302 if (likely(napi_schedule_prep(&np->napi))) {
1303 __napi_schedule(&np->napi);
1304 enable = readl(ioaddr + IntrEnable);
1305 enable &= ~(IntrRxDone | IntrRxEmpty);
1306 writel(enable, ioaddr + IntrEnable);
1307 /* flush PCI posting buffers */
1308 readl(ioaddr + IntrEnable);
1309 } else {
1310 /* Paranoia check */
1311 enable = readl(ioaddr + IntrEnable);
1312 if (enable & (IntrRxDone | IntrRxEmpty)) {
1313 printk(KERN_INFO
1314 "%s: interrupt while in poll!\n",
1315 dev->name);
1316 enable &= ~(IntrRxDone | IntrRxEmpty);
1317 writel(enable, ioaddr + IntrEnable);
1318 }
1319 }
1320 }
1321
1322 /* Scavenge the skbuff list based on the Tx-done queue.
1323 There are redundant checks here that may be cleaned up
1324 after the driver has proven to be reliable. */
1325 consumer = readl(ioaddr + TxConsumerIdx);
1326 if (debug > 3)
1327 printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
1328 dev->name, consumer);
1329
1330 while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
1331 if (debug > 3)
1332 printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
1333 dev->name, np->dirty_tx, np->tx_done, tx_status);
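/*
 * Decode of the completion word below: the top three bits give the
 * completion type -- 101b (0xa0000000) is a plain "packet sent" event that
 * just bumps the counter, while 100b (0x80000000) carries the byte offset
 * of the finished descriptor in its low 15 bits, which is divided by the
 * descriptor size to recover the ring index.
 */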
1334 if ((tx_status & 0xe0000000) == 0xa0000000) {
1335 dev->stats.tx_packets++;
1336 } else if ((tx_status & 0xe0000000) == 0x80000000) {
1337 u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
1338 struct sk_buff *skb = np->tx_info[entry].skb;
1339 np->tx_info[entry].skb = NULL;
1340 pci_unmap_single(np->pci_dev,
1341 np->tx_info[entry].mapping,
1342 skb_first_frag_len(skb),
1343 PCI_DMA_TODEVICE);
1344 np->tx_info[entry].mapping = 0;
1345 np->dirty_tx += np->tx_info[entry].used_slots;
1346 entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
1347 {
1348 int i;
1349 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1350 pci_unmap_single(np->pci_dev,
1351 np->tx_info[entry].mapping,
1352 skb_frag_size(&skb_shinfo(skb)->frags[i]),
1353 PCI_DMA_TODEVICE);
1354 np->dirty_tx++;
1355 entry++;
1356 }
1357 }
1358
1359 dev_kfree_skb_irq(skb);
1360 }
1361 np->tx_done_q[np->tx_done].status = 0;
1362 np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
1363 }
1364 writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
1365
1366 if (netif_queue_stopped(dev) &&
1367 (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
1368 /* The ring is no longer full, wake the queue. */
1369 netif_wake_queue(dev);
1370 }
1371
1372 /* Stats overflow */
1373 if (intr_status & IntrStatsMax)
1374 get_stats(dev);
1375
1376 /* Media change interrupt. */
1377 if (intr_status & IntrLinkChange)
1378 netdev_media_change(dev);
1379
1380 /* Abnormal error summary/uncommon events handlers. */
1381 if (intr_status & IntrAbnormalSummary)
1382 netdev_error(dev, intr_status);
1383
1384 if (--boguscnt < 0) {
1385 if (debug > 1)
1386 printk(KERN_WARNING "%s: Too much work at interrupt, "
1387 "status=%#8.8x.\n",
1388 dev->name, intr_status);
1389 break;
1390 }
1391 } while (1);
1392
1393 if (debug > 4)
1394 printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n",
1395 dev->name, (int) readl(ioaddr + IntrStatus));
1396 return IRQ_RETVAL(handled);
1397 }
1398
1399
1400 /*
1401 * This routine is logically part of the interrupt/poll handler, but separated
1402 * for clarity and better register allocation.
1403 */
1404 static int __netdev_rx(struct net_device *dev, int *quota)
1405 {
1406 struct netdev_private *np = netdev_priv(dev);
1407 u32 desc_status;
1408 int retcode = 0;
1409
1410 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1411 while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
1412 struct sk_buff *skb;
1413 u16 pkt_len;
1414 int entry;
1415 rx_done_desc *desc = &np->rx_done_q[np->rx_done];
1416
1417 if (debug > 4)
1418 printk(KERN_DEBUG " netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
1419 if (!(desc_status & RxOK)) {
1420 /* There was an error. */
1421 if (debug > 2)
1422 printk(KERN_DEBUG " netdev_rx() Rx error was %#8.8x.\n", desc_status);
1423 dev->stats.rx_errors++;
1424 if (desc_status & RxFIFOErr)
1425 dev->stats.rx_fifo_errors++;
1426 goto next_rx;
1427 }
1428
1429 if (*quota <= 0) { /* out of rx quota */
1430 retcode = 1;
1431 goto out;
1432 }
1433 (*quota)--;
1434
1435 pkt_len = desc_status; /* Implicitly Truncate */
1436 entry = (desc_status >> 16) & 0x7ff;
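/*
 * The rx completion word packs the packet length into its low 16 bits
 * (hence the implicit truncation above) and the rx ring index into bits
 * 16-26; 11 bits is enough for the 2048-entry maximum ring.
 */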
1437
1438 if (debug > 4)
1439 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota);
1440 /* Check if the packet is long enough to accept without copying
1441 to a minimally-sized skbuff. */
1442 if (pkt_len < rx_copybreak &&
1443 (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1444 skb_reserve(skb, 2); /* 16 byte align the IP header */
1445 pci_dma_sync_single_for_cpu(np->pci_dev,
1446 np->rx_info[entry].mapping,
1447 pkt_len, PCI_DMA_FROMDEVICE);
1448 skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
1449 pci_dma_sync_single_for_device(np->pci_dev,
1450 np->rx_info[entry].mapping,
1451 pkt_len, PCI_DMA_FROMDEVICE);
1452 skb_put(skb, pkt_len);
1453 } else {
1454 pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1455 skb = np->rx_info[entry].skb;
1456 skb_put(skb, pkt_len);
1457 np->rx_info[entry].skb = NULL;
1458 np->rx_info[entry].mapping = 0;
1459 }
1460 #ifndef final_version /* Remove after testing. */
1461 /* You will want this info for the initial debug. */
1462 if (debug > 5) {
1463 printk(KERN_DEBUG " Rx data %pM %pM %2.2x%2.2x.\n",
1464 skb->data, skb->data + 6,
1465 skb->data[12], skb->data[13]);
1466 }
1467 #endif
1468
1469 skb->protocol = eth_type_trans(skb, dev);
1470 #ifdef VLAN_SUPPORT
1471 if (debug > 4)
1472 printk(KERN_DEBUG " netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
1473 #endif
1474 if (le16_to_cpu(desc->status2) & 0x0100) {
1475 skb->ip_summed = CHECKSUM_UNNECESSARY;
1476 dev->stats.rx_compressed++;
1477 }
1478 /*
1479 * This feature doesn't seem to be working, at least
1480 * with the two firmware versions I have. If the GFP sees
1481 * an IP fragment, it either ignores it completely, or reports
1482 * "bad checksum" on it.
1483 *
1484 * Maybe I missed something -- corrections are welcome.
1485 * Until then, the printk stays. :-) -Ion
1486 */
1487 else if (le16_to_cpu(desc->status2) & 0x0040) {
1488 skb->ip_summed = CHECKSUM_COMPLETE;
1489 skb->csum = le16_to_cpu(desc->csum);
1490 printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
1491 }
1492 #ifdef VLAN_SUPPORT
1493 if (le16_to_cpu(desc->status2) & 0x0200) {
1494 u16 vlid = le16_to_cpu(desc->vlanid);
1495
1496 if (debug > 4) {
1497 printk(KERN_DEBUG " netdev_rx() vlanid = %d\n",
1498 vlid);
1499 }
1500 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlid);
1501 }
1502 #endif /* VLAN_SUPPORT */
1503 netif_receive_skb(skb);
1504 dev->stats.rx_packets++;
1505
1506 next_rx:
1507 np->cur_rx++;
1508 desc->status = 0;
1509 np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
1510 }
1511
1512 if (*quota == 0) { /* out of rx quota */
1513 retcode = 1;
1514 goto out;
1515 }
1516 writew(np->rx_done, np->base + CompletionQConsumerIdx);
1517
1518 out:
1519 refill_rx_ring(dev);
1520 if (debug > 5)
1521 printk(KERN_DEBUG " exiting netdev_rx(): %d, status of %d was %#8.8x.\n",
1522 retcode, np->rx_done, desc_status);
1523 return retcode;
1524 }
1525
1526 static int netdev_poll(struct napi_struct *napi, int budget)
1527 {
1528 struct netdev_private *np = container_of(napi, struct netdev_private, napi);
1529 struct net_device *dev = np->dev;
1530 u32 intr_status;
1531 void __iomem *ioaddr = np->base;
1532 int quota = budget;
1533
1534 do {
1535 writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear);
1536
1537 if (__netdev_rx(dev, &quota))
1538 goto out;
1539
1540 intr_status = readl(ioaddr + IntrStatus);
1541 } while (intr_status & (IntrRxDone | IntrRxEmpty));
1542
1543 napi_complete(napi);
1544 intr_status = readl(ioaddr + IntrEnable);
1545 intr_status |= IntrRxDone | IntrRxEmpty;
1546 writel(intr_status, ioaddr + IntrEnable);
1547
1548 out:
1549 if (debug > 5)
1550 printk(KERN_DEBUG " exiting netdev_poll(): %d.\n",
1551 budget - quota);
1552
1553 /* Restart Rx engine if stopped. */
1554 return budget - quota;
1555 }
1556
1557 static void refill_rx_ring(struct net_device *dev)
1558 {
1559 struct netdev_private *np = netdev_priv(dev);
1560 struct sk_buff *skb;
1561 int entry = -1;
1562
1563 /* Refill the Rx ring buffers. */
1564 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1565 entry = np->dirty_rx % RX_RING_SIZE;
1566 if (np->rx_info[entry].skb == NULL) {
1567 skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1568 np->rx_info[entry].skb = skb;
1569 if (skb == NULL)
1570 break; /* Better luck next round. */
1571 np->rx_info[entry].mapping =
1572 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1573 np->rx_ring[entry].rxaddr =
1574 cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
1575 }
1576 if (entry == RX_RING_SIZE - 1)
1577 np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
1578 }
1579 if (entry >= 0)
1580 writew(entry, np->base + RxDescQIdx);
1581 }
1582
1583
1584 static void netdev_media_change(struct net_device *dev)
1585 {
1586 struct netdev_private *np = netdev_priv(dev);
1587 void __iomem *ioaddr = np->base;
1588 u16 reg0, reg1, reg4, reg5;
1589 u32 new_tx_mode;
1590 u32 new_intr_timer_ctrl;
1591
1592 /* reset status first */
1593 mdio_read(dev, np->phys[0], MII_BMCR);
1594 mdio_read(dev, np->phys[0], MII_BMSR);
1595
1596 reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1597 reg1 = mdio_read(dev, np->phys[0], MII_BMSR);
1598
1599 if (reg1 & BMSR_LSTATUS) {
1600 /* link is up */
1601 if (reg0 & BMCR_ANENABLE) {
1602 /* autonegotiation is enabled */
1603 reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
1604 reg5 = mdio_read(dev, np->phys[0], MII_LPA);
1605 if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
1606 np->speed100 = 1;
1607 np->mii_if.full_duplex = 1;
1608 } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
1609 np->speed100 = 1;
1610 np->mii_if.full_duplex = 0;
1611 } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
1612 np->speed100 = 0;
1613 np->mii_if.full_duplex = 1;
1614 } else {
1615 np->speed100 = 0;
1616 np->mii_if.full_duplex = 0;
1617 }
1618 } else {
1619 /* autonegotiation is disabled */
1620 if (reg0 & BMCR_SPEED100)
1621 np->speed100 = 1;
1622 else
1623 np->speed100 = 0;
1624 if (reg0 & BMCR_FULLDPLX)
1625 np->mii_if.full_duplex = 1;
1626 else
1627 np->mii_if.full_duplex = 0;
1628 }
1629 netif_carrier_on(dev);
1630 printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
1631 dev->name,
1632 np->speed100 ? "100" : "10",
1633 np->mii_if.full_duplex ? "full" : "half");
1634
1635 new_tx_mode = np->tx_mode & ~FullDuplex; /* duplex setting */
1636 if (np->mii_if.full_duplex)
1637 new_tx_mode |= FullDuplex;
1638 if (np->tx_mode != new_tx_mode) {
1639 np->tx_mode = new_tx_mode;
1640 writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
1641 udelay(1000);
1642 writel(np->tx_mode, ioaddr + TxMode);
1643 }
1644
1645 new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
1646 if (np->speed100)
1647 new_intr_timer_ctrl |= Timer10X;
1648 if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
1649 np->intr_timer_ctrl = new_intr_timer_ctrl;
1650 writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl);
1651 }
1652 } else {
1653 netif_carrier_off(dev);
1654 printk(KERN_DEBUG "%s: Link is down\n", dev->name);
1655 }
1656 }
1657
1658
1659 static void netdev_error(struct net_device *dev, int intr_status)
1660 {
1661 struct netdev_private *np = netdev_priv(dev);
1662
1663 /* Came close to underrunning the Tx FIFO, increase threshold. */
1664 if (intr_status & IntrTxDataLow) {
1665 if (np->tx_threshold <= PKT_BUF_SZ / 16) {
1666 writel(++np->tx_threshold, np->base + TxThreshold);
1667 printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n",
1668 dev->name, np->tx_threshold * 16);
1669 } else
1670 printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
1671 }
1672 if (intr_status & IntrRxGFPDead) {
1673 dev->stats.rx_fifo_errors++;
1674 dev->stats.rx_errors++;
1675 }
1676 if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
1677 dev->stats.tx_fifo_errors++;
1678 dev->stats.tx_errors++;
1679 }
1680 if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
1681 printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
1682 dev->name, intr_status);
1683 }
1684
1685
1686 static struct net_device_stats *get_stats(struct net_device *dev)
1687 {
1688 struct netdev_private *np = netdev_priv(dev);
1689 void __iomem *ioaddr = np->base;
1690
1691 /* This adapter architecture needs no SMP locks. */
1692 dev->stats.tx_bytes = readl(ioaddr + 0x57010);
1693 dev->stats.rx_bytes = readl(ioaddr + 0x57044);
1694 dev->stats.tx_packets = readl(ioaddr + 0x57000);
1695 dev->stats.tx_aborted_errors =
1696 readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
1697 dev->stats.tx_window_errors = readl(ioaddr + 0x57018);
1698 dev->stats.collisions =
1699 readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
1700
1701 /* The chip only needs to report frames it silently dropped. */
1702 dev->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
1703 writew(0, ioaddr + RxDMAStatus);
1704 dev->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
1705 dev->stats.rx_frame_errors = readl(ioaddr + 0x57040);
1706 dev->stats.rx_length_errors = readl(ioaddr + 0x57058);
1707 dev->stats.rx_missed_errors = readl(ioaddr + 0x5707C);
1708
1709 return &dev->stats;
1710 }
1711
1712 #ifdef VLAN_SUPPORT
1713 static u32 set_vlan_mode(struct netdev_private *np)
1714 {
1715 u32 ret = VlanMode;
1716 u16 vid;
1717 void __iomem *filter_addr = np->base + HashTable + 8;
1718 int vlan_count = 0;
1719
1720 for_each_set_bit(vid, np->active_vlans, VLAN_N_VID) {
1721 if (vlan_count == 32)
1722 break;
1723 writew(vid, filter_addr);
1724 filter_addr += 16;
1725 vlan_count++;
1726 }
1727 if (vlan_count < 32) { /* all active VLANs fit: use the perfect filter, pad the rest */
1728 ret |= PerfectFilterVlan;
1729 while (vlan_count < 32) {
1730 writew(0, filter_addr);
1731 filter_addr += 16;
1732 vlan_count++;
1733 }
1734 }
1735 return ret;
1736 }
1737 #endif /* VLAN_SUPPORT */
1738
1739 static void set_rx_mode(struct net_device *dev)
1740 {
1741 struct netdev_private *np = netdev_priv(dev);
1742 void __iomem *ioaddr = np->base;
1743 u32 rx_mode = MinVLANPrio;
1744 struct netdev_hw_addr *ha;
1745 int i;
1746
1747 #ifdef VLAN_SUPPORT
1748 rx_mode |= set_vlan_mode(np);
1749 #endif /* VLAN_SUPPORT */
1750
1751 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1752 rx_mode |= AcceptAll;
1753 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1754 (dev->flags & IFF_ALLMULTI)) {
1755 /* Too many to match, or accept all multicasts. */
1756 rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
1757 } else if (netdev_mc_count(dev) <= 14) {
1758 /* Use the 16 element perfect filter, skip first two entries. */
1759 void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1760 __be16 *eaddrs;
1761 netdev_for_each_mc_addr(ha, dev) {
1762 eaddrs = (__be16 *) ha->addr;
1763 writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
1764 writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1765 writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
1766 }
1767 eaddrs = (__be16 *)dev->dev_addr;
1768 i = netdev_mc_count(dev) + 2;
1769 while (i++ < 16) {
1770 writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
1771 writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1772 writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
1773 }
1774 rx_mode |= AcceptBroadcast|PerfectFilter;
1775 } else {
1776 /* Must use a multicast hash table. */
1777 void __iomem *filter_addr;
1778 __be16 *eaddrs;
1779 __le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */
1780
1781 memset(mc_filter, 0, sizeof(mc_filter));
1782 netdev_for_each_mc_addr(ha, dev) {
1783 /* The chip uses the upper 9 CRC bits
1784 as index into the hash table */
1785 int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23;
1786 __le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1];
1787
1788 *fptr |= cpu_to_le32(1 << (bit_nr & 31));
1789 }
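/*
 * mc_filter is 32 x 16 bits = 512 bins, matching the 9 CRC bits used as
 * the index. "(bit_nr >> 4) & ~1" picks the even 16-bit word of each pair
 * so that fptr is a properly aligned 32-bit pointer, and the bit is then
 * set within that 32-bit window.
 */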
1790 /* Clear the perfect filter list, skip first two entries. */
1791 filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1792 eaddrs = (__be16 *)dev->dev_addr;
1793 for (i = 2; i < 16; i++) {
1794 writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
1795 writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1796 writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
1797 }
1798 for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr += 16, i++)
1799 writew(mc_filter[i], filter_addr);
1800 rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter;
1801 }
1802 writel(rx_mode, ioaddr + RxFilterMode);
1803 }
1804
1805 static int check_if_running(struct net_device *dev)
1806 {
1807 if (!netif_running(dev))
1808 return -EINVAL;
1809 return 0;
1810 }
1811
1812 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1813 {
1814 struct netdev_private *np = netdev_priv(dev);
1815 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1816 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1817 strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1818 }
1819
1820 static int get_link_ksettings(struct net_device *dev,
1821 struct ethtool_link_ksettings *cmd)
1822 {
1823 struct netdev_private *np = netdev_priv(dev);
1824 spin_lock_irq(&np->lock);
1825 mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
1826 spin_unlock_irq(&np->lock);
1827 return 0;
1828 }
1829
1830 static int set_link_ksettings(struct net_device *dev,
1831 const struct ethtool_link_ksettings *cmd)
1832 {
1833 struct netdev_private *np = netdev_priv(dev);
1834 int res;
1835 spin_lock_irq(&np->lock);
1836 res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
1837 spin_unlock_irq(&np->lock);
1838 check_duplex(dev);
1839 return res;
1840 }
1841
1842 static int nway_reset(struct net_device *dev)
1843 {
1844 struct netdev_private *np = netdev_priv(dev);
1845 return mii_nway_restart(&np->mii_if);
1846 }
1847
1848 static u32 get_link(struct net_device *dev)
1849 {
1850 struct netdev_private *np = netdev_priv(dev);
1851 return mii_link_ok(&np->mii_if);
1852 }
1853
1854 static u32 get_msglevel(struct net_device *dev)
1855 {
1856 return debug;
1857 }
1858
1859 static void set_msglevel(struct net_device *dev, u32 val)
1860 {
1861 debug = val;
1862 }
1863
1864 static const struct ethtool_ops ethtool_ops = {
1865 .begin = check_if_running,
1866 .get_drvinfo = get_drvinfo,
1867 .nway_reset = nway_reset,
1868 .get_link = get_link,
1869 .get_msglevel = get_msglevel,
1870 .set_msglevel = set_msglevel,
1871 .get_link_ksettings = get_link_ksettings,
1872 .set_link_ksettings = set_link_ksettings,
1873 };
1874
1875 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1876 {
1877 struct netdev_private *np = netdev_priv(dev);
1878 struct mii_ioctl_data *data = if_mii(rq);
1879 int rc;
1880
1881 if (!netif_running(dev))
1882 return -EINVAL;
1883
1884 spin_lock_irq(&np->lock);
1885 rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
1886 spin_unlock_irq(&np->lock);
1887
1888 if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
1889 check_duplex(dev);
1890
1891 return rc;
1892 }
1893
1894 static int netdev_close(struct net_device *dev)
1895 {
1896 struct netdev_private *np = netdev_priv(dev);
1897 void __iomem *ioaddr = np->base;
1898 int i;
1899
1900 netif_stop_queue(dev);
1901
1902 napi_disable(&np->napi);
1903
1904 if (debug > 1) {
1905 printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
1906 dev->name, (int) readl(ioaddr + IntrStatus));
1907 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1908 dev->name, np->cur_tx, np->dirty_tx,
1909 np->cur_rx, np->dirty_rx);
1910 }
1911
1912 /* Disable interrupts by clearing the interrupt mask. */
1913 writel(0, ioaddr + IntrEnable);
1914
1915 /* Stop the chip's Tx and Rx processes. */
1916 writel(0, ioaddr + GenCtrl);
1917 readl(ioaddr + GenCtrl);
1918
1919 if (debug > 5) {
1920 printk(KERN_DEBUG" Tx ring at %#llx:\n",
1921 (long long) np->tx_ring_dma);
1922 for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
1923 printk(KERN_DEBUG " #%d desc. %#8.8x %#llx -> %#8.8x.\n",
1924 i, le32_to_cpu(np->tx_ring[i].status),
1925 (long long) dma_to_cpu(np->tx_ring[i].addr),
1926 le32_to_cpu(np->tx_done_q[i].status));
1927 printk(KERN_DEBUG " Rx ring at %#llx -> %p:\n",
1928 (long long) np->rx_ring_dma, np->rx_done_q);
1929 if (np->rx_done_q)
1930 for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
1931 printk(KERN_DEBUG " #%d desc. %#llx -> %#8.8x\n",
1932 i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
1933 }
1934 }
1935
1936 free_irq(np->pci_dev->irq, dev);
1937
1938 /* Free all the skbuffs in the Rx queue. */
1939 for (i = 0; i < RX_RING_SIZE; i++) {
1940 np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
1941 if (np->rx_info[i].skb != NULL) {
1942 pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1943 dev_kfree_skb(np->rx_info[i].skb);
1944 }
1945 np->rx_info[i].skb = NULL;
1946 np->rx_info[i].mapping = 0;
1947 }
1948 for (i = 0; i < TX_RING_SIZE; i++) {
1949 struct sk_buff *skb = np->tx_info[i].skb;
1950 if (skb == NULL)
1951 continue;
1952 pci_unmap_single(np->pci_dev,
1953 np->tx_info[i].mapping,
1954 skb_first_frag_len(skb), PCI_DMA_TODEVICE);
1955 np->tx_info[i].mapping = 0;
1956 dev_kfree_skb(skb);
1957 np->tx_info[i].skb = NULL;
1958 }
1959
1960 return 0;
1961 }
1962
1963 #ifdef CONFIG_PM
1964 static int starfire_suspend(struct pci_dev *pdev, pm_message_t state)
1965 {
1966 struct net_device *dev = pci_get_drvdata(pdev);
1967
1968 if (netif_running(dev)) {
1969 netif_device_detach(dev);
1970 netdev_close(dev);
1971 }
1972
1973 pci_save_state(pdev);
1974 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1975
1976 return 0;
1977 }
1978
1979 static int starfire_resume(struct pci_dev *pdev)
1980 {
1981 struct net_device *dev = pci_get_drvdata(pdev);
1982
1983 pci_set_power_state(pdev, PCI_D0);
1984 pci_restore_state(pdev);
1985
1986 if (netif_running(dev)) {
1987 netdev_open(dev);
1988 netif_device_attach(dev);
1989 }
1990
1991 return 0;
1992 }
1993 #endif /* CONFIG_PM */
1994
1995
1996 static void starfire_remove_one(struct pci_dev *pdev)
1997 {
1998 struct net_device *dev = pci_get_drvdata(pdev);
1999 struct netdev_private *np = netdev_priv(dev);
2000
2001 BUG_ON(!dev);
2002
2003 unregister_netdev(dev);
2004
2005 if (np->queue_mem)
2006 pci_free_consistent(pdev, np->queue_mem_size, np->queue_mem, np->queue_mem_dma);
2007
2008
2009 /* XXX: add wakeup code -- requires firmware for MagicPacket */
2010 pci_set_power_state(pdev, PCI_D3hot); /* go to sleep in D3 mode */
2011 pci_disable_device(pdev);
2012
2013 iounmap(np->base);
2014 pci_release_regions(pdev);
2015
2016 free_netdev(dev); /* Will also free np!! */
2017 }
2018
2019
2020 static struct pci_driver starfire_driver = {
2021 .name = DRV_NAME,
2022 .probe = starfire_init_one,
2023 .remove = starfire_remove_one,
2024 #ifdef CONFIG_PM
2025 .suspend = starfire_suspend,
2026 .resume = starfire_resume,
2027 #endif /* CONFIG_PM */
2028 .id_table = starfire_pci_tbl,
2029 };
2030
2031
2032 static int __init starfire_init (void)
2033 {
2034 /* when a module, this is printed whether or not devices are found in probe */
2035 #ifdef MODULE
2036 printk(version);
2037
2038 printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
2039 #endif
2040
2041 BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(netdrv_addr_t));
2042
2043 return pci_register_driver(&starfire_driver);
2044 }
2045
2046
2047 static void __exit starfire_cleanup (void)
2048 {
2049 pci_unregister_driver (&starfire_driver);
2050 }
2051
2052
2053 module_init(starfire_init);
2054 module_exit(starfire_cleanup);
2055
2056
2057 /*
2058 * Local variables:
2059 * c-basic-offset: 8
2060 * tab-width: 8
2061 * End:
2062 */
2063
2064
2065
2066
2067
2068 /* LDV_COMMENT_BEGIN_MAIN */
2069 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
2070
2071 /*###########################################################################*/
2072
2073 /*############## Driver Environment Generator 0.2 output ####################*/
2074
2075 /*###########################################################################*/
2076
2077
2078
2079 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests whether all kernel resources are correctly released by the driver before it is unloaded. */
2080 void ldv_check_final_state(void);
2081
2082 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
2083 void ldv_check_return_value(int res);
2084
2085 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
2086 void ldv_check_return_value_probe(int res);
2087
2088 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
2089 void ldv_initialize(void);
2090
2091 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
2092 void ldv_handler_precall(void);
2093
2094 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
2095 int nondet_int(void);
2096
2097 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
2098 int LDV_IN_INTERRUPT;
2099
2100 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
2101 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
2102
2103
2104
2105 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
2106 /*============================= VARIABLE DECLARATION PART =============================*/
2107 /** STRUCT: struct type: net_device_ops, struct name: netdev_ops **/
2108 /* content: static int netdev_open(struct net_device *dev)*/
2109 /* LDV_COMMENT_BEGIN_PREP */
2110 #define DRV_NAME "starfire"
2111 #define DRV_VERSION "2.1"
2112 #define DRV_RELDATE "July 6, 2008"
2113 #define HAS_BROKEN_FIRMWARE
2114 #ifdef HAS_BROKEN_FIRMWARE
2115 #define PADDING_MASK 3
2116 #endif
2117 #define ZEROCOPY
2118 #if IS_ENABLED(CONFIG_VLAN_8021Q)
2119 #define VLAN_SUPPORT
2120 #endif
2121 #define PKT_BUF_SZ 1536
2122 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
2123 #else
2124 #endif
2125 #ifdef __sparc__
2126 #define DMA_BURST_SIZE 64
2127 #else
2128 #define DMA_BURST_SIZE 128
2129 #endif
2130 #define RX_RING_SIZE 256
2131 #define TX_RING_SIZE 32
2132 #define DONE_Q_SIZE 1024
2133 #define QUEUE_ALIGN 256
2134 #if RX_RING_SIZE > 256
2135 #define RX_Q_ENTRIES Rx2048QEntries
2136 #else
2137 #define RX_Q_ENTRIES Rx256QEntries
2138 #endif
2139 #define TX_TIMEOUT (2 * HZ)
2140 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2141 #define ADDR_64BITS
2142 #define netdrv_addr_t __le64
2143 #define cpu_to_dma(x) cpu_to_le64(x)
2144 #define dma_to_cpu(x) le64_to_cpu(x)
2145 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
2146 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
2147 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
2148 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
2149 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
2150 #else
2151 #define netdrv_addr_t __le32
2152 #define cpu_to_dma(x) cpu_to_le32(x)
2153 #define dma_to_cpu(x) le32_to_cpu(x)
2154 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
2155 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
2156 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
2157 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
2158 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
2159 #endif
2160 #define skb_first_frag_len(skb) skb_headlen(skb)
2161 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
2162 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
2163 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
2164 #ifdef VLAN_SUPPORT
2165 #define RxComplType RxComplType3
2166 #else
2167 #define RxComplType RxComplType2
2168 #endif
2169 #ifdef ADDR_64BITS
2170 #define TX_DESC_TYPE TxDescType2
2171 #else
2172 #define TX_DESC_TYPE TxDescType1
2173 #endif
2174 #define TX_DESC_SPACING TxDescSpaceUnlim
2175 #if 0
2176 #endif
2177 #define PHY_CNT 2
2178 #ifdef VLAN_SUPPORT
2179 #endif
2180 #ifdef VLAN_SUPPORT
2181 #endif
2182 #ifdef VLAN_SUPPORT
2183 #endif
2184 #ifndef MODULE
2185 #endif
2186 #ifdef ZEROCOPY
2187 #endif
2188 #ifdef VLAN_SUPPORT
2189 #endif
2190 #ifdef ADDR_64BITS
2191 #endif
2192 #if ! defined(final_version)
2193 #endif
2194 /* LDV_COMMENT_END_PREP */
2195 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "netdev_open" */
2196 struct net_device * var_group1;
2197 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "netdev_open" */
2198 static int res_netdev_open_5;
2199 /* LDV_COMMENT_BEGIN_PREP */
2200 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
2201 #endif
2202 #ifndef final_version
2203 #endif
2204 #ifdef VLAN_SUPPORT
2205 #endif
2206 #ifdef VLAN_SUPPORT
2207 #endif
2208 #ifdef VLAN_SUPPORT
2209 #endif
2210 #ifdef VLAN_SUPPORT
2211 #endif
2212 #ifdef CONFIG_PM
2213 #endif
2214 #ifdef CONFIG_PM
2215 #endif
2216 #ifdef MODULE
2217 #endif
2218 /* LDV_COMMENT_END_PREP */
2219 /* content: static int netdev_close(struct net_device *dev)*/
2220 /* LDV_COMMENT_BEGIN_PREP */
2221 #define DRV_NAME "starfire"
2222 #define DRV_VERSION "2.1"
2223 #define DRV_RELDATE "July 6, 2008"
2224 #define HAS_BROKEN_FIRMWARE
2225 #ifdef HAS_BROKEN_FIRMWARE
2226 #define PADDING_MASK 3
2227 #endif
2228 #define ZEROCOPY
2229 #if IS_ENABLED(CONFIG_VLAN_8021Q)
2230 #define VLAN_SUPPORT
2231 #endif
2232 #define PKT_BUF_SZ 1536
2233 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
2234 #else
2235 #endif
2236 #ifdef __sparc__
2237 #define DMA_BURST_SIZE 64
2238 #else
2239 #define DMA_BURST_SIZE 128
2240 #endif
2241 #define RX_RING_SIZE 256
2242 #define TX_RING_SIZE 32
2243 #define DONE_Q_SIZE 1024
2244 #define QUEUE_ALIGN 256
2245 #if RX_RING_SIZE > 256
2246 #define RX_Q_ENTRIES Rx2048QEntries
2247 #else
2248 #define RX_Q_ENTRIES Rx256QEntries
2249 #endif
2250 #define TX_TIMEOUT (2 * HZ)
2251 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2252 #define ADDR_64BITS
2253 #define netdrv_addr_t __le64
2254 #define cpu_to_dma(x) cpu_to_le64(x)
2255 #define dma_to_cpu(x) le64_to_cpu(x)
2256 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
2257 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
2258 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
2259 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
2260 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
2261 #else
2262 #define netdrv_addr_t __le32
2263 #define cpu_to_dma(x) cpu_to_le32(x)
2264 #define dma_to_cpu(x) le32_to_cpu(x)
2265 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
2266 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
2267 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
2268 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
2269 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
2270 #endif
2271 #define skb_first_frag_len(skb) skb_headlen(skb)
2272 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
2273 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
2274 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
2275 #ifdef VLAN_SUPPORT
2276 #define RxComplType RxComplType3
2277 #else
2278 #define RxComplType RxComplType2
2279 #endif
2280 #ifdef ADDR_64BITS
2281 #define TX_DESC_TYPE TxDescType2
2282 #else
2283 #define TX_DESC_TYPE TxDescType1
2284 #endif
2285 #define TX_DESC_SPACING TxDescSpaceUnlim
2286 #if 0
2287 #endif
2288 #define PHY_CNT 2
2289 #ifdef VLAN_SUPPORT
2290 #endif
2291 #ifdef VLAN_SUPPORT
2292 #endif
2293 #ifdef VLAN_SUPPORT
2294 #endif
2295 #ifndef MODULE
2296 #endif
2297 #ifdef ZEROCOPY
2298 #endif
2299 #ifdef VLAN_SUPPORT
2300 #endif
2301 #ifdef ADDR_64BITS
2302 #endif
2303 #if ! defined(final_version)
2304 #endif
2305 #ifdef VLAN_SUPPORT
2306 #endif
2307 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
2308 #endif
2309 #ifndef final_version
2310 #endif
2311 #ifdef VLAN_SUPPORT
2312 #endif
2313 #ifdef VLAN_SUPPORT
2314 #endif
2315 #ifdef VLAN_SUPPORT
2316 #endif
2317 #ifdef VLAN_SUPPORT
2318 #endif
2319 /* LDV_COMMENT_END_PREP */
2320 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "netdev_close" */
2321 static int res_netdev_close_28;
2322 /* LDV_COMMENT_BEGIN_PREP */
2323 #ifdef CONFIG_PM
2324 #endif
2325 #ifdef CONFIG_PM
2326 #endif
2327 #ifdef MODULE
2328 #endif
2329 /* LDV_COMMENT_END_PREP */
2330 /* content: static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)*/
2331 /* LDV_COMMENT_BEGIN_PREP */
2332 #define DRV_NAME "starfire"
2333 #define DRV_VERSION "2.1"
2334 #define DRV_RELDATE "July 6, 2008"
2335 #define HAS_BROKEN_FIRMWARE
2336 #ifdef HAS_BROKEN_FIRMWARE
2337 #define PADDING_MASK 3
2338 #endif
2339 #define ZEROCOPY
2340 #if IS_ENABLED(CONFIG_VLAN_8021Q)
2341 #define VLAN_SUPPORT
2342 #endif
2343 #define PKT_BUF_SZ 1536
2344 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
2345 #else
2346 #endif
2347 #ifdef __sparc__
2348 #define DMA_BURST_SIZE 64
2349 #else
2350 #define DMA_BURST_SIZE 128
2351 #endif
2352 #define RX_RING_SIZE 256
2353 #define TX_RING_SIZE 32
2354 #define DONE_Q_SIZE 1024
2355 #define QUEUE_ALIGN 256
2356 #if RX_RING_SIZE > 256
2357 #define RX_Q_ENTRIES Rx2048QEntries
2358 #else
2359 #define RX_Q_ENTRIES Rx256QEntries
2360 #endif
2361 #define TX_TIMEOUT (2 * HZ)
2362 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2363 #define ADDR_64BITS
2364 #define netdrv_addr_t __le64
2365 #define cpu_to_dma(x) cpu_to_le64(x)
2366 #define dma_to_cpu(x) le64_to_cpu(x)
2367 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
2368 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
2369 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
2370 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
2371 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
2372 #else
2373 #define netdrv_addr_t __le32
2374 #define cpu_to_dma(x) cpu_to_le32(x)
2375 #define dma_to_cpu(x) le32_to_cpu(x)
2376 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
2377 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
2378 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
2379 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
2380 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
2381 #endif
2382 #define skb_first_frag_len(skb) skb_headlen(skb)
2383 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
2384 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
2385 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
2386 #ifdef VLAN_SUPPORT
2387 #define RxComplType RxComplType3
2388 #else
2389 #define RxComplType RxComplType2
2390 #endif
2391 #ifdef ADDR_64BITS
2392 #define TX_DESC_TYPE TxDescType2
2393 #else
2394 #define TX_DESC_TYPE TxDescType1
2395 #endif
2396 #define TX_DESC_SPACING TxDescSpaceUnlim
2397 #if 0
2398 #endif
2399 #define PHY_CNT 2
2400 #ifdef VLAN_SUPPORT
2401 #endif
2402 #ifdef VLAN_SUPPORT
2403 #endif
2404 #ifdef VLAN_SUPPORT
2405 #endif
2406 #ifndef MODULE
2407 #endif
2408 #ifdef ZEROCOPY
2409 #endif
2410 #ifdef VLAN_SUPPORT
2411 #endif
2412 #ifdef ADDR_64BITS
2413 #endif
2414 #if ! defined(final_version)
2415 #endif
2416 #ifdef VLAN_SUPPORT
2417 #endif
2418 /* LDV_COMMENT_END_PREP */
2419 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "start_tx" */
2420 struct sk_buff * var_group2;
2421 /* LDV_COMMENT_BEGIN_PREP */
2422 #ifndef final_version
2423 #endif
2424 #ifdef VLAN_SUPPORT
2425 #endif
2426 #ifdef VLAN_SUPPORT
2427 #endif
2428 #ifdef VLAN_SUPPORT
2429 #endif
2430 #ifdef VLAN_SUPPORT
2431 #endif
2432 #ifdef CONFIG_PM
2433 #endif
2434 #ifdef CONFIG_PM
2435 #endif
2436 #ifdef MODULE
2437 #endif
2438 /* LDV_COMMENT_END_PREP */
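/* Sketch of the matching call site for start_tx(): the skb comes from
   var_group2 declared above. var_group1 and the NETDEV_TX_BUSY comparison
   are illustrative assumptions, not part of this trace. */
static void ldv_invoke_start_tx(void)
{
        netdev_tx_t rc = start_tx(var_group2, var_group1);
        if (rc == NETDEV_TX_BUSY)
                ; /* the verifier also explores the requeue path */
}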
2439 /* content: static void tx_timeout(struct net_device *dev)*/
2440 /* LDV_COMMENT_BEGIN_PREP */
2441 #define DRV_NAME "starfire"
2442 #define DRV_VERSION "2.1"
2443 #define DRV_RELDATE "July 6, 2008"
2444 #define HAS_BROKEN_FIRMWARE
2445 #ifdef HAS_BROKEN_FIRMWARE
2446 #define PADDING_MASK 3
2447 #endif
2448 #define ZEROCOPY
2449 #if IS_ENABLED(CONFIG_VLAN_8021Q)
2450 #define VLAN_SUPPORT
2451 #endif
2452 #define PKT_BUF_SZ 1536
2453 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
2454 #else
2455 #endif
2456 #ifdef __sparc__
2457 #define DMA_BURST_SIZE 64
2458 #else
2459 #define DMA_BURST_SIZE 128
2460 #endif
2461 #define RX_RING_SIZE 256
2462 #define TX_RING_SIZE 32
2463 #define DONE_Q_SIZE 1024
2464 #define QUEUE_ALIGN 256
2465 #if RX_RING_SIZE > 256
2466 #define RX_Q_ENTRIES Rx2048QEntries
2467 #else
2468 #define RX_Q_ENTRIES Rx256QEntries
2469 #endif
2470 #define TX_TIMEOUT (2 * HZ)
2471 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2472 #define ADDR_64BITS
2473 #define netdrv_addr_t __le64
2474 #define cpu_to_dma(x) cpu_to_le64(x)
2475 #define dma_to_cpu(x) le64_to_cpu(x)
2476 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
2477 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
2478 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
2479 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
2480 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
2481 #else
2482 #define netdrv_addr_t __le32
2483 #define cpu_to_dma(x) cpu_to_le32(x)
2484 #define dma_to_cpu(x) le32_to_cpu(x)
2485 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
2486 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
2487 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
2488 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
2489 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
2490 #endif
2491 #define skb_first_frag_len(skb) skb_headlen(skb)
2492 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
2493 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
2494 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
2495 #ifdef VLAN_SUPPORT
2496 #define RxComplType RxComplType3
2497 #else
2498 #define RxComplType RxComplType2
2499 #endif
2500 #ifdef ADDR_64BITS
2501 #define TX_DESC_TYPE TxDescType2
2502 #else
2503 #define TX_DESC_TYPE TxDescType1
2504 #endif
2505 #define TX_DESC_SPACING TxDescSpaceUnlim
2506 #if 0
2507 #endif
2508 #define PHY_CNT 2
2509 #ifdef VLAN_SUPPORT
2510 #endif
2511 #ifdef VLAN_SUPPORT
2512 #endif
2513 #ifdef VLAN_SUPPORT
2514 #endif
2515 #ifndef MODULE
2516 #endif
2517 #ifdef ZEROCOPY
2518 #endif
2519 #ifdef VLAN_SUPPORT
2520 #endif
2521 #ifdef ADDR_64BITS
2522 #endif
2523 #if ! defined(final_version)
2524 #endif
2525 #ifdef VLAN_SUPPORT
2526 #endif
2527 /* LDV_COMMENT_END_PREP */
2528 /* LDV_COMMENT_BEGIN_PREP */
2529 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
2530 #endif
2531 #ifndef final_version
2532 #endif
2533 #ifdef VLAN_SUPPORT
2534 #endif
2535 #ifdef VLAN_SUPPORT
2536 #endif
2537 #ifdef VLAN_SUPPORT
2538 #endif
2539 #ifdef VLAN_SUPPORT
2540 #endif
2541 #ifdef CONFIG_PM
2542 #endif
2543 #ifdef CONFIG_PM
2544 #endif
2545 #ifdef MODULE
2546 #endif
2547 /* LDV_COMMENT_END_PREP */
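/* TX_TIMEOUT above ((2 * HZ), roughly two seconds) is the watchdog
   interval after which the core invokes tx_timeout(). A hedged sketch of
   where that constant lands during device setup (the assignment site is
   an assumption): */
static void set_tx_watchdog(struct net_device *dev)
{
        dev->watchdog_timeo = TX_TIMEOUT; /* stall threshold for tx_timeout() */
}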
2548 /* content: static struct net_device_stats *get_stats(struct net_device *dev)*/
2549 /* LDV_COMMENT_BEGIN_PREP */
2550 #define DRV_NAME "starfire"
2551 #define DRV_VERSION "2.1"
2552 #define DRV_RELDATE "July 6, 2008"
2553 #define HAS_BROKEN_FIRMWARE
2554 #ifdef HAS_BROKEN_FIRMWARE
2555 #define PADDING_MASK 3
2556 #endif
2557 #define ZEROCOPY
2558 #if IS_ENABLED(CONFIG_VLAN_8021Q)
2559 #define VLAN_SUPPORT
2560 #endif
2561 #define PKT_BUF_SZ 1536
2562 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
2563 #else
2564 #endif
2565 #ifdef __sparc__
2566 #define DMA_BURST_SIZE 64
2567 #else
2568 #define DMA_BURST_SIZE 128
2569 #endif
2570 #define RX_RING_SIZE 256
2571 #define TX_RING_SIZE 32
2572 #define DONE_Q_SIZE 1024
2573 #define QUEUE_ALIGN 256
2574 #if RX_RING_SIZE > 256
2575 #define RX_Q_ENTRIES Rx2048QEntries
2576 #else
2577 #define RX_Q_ENTRIES Rx256QEntries
2578 #endif
2579 #define TX_TIMEOUT (2 * HZ)
2580 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2581 #define ADDR_64BITS
2582 #define netdrv_addr_t __le64
2583 #define cpu_to_dma(x) cpu_to_le64(x)
2584 #define dma_to_cpu(x) le64_to_cpu(x)
2585 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
2586 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
2587 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
2588 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
2589 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
2590 #else
2591 #define netdrv_addr_t __le32
2592 #define cpu_to_dma(x) cpu_to_le32(x)
2593 #define dma_to_cpu(x) le32_to_cpu(x)
2594 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
2595 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
2596 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
2597 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
2598 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
2599 #endif
2600 #define skb_first_frag_len(skb) skb_headlen(skb)
2601 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
2602 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
2603 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
2604 #ifdef VLAN_SUPPORT
2605 #define RxComplType RxComplType3
2606 #else
2607 #define RxComplType RxComplType2
2608 #endif
2609 #ifdef ADDR_64BITS
2610 #define TX_DESC_TYPE TxDescType2
2611 #else
2612 #define TX_DESC_TYPE TxDescType1
2613 #endif
2614 #define TX_DESC_SPACING TxDescSpaceUnlim
2615 #if 0
2616 #endif
2617 #define PHY_CNT 2
2618 #ifdef VLAN_SUPPORT
2619 #endif
2620 #ifdef VLAN_SUPPORT
2621 #endif
2622 #ifdef VLAN_SUPPORT
2623 #endif
2624 #ifndef MODULE
2625 #endif
2626 #ifdef ZEROCOPY
2627 #endif
2628 #ifdef VLAN_SUPPORT
2629 #endif
2630 #ifdef ADDR_64BITS
2631 #endif
2632 #if ! defined(final_version)
2633 #endif
2634 #ifdef VLAN_SUPPORT
2635 #endif
2636 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
2637 #endif
2638 #ifndef final_version
2639 #endif
2640 #ifdef VLAN_SUPPORT
2641 #endif
2642 #ifdef VLAN_SUPPORT
2643 #endif
2644 /* LDV_COMMENT_END_PREP */
2645 /* LDV_COMMENT_BEGIN_PREP */
2646 #ifdef VLAN_SUPPORT
2647 #endif
2648 #ifdef VLAN_SUPPORT
2649 #endif
2650 #ifdef CONFIG_PM
2651 #endif
2652 #ifdef CONFIG_PM
2653 #endif
2654 #ifdef MODULE
2655 #endif
2656 /* LDV_COMMENT_END_PREP */
2657 /* content: static void set_rx_mode(struct net_device *dev)*/
2658 /* LDV_COMMENT_BEGIN_PREP */
2659 #define DRV_NAME "starfire"
2660 #define DRV_VERSION "2.1"
2661 #define DRV_RELDATE "July 6, 2008"
2662 #define HAS_BROKEN_FIRMWARE
2663 #ifdef HAS_BROKEN_FIRMWARE
2664 #define PADDING_MASK 3
2665 #endif
2666 #define ZEROCOPY
2667 #if IS_ENABLED(CONFIG_VLAN_8021Q)
2668 #define VLAN_SUPPORT
2669 #endif
2670 #define PKT_BUF_SZ 1536
2671 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
2672 #else
2673 #endif
2674 #ifdef __sparc__
2675 #define DMA_BURST_SIZE 64
2676 #else
2677 #define DMA_BURST_SIZE 128
2678 #endif
2679 #define RX_RING_SIZE 256
2680 #define TX_RING_SIZE 32
2681 #define DONE_Q_SIZE 1024
2682 #define QUEUE_ALIGN 256
2683 #if RX_RING_SIZE > 256
2684 #define RX_Q_ENTRIES Rx2048QEntries
2685 #else
2686 #define RX_Q_ENTRIES Rx256QEntries
2687 #endif
2688 #define TX_TIMEOUT (2 * HZ)
2689 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2690 #define ADDR_64BITS
2691 #define netdrv_addr_t __le64
2692 #define cpu_to_dma(x) cpu_to_le64(x)
2693 #define dma_to_cpu(x) le64_to_cpu(x)
2694 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
2695 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
2696 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
2697 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
2698 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
2699 #else
2700 #define netdrv_addr_t __le32
2701 #define cpu_to_dma(x) cpu_to_le32(x)
2702 #define dma_to_cpu(x) le32_to_cpu(x)
2703 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
2704 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
2705 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
2706 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
2707 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
2708 #endif
2709 #define skb_first_frag_len(skb) skb_headlen(skb)
2710 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
2711 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
2712 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
2713 #ifdef VLAN_SUPPORT
2714 #define RxComplType RxComplType3
2715 #else
2716 #define RxComplType RxComplType2
2717 #endif
2718 #ifdef ADDR_64BITS
2719 #define TX_DESC_TYPE TxDescType2
2720 #else
2721 #define TX_DESC_TYPE TxDescType1
2722 #endif
2723 #define TX_DESC_SPACING TxDescSpaceUnlim
2724 #if 0
2725 #endif
2726 #define PHY_CNT 2
2727 #ifdef VLAN_SUPPORT
2728 #endif
2729 #ifdef VLAN_SUPPORT
2730 #endif
2731 #ifdef VLAN_SUPPORT
2732 #endif
2733 #ifndef MODULE
2734 #endif
2735 #ifdef ZEROCOPY
2736 #endif
2737 #ifdef VLAN_SUPPORT
2738 #endif
2739 #ifdef ADDR_64BITS
2740 #endif
2741 #if ! defined(final_version)
2742 #endif
2743 #ifdef VLAN_SUPPORT
2744 #endif
2745 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
2746 #endif
2747 #ifndef final_version
2748 #endif
2749 #ifdef VLAN_SUPPORT
2750 #endif
2751 #ifdef VLAN_SUPPORT
2752 #endif
2753 #ifdef VLAN_SUPPORT
2754 #endif
2755 /* LDV_COMMENT_END_PREP */
2756 /* LDV_COMMENT_BEGIN_PREP */
2757 #ifdef CONFIG_PM
2758 #endif
2759 #ifdef CONFIG_PM
2760 #endif
2761 #ifdef MODULE
2762 #endif
2763 /* LDV_COMMENT_END_PREP */
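/* cpu_to_dma()/dma_to_cpu() above collapse to the 32- or 64-bit
   little-endian helpers depending on ADDR_64BITS, so descriptor address
   fields always match what the NIC expects. A hedged sketch of the
   intended use (the one-field descriptor layout is an assumption): */
struct rx_desc_sketch {
        netdrv_addr_t rxaddr;           /* __le32 or __le64 */
};
static inline void fill_rx_addr(struct rx_desc_sketch *d, dma_addr_t addr)
{
        d->rxaddr = cpu_to_dma(addr);   /* CPU order -> device order */
}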
2764 /* content: static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)*/
2765 /* LDV_COMMENT_BEGIN_PREP */
2766 #define DRV_NAME "starfire"
2767 #define DRV_VERSION "2.1"
2768 #define DRV_RELDATE "July 6, 2008"
2769 #define HAS_BROKEN_FIRMWARE
2770 #ifdef HAS_BROKEN_FIRMWARE
2771 #define PADDING_MASK 3
2772 #endif
2773 #define ZEROCOPY
2774 #if IS_ENABLED(CONFIG_VLAN_8021Q)
2775 #define VLAN_SUPPORT
2776 #endif
2777 #define PKT_BUF_SZ 1536
2778 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
2779 #else
2780 #endif
2781 #ifdef __sparc__
2782 #define DMA_BURST_SIZE 64
2783 #else
2784 #define DMA_BURST_SIZE 128
2785 #endif
2786 #define RX_RING_SIZE 256
2787 #define TX_RING_SIZE 32
2788 #define DONE_Q_SIZE 1024
2789 #define QUEUE_ALIGN 256
2790 #if RX_RING_SIZE > 256
2791 #define RX_Q_ENTRIES Rx2048QEntries
2792 #else
2793 #define RX_Q_ENTRIES Rx256QEntries
2794 #endif
2795 #define TX_TIMEOUT (2 * HZ)
2796 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2797 #define ADDR_64BITS
2798 #define netdrv_addr_t __le64
2799 #define cpu_to_dma(x) cpu_to_le64(x)
2800 #define dma_to_cpu(x) le64_to_cpu(x)
2801 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
2802 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
2803 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
2804 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
2805 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
2806 #else
2807 #define netdrv_addr_t __le32
2808 #define cpu_to_dma(x) cpu_to_le32(x)
2809 #define dma_to_cpu(x) le32_to_cpu(x)
2810 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
2811 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
2812 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
2813 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
2814 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
2815 #endif
2816 #define skb_first_frag_len(skb) skb_headlen(skb)
2817 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
2818 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
2819 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
2820 #ifdef VLAN_SUPPORT
2821 #define RxComplType RxComplType3
2822 #else
2823 #define RxComplType RxComplType2
2824 #endif
2825 #ifdef ADDR_64BITS
2826 #define TX_DESC_TYPE TxDescType2
2827 #else
2828 #define TX_DESC_TYPE TxDescType1
2829 #endif
2830 #define TX_DESC_SPACING TxDescSpaceUnlim
2831 #if 0
2832 #endif
2833 #define PHY_CNT 2
2834 #ifdef VLAN_SUPPORT
2835 #endif
2836 #ifdef VLAN_SUPPORT
2837 #endif
2838 #ifdef VLAN_SUPPORT
2839 #endif
2840 #ifndef MODULE
2841 #endif
2842 #ifdef ZEROCOPY
2843 #endif
2844 #ifdef VLAN_SUPPORT
2845 #endif
2846 #ifdef ADDR_64BITS
2847 #endif
2848 #if ! defined(final_version)
2849 #endif
2850 #ifdef VLAN_SUPPORT
2851 #endif
2852 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
2853 #endif
2854 #ifndef final_version
2855 #endif
2856 #ifdef VLAN_SUPPORT
2857 #endif
2858 #ifdef VLAN_SUPPORT
2859 #endif
2860 #ifdef VLAN_SUPPORT
2861 #endif
2862 #ifdef VLAN_SUPPORT
2863 #endif
2864 /* LDV_COMMENT_END_PREP */
2865 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "netdev_ioctl" */
2866 struct ifreq * var_group3;
2867 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "netdev_ioctl" */
2868 int var_netdev_ioctl_27_p2;
2869 /* LDV_COMMENT_BEGIN_PREP */
2870 #ifdef CONFIG_PM
2871 #endif
2872 #ifdef CONFIG_PM
2873 #endif
2874 #ifdef MODULE
2875 #endif
2876 /* LDV_COMMENT_END_PREP */
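/* Sketch of the call that consumes the two parameters declared above;
   var_group1 is again the harness-provided net_device (an assumption),
   and cmd reaches the callback as an unconstrained int: */
static void ldv_invoke_netdev_ioctl(void)
{
        int rc = netdev_ioctl(var_group1, var_group3, var_netdev_ioctl_27_p2);
        (void)rc;
}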
2877 /* content: static int netdev_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)*/
2878 /* LDV_COMMENT_BEGIN_PREP */
2879 #define DRV_NAME "starfire"
2880 #define DRV_VERSION "2.1"
2881 #define DRV_RELDATE "July 6, 2008"
2882 #define HAS_BROKEN_FIRMWARE
2883 #ifdef HAS_BROKEN_FIRMWARE
2884 #define PADDING_MASK 3
2885 #endif
2886 #define ZEROCOPY
2887 #if IS_ENABLED(CONFIG_VLAN_8021Q)
2888 #define VLAN_SUPPORT
2889 #endif
2890 #define PKT_BUF_SZ 1536
2891 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
2892 #else
2893 #endif
2894 #ifdef __sparc__
2895 #define DMA_BURST_SIZE 64
2896 #else
2897 #define DMA_BURST_SIZE 128
2898 #endif
2899 #define RX_RING_SIZE 256
2900 #define TX_RING_SIZE 32
2901 #define DONE_Q_SIZE 1024
2902 #define QUEUE_ALIGN 256
2903 #if RX_RING_SIZE > 256
2904 #define RX_Q_ENTRIES Rx2048QEntries
2905 #else
2906 #define RX_Q_ENTRIES Rx256QEntries
2907 #endif
2908 #define TX_TIMEOUT (2 * HZ)
2909 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2910 #define ADDR_64BITS
2911 #define netdrv_addr_t __le64
2912 #define cpu_to_dma(x) cpu_to_le64(x)
2913 #define dma_to_cpu(x) le64_to_cpu(x)
2914 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
2915 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
2916 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
2917 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
2918 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
2919 #else
2920 #define netdrv_addr_t __le32
2921 #define cpu_to_dma(x) cpu_to_le32(x)
2922 #define dma_to_cpu(x) le32_to_cpu(x)
2923 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
2924 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
2925 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
2926 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
2927 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
2928 #endif
2929 #define skb_first_frag_len(skb) skb_headlen(skb)
2930 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
2931 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
2932 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
2933 #ifdef VLAN_SUPPORT
2934 #define RxComplType RxComplType3
2935 #else
2936 #define RxComplType RxComplType2
2937 #endif
2938 #ifdef ADDR_64BITS
2939 #define TX_DESC_TYPE TxDescType2
2940 #else
2941 #define TX_DESC_TYPE TxDescType1
2942 #endif
2943 #define TX_DESC_SPACING TxDescSpaceUnlim
2944 #if 0
2945 #endif
2946 #define PHY_CNT 2
2947 #ifdef VLAN_SUPPORT
2948 #endif
2949 #ifdef VLAN_SUPPORT
2950 /* LDV_COMMENT_END_PREP */
2951 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "netdev_vlan_rx_add_vid" */
2952 __be16 var_netdev_vlan_rx_add_vid_0_p1;
2953 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "netdev_vlan_rx_add_vid" */
2954 u16 var_netdev_vlan_rx_add_vid_0_p2;
2955 /* LDV_COMMENT_BEGIN_PREP */
2956 #endif
2957 #ifdef VLAN_SUPPORT
2958 #endif
2959 #ifndef MODULE
2960 #endif
2961 #ifdef ZEROCOPY
2962 #endif
2963 #ifdef VLAN_SUPPORT
2964 #endif
2965 #ifdef ADDR_64BITS
2966 #endif
2967 #if ! defined(final_version)
2968 #endif
2969 #ifdef VLAN_SUPPORT
2970 #endif
2971 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
2972 #endif
2973 #ifndef final_version
2974 #endif
2975 #ifdef VLAN_SUPPORT
2976 #endif
2977 #ifdef VLAN_SUPPORT
2978 #endif
2979 #ifdef VLAN_SUPPORT
2980 #endif
2981 #ifdef VLAN_SUPPORT
2982 #endif
2983 #ifdef CONFIG_PM
2984 #endif
2985 #ifdef CONFIG_PM
2986 #endif
2987 #ifdef MODULE
2988 #endif
2989 /* LDV_COMMENT_END_PREP */
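/* The VLAN acceleration hooks take the protocol in network byte order
   (__be16); a hedged sketch of exercising the pair declared above, with
   htons(ETH_P_8021Q) as an illustrative value only: */
static void ldv_invoke_vlan_add(void)
{
        var_netdev_vlan_rx_add_vid_0_p1 = htons(ETH_P_8021Q);
        netdev_vlan_rx_add_vid(var_group1, var_netdev_vlan_rx_add_vid_0_p1,
                               var_netdev_vlan_rx_add_vid_0_p2);
}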
2990 /* content: static int netdev_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)*/
2991 /* LDV_COMMENT_BEGIN_PREP */
2992 #define DRV_NAME "starfire"
2993 #define DRV_VERSION "2.1"
2994 #define DRV_RELDATE "July 6, 2008"
2995 #define HAS_BROKEN_FIRMWARE
2996 #ifdef HAS_BROKEN_FIRMWARE
2997 #define PADDING_MASK 3
2998 #endif
2999 #define ZEROCOPY
3000 #if IS_ENABLED(CONFIG_VLAN_8021Q)
3001 #define VLAN_SUPPORT
3002 #endif
3003 #define PKT_BUF_SZ 1536
3004 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
3005 #else
3006 #endif
3007 #ifdef __sparc__
3008 #define DMA_BURST_SIZE 64
3009 #else
3010 #define DMA_BURST_SIZE 128
3011 #endif
3012 #define RX_RING_SIZE 256
3013 #define TX_RING_SIZE 32
3014 #define DONE_Q_SIZE 1024
3015 #define QUEUE_ALIGN 256
3016 #if RX_RING_SIZE > 256
3017 #define RX_Q_ENTRIES Rx2048QEntries
3018 #else
3019 #define RX_Q_ENTRIES Rx256QEntries
3020 #endif
3021 #define TX_TIMEOUT (2 * HZ)
3022 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3023 #define ADDR_64BITS
3024 #define netdrv_addr_t __le64
3025 #define cpu_to_dma(x) cpu_to_le64(x)
3026 #define dma_to_cpu(x) le64_to_cpu(x)
3027 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
3028 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
3029 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
3030 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
3031 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
3032 #else
3033 #define netdrv_addr_t __le32
3034 #define cpu_to_dma(x) cpu_to_le32(x)
3035 #define dma_to_cpu(x) le32_to_cpu(x)
3036 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
3037 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
3038 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
3039 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
3040 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
3041 #endif
3042 #define skb_first_frag_len(skb) skb_headlen(skb)
3043 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
3044 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
3045 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
3046 #ifdef VLAN_SUPPORT
3047 #define RxComplType RxComplType3
3048 #else
3049 #define RxComplType RxComplType2
3050 #endif
3051 #ifdef ADDR_64BITS
3052 #define TX_DESC_TYPE TxDescType2
3053 #else
3054 #define TX_DESC_TYPE TxDescType1
3055 #endif
3056 #define TX_DESC_SPACING TxDescSpaceUnlim
3057 #if 0
3058 #endif
3059 #define PHY_CNT 2
3060 #ifdef VLAN_SUPPORT
3061 #endif
3062 #ifdef VLAN_SUPPORT
3063 /* LDV_COMMENT_END_PREP */
3064 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "netdev_vlan_rx_kill_vid" */
3065 __be16 var_netdev_vlan_rx_kill_vid_1_p1;
3066 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "netdev_vlan_rx_kill_vid" */
3067 u16 var_netdev_vlan_rx_kill_vid_1_p2;
3068 /* LDV_COMMENT_BEGIN_PREP */
3069 #endif
3070 #ifdef VLAN_SUPPORT
3071 #endif
3072 #ifndef MODULE
3073 #endif
3074 #ifdef ZEROCOPY
3075 #endif
3076 #ifdef VLAN_SUPPORT
3077 #endif
3078 #ifdef ADDR_64BITS
3079 #endif
3080 #if ! defined(final_version)
3081 #endif
3082 #ifdef VLAN_SUPPORT
3083 #endif
3084 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
3085 #endif
3086 #ifndef final_version
3087 #endif
3088 #ifdef VLAN_SUPPORT
3089 #endif
3090 #ifdef VLAN_SUPPORT
3091 #endif
3092 #ifdef VLAN_SUPPORT
3093 #endif
3094 #ifdef VLAN_SUPPORT
3095 #endif
3096 #ifdef CONFIG_PM
3097 #endif
3098 #ifdef CONFIG_PM
3099 #endif
3100 #ifdef MODULE
3101 #endif
3102 /* LDV_COMMENT_END_PREP */
3103
3104 /** STRUCT: struct type: ethtool_ops, struct name: ethtool_ops **/
3105 /* content: static int check_if_running(struct net_device *dev)*/
3106 /* LDV_COMMENT_BEGIN_PREP */
3107 #define DRV_NAME "starfire"
3108 #define DRV_VERSION "2.1"
3109 #define DRV_RELDATE "July 6, 2008"
3110 #define HAS_BROKEN_FIRMWARE
3111 #ifdef HAS_BROKEN_FIRMWARE
3112 #define PADDING_MASK 3
3113 #endif
3114 #define ZEROCOPY
3115 #if IS_ENABLED(CONFIG_VLAN_8021Q)
3116 #define VLAN_SUPPORT
3117 #endif
3118 #define PKT_BUF_SZ 1536
3119 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
3120 #else
3121 #endif
3122 #ifdef __sparc__
3123 #define DMA_BURST_SIZE 64
3124 #else
3125 #define DMA_BURST_SIZE 128
3126 #endif
3127 #define RX_RING_SIZE 256
3128 #define TX_RING_SIZE 32
3129 #define DONE_Q_SIZE 1024
3130 #define QUEUE_ALIGN 256
3131 #if RX_RING_SIZE > 256
3132 #define RX_Q_ENTRIES Rx2048QEntries
3133 #else
3134 #define RX_Q_ENTRIES Rx256QEntries
3135 #endif
3136 #define TX_TIMEOUT (2 * HZ)
3137 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3138 #define ADDR_64BITS
3139 #define netdrv_addr_t __le64
3140 #define cpu_to_dma(x) cpu_to_le64(x)
3141 #define dma_to_cpu(x) le64_to_cpu(x)
3142 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
3143 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
3144 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
3145 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
3146 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
3147 #else
3148 #define netdrv_addr_t __le32
3149 #define cpu_to_dma(x) cpu_to_le32(x)
3150 #define dma_to_cpu(x) le32_to_cpu(x)
3151 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
3152 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
3153 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
3154 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
3155 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
3156 #endif
3157 #define skb_first_frag_len(skb) skb_headlen(skb)
3158 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
3159 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
3160 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
3161 #ifdef VLAN_SUPPORT
3162 #define RxComplType RxComplType3
3163 #else
3164 #define RxComplType RxComplType2
3165 #endif
3166 #ifdef ADDR_64BITS
3167 #define TX_DESC_TYPE TxDescType2
3168 #else
3169 #define TX_DESC_TYPE TxDescType1
3170 #endif
3171 #define TX_DESC_SPACING TxDescSpaceUnlim
3172 #if 0
3173 #endif
3174 #define PHY_CNT 2
3175 #ifdef VLAN_SUPPORT
3176 #endif
3177 #ifdef VLAN_SUPPORT
3178 #endif
3179 #ifdef VLAN_SUPPORT
3180 #endif
3181 #ifndef MODULE
3182 #endif
3183 #ifdef ZEROCOPY
3184 #endif
3185 #ifdef VLAN_SUPPORT
3186 #endif
3187 #ifdef ADDR_64BITS
3188 #endif
3189 #if ! defined(final_version)
3190 #endif
3191 #ifdef VLAN_SUPPORT
3192 #endif
3193 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
3194 #endif
3195 #ifndef final_version
3196 #endif
3197 #ifdef VLAN_SUPPORT
3198 #endif
3199 #ifdef VLAN_SUPPORT
3200 #endif
3201 #ifdef VLAN_SUPPORT
3202 #endif
3203 #ifdef VLAN_SUPPORT
3204 #endif
3205 /* LDV_COMMENT_END_PREP */
3206 /* LDV_COMMENT_BEGIN_PREP */
3207 #ifdef CONFIG_PM
3208 #endif
3209 #ifdef CONFIG_PM
3210 #endif
3211 #ifdef MODULE
3212 #endif
3213 /* LDV_COMMENT_END_PREP */
3214 /* content: static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)*/
3215 /* LDV_COMMENT_BEGIN_PREP */
3216 #define DRV_NAME "starfire"
3217 #define DRV_VERSION "2.1"
3218 #define DRV_RELDATE "July 6, 2008"
3219 #define HAS_BROKEN_FIRMWARE
3220 #ifdef HAS_BROKEN_FIRMWARE
3221 #define PADDING_MASK 3
3222 #endif
3223 #define ZEROCOPY
3224 #if IS_ENABLED(CONFIG_VLAN_8021Q)
3225 #define VLAN_SUPPORT
3226 #endif
3227 #define PKT_BUF_SZ 1536
3228 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
3229 #else
3230 #endif
3231 #ifdef __sparc__
3232 #define DMA_BURST_SIZE 64
3233 #else
3234 #define DMA_BURST_SIZE 128
3235 #endif
3236 #define RX_RING_SIZE 256
3237 #define TX_RING_SIZE 32
3238 #define DONE_Q_SIZE 1024
3239 #define QUEUE_ALIGN 256
3240 #if RX_RING_SIZE > 256
3241 #define RX_Q_ENTRIES Rx2048QEntries
3242 #else
3243 #define RX_Q_ENTRIES Rx256QEntries
3244 #endif
3245 #define TX_TIMEOUT (2 * HZ)
3246 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3247 #define ADDR_64BITS
3248 #define netdrv_addr_t __le64
3249 #define cpu_to_dma(x) cpu_to_le64(x)
3250 #define dma_to_cpu(x) le64_to_cpu(x)
3251 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
3252 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
3253 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
3254 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
3255 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
3256 #else
3257 #define netdrv_addr_t __le32
3258 #define cpu_to_dma(x) cpu_to_le32(x)
3259 #define dma_to_cpu(x) le32_to_cpu(x)
3260 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
3261 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
3262 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
3263 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
3264 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
3265 #endif
3266 #define skb_first_frag_len(skb) skb_headlen(skb)
3267 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
3268 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
3269 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
3270 #ifdef VLAN_SUPPORT
3271 #define RxComplType RxComplType3
3272 #else
3273 #define RxComplType RxComplType2
3274 #endif
3275 #ifdef ADDR_64BITS
3276 #define TX_DESC_TYPE TxDescType2
3277 #else
3278 #define TX_DESC_TYPE TxDescType1
3279 #endif
3280 #define TX_DESC_SPACING TxDescSpaceUnlim
3281 #if 0
3282 #endif
3283 #define PHY_CNT 2
3284 #ifdef VLAN_SUPPORT
3285 #endif
3286 #ifdef VLAN_SUPPORT
3287 #endif
3288 #ifdef VLAN_SUPPORT
3289 #endif
3290 #ifndef MODULE
3291 #endif
3292 #ifdef ZEROCOPY
3293 #endif
3294 #ifdef VLAN_SUPPORT
3295 #endif
3296 #ifdef ADDR_64BITS
3297 #endif
3298 #if ! defined(final_version)
3299 #endif
3300 #ifdef VLAN_SUPPORT
3301 #endif
3302 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
3303 #endif
3304 #ifndef final_version
3305 #endif
3306 #ifdef VLAN_SUPPORT
3307 #endif
3308 #ifdef VLAN_SUPPORT
3309 #endif
3310 #ifdef VLAN_SUPPORT
3311 #endif
3312 #ifdef VLAN_SUPPORT
3313 #endif
3314 /* LDV_COMMENT_END_PREP */
3315 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "get_drvinfo" */
3316 struct ethtool_drvinfo * var_group4;
3317 /* LDV_COMMENT_BEGIN_PREP */
3318 #ifdef CONFIG_PM
3319 #endif
3320 #ifdef CONFIG_PM
3321 #endif
3322 #ifdef MODULE
3323 #endif
3324 /* LDV_COMMENT_END_PREP */
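/* get_drvinfo() conventionally reports the DRV_NAME/DRV_VERSION macros
   defined above; a hedged sketch of such a body, copying into the
   fixed-size ethtool buffers: */
static void get_drvinfo_sketch(struct net_device *dev,
                               struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}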
3325 /* content: static int nway_reset(struct net_device *dev)*/
3326 /* LDV_COMMENT_BEGIN_PREP */
3327 #define DRV_NAME "starfire"
3328 #define DRV_VERSION "2.1"
3329 #define DRV_RELDATE "July 6, 2008"
3330 #define HAS_BROKEN_FIRMWARE
3331 #ifdef HAS_BROKEN_FIRMWARE
3332 #define PADDING_MASK 3
3333 #endif
3334 #define ZEROCOPY
3335 #if IS_ENABLED(CONFIG_VLAN_8021Q)
3336 #define VLAN_SUPPORT
3337 #endif
3338 #define PKT_BUF_SZ 1536
3339 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
3340 #else
3341 #endif
3342 #ifdef __sparc__
3343 #define DMA_BURST_SIZE 64
3344 #else
3345 #define DMA_BURST_SIZE 128
3346 #endif
3347 #define RX_RING_SIZE 256
3348 #define TX_RING_SIZE 32
3349 #define DONE_Q_SIZE 1024
3350 #define QUEUE_ALIGN 256
3351 #if RX_RING_SIZE > 256
3352 #define RX_Q_ENTRIES Rx2048QEntries
3353 #else
3354 #define RX_Q_ENTRIES Rx256QEntries
3355 #endif
3356 #define TX_TIMEOUT (2 * HZ)
3357 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3358 #define ADDR_64BITS
3359 #define netdrv_addr_t __le64
3360 #define cpu_to_dma(x) cpu_to_le64(x)
3361 #define dma_to_cpu(x) le64_to_cpu(x)
3362 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
3363 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
3364 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
3365 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
3366 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
3367 #else
3368 #define netdrv_addr_t __le32
3369 #define cpu_to_dma(x) cpu_to_le32(x)
3370 #define dma_to_cpu(x) le32_to_cpu(x)
3371 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
3372 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
3373 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
3374 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
3375 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
3376 #endif
3377 #define skb_first_frag_len(skb) skb_headlen(skb)
3378 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
3379 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
3380 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
3381 #ifdef VLAN_SUPPORT
3382 #define RxComplType RxComplType3
3383 #else
3384 #define RxComplType RxComplType2
3385 #endif
3386 #ifdef ADDR_64BITS
3387 #define TX_DESC_TYPE TxDescType2
3388 #else
3389 #define TX_DESC_TYPE TxDescType1
3390 #endif
3391 #define TX_DESC_SPACING TxDescSpaceUnlim
3392 #if 0
3393 #endif
3394 #define PHY_CNT 2
3395 #ifdef VLAN_SUPPORT
3396 #endif
3397 #ifdef VLAN_SUPPORT
3398 #endif
3399 #ifdef VLAN_SUPPORT
3400 #endif
3401 #ifndef MODULE
3402 #endif
3403 #ifdef ZEROCOPY
3404 #endif
3405 #ifdef VLAN_SUPPORT
3406 #endif
3407 #ifdef ADDR_64BITS
3408 #endif
3409 #if ! defined(final_version)
3410 #endif
3411 #ifdef VLAN_SUPPORT
3412 #endif
3413 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
3414 #endif
3415 #ifndef final_version
3416 #endif
3417 #ifdef VLAN_SUPPORT
3418 #endif
3419 #ifdef VLAN_SUPPORT
3420 #endif
3421 #ifdef VLAN_SUPPORT
3422 #endif
3423 #ifdef VLAN_SUPPORT
3424 #endif
3425 /* LDV_COMMENT_END_PREP */
3426 /* LDV_COMMENT_BEGIN_PREP */
3427 #ifdef CONFIG_PM
3428 #endif
3429 #ifdef CONFIG_PM
3430 #endif
3431 #ifdef MODULE
3432 #endif
3433 /* LDV_COMMENT_END_PREP */
3434 /* content: static u32 get_link(struct net_device *dev)*/
3435 /* LDV_COMMENT_BEGIN_PREP */
3436 #define DRV_NAME "starfire"
3437 #define DRV_VERSION "2.1"
3438 #define DRV_RELDATE "July 6, 2008"
3439 #define HAS_BROKEN_FIRMWARE
3440 #ifdef HAS_BROKEN_FIRMWARE
3441 #define PADDING_MASK 3
3442 #endif
3443 #define ZEROCOPY
3444 #if IS_ENABLED(CONFIG_VLAN_8021Q)
3445 #define VLAN_SUPPORT
3446 #endif
3447 #define PKT_BUF_SZ 1536
3448 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
3449 #else
3450 #endif
3451 #ifdef __sparc__
3452 #define DMA_BURST_SIZE 64
3453 #else
3454 #define DMA_BURST_SIZE 128
3455 #endif
3456 #define RX_RING_SIZE 256
3457 #define TX_RING_SIZE 32
3458 #define DONE_Q_SIZE 1024
3459 #define QUEUE_ALIGN 256
3460 #if RX_RING_SIZE > 256
3461 #define RX_Q_ENTRIES Rx2048QEntries
3462 #else
3463 #define RX_Q_ENTRIES Rx256QEntries
3464 #endif
3465 #define TX_TIMEOUT (2 * HZ)
3466 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3467 #define ADDR_64BITS
3468 #define netdrv_addr_t __le64
3469 #define cpu_to_dma(x) cpu_to_le64(x)
3470 #define dma_to_cpu(x) le64_to_cpu(x)
3471 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
3472 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
3473 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
3474 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
3475 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
3476 #else
3477 #define netdrv_addr_t __le32
3478 #define cpu_to_dma(x) cpu_to_le32(x)
3479 #define dma_to_cpu(x) le32_to_cpu(x)
3480 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
3481 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
3482 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
3483 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
3484 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
3485 #endif
3486 #define skb_first_frag_len(skb) skb_headlen(skb)
3487 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
3488 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
3489 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
3490 #ifdef VLAN_SUPPORT
3491 #define RxComplType RxComplType3
3492 #else
3493 #define RxComplType RxComplType2
3494 #endif
3495 #ifdef ADDR_64BITS
3496 #define TX_DESC_TYPE TxDescType2
3497 #else
3498 #define TX_DESC_TYPE TxDescType1
3499 #endif
3500 #define TX_DESC_SPACING TxDescSpaceUnlim
3501 #if 0
3502 #endif
3503 #define PHY_CNT 2
3504 #ifdef VLAN_SUPPORT
3505 #endif
3506 #ifdef VLAN_SUPPORT
3507 #endif
3508 #ifdef VLAN_SUPPORT
3509 #endif
3510 #ifndef MODULE
3511 #endif
3512 #ifdef ZEROCOPY
3513 #endif
3514 #ifdef VLAN_SUPPORT
3515 #endif
3516 #ifdef ADDR_64BITS
3517 #endif
3518 #if ! defined(final_version)
3519 #endif
3520 #ifdef VLAN_SUPPORT
3521 #endif
3522 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
3523 #endif
3524 #ifndef final_version
3525 #endif
3526 #ifdef VLAN_SUPPORT
3527 #endif
3528 #ifdef VLAN_SUPPORT
3529 #endif
3530 #ifdef VLAN_SUPPORT
3531 #endif
3532 #ifdef VLAN_SUPPORT
3533 #endif
3534 /* LDV_COMMENT_END_PREP */
3535 /* LDV_COMMENT_BEGIN_PREP */
3536 #ifdef CONFIG_PM
3537 #endif
3538 #ifdef CONFIG_PM
3539 #endif
3540 #ifdef MODULE
3541 #endif
3542 /* LDV_COMMENT_END_PREP */
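/* For an MII-managed NIC like this one, get_link() usually reduces to a
   mii_link_ok() query. A hedged sketch, assuming the driver's private
   struct exposes a mii_if member (locking omitted): */
static u32 get_link_sketch(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        return mii_link_ok(&np->mii_if);        /* nonzero if link is up */
}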
3543 /* content: static u32 get_msglevel(struct net_device *dev)*/
3544 /* LDV_COMMENT_BEGIN_PREP */
3545 #define DRV_NAME "starfire"
3546 #define DRV_VERSION "2.1"
3547 #define DRV_RELDATE "July 6, 2008"
3548 #define HAS_BROKEN_FIRMWARE
3549 #ifdef HAS_BROKEN_FIRMWARE
3550 #define PADDING_MASK 3
3551 #endif
3552 #define ZEROCOPY
3553 #if IS_ENABLED(CONFIG_VLAN_8021Q)
3554 #define VLAN_SUPPORT
3555 #endif
3556 #define PKT_BUF_SZ 1536
3557 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
3558 #else
3559 #endif
3560 #ifdef __sparc__
3561 #define DMA_BURST_SIZE 64
3562 #else
3563 #define DMA_BURST_SIZE 128
3564 #endif
3565 #define RX_RING_SIZE 256
3566 #define TX_RING_SIZE 32
3567 #define DONE_Q_SIZE 1024
3568 #define QUEUE_ALIGN 256
3569 #if RX_RING_SIZE > 256
3570 #define RX_Q_ENTRIES Rx2048QEntries
3571 #else
3572 #define RX_Q_ENTRIES Rx256QEntries
3573 #endif
3574 #define TX_TIMEOUT (2 * HZ)
3575 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3576 #define ADDR_64BITS
3577 #define netdrv_addr_t __le64
3578 #define cpu_to_dma(x) cpu_to_le64(x)
3579 #define dma_to_cpu(x) le64_to_cpu(x)
3580 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
3581 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
3582 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
3583 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
3584 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
3585 #else
3586 #define netdrv_addr_t __le32
3587 #define cpu_to_dma(x) cpu_to_le32(x)
3588 #define dma_to_cpu(x) le32_to_cpu(x)
3589 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
3590 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
3591 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
3592 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
3593 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
3594 #endif
3595 #define skb_first_frag_len(skb) skb_headlen(skb)
3596 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
3597 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
3598 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
3599 #ifdef VLAN_SUPPORT
3600 #define RxComplType RxComplType3
3601 #else
3602 #define RxComplType RxComplType2
3603 #endif
3604 #ifdef ADDR_64BITS
3605 #define TX_DESC_TYPE TxDescType2
3606 #else
3607 #define TX_DESC_TYPE TxDescType1
3608 #endif
3609 #define TX_DESC_SPACING TxDescSpaceUnlim
3610 #if 0
3611 #endif
3612 #define PHY_CNT 2
3613 #ifdef VLAN_SUPPORT
3614 #endif
3615 #ifdef VLAN_SUPPORT
3616 #endif
3617 #ifdef VLAN_SUPPORT
3618 #endif
3619 #ifndef MODULE
3620 #endif
3621 #ifdef ZEROCOPY
3622 #endif
3623 #ifdef VLAN_SUPPORT
3624 #endif
3625 #ifdef ADDR_64BITS
3626 #endif
3627 #if ! defined(final_version)
3628 #endif
3629 #ifdef VLAN_SUPPORT
3630 #endif
3631 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
3632 #endif
3633 #ifndef final_version
3634 #endif
3635 #ifdef VLAN_SUPPORT
3636 #endif
3637 #ifdef VLAN_SUPPORT
3638 #endif
3639 #ifdef VLAN_SUPPORT
3640 #endif
3641 #ifdef VLAN_SUPPORT
3642 #endif
3643 /* LDV_COMMENT_END_PREP */
3644 /* LDV_COMMENT_BEGIN_PREP */
3645 #ifdef CONFIG_PM
3646 #endif
3647 #ifdef CONFIG_PM
3648 #endif
3649 #ifdef MODULE
3650 #endif
3651 /* LDV_COMMENT_END_PREP */
3652 /* content: static void set_msglevel(struct net_device *dev, u32 val)*/
3653 /* LDV_COMMENT_BEGIN_PREP */
3654 #define DRV_NAME "starfire"
3655 #define DRV_VERSION "2.1"
3656 #define DRV_RELDATE "July 6, 2008"
3657 #define HAS_BROKEN_FIRMWARE
3658 #ifdef HAS_BROKEN_FIRMWARE
3659 #define PADDING_MASK 3
3660 #endif
3661 #define ZEROCOPY
3662 #if IS_ENABLED(CONFIG_VLAN_8021Q)
3663 #define VLAN_SUPPORT
3664 #endif
3665 #define PKT_BUF_SZ 1536
3666 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
3667 #else
3668 #endif
3669 #ifdef __sparc__
3670 #define DMA_BURST_SIZE 64
3671 #else
3672 #define DMA_BURST_SIZE 128
3673 #endif
3674 #define RX_RING_SIZE 256
3675 #define TX_RING_SIZE 32
3676 #define DONE_Q_SIZE 1024
3677 #define QUEUE_ALIGN 256
3678 #if RX_RING_SIZE > 256
3679 #define RX_Q_ENTRIES Rx2048QEntries
3680 #else
3681 #define RX_Q_ENTRIES Rx256QEntries
3682 #endif
3683 #define TX_TIMEOUT (2 * HZ)
3684 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3685 #define ADDR_64BITS
3686 #define netdrv_addr_t __le64
3687 #define cpu_to_dma(x) cpu_to_le64(x)
3688 #define dma_to_cpu(x) le64_to_cpu(x)
3689 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
3690 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
3691 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
3692 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
3693 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
3694 #else
3695 #define netdrv_addr_t __le32
3696 #define cpu_to_dma(x) cpu_to_le32(x)
3697 #define dma_to_cpu(x) le32_to_cpu(x)
3698 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
3699 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
3700 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
3701 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
3702 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
3703 #endif
3704 #define skb_first_frag_len(skb) skb_headlen(skb)
3705 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
3706 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
3707 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
3708 #ifdef VLAN_SUPPORT
3709 #define RxComplType RxComplType3
3710 #else
3711 #define RxComplType RxComplType2
3712 #endif
3713 #ifdef ADDR_64BITS
3714 #define TX_DESC_TYPE TxDescType2
3715 #else
3716 #define TX_DESC_TYPE TxDescType1
3717 #endif
3718 #define TX_DESC_SPACING TxDescSpaceUnlim
3719 #if 0
3720 #endif
3721 #define PHY_CNT 2
3722 #ifdef VLAN_SUPPORT
3723 #endif
3724 #ifdef VLAN_SUPPORT
3725 #endif
3726 #ifdef VLAN_SUPPORT
3727 #endif
3728 #ifndef MODULE
3729 #endif
3730 #ifdef ZEROCOPY
3731 #endif
3732 #ifdef VLAN_SUPPORT
3733 #endif
3734 #ifdef ADDR_64BITS
3735 #endif
3736 #if ! defined(final_version)
3737 #endif
3738 #ifdef VLAN_SUPPORT
3739 #endif
3740 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
3741 #endif
3742 #ifndef final_version
3743 #endif
3744 #ifdef VLAN_SUPPORT
3745 #endif
3746 #ifdef VLAN_SUPPORT
3747 #endif
3748 #ifdef VLAN_SUPPORT
3749 #endif
3750 #ifdef VLAN_SUPPORT
3751 #endif
3752 /* LDV_COMMENT_END_PREP */
3753 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "set_msglevel" */
3754 u32 var_set_msglevel_26_p1;
3755 /* LDV_COMMENT_BEGIN_PREP */
3756 #ifdef CONFIG_PM
3757 #endif
3758 #ifdef CONFIG_PM
3759 #endif
3760 #ifdef MODULE
3761 #endif
3762 /* LDV_COMMENT_END_PREP */
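/* get_msglevel()/set_msglevel() are typically one-line accessors for the
   driver's message-level word; a hedged sketch of the setter using the
   parameter declared above (the msg_enable field is an assumption): */
static void set_msglevel_sketch(struct net_device *dev, u32 val)
{
        struct netdev_private *np = netdev_priv(dev);
        np->msg_enable = val;
}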
3763 /* content: static int get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd)*/
3764 /* LDV_COMMENT_BEGIN_PREP */
3765 #define DRV_NAME "starfire"
3766 #define DRV_VERSION "2.1"
3767 #define DRV_RELDATE "July 6, 2008"
3768 #define HAS_BROKEN_FIRMWARE
3769 #ifdef HAS_BROKEN_FIRMWARE
3770 #define PADDING_MASK 3
3771 #endif
3772 #define ZEROCOPY
3773 #if IS_ENABLED(CONFIG_VLAN_8021Q)
3774 #define VLAN_SUPPORT
3775 #endif
3776 #define PKT_BUF_SZ 1536
3777 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
3778 #else
3779 #endif
3780 #ifdef __sparc__
3781 #define DMA_BURST_SIZE 64
3782 #else
3783 #define DMA_BURST_SIZE 128
3784 #endif
3785 #define RX_RING_SIZE 256
3786 #define TX_RING_SIZE 32
3787 #define DONE_Q_SIZE 1024
3788 #define QUEUE_ALIGN 256
3789 #if RX_RING_SIZE > 256
3790 #define RX_Q_ENTRIES Rx2048QEntries
3791 #else
3792 #define RX_Q_ENTRIES Rx256QEntries
3793 #endif
3794 #define TX_TIMEOUT (2 * HZ)
3795 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3796 #define ADDR_64BITS
3797 #define netdrv_addr_t __le64
3798 #define cpu_to_dma(x) cpu_to_le64(x)
3799 #define dma_to_cpu(x) le64_to_cpu(x)
3800 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
3801 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
3802 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
3803 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
3804 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
3805 #else
3806 #define netdrv_addr_t __le32
3807 #define cpu_to_dma(x) cpu_to_le32(x)
3808 #define dma_to_cpu(x) le32_to_cpu(x)
3809 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
3810 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
3811 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
3812 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
3813 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
3814 #endif
3815 #define skb_first_frag_len(skb) skb_headlen(skb)
3816 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
3817 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
3818 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
3819 #ifdef VLAN_SUPPORT
3820 #define RxComplType RxComplType3
3821 #else
3822 #define RxComplType RxComplType2
3823 #endif
3824 #ifdef ADDR_64BITS
3825 #define TX_DESC_TYPE TxDescType2
3826 #else
3827 #define TX_DESC_TYPE TxDescType1
3828 #endif
3829 #define TX_DESC_SPACING TxDescSpaceUnlim
3830 #if 0
3831 #endif
3832 #define PHY_CNT 2
3833 #ifdef VLAN_SUPPORT
3834 #endif
3835 #ifdef VLAN_SUPPORT
3836 #endif
3837 #ifdef VLAN_SUPPORT
3838 #endif
3839 #ifndef MODULE
3840 #endif
3841 #ifdef ZEROCOPY
3842 #endif
3843 #ifdef VLAN_SUPPORT
3844 #endif
3845 #ifdef ADDR_64BITS
3846 #endif
3847 #if ! defined(final_version)
3848 #endif
3849 #ifdef VLAN_SUPPORT
3850 #endif
3851 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
3852 #endif
3853 #ifndef final_version
3854 #endif
3855 #ifdef VLAN_SUPPORT
3856 #endif
3857 #ifdef VLAN_SUPPORT
3858 #endif
3859 #ifdef VLAN_SUPPORT
3860 #endif
3861 #ifdef VLAN_SUPPORT
3862 #endif
3863 /* LDV_COMMENT_END_PREP */
3864 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "get_link_ksettings" */
3865 struct ethtool_link_ksettings * var_group5;
3866 /* LDV_COMMENT_BEGIN_PREP */
3867 #ifdef CONFIG_PM
3868 #endif
3869 #ifdef CONFIG_PM
3870 #endif
3871 #ifdef MODULE
3872 #endif
3873 /* LDV_COMMENT_END_PREP */
3874 /* content: static int set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd)*/
3875 /* LDV_COMMENT_BEGIN_PREP */
3876 #define DRV_NAME "starfire"
3877 #define DRV_VERSION "2.1"
3878 #define DRV_RELDATE "July 6, 2008"
3879 #define HAS_BROKEN_FIRMWARE
3880 #ifdef HAS_BROKEN_FIRMWARE
3881 #define PADDING_MASK 3
3882 #endif
3883 #define ZEROCOPY
3884 #if IS_ENABLED(CONFIG_VLAN_8021Q)
3885 #define VLAN_SUPPORT
3886 #endif
3887 #define PKT_BUF_SZ 1536
3888 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
3889 #else
3890 #endif
3891 #ifdef __sparc__
3892 #define DMA_BURST_SIZE 64
3893 #else
3894 #define DMA_BURST_SIZE 128
3895 #endif
3896 #define RX_RING_SIZE 256
3897 #define TX_RING_SIZE 32
3898 #define DONE_Q_SIZE 1024
3899 #define QUEUE_ALIGN 256
3900 #if RX_RING_SIZE > 256
3901 #define RX_Q_ENTRIES Rx2048QEntries
3902 #else
3903 #define RX_Q_ENTRIES Rx256QEntries
3904 #endif
3905 #define TX_TIMEOUT (2 * HZ)
3906 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3907 #define ADDR_64BITS
3908 #define netdrv_addr_t __le64
3909 #define cpu_to_dma(x) cpu_to_le64(x)
3910 #define dma_to_cpu(x) le64_to_cpu(x)
3911 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
3912 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
3913 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
3914 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
3915 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
3916 #else
3917 #define netdrv_addr_t __le32
3918 #define cpu_to_dma(x) cpu_to_le32(x)
3919 #define dma_to_cpu(x) le32_to_cpu(x)
3920 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
3921 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
3922 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
3923 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
3924 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
3925 #endif
3926 #define skb_first_frag_len(skb) skb_headlen(skb)
3927 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
3928 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
3929 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
3930 #ifdef VLAN_SUPPORT
3931 #define RxComplType RxComplType3
3932 #else
3933 #define RxComplType RxComplType2
3934 #endif
3935 #ifdef ADDR_64BITS
3936 #define TX_DESC_TYPE TxDescType2
3937 #else
3938 #define TX_DESC_TYPE TxDescType1
3939 #endif
3940 #define TX_DESC_SPACING TxDescSpaceUnlim
3941 #if 0
3942 #endif
3943 #define PHY_CNT 2
3944 #ifdef VLAN_SUPPORT
3945 #endif
3946 #ifdef VLAN_SUPPORT
3947 #endif
3948 #ifdef VLAN_SUPPORT
3949 #endif
3950 #ifndef MODULE
3951 #endif
3952 #ifdef ZEROCOPY
3953 #endif
3954 #ifdef VLAN_SUPPORT
3955 #endif
3956 #ifdef ADDR_64BITS
3957 #endif
3958 #if ! defined(final_version)
3959 #endif
3960 #ifdef VLAN_SUPPORT
3961 #endif
3962 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
3963 #endif
3964 #ifndef final_version
3965 #endif
3966 #ifdef VLAN_SUPPORT
3967 #endif
3968 #ifdef VLAN_SUPPORT
3969 #endif
3970 #ifdef VLAN_SUPPORT
3971 #endif
3972 #ifdef VLAN_SUPPORT
3973 #endif
3974 /* LDV_COMMENT_END_PREP */
3975 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "set_link_ksettings" */
3976 const struct ethtool_link_ksettings * var_set_link_ksettings_22_p1;
3977 /* LDV_COMMENT_BEGIN_PREP */
3978 #ifdef CONFIG_PM
3979 #endif
3980 #ifdef CONFIG_PM
3981 #endif
3982 #ifdef MODULE
3983 #endif
3984 /* LDV_COMMENT_END_PREP */
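/* Taken together, the ethtool callbacks enumerated in this section would
   populate one ops table; a hedged sketch of the wiring (the exact field
   set is inferred from the functions listed above): */
static const struct ethtool_ops ethtool_ops_sketch = {
        .begin              = check_if_running,
        .get_drvinfo        = get_drvinfo,
        .nway_reset         = nway_reset,
        .get_link           = get_link,
        .get_msglevel       = get_msglevel,
        .set_msglevel       = set_msglevel,
        .get_link_ksettings = get_link_ksettings,
        .set_link_ksettings = set_link_ksettings,
};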
3985
3986 /** STRUCT: struct type: pci_driver, struct name: starfire_driver **/
3987 /* content: static int starfire_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)*/
3988 /* LDV_COMMENT_BEGIN_PREP */
3989 #define DRV_NAME "starfire"
3990 #define DRV_VERSION "2.1"
3991 #define DRV_RELDATE "July 6, 2008"
3992 #define HAS_BROKEN_FIRMWARE
3993 #ifdef HAS_BROKEN_FIRMWARE
3994 #define PADDING_MASK 3
3995 #endif
3996 #define ZEROCOPY
3997 #if IS_ENABLED(CONFIG_VLAN_8021Q)
3998 #define VLAN_SUPPORT
3999 #endif
4000 #define PKT_BUF_SZ 1536
4001 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
4002 #else
4003 #endif
4004 #ifdef __sparc__
4005 #define DMA_BURST_SIZE 64
4006 #else
4007 #define DMA_BURST_SIZE 128
4008 #endif
4009 #define RX_RING_SIZE 256
4010 #define TX_RING_SIZE 32
4011 #define DONE_Q_SIZE 1024
4012 #define QUEUE_ALIGN 256
4013 #if RX_RING_SIZE > 256
4014 #define RX_Q_ENTRIES Rx2048QEntries
4015 #else
4016 #define RX_Q_ENTRIES Rx256QEntries
4017 #endif
4018 #define TX_TIMEOUT (2 * HZ)
4019 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
4020 #define ADDR_64BITS
4021 #define netdrv_addr_t __le64
4022 #define cpu_to_dma(x) cpu_to_le64(x)
4023 #define dma_to_cpu(x) le64_to_cpu(x)
4024 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
4025 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
4026 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
4027 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
4028 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
4029 #else
4030 #define netdrv_addr_t __le32
4031 #define cpu_to_dma(x) cpu_to_le32(x)
4032 #define dma_to_cpu(x) le32_to_cpu(x)
4033 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
4034 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
4035 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
4036 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
4037 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
4038 #endif
4039 #define skb_first_frag_len(skb) skb_headlen(skb)
4040 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
4041 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
4042 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
4043 #ifdef VLAN_SUPPORT
4044 #define RxComplType RxComplType3
4045 #else
4046 #define RxComplType RxComplType2
4047 #endif
4048 #ifdef ADDR_64BITS
4049 #define TX_DESC_TYPE TxDescType2
4050 #else
4051 #define TX_DESC_TYPE TxDescType1
4052 #endif
4053 #define TX_DESC_SPACING TxDescSpaceUnlim
4054 #if 0
4055 #endif
4056 #define PHY_CNT 2
4057 #ifdef VLAN_SUPPORT
4058 #endif
4059 #ifdef VLAN_SUPPORT
4060 #endif
4061 #ifdef VLAN_SUPPORT
4062 #endif
4063 /* LDV_COMMENT_END_PREP */
4064 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "starfire_init_one" */
4065 struct pci_dev * var_group6;
4066 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "starfire_init_one" */
4067 const struct pci_device_id * var_starfire_init_one_2_p1;
4068 /* LDV_COMMENT_VAR_DECLARE Variable declaration for the test return result of the call to "starfire_init_one" */
4069 static int res_starfire_init_one_2;
4070 /* LDV_COMMENT_BEGIN_PREP */
4071 #ifdef VLAN_SUPPORT
4072 #endif
4073 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
4074 #endif
4075 #ifndef final_version
4076 #endif
4077 #ifdef VLAN_SUPPORT
4078 #endif
4079 #ifdef VLAN_SUPPORT
4080 #endif
4081 #ifdef VLAN_SUPPORT
4082 #endif
4083 #ifdef VLAN_SUPPORT
4084 #endif
4085 #ifdef CONFIG_PM
4086 #endif
4087 #ifdef CONFIG_PM
4088 #endif
4089 #ifdef MODULE
4090 #endif
4091 /* LDV_COMMENT_END_PREP */
4092 /* content: static void starfire_remove_one(struct pci_dev *pdev)*/
4093 /* LDV_COMMENT_BEGIN_PREP */
4094 #define DRV_NAME "starfire"
4095 #define DRV_VERSION "2.1"
4096 #define DRV_RELDATE "July 6, 2008"
4097 #define HAS_BROKEN_FIRMWARE
4098 #ifdef HAS_BROKEN_FIRMWARE
4099 #define PADDING_MASK 3
4100 #endif
4101 #define ZEROCOPY
4102 #if IS_ENABLED(CONFIG_VLAN_8021Q)
4103 #define VLAN_SUPPORT
4104 #endif
4105 #define PKT_BUF_SZ 1536
4106 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
4107 #else
4108 #endif
4109 #ifdef __sparc__
4110 #define DMA_BURST_SIZE 64
4111 #else
4112 #define DMA_BURST_SIZE 128
4113 #endif
4114 #define RX_RING_SIZE 256
4115 #define TX_RING_SIZE 32
4116 #define DONE_Q_SIZE 1024
4117 #define QUEUE_ALIGN 256
4118 #if RX_RING_SIZE > 256
4119 #define RX_Q_ENTRIES Rx2048QEntries
4120 #else
4121 #define RX_Q_ENTRIES Rx256QEntries
4122 #endif
4123 #define TX_TIMEOUT (2 * HZ)
4124 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
4125 #define ADDR_64BITS
4126 #define netdrv_addr_t __le64
4127 #define cpu_to_dma(x) cpu_to_le64(x)
4128 #define dma_to_cpu(x) le64_to_cpu(x)
4129 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
4130 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
4131 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
4132 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
4133 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
4134 #else
4135 #define netdrv_addr_t __le32
4136 #define cpu_to_dma(x) cpu_to_le32(x)
4137 #define dma_to_cpu(x) le32_to_cpu(x)
4138 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
4139 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
4140 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
4141 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
4142 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
4143 #endif
4144 #define skb_first_frag_len(skb) skb_headlen(skb)
4145 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
4146 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
4147 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
4148 #ifdef VLAN_SUPPORT
4149 #define RxComplType RxComplType3
4150 #else
4151 #define RxComplType RxComplType2
4152 #endif
4153 #ifdef ADDR_64BITS
4154 #define TX_DESC_TYPE TxDescType2
4155 #else
4156 #define TX_DESC_TYPE TxDescType1
4157 #endif
4158 #define TX_DESC_SPACING TxDescSpaceUnlim
4159 #if 0
4160 #endif
4161 #define PHY_CNT 2
4162 #ifdef VLAN_SUPPORT
4163 #endif
4164 #ifdef VLAN_SUPPORT
4165 #endif
4166 #ifdef VLAN_SUPPORT
4167 #endif
4168 #ifndef MODULE
4169 #endif
4170 #ifdef ZEROCOPY
4171 #endif
4172 #ifdef VLAN_SUPPORT
4173 #endif
4174 #ifdef ADDR_64BITS
4175 #endif
4176 #if ! defined(final_version)
4177 #endif
4178 #ifdef VLAN_SUPPORT
4179 #endif
4180 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
4181 #endif
4182 #ifndef final_version
4183 #endif
4184 #ifdef VLAN_SUPPORT
4185 #endif
4186 #ifdef VLAN_SUPPORT
4187 #endif
4188 #ifdef VLAN_SUPPORT
4189 #endif
4190 #ifdef VLAN_SUPPORT
4191 #endif
4192 #ifdef CONFIG_PM
4193 #endif
4194 /* LDV_COMMENT_END_PREP */
4195 /* LDV_COMMENT_BEGIN_PREP */
4196 #ifdef CONFIG_PM
4197 #endif
4198 #ifdef MODULE
4199 #endif
4200 /* LDV_COMMENT_END_PREP */
4201 /* content: static int starfire_suspend(struct pci_dev *pdev, pm_message_t state)*/
4202 /* LDV_COMMENT_BEGIN_PREP */
4203 #define DRV_NAME "starfire"
4204 #define DRV_VERSION "2.1"
4205 #define DRV_RELDATE "July 6, 2008"
4206 #define HAS_BROKEN_FIRMWARE
4207 #ifdef HAS_BROKEN_FIRMWARE
4208 #define PADDING_MASK 3
4209 #endif
4210 #define ZEROCOPY
4211 #if IS_ENABLED(CONFIG_VLAN_8021Q)
4212 #define VLAN_SUPPORT
4213 #endif
4214 #define PKT_BUF_SZ 1536
4215 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
4216 #else
4217 #endif
4218 #ifdef __sparc__
4219 #define DMA_BURST_SIZE 64
4220 #else
4221 #define DMA_BURST_SIZE 128
4222 #endif
4223 #define RX_RING_SIZE 256
4224 #define TX_RING_SIZE 32
4225 #define DONE_Q_SIZE 1024
4226 #define QUEUE_ALIGN 256
4227 #if RX_RING_SIZE > 256
4228 #define RX_Q_ENTRIES Rx2048QEntries
4229 #else
4230 #define RX_Q_ENTRIES Rx256QEntries
4231 #endif
4232 #define TX_TIMEOUT (2 * HZ)
4233 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
4234 #define ADDR_64BITS
4235 #define netdrv_addr_t __le64
4236 #define cpu_to_dma(x) cpu_to_le64(x)
4237 #define dma_to_cpu(x) le64_to_cpu(x)
4238 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
4239 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
4240 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
4241 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
4242 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
4243 #else
4244 #define netdrv_addr_t __le32
4245 #define cpu_to_dma(x) cpu_to_le32(x)
4246 #define dma_to_cpu(x) le32_to_cpu(x)
4247 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
4248 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
4249 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
4250 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
4251 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
4252 #endif
4253 #define skb_first_frag_len(skb) skb_headlen(skb)
4254 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
4255 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
4256 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
4257 #ifdef VLAN_SUPPORT
4258 #define RxComplType RxComplType3
4259 #else
4260 #define RxComplType RxComplType2
4261 #endif
4262 #ifdef ADDR_64BITS
4263 #define TX_DESC_TYPE TxDescType2
4264 #else
4265 #define TX_DESC_TYPE TxDescType1
4266 #endif
4267 #define TX_DESC_SPACING TxDescSpaceUnlim
4268 #if 0
4269 #endif
4270 #define PHY_CNT 2
4271 #ifdef VLAN_SUPPORT
4272 #endif
4273 #ifdef VLAN_SUPPORT
4274 #endif
4275 #ifdef VLAN_SUPPORT
4276 #endif
4277 #ifndef MODULE
4278 #endif
4279 #ifdef ZEROCOPY
4280 #endif
4281 #ifdef VLAN_SUPPORT
4282 #endif
4283 #ifdef ADDR_64BITS
4284 #endif
4285 #if ! defined(final_version)
4286 #endif
4287 #ifdef VLAN_SUPPORT
4288 #endif
4289 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
4290 #endif
4291 #ifndef final_version
4292 #endif
4293 #ifdef VLAN_SUPPORT
4294 #endif
4295 #ifdef VLAN_SUPPORT
4296 #endif
4297 #ifdef VLAN_SUPPORT
4298 #endif
4299 #ifdef VLAN_SUPPORT
4300 #endif
4301 #ifdef CONFIG_PM
4302 /* LDV_COMMENT_END_PREP */
4303 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "starfire_suspend" */
4304 pm_message_t var_starfire_suspend_29_p1;
4305 /* LDV_COMMENT_BEGIN_PREP */
4306 #endif
4307 #ifdef CONFIG_PM
4308 #endif
4309 #ifdef MODULE
4310 #endif
4311 /* LDV_COMMENT_END_PREP */
4312 /* content: static int starfire_resume(struct pci_dev *pdev)*/
4313 /* LDV_COMMENT_BEGIN_PREP */
4314 #define DRV_NAME "starfire"
4315 #define DRV_VERSION "2.1"
4316 #define DRV_RELDATE "July 6, 2008"
4317 #define HAS_BROKEN_FIRMWARE
4318 #ifdef HAS_BROKEN_FIRMWARE
4319 #define PADDING_MASK 3
4320 #endif
4321 #define ZEROCOPY
4322 #if IS_ENABLED(CONFIG_VLAN_8021Q)
4323 #define VLAN_SUPPORT
4324 #endif
4325 #define PKT_BUF_SZ 1536
4326 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
4327 #else
4328 #endif
4329 #ifdef __sparc__
4330 #define DMA_BURST_SIZE 64
4331 #else
4332 #define DMA_BURST_SIZE 128
4333 #endif
4334 #define RX_RING_SIZE 256
4335 #define TX_RING_SIZE 32
4336 #define DONE_Q_SIZE 1024
4337 #define QUEUE_ALIGN 256
4338 #if RX_RING_SIZE > 256
4339 #define RX_Q_ENTRIES Rx2048QEntries
4340 #else
4341 #define RX_Q_ENTRIES Rx256QEntries
4342 #endif
4343 #define TX_TIMEOUT (2 * HZ)
4344 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
4345 #define ADDR_64BITS
4346 #define netdrv_addr_t __le64
4347 #define cpu_to_dma(x) cpu_to_le64(x)
4348 #define dma_to_cpu(x) le64_to_cpu(x)
4349 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
4350 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
4351 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
4352 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
4353 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
4354 #else
4355 #define netdrv_addr_t __le32
4356 #define cpu_to_dma(x) cpu_to_le32(x)
4357 #define dma_to_cpu(x) le32_to_cpu(x)
4358 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
4359 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
4360 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
4361 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
4362 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
4363 #endif
4364 #define skb_first_frag_len(skb) skb_headlen(skb)
4365 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
4366 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
4367 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
4368 #ifdef VLAN_SUPPORT
4369 #define RxComplType RxComplType3
4370 #else
4371 #define RxComplType RxComplType2
4372 #endif
4373 #ifdef ADDR_64BITS
4374 #define TX_DESC_TYPE TxDescType2
4375 #else
4376 #define TX_DESC_TYPE TxDescType1
4377 #endif
4378 #define TX_DESC_SPACING TxDescSpaceUnlim
4379 #if 0
4380 #endif
4381 #define PHY_CNT 2
4382 #ifdef VLAN_SUPPORT
4383 #endif
4384 #ifdef VLAN_SUPPORT
4385 #endif
4386 #ifdef VLAN_SUPPORT
4387 #endif
4388 #ifndef MODULE
4389 #endif
4390 #ifdef ZEROCOPY
4391 #endif
4392 #ifdef VLAN_SUPPORT
4393 #endif
4394 #ifdef ADDR_64BITS
4395 #endif
4396 #if ! defined(final_version)
4397 #endif
4398 #ifdef VLAN_SUPPORT
4399 #endif
4400 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
4401 #endif
4402 #ifndef final_version
4403 #endif
4404 #ifdef VLAN_SUPPORT
4405 #endif
4406 #ifdef VLAN_SUPPORT
4407 #endif
4408 #ifdef VLAN_SUPPORT
4409 #endif
4410 #ifdef VLAN_SUPPORT
4411 #endif
4412 #ifdef CONFIG_PM
4413 /* LDV_COMMENT_END_PREP */
4414 /* LDV_COMMENT_BEGIN_PREP */
4415 #endif
4416 #ifdef CONFIG_PM
4417 #endif
4418 #ifdef MODULE
4419 #endif
4420 /* LDV_COMMENT_END_PREP */
4421
4422 /** CALLBACK SECTION request_irq **/
4423 /* content: static irqreturn_t intr_handler(int irq, void *dev_instance)*/
4424 /* LDV_COMMENT_BEGIN_PREP */
/* [duplicate driver macro preamble elided; the full set of definitions appears once above, in the starfire_resume block] */
4513 /* LDV_COMMENT_END_PREP */
4514 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "intr_handler" */
4515 int var_intr_handler_10_p0;
4516 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "intr_handler" */
4517 void * var_intr_handler_10_p1;
4518 /* LDV_COMMENT_BEGIN_PREP */
4519 #ifndef final_version
4520 #endif
4521 #ifdef VLAN_SUPPORT
4522 #endif
4523 #ifdef VLAN_SUPPORT
4524 #endif
4525 #ifdef VLAN_SUPPORT
4526 #endif
4527 #ifdef VLAN_SUPPORT
4528 #endif
4529 #ifdef CONFIG_PM
4530 #endif
4531 #ifdef CONFIG_PM
4532 #endif
4533 #ifdef MODULE
4534 #endif
4535 /* LDV_COMMENT_END_PREP */
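/*
 * A minimal sketch of the handler shape the two variables above model:
 * var_intr_handler_10_p0 is the irq number and var_intr_handler_10_p1 the
 * dev_instance cookie given to request_irq(). The example_ names are
 * illustrative.
 */
#include <linux/interrupt.h>
#include <linux/netdevice.h>

static irqreturn_t example_intr(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;

	/* Read the chip's interrupt status here; a shared handler must
	 * return IRQ_NONE when its device did not raise the line. */
	(void)dev;
	return IRQ_HANDLED;
}

/* Typical registration in ndo_open, release in ndo_stop:
 *	err = request_irq(dev->irq, example_intr, IRQF_SHARED, dev->name, dev);
 *	...
 *	free_irq(dev->irq, dev);
 */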
4536
4537
4538
4539
4540 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
4541 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
4542 /*============================= VARIABLE INITIALIZING PART =============================*/
4543 LDV_IN_INTERRUPT=1;
4544
4545
4546
4547
4548 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
4549 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
4550 /*============================= FUNCTION CALL SECTION =============================*/
4551 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
4552 ldv_initialize();
4553
4554 /** INIT: init_type: ST_MODULE_INIT **/
4555 /* content: static int __init starfire_init (void)*/
4556 /* LDV_COMMENT_BEGIN_PREP */
/* [duplicate driver macro preamble elided; the full set of definitions appears once above, in the starfire_resume block] */
4659 /* LDV_COMMENT_END_PREP */
4660 /* LDV_COMMENT_FUNCTION_CALL The kernel calls the driver's init function after the module is loaded; the function is registered with "MODULE_INIT(function name)". */
4661 ldv_handler_precall();
4662 if(starfire_init())
4663 goto ldv_final;
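/*
 * The check above mirrors the kernel contract: a nonzero return from the
 * module_init() function aborts loading. A minimal sketch of that entry point
 * for a PCI driver; the example_ names are illustrative.
 */
#include <linux/module.h>
#include <linux/pci.h>

static struct pci_driver example_pci_driver;	/* id table and callbacks set elsewhere */

static int __init example_init(void)
{
	/* 0 on success; any negative errno unloads the module */
	return pci_register_driver(&example_pci_driver);
}

static void __exit example_exit(void)
{
	pci_unregister_driver(&example_pci_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");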
4664 int ldv_s_netdev_ops_net_device_ops = 0;
4665
4666
4667
4668
4669 int ldv_s_starfire_driver_pci_driver = 0;
4670
4671
4672
4673
4674
4675 while( nondet_int()
4676 || !(ldv_s_netdev_ops_net_device_ops == 0)
4677 || !(ldv_s_starfire_driver_pci_driver == 0)
4678 ) {
4679
4680 switch(nondet_int()) {
4681
4682 case 0: {
4683
4684 /** STRUCT: struct type: net_device_ops, struct name: netdev_ops **/
4685 if(ldv_s_netdev_ops_net_device_ops==0) {
4686
4687 /* content: static int netdev_open(struct net_device *dev)*/
4688 /* LDV_COMMENT_BEGIN_PREP */
/* [duplicate driver macro preamble elided; the full set of definitions appears once above, in the starfire_resume block] */
4773 /* LDV_COMMENT_END_PREP */
4774 /* LDV_COMMENT_FUNCTION_CALL Function from the "ndo_open" field of the driver callback structure "netdev_ops". Standard test that the return value is correct. */
4775 ldv_handler_precall();
4776 res_netdev_open_5 = netdev_open( var_group1);
4777 ldv_check_return_value(res_netdev_open_5);
4778 if(res_netdev_open_5 < 0)
4779 goto ldv_module_exit;
4780 /* LDV_COMMENT_BEGIN_PREP */
4781 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
4782 #endif
4783 #ifndef final_version
4784 #endif
4785 #ifdef VLAN_SUPPORT
4786 #endif
4787 #ifdef VLAN_SUPPORT
4788 #endif
4789 #ifdef VLAN_SUPPORT
4790 #endif
4791 #ifdef VLAN_SUPPORT
4792 #endif
4793 #ifdef CONFIG_PM
4794 #endif
4795 #ifdef CONFIG_PM
4796 #endif
4797 #ifdef MODULE
4798 #endif
4799 /* LDV_COMMENT_END_PREP */
4800 ldv_s_netdev_ops_net_device_ops++;
4801
4802 }
4803
4804 }
4805
4806 break;
4807 case 1: {
4808
4809 /** STRUCT: struct type: net_device_ops, struct name: netdev_ops **/
4810 if(ldv_s_netdev_ops_net_device_ops==1) {
4811
4812 /* content: static int netdev_close(struct net_device *dev)*/
4813 /* LDV_COMMENT_BEGIN_PREP */
/* [duplicate driver macro preamble elided; the full set of definitions appears once above, in the starfire_resume block] */
4912 /* LDV_COMMENT_END_PREP */
4913 /* LDV_COMMENT_FUNCTION_CALL Function from the "ndo_stop" field of the driver callback structure "netdev_ops". Standard test that the return value is correct. */
4914 ldv_handler_precall();
4915 res_netdev_close_28 = netdev_close( var_group1);
4916 ldv_check_return_value(res_netdev_close_28);
4917 if(res_netdev_close_28)
4918 goto ldv_module_exit;
4919 /* LDV_COMMENT_BEGIN_PREP */
4920 #ifdef CONFIG_PM
4921 #endif
4922 #ifdef CONFIG_PM
4923 #endif
4924 #ifdef MODULE
4925 #endif
4926 /* LDV_COMMENT_END_PREP */
4927 ldv_s_netdev_ops_net_device_ops=0;
4928
4929 }
4930
4931 }
4932
4933 break;
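/*
 * Cases 0 and 1 above exercise ndo_open/ndo_stop in order, matching how the
 * kernel brackets an interface's up/down cycle. A sketch of the ops table that
 * wires these callbacks, using the callback names from this trace; the table
 * name itself is illustrative.
 */
#include <linux/netdevice.h>

static const struct net_device_ops example_netdev_ops = {
	.ndo_open	 = netdev_open,		/* must return 0 or a negative errno */
	.ndo_stop	 = netdev_close,	/* undoes everything ndo_open set up */
	.ndo_start_xmit	 = start_tx,
	.ndo_tx_timeout	 = tx_timeout,
	.ndo_get_stats	 = get_stats,
	.ndo_set_rx_mode = set_rx_mode,
	.ndo_do_ioctl	 = netdev_ioctl,
};
/* installed once at probe time: dev->netdev_ops = &example_netdev_ops; */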
4934 case 2: {
4935
4936 /** STRUCT: struct type: net_device_ops, struct name: netdev_ops **/
4937
4938
4939 /* content: static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)*/
4940 /* LDV_COMMENT_BEGIN_PREP */
/* [duplicate driver macro preamble elided; the full set of definitions appears once above, in the starfire_resume block] */
5027 /* LDV_COMMENT_END_PREP */
5028 /* LDV_COMMENT_FUNCTION_CALL Function from the "ndo_start_xmit" field of the driver callback structure "netdev_ops" */
5029 ldv_handler_precall();
5030 start_tx( var_group2, var_group1);
5031 /* LDV_COMMENT_BEGIN_PREP */
5032 #ifndef final_version
5033 #endif
5034 #ifdef VLAN_SUPPORT
5035 #endif
5036 #ifdef VLAN_SUPPORT
5037 #endif
5038 #ifdef VLAN_SUPPORT
5039 #endif
5040 #ifdef VLAN_SUPPORT
5041 #endif
5042 #ifdef CONFIG_PM
5043 #endif
5044 #ifdef CONFIG_PM
5045 #endif
5046 #ifdef MODULE
5047 #endif
5048 /* LDV_COMMENT_END_PREP */
5049
5050
5051
5052
5053 }
5054
5055 break;
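/*
 * ndo_start_xmit returns a netdev_tx_t rather than an errno, which is why the
 * harness does not test its return value above. A minimal sketch of that
 * contract; the descriptor-full test is a placeholder.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	bool tx_ring_full = false;	/* placeholder for a real descriptor check */

	if (tx_ring_full) {
		netif_stop_queue(dev);	/* back-pressure the stack */
		return NETDEV_TX_BUSY;	/* the core requeues the skb */
	}
	/* map the skb for DMA and post it to the TX ring here */
	return NETDEV_TX_OK;		/* driver now owns (and later frees) the skb */
}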
5056 case 3: {
5057
5058 /** STRUCT: struct type: net_device_ops, struct name: netdev_ops **/
5059
5060
5061 /* content: static void tx_timeout(struct net_device *dev)*/
5062 /* LDV_COMMENT_BEGIN_PREP */
/* [duplicate driver macro preamble elided; the full set of definitions appears once above, in the starfire_resume block] */
5149 /* LDV_COMMENT_END_PREP */
5150 /* LDV_COMMENT_FUNCTION_CALL Function from the "ndo_tx_timeout" field of the driver callback structure "netdev_ops" */
5151 ldv_handler_precall();
5152 tx_timeout( var_group1);
5153 /* LDV_COMMENT_BEGIN_PREP */
5154 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
5155 #endif
5156 #ifndef final_version
5157 #endif
5158 #ifdef VLAN_SUPPORT
5159 #endif
5160 #ifdef VLAN_SUPPORT
5161 #endif
5162 #ifdef VLAN_SUPPORT
5163 #endif
5164 #ifdef VLAN_SUPPORT
5165 #endif
5166 #ifdef CONFIG_PM
5167 #endif
5168 #ifdef CONFIG_PM
5169 #endif
5170 #ifdef MODULE
5171 #endif
5172 /* LDV_COMMENT_END_PREP */
5173
5174
5175
5176
5177 }
5178
5179 break;
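/*
 * ndo_tx_timeout fires from the stack's watchdog when no transmit has
 * completed within dev->watchdog_timeo jiffies (the TX_TIMEOUT of 2*HZ in the
 * preamble). A sketch of the usual recovery shape; the ring-reset step is a
 * placeholder.
 */
static void example_tx_timeout(struct net_device *dev)
{
	/* log chip state and reset the TX ring here */
	dev->stats.tx_errors++;
	netif_wake_queue(dev);	/* let the stack retry */
}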
5180 case 4: {
5181
5182 /** STRUCT: struct type: net_device_ops, struct name: netdev_ops **/
5183
5184
5185 /* content: static struct net_device_stats *get_stats(struct net_device *dev)*/
5186 /* LDV_COMMENT_BEGIN_PREP */
/* [duplicate driver macro preamble elided; the full set of definitions appears once above, in the starfire_resume block] */
5281 /* LDV_COMMENT_END_PREP */
5282 /* LDV_COMMENT_FUNCTION_CALL Function from the "ndo_get_stats" field of the driver callback structure "netdev_ops" */
5283 ldv_handler_precall();
5284 get_stats( var_group1);
5285 /* LDV_COMMENT_BEGIN_PREP */
5286 #ifdef VLAN_SUPPORT
5287 #endif
5288 #ifdef VLAN_SUPPORT
5289 #endif
5290 #ifdef CONFIG_PM
5291 #endif
5292 #ifdef CONFIG_PM
5293 #endif
5294 #ifdef MODULE
5295 #endif
5296 /* LDV_COMMENT_END_PREP */
5297
5298
5299
5300
5301 }
5302
5303 break;
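/*
 * ndo_get_stats hands the core a pointer to the device counters, usually
 * after folding in values read from hardware registers. Minimal sketch:
 */
static struct net_device_stats *example_get_stats(struct net_device *dev)
{
	/* read-and-clear hardware counters into dev->stats here */
	return &dev->stats;
}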
5304 case 5: {
5305
5306 /** STRUCT: struct type: net_device_ops, struct name: netdev_ops **/
5307
5308
5309 /* content: static void set_rx_mode(struct net_device *dev)*/
5310 /* LDV_COMMENT_BEGIN_PREP */
/* [duplicate driver macro preamble elided; the full set of definitions appears once above, in the starfire_resume block] */
5407 /* LDV_COMMENT_END_PREP */
5408 /* LDV_COMMENT_FUNCTION_CALL Function from the "ndo_set_rx_mode" field of the driver callback structure "netdev_ops" */
5409 ldv_handler_precall();
5410 set_rx_mode( var_group1);
5411 /* LDV_COMMENT_BEGIN_PREP */
5412 #ifdef CONFIG_PM
5413 #endif
5414 #ifdef CONFIG_PM
5415 #endif
5416 #ifdef MODULE
5417 #endif
5418 /* LDV_COMMENT_END_PREP */
5419
5420
5421
5422
5423 }
5424
5425 break;
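/*
 * ndo_set_rx_mode rebuilds the RX filter from dev->flags and the current
 * multicast list; it is called with the address lock held and must not sleep.
 * Sketch; the 15-entry filter limit is a made-up example.
 */
#include <linux/netdevice.h>

static void example_set_rx_mode(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {
		/* accept all packets */
	} else if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 15) {
		/* accept all multicast */
	} else {
		netdev_for_each_mc_addr(ha, dev) {
			/* program ha->addr into a hardware filter slot */
		}
	}
}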
5426 case 6: {
5427
5428 /** STRUCT: struct type: net_device_ops, struct name: netdev_ops **/
5429
5430
5431 /* content: static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)*/
5432 /* LDV_COMMENT_BEGIN_PREP */
/* [duplicate driver macro preamble elided; the full set of definitions appears once above, in the starfire_resume block] */
5531 /* LDV_COMMENT_END_PREP */
5532 /* LDV_COMMENT_FUNCTION_CALL Function from the "ndo_do_ioctl" field of the driver callback structure "netdev_ops" */
5533 ldv_handler_precall();
5534 netdev_ioctl( var_group1, var_group3, var_netdev_ioctl_27_p2);
5535 /* LDV_COMMENT_BEGIN_PREP */
5536 #ifdef CONFIG_PM
5537 #endif
5538 #ifdef CONFIG_PM
5539 #endif
5540 #ifdef MODULE
5541 #endif
5542 /* LDV_COMMENT_END_PREP */
5543
5544
5545
5546
5547 }
5548
5549 break;
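/*
 * For a MII-managed NIC like this one, ndo_do_ioctl commonly just forwards
 * the SIOCxMII requests to the generic helper. Sketch assuming a hypothetical
 * private struct holding a struct mii_if_info field:
 */
#include <linux/mii.h>
#include <linux/netdevice.h>

struct example_priv {
	struct mii_if_info mii_if;
};

static int example_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct example_priv *np = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;
	return generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
}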
5550 case 7: {
5551
5552 /** STRUCT: struct type: net_device_ops, struct name: netdev_ops **/
5553
5554
5555 /* content: static int netdev_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)*/
5556 /* LDV_COMMENT_BEGIN_PREP */
/* [duplicate driver macro preamble elided, up to the VLAN_SUPPORT guard that encloses this callback; the full set of definitions appears in the starfire_resume block above] */
5627 #ifdef VLAN_SUPPORT
5628 /* LDV_COMMENT_END_PREP */
5629 /* LDV_COMMENT_FUNCTION_CALL Function from the "ndo_vlan_rx_add_vid" field of the driver callback structure "netdev_ops" */
5630 ldv_handler_precall();
5631 netdev_vlan_rx_add_vid( var_group1, var_netdev_vlan_rx_add_vid_0_p1, var_netdev_vlan_rx_add_vid_0_p2);
5632 /* LDV_COMMENT_BEGIN_PREP */
5633 #endif
5634 #ifdef VLAN_SUPPORT
5635 #endif
5636 #ifndef MODULE
5637 #endif
5638 #ifdef ZEROCOPY
5639 #endif
5640 #ifdef VLAN_SUPPORT
5641 #endif
5642 #ifdef ADDR_64BITS
5643 #endif
5644 #if ! defined(final_version)
5645 #endif
5646 #ifdef VLAN_SUPPORT
5647 #endif
5648 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
5649 #endif
5650 #ifndef final_version
5651 #endif
5652 #ifdef VLAN_SUPPORT
5653 #endif
5654 #ifdef VLAN_SUPPORT
5655 #endif
5656 #ifdef VLAN_SUPPORT
5657 #endif
5658 #ifdef VLAN_SUPPORT
5659 #endif
5660 #ifdef CONFIG_PM
5661 #endif
5662 #ifdef CONFIG_PM
5663 #endif
5664 #ifdef MODULE
5665 #endif
5666 /* LDV_COMMENT_END_PREP */
5667
5668
5669
5670
5671 }
5672
5673 break;
5674 case 8: {
5675
5676 /** STRUCT: struct type: net_device_ops, struct name: netdev_ops **/
5677
5678
5679 /* content: static int netdev_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)*/
5680 /* LDV_COMMENT_BEGIN_PREP */
/* [duplicate driver macro preamble elided, up to the VLAN_SUPPORT guard that encloses this callback; the full set of definitions appears in the starfire_resume block above] */
5751 #ifdef VLAN_SUPPORT
5752 /* LDV_COMMENT_END_PREP */
5753 /* LDV_COMMENT_FUNCTION_CALL Function from the "ndo_vlan_rx_kill_vid" field of the driver callback structure "netdev_ops" */
5754 ldv_handler_precall();
5755 netdev_vlan_rx_kill_vid( var_group1, var_netdev_vlan_rx_kill_vid_1_p1, var_netdev_vlan_rx_kill_vid_1_p2);
5756 /* LDV_COMMENT_BEGIN_PREP */
5757 #endif
5758 #ifdef VLAN_SUPPORT
5759 #endif
5760 #ifndef MODULE
5761 #endif
5762 #ifdef ZEROCOPY
5763 #endif
5764 #ifdef VLAN_SUPPORT
5765 #endif
5766 #ifdef ADDR_64BITS
5767 #endif
5768 #if ! defined(final_version)
5769 #endif
5770 #ifdef VLAN_SUPPORT
5771 #endif
5772 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
5773 #endif
5774 #ifndef final_version
5775 #endif
5776 #ifdef VLAN_SUPPORT
5777 #endif
5778 #ifdef VLAN_SUPPORT
5779 #endif
5780 #ifdef VLAN_SUPPORT
5781 #endif
5782 #ifdef VLAN_SUPPORT
5783 #endif
5784 #ifdef CONFIG_PM
5785 #endif
5786 #ifdef CONFIG_PM
5787 #endif
5788 #ifdef MODULE
5789 #endif
5790 /* LDV_COMMENT_END_PREP */
5791
5792
5793
5794
5795 }
5796
5797 break;
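/*
 * Cases 7 and 8 above model the 8021q core announcing VLAN membership
 * changes. A common pattern keeps the active VIDs in a 4096-bit bitmap and
 * rewrites the hardware filter on each change; the bitmap field and struct
 * are hypothetical examples.
 */
#include <linux/netdevice.h>
#include <linux/if_vlan.h>

struct example_vlan_priv {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
};

static int example_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct example_vlan_priv *np = netdev_priv(dev);

	set_bit(vid, np->active_vlans);
	/* rewrite the hardware VLAN filter here */
	return 0;	/* nonzero would make the 8021q core report failure */
}

static int example_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct example_vlan_priv *np = netdev_priv(dev);

	clear_bit(vid, np->active_vlans);
	/* rewrite the hardware VLAN filter here */
	return 0;
}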
5798 case 9: {
5799
5800 /** STRUCT: struct type: ethtool_ops, struct name: ethtool_ops **/
5801
5802
5803 /* content: static int check_if_running(struct net_device *dev)*/
5804 /* LDV_COMMENT_BEGIN_PREP */
/* [duplicate driver macro preamble elided; the full set of definitions appears once above, in the starfire_resume block] */
5903 /* LDV_COMMENT_END_PREP */
5904 /* LDV_COMMENT_FUNCTION_CALL Function from the "begin" field of the driver callback structure "ethtool_ops" */
5905 ldv_handler_precall();
5906 check_if_running( var_group1);
5907 /* LDV_COMMENT_BEGIN_PREP */
5908 #ifdef CONFIG_PM
5909 #endif
5910 #ifdef CONFIG_PM
5911 #endif
5912 #ifdef MODULE
5913 #endif
5914 /* LDV_COMMENT_END_PREP */
5915
5916
5917
5918
5919 }
5920
5921 break;
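/*
 * Case 9 exercises the ethtool "begin" hook; the cases that follow walk the
 * rest of the same table. If begin() returns nonzero, the ethtool core aborts
 * the whole operation with that error, which is exactly what check_if_running
 * provides. Wiring sketch using the callback names from this trace; the table
 * name itself is illustrative.
 */
#include <linux/ethtool.h>

static const struct ethtool_ops example_ethtool_ops = {
	.begin		= check_if_running,	/* e.g. netif_running(dev) ? 0 : -EINVAL */
	.get_drvinfo	= get_drvinfo,
	.nway_reset	= nway_reset,
	.get_link	= get_link,
	.get_msglevel	= get_msglevel,
	.set_msglevel	= set_msglevel,
};
/* installed at probe time: dev->ethtool_ops = &example_ethtool_ops; */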
5922 case 10: {
5923
5924 /** STRUCT: struct type: ethtool_ops, struct name: ethtool_ops **/
5925
5926
5927 /* content: static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)*/
5928 /* LDV_COMMENT_BEGIN_PREP */
/* [duplicate driver macro preamble elided; the full set of definitions appears once above, in the starfire_resume block] */
6027 /* LDV_COMMENT_END_PREP */
6028 /* LDV_COMMENT_FUNCTION_CALL Function from the "get_drvinfo" field of the driver callback structure "ethtool_ops" */
6029 ldv_handler_precall();
6030 get_drvinfo( var_group1, var_group4);
6031 /* LDV_COMMENT_BEGIN_PREP */
6032 #ifdef CONFIG_PM
6033 #endif
6034 #ifdef CONFIG_PM
6035 #endif
6036 #ifdef MODULE
6037 #endif
6038 /* LDV_COMMENT_END_PREP */
6039
6040
6041
6042
6043 }
6044
6045 break;
6046 case 11: {
6047
6048 /** STRUCT: struct type: ethtool_ops, struct name: ethtool_ops **/
6049
6050
6051 /* content: static int nway_reset(struct net_device *dev)*/
6052 /* LDV_COMMENT_BEGIN_PREP */
/* [duplicate driver macro preamble elided; the full set of definitions appears once above, in the starfire_resume block] */
6151 /* LDV_COMMENT_END_PREP */
6152 /* LDV_COMMENT_FUNCTION_CALL Function from the "nway_reset" field of the driver callback structure "ethtool_ops" */
6153 ldv_handler_precall();
6154 nway_reset( var_group1);
6155 /* LDV_COMMENT_BEGIN_PREP */
6156 #ifdef CONFIG_PM
6157 #endif
6158 #ifdef CONFIG_PM
6159 #endif
6160 #ifdef MODULE
6161 #endif
6162 /* LDV_COMMENT_END_PREP */
6163
6164
6165
6166
6167 }
6168
6169 break;
6170 case 12: {
6171
6172 /** STRUCT: struct type: ethtool_ops, struct name: ethtool_ops **/
6173
6174
6175 /* content: static u32 get_link(struct net_device *dev)*/
6176 /* LDV_COMMENT_BEGIN_PREP */
/* [duplicate driver macro preamble elided; the full set of definitions appears once above, in the starfire_resume block] */
6275 /* LDV_COMMENT_END_PREP */
6276 /* LDV_COMMENT_FUNCTION_CALL Function from the "get_link" field of the driver callback structure "ethtool_ops" */
6277 ldv_handler_precall();
6278 get_link( var_group1);
6279 /* LDV_COMMENT_BEGIN_PREP */
6280 #ifdef CONFIG_PM
6281 #endif
6282 #ifdef CONFIG_PM
6283 #endif
6284 #ifdef MODULE
6285 #endif
6286 /* LDV_COMMENT_END_PREP */
6287
6288
6289
6290
6291 }
6292
6293 break;
6294 case 13: {
6295
6296 /** STRUCT: struct type: ethtool_ops, struct name: ethtool_ops **/
6297
6298
6299 /* content: static u32 get_msglevel(struct net_device *dev)*/
6300 /* LDV_COMMENT_BEGIN_PREP */
/* [duplicate driver macro preamble elided; the full set of definitions appears once above, in the starfire_resume block] */
6399 /* LDV_COMMENT_END_PREP */
6400 /* LDV_COMMENT_FUNCTION_CALL Function from field "get_msglevel" from driver structure with callbacks "ethtool_ops" */
6401 ldv_handler_precall();
6402 get_msglevel( var_group1);
6403 /* LDV_COMMENT_BEGIN_PREP */
6404 #ifdef CONFIG_PM
6405 #endif
6406 #ifdef CONFIG_PM
6407 #endif
6408 #ifdef MODULE
6409 #endif
6410 /* LDV_COMMENT_END_PREP */
6411
6412
6413
6414
6415 }
6416
6417 break;
6418 case 14: {
6419
6420 /** STRUCT: struct type: ethtool_ops, struct name: ethtool_ops **/
6421
6422
6423 /* content: static void set_msglevel(struct net_device *dev, u32 val)*/
6424 /* LDV_COMMENT_BEGIN_PREP */
... (preprocessor context elided: the same #define/#ifdef block repeated verbatim for every callback, as shown earlier) ...
6523 /* LDV_COMMENT_END_PREP */
6524 /* LDV_COMMENT_FUNCTION_CALL Function from field "set_msglevel" from driver structure with callbacks "ethtool_ops" */
6525 ldv_handler_precall();
6526 set_msglevel( var_group1, var_set_msglevel_26_p1);
6527 /* LDV_COMMENT_BEGIN_PREP */
6528 #ifdef CONFIG_PM
6529 #endif
6530 #ifdef CONFIG_PM
6531 #endif
6532 #ifdef MODULE
6533 #endif
6534 /* LDV_COMMENT_END_PREP */
6535
6536
6537
6538
6539 }
6540
6541 break;
6542 case 15: {
6543
6544 /** STRUCT: struct type: ethtool_ops, struct name: ethtool_ops **/
6545
6546
6547 /* content: static int get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd)*/
6548 /* LDV_COMMENT_BEGIN_PREP */
... (preprocessor context elided: the same #define/#ifdef block repeated verbatim for every callback, as shown earlier) ...
6647 /* LDV_COMMENT_END_PREP */
6648 /* LDV_COMMENT_FUNCTION_CALL Function from field "get_link_ksettings" from driver structure with callbacks "ethtool_ops" */
6649 ldv_handler_precall();
6650 get_link_ksettings( var_group1, var_group5);
6651 /* LDV_COMMENT_BEGIN_PREP */
6652 #ifdef CONFIG_PM
6653 #endif
6654 #ifdef CONFIG_PM
6655 #endif
6656 #ifdef MODULE
6657 #endif
6658 /* LDV_COMMENT_END_PREP */
6659
6660
6661
6662
6663 }
6664
6665 break;
6666 case 16: {
6667
6668 /** STRUCT: struct type: ethtool_ops, struct name: ethtool_ops **/
6669
6670
6671 /* content: static int set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd)*/
6672 /* LDV_COMMENT_BEGIN_PREP */
... (preprocessor context elided: the same #define/#ifdef block repeated verbatim for every callback, as shown earlier) ...
6771 /* LDV_COMMENT_END_PREP */
6772 /* LDV_COMMENT_FUNCTION_CALL Function from field "set_link_ksettings" from driver structure with callbacks "ethtool_ops" */
6773 ldv_handler_precall();
6774 set_link_ksettings( var_group1, var_set_link_ksettings_22_p1);
6775 /* LDV_COMMENT_BEGIN_PREP */
6776 #ifdef CONFIG_PM
6777 #endif
6778 #ifdef CONFIG_PM
6779 #endif
6780 #ifdef MODULE
6781 #endif
6782 /* LDV_COMMENT_END_PREP */
6783
6784
6785
6786
6787 }
6788
6789 break;
6790 case 17: {
6791
6792 /** STRUCT: struct type: pci_driver, struct name: starfire_driver **/
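/* Editor's note: ldv_s_starfire_driver_pci_driver is the harness's state
   counter for the PCI callbacks -- probe (this case) runs only in state 0
   and increments it, so the remove case below can only fire after a
   successful probe. */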
6793 if(ldv_s_starfire_driver_pci_driver==0) {
6794
6795 /* content: static int starfire_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)*/
6796 /* LDV_COMMENT_BEGIN_PREP */
... (preprocessor context elided: the same #define/#ifdef block repeated verbatim for every callback, as shown earlier) ...
6871 /* LDV_COMMENT_END_PREP */
6872 /* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "starfire_driver". Standard function test for a correct return result. */
6873 res_starfire_init_one_2 = starfire_init_one( var_group6, var_starfire_init_one_2_p1);
6874 ldv_check_return_value(res_starfire_init_one_2);
6875 ldv_check_return_value_probe(res_starfire_init_one_2);
6876 if(res_starfire_init_one_2)
6877 goto ldv_module_exit;
6878 /* LDV_COMMENT_BEGIN_PREP */
... (post-call preprocessor guards elided: empty VLAN_SUPPORT / final_version / CONFIG_PM / MODULE #ifdef-#endif pairs, as above) ...
6899 /* LDV_COMMENT_END_PREP */
6900 ldv_s_starfire_driver_pci_driver++;
6901
6902 }
6903
6904 }
6905
6906 break;
6907 case 18: {
6908
6909 /** STRUCT: struct type: pci_driver, struct name: starfire_driver **/
6910 if(ldv_s_starfire_driver_pci_driver==1) {
6911
6912 /* content: static void starfire_remove_one(struct pci_dev *pdev)*/
6913 /* LDV_COMMENT_BEGIN_PREP */
... (preprocessor context elided: the same #define/#ifdef block repeated verbatim for every callback, as shown earlier) ...
7014 /* LDV_COMMENT_END_PREP */
7015 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "starfire_driver" */
7016 ldv_handler_precall();
7017 starfire_remove_one( var_group6);
7018 /* LDV_COMMENT_BEGIN_PREP */
7019 #ifdef CONFIG_PM
7020 #endif
7021 #ifdef MODULE
7022 #endif
7023 /* LDV_COMMENT_END_PREP */
7024 ldv_s_starfire_driver_pci_driver=0;
7025
7026 }
7027
7028 }
7029
7030 break;
7031 case 19: {
7032
7033 /** STRUCT: struct type: pci_driver, struct name: starfire_driver **/
7034
7035
7036 /* content: static int starfire_suspend(struct pci_dev *pdev, pm_message_t state)*/
7037 /* LDV_COMMENT_BEGIN_PREP */
... (preprocessor context elided: the same #define/#ifdef block repeated verbatim for every callback, as shown earlier; the open #ifdef CONFIG_PM guarding the call follows) ...
7136 #ifdef CONFIG_PM
7137 /* LDV_COMMENT_END_PREP */
7138 /* LDV_COMMENT_FUNCTION_CALL Function from field "suspend" from driver structure with callbacks "starfire_driver" */
7139 ldv_handler_precall();
7140 starfire_suspend( var_group6, var_starfire_suspend_29_p1);
7141 /* LDV_COMMENT_BEGIN_PREP */
7142 #endif
7143 #ifdef CONFIG_PM
7144 #endif
7145 #ifdef MODULE
7146 #endif
7147 /* LDV_COMMENT_END_PREP */
7148
7149
7150
7151
7152 }
7153
7154 break;
7155 case 20: {
7156
7157 /** STRUCT: struct type: pci_driver, struct name: starfire_driver **/
7158
7159
7160 /* content: static int starfire_resume(struct pci_dev *pdev)*/
7161 /* LDV_COMMENT_BEGIN_PREP */
... (preprocessor context elided: the same #define/#ifdef block repeated verbatim for every callback, as shown earlier; the open #ifdef CONFIG_PM guarding the call follows) ...
7260 #ifdef CONFIG_PM
7261 /* LDV_COMMENT_END_PREP */
7262 /* LDV_COMMENT_FUNCTION_CALL Function from field "resume" from driver structure with callbacks "starfire_driver" */
7263 ldv_handler_precall();
7264 starfire_resume( var_group6);
7265 /* LDV_COMMENT_BEGIN_PREP */
7266 #endif
7267 #ifdef CONFIG_PM
7268 #endif
7269 #ifdef MODULE
7270 #endif
7271 /* LDV_COMMENT_END_PREP */
7272
7273
7274
7275
7276 }
7277
7278 break;
7279 case 21: {
7280
7281 /** CALLBACK SECTION request_irq **/
7282 LDV_IN_INTERRUPT=2;
7283
7284 /* content: static irqreturn_t intr_handler(int irq, void *dev_instance)*/
7285 /* LDV_COMMENT_BEGIN_PREP */
... (preprocessor context elided: the same #define/#ifdef block repeated verbatim for every callback, as shown earlier) ...
7374 /* LDV_COMMENT_END_PREP */
7375 /* LDV_COMMENT_FUNCTION_CALL */
7376 ldv_handler_precall();
7377 intr_handler( var_intr_handler_10_p0, var_intr_handler_10_p1);
7378 /* LDV_COMMENT_BEGIN_PREP */
... (post-call preprocessor guards elided: empty final_version / VLAN_SUPPORT / CONFIG_PM / MODULE #ifdef-#endif pairs, as above) ...
7395 /* LDV_COMMENT_END_PREP */
7396 LDV_IN_INTERRUPT=1;
7397
7398
7399
7400 }
7401
7402 break;
7403 default: break;
7404
7405 }
7406
7407 }
7408
7409 ldv_module_exit:
7410
7411 /** INIT: init_type: ST_MODULE_EXIT **/
7412 /* content: static void __exit starfire_cleanup (void)*/
7413 /* LDV_COMMENT_BEGIN_PREP */
... (preprocessor context elided: the same #define/#ifdef block repeated verbatim for every callback, as shown earlier) ...
7518 /* LDV_COMMENT_END_PREP */
7519 /* LDV_COMMENT_FUNCTION_CALL The kernel calls the driver release function before the driver is unloaded. This function is declared as "MODULE_EXIT(function name)". */
7520 ldv_handler_precall();
7521 starfire_cleanup();
7522
7523 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver is unloaded. */
7524 ldv_final: ldv_check_final_state();
7525
7526 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
7527 return;
7528
7529 }
7530 #endif
7531
/* LDV_COMMENT_END_MAIN */
2 #include <linux/kernel.h>
3 bool ldv_is_err(const void *ptr);
4 bool ldv_is_err_or_null(const void *ptr);
5 void* ldv_err_ptr(long error);
6 long ldv_ptr_err(const void *ptr);
7
8 extern void ldv_dma_map_page(void);
9 extern void ldv_dma_mapping_error(void);
10 #line 1 "/home/ldvuser/ldv/ref_launches/work/current--X--drivers--X--defaultlinux-4.10-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.10-rc1.tar.xz/csd_deg_dscv/12798/dscv_tempdir/dscv/ri/331_1a/drivers/net/ethernet/adaptec/starfire.c"
11
12 /* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
13 /*
14 Written 1998-2000 by Donald Becker.
15
16 Current maintainer is Ion Badulescu <ionut ta badula tod org>. Please
17 send all bug reports to me, and not to Donald Becker, as this code
18 has been heavily modified from Donald's original version.
19
20 This software may be used and distributed according to the terms of
21 the GNU General Public License (GPL), incorporated herein by reference.
22 Drivers based on or derived from this code fall under the GPL and must
23 retain the authorship, copyright and license notice. This file is not
24 a complete program and may only be used when the entire operating
25 system is licensed under the GPL.
26
27 The information below comes from Donald Becker's original driver:
28
29 The author may be reached as becker@scyld.com, or C/O
30 Scyld Computing Corporation
31 410 Severn Ave., Suite 210
32 Annapolis MD 21403
33
34 Support and updates available at
35 http://www.scyld.com/network/starfire.html
36 [link no longer provides useful info -jgarzik]
37
38 */
39
40 #define DRV_NAME "starfire"
41 #define DRV_VERSION "2.1"
42 #define DRV_RELDATE "July 6, 2008"
43
44 #include <linux/interrupt.h>
45 #include <linux/module.h>
46 #include <linux/kernel.h>
47 #include <linux/pci.h>
48 #include <linux/netdevice.h>
49 #include <linux/etherdevice.h>
50 #include <linux/init.h>
51 #include <linux/delay.h>
52 #include <linux/crc32.h>
53 #include <linux/ethtool.h>
54 #include <linux/mii.h>
55 #include <linux/if_vlan.h>
56 #include <linux/mm.h>
57 #include <linux/firmware.h>
58 #include <asm/processor.h> /* Processor type for cache alignment. */
59 #include <linux/uaccess.h>
60 #include <asm/io.h>
61
62 /*
63 * The current frame processor firmware fails to checksum a fragment
64 * of length 1. If and when this is fixed, the #define below can be removed.
65 */
66 #define HAS_BROKEN_FIRMWARE
67
68 /*
69 * If using the broken firmware, data must be padded to the next 32-bit boundary.
70 */
71 #ifdef HAS_BROKEN_FIRMWARE
72 #define PADDING_MASK 3
73 #endif
74
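/*
 * Editor's sketch (based on the comment above; not code from this driver):
 * padding to the next 32-bit boundary means rounding the length up with
 * PADDING_MASK:
 *
 *	padded_len = (skb->len + PADDING_MASK) & ~PADDING_MASK;
 *
 * e.g. a 61-byte frame becomes (61 + 3) & ~3 = 64 bytes, which skb_padto()
 * can then zero-fill.
 */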
75 /*
76 * Define this if using the driver with the zero-copy patch
77 */
78 #define ZEROCOPY
79
80 #if IS_ENABLED(CONFIG_VLAN_8021Q)
81 #define VLAN_SUPPORT
82 #endif
83
84 /* The user-configurable values.
85 These may be modified when a driver module is loaded.*/
86
87 /* Used for tuning interrupt latency vs. overhead. */
88 static int intr_latency;
89 static int small_frames;
90
91 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
92 static int max_interrupt_work = 20;
93 static int mtu;
94 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
95 The Starfire has a 512 element hash table based on the Ethernet CRC. */
96 static const int multicast_filter_limit = 512;
97 /* Whether to do TCP/UDP checksums in hardware */
98 static int enable_hw_cksum = 1;
99
100 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
101 /*
102 * Set the copy breakpoint for the copy-only-tiny-frames scheme.
103 * Setting to > 1518 effectively disables this feature.
104 *
105 * NOTE:
106  * The ia64 doesn't allow unaligned loads, even of integers misaligned
107  * on a 2-byte boundary. Thus we always force copying of packets, as
108  * the starfire doesn't allow misaligned DMAs ;-(
109 * 23/10/2000 - Jes
110 *
111 * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64,
112 * at least, having unaligned frames leads to a rather serious performance
113 * penalty. -Ion
114 */
115 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
116 static int rx_copybreak = PKT_BUF_SZ;
117 #else
118 static int rx_copybreak /* = 0 */;
119 #endif
120
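/*
 * Editor's sketch (not part of the original driver; the helper name is
 * hypothetical): the copy-only-tiny-frames scheme the knob above controls.
 * Frames shorter than rx_copybreak are copied into a fresh skb reserved at
 * a +2 offset, 16-byte aligning the IP header; larger frames are passed up
 * in the original ring buffer.
 */
#if 0	/* illustrative only */
static struct sk_buff *example_rx_maybe_copy(struct net_device *dev,
					     struct sk_buff *ring_skb,
					     int pkt_len)
{
	struct sk_buff *skb;

	if (pkt_len >= rx_copybreak) {
		skb_put(ring_skb, pkt_len);	/* pass the ring buffer up as-is */
		return ring_skb;
	}
	skb = netdev_alloc_skb(dev, pkt_len + 2);
	if (!skb)
		return NULL;			/* out of memory: caller drops */
	skb_reserve(skb, 2);			/* IP header to a 16-byte boundary */
	memcpy(skb_put(skb, pkt_len), ring_skb->data, pkt_len);
	return skb;
}
#endif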
121 /* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. */
122 #ifdef __sparc__
123 #define DMA_BURST_SIZE 64
124 #else
125 #define DMA_BURST_SIZE 128
126 #endif
127
128 /* Operational parameters that are set at compile time. */
129
130 /* The "native" ring sizes are either 256 or 2048.
131 However in some modes a descriptor may be marked to wrap the ring earlier.
132 */
133 #define RX_RING_SIZE 256
134 #define TX_RING_SIZE 32
135 /* The completion queues are fixed at 1024 entries, i.e. 4KB or 8KB. */
136 #define DONE_Q_SIZE 1024
137 /* All queues must be aligned on a 256-byte boundary */
138 #define QUEUE_ALIGN 256
139
140 #if RX_RING_SIZE > 256
141 #define RX_Q_ENTRIES Rx2048QEntries
142 #else
143 #define RX_Q_ENTRIES Rx256QEntries
144 #endif
145
146 /* Operational parameters that usually are not changed. */
147 /* Time in jiffies before concluding the transmitter is hung. */
148 #define TX_TIMEOUT (2 * HZ)
149
150 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
151 /* 64-bit dma_addr_t */
152 #define ADDR_64BITS /* This chip uses 64 bit addresses. */
153 #define netdrv_addr_t __le64
154 #define cpu_to_dma(x) cpu_to_le64(x)
155 #define dma_to_cpu(x) le64_to_cpu(x)
156 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
157 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
158 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
159 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
160 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
161 #else /* 32-bit dma_addr_t */
162 #define netdrv_addr_t __le32
163 #define cpu_to_dma(x) cpu_to_le32(x)
164 #define dma_to_cpu(x) le32_to_cpu(x)
165 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
166 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
167 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
168 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
169 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
170 #endif
171
172 #define skb_first_frag_len(skb) skb_headlen(skb)
173 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
174
175 /* Firmware names */
176 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
177 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
178
179 /* These identify the driver base version and may not be removed. */
180 static const char version[] =
181 KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n"
182 " (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
183
184 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
185 MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
186 MODULE_LICENSE("GPL");
187 MODULE_VERSION(DRV_VERSION);
188 MODULE_FIRMWARE(FIRMWARE_RX);
189 MODULE_FIRMWARE(FIRMWARE_TX);
190
191 module_param(max_interrupt_work, int, 0);
192 module_param(mtu, int, 0);
193 module_param(debug, int, 0);
194 module_param(rx_copybreak, int, 0);
195 module_param(intr_latency, int, 0);
196 module_param(small_frames, int, 0);
197 module_param(enable_hw_cksum, int, 0);
198 MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
199 MODULE_PARM_DESC(mtu, "MTU (all boards)");
200 MODULE_PARM_DESC(debug, "Debug level (0-6)");
201 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
202 MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
203 MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
204 MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");
205
206 /*
207 Theory of Operation
208
209 I. Board Compatibility
210
211 This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.
212
213 II. Board-specific settings
214
215 III. Driver operation
216
217 IIIa. Ring buffers
218
219 The Starfire hardware uses multiple fixed-size descriptor queues/rings. The
220 ring sizes are fixed by the hardware, but may optionally be wrapped
221 earlier by the END bit in the descriptor.
222 This driver uses that hardware queue size for the Rx ring, where a large
223 number of entries has no ill effect beyond increasing the potential backlog.
224 The Tx ring is wrapped with the END bit, since a large hardware Tx queue
225 disables the queue layer priority ordering and we have no mechanism to
226 utilize the hardware two-level priority queue. When modifying the
227 RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
228 levels.
229
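   (Editor's illustration: with TX_RING_SIZE at 32, the driver marks its last
   Tx descriptor with the wrap/END bit -- see TxRingWrap below -- so the chip
   returns to entry 0 early, even though the ring is natively 256 entries.)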
230 IIIb/c. Transmit/Receive Structure
231
232 See the Adaptec manual for the many possible structures, and options for
233 each structure. There are far too many to document all of them here.
234
235 For transmit this driver uses type 0/1 transmit descriptors (depending
236 on the 32/64 bitness of the architecture), and relies on automatic
237 minimum-length padding. It does not use the completion queue
238 consumer index, but instead checks for non-zero status entries.
239
240 For receive this driver uses type 2/3 receive descriptors. The driver
241 allocates full frame size skbuffs for the Rx ring buffers, so all frames
242 should fit in a single descriptor. The driver does not use the completion
243 queue consumer index, but instead checks for non-zero status entries.
244
245 When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
246 is allocated and the frame is copied to the new skbuff. When the incoming
247 frame is larger, the skbuff is passed directly up the protocol stack.
248 Buffers consumed this way are replaced by newly allocated skbuffs in a later
249 phase of receive.
250
251 A notable aspect of operation is that unaligned buffers are not permitted by
252 the Starfire hardware. Thus the IP header at offset 14 in an ethernet frame
253 isn't longword aligned, which may cause problems on some machines,
254 e.g. Alphas and IA64. For these architectures, the driver is forced to copy
255 the frame into a new skbuff unconditionally. Copied frames are put into the
256 skbuff at an offset of "+2", thus 16-byte aligning the IP header.
257
258 IIId. Synchronization
259
260 The driver runs as two independent, single-threaded flows of control. One
261 is the send-packet routine, which enforces single-threaded use by the
262 dev->tbusy flag. The other thread is the interrupt handler, which is single
263 threaded by the hardware and interrupt handling software.
264
265 The send packet thread has partial control over the Tx ring and the netif_queue
266 status. If the number of free Tx slots in the ring falls below a certain number
267 (currently hardcoded to 4), it signals the upper layer to stop the queue.
268
269 The interrupt handler has exclusive control over the Rx ring and records stats
270 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
271 empty by incrementing the dirty_tx mark. Iff the netif_queue is stopped and the
272 number of free Tx slots is above the threshold, it signals the upper layer to
273 restart the queue.
274
275 IV. Notes
276
277 IVb. References
278
279 The Adaptec Starfire manuals, available only from Adaptec.
280 http://www.scyld.com/expert/100mbps.html
281 http://www.scyld.com/expert/NWay.html
282
283 IVc. Errata
284
285 - StopOnPerr is broken, don't enable
286 - Hardware ethernet padding exposes random data, perform software padding
287 instead (unverified -- works correctly for all the hardware I have)
288
289 */
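/*
 * Editor's sketch (hypothetical, condensed from section IIId above): the
 * Tx flow control in its minimal form. The sender stops the queue when
 * fewer than 4 free slots remain; the interrupt handler restarts it once
 * reaping has brought the ring back above that threshold.
 */
#if 0	/* illustrative only */
	/* in start_tx(), after queueing a frame: */
	if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
		netif_stop_queue(dev);

	/* in the interrupt handler, after advancing np->dirty_tx: */
	if (netif_queue_stopped(dev) &&
	    (np->cur_tx - np->dirty_tx) + 4 <= TX_RING_SIZE)
		netif_wake_queue(dev);
#endif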
290
291
292
293 enum chip_capability_flags {CanHaveMII=1, };
294
295 enum chipset {
296 CH_6915 = 0,
297 };
298
299 static const struct pci_device_id starfire_pci_tbl[] = {
300 { PCI_VDEVICE(ADAPTEC, 0x6915), CH_6915 },
301 { 0, }
302 };
303 MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);
304
305 /* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
306 static const struct chip_info {
307 const char *name;
308 int drv_flags;
309 } netdrv_tbl[] = {
310 { "Adaptec Starfire 6915", CanHaveMII },
311 };
312
313
314 /* Offsets to the device registers.
315 Unlike software-only systems, device drivers interact with complex hardware.
316 It's not useful to define symbolic names for every register bit in the
317 device. Such names can only partially document the semantics, and they make
318 the driver longer and more difficult to read.
319 In general, only the important configuration values or bits changed
320 multiple times should be defined symbolically.
321 */
322 enum register_offsets {
323 PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
324 IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
325 MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000,
326 GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
327 TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
328 TxRingHiAddr=0x5009C, /* 64 bit address extension. */
329 TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
330 TxThreshold=0x500B0,
331 CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
332 RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
333 CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
334 RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
335 RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
336 TxMode=0x55000, VlanType=0x55064,
337 PerfFilterTable=0x56000, HashTable=0x56100,
338 TxGfpMem=0x58000, RxGfpMem=0x5a000,
339 };
340
341 /*
342 * Bits in the interrupt status/mask registers.
343 * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register
344 * enables all the interrupt sources that are or'ed into those status bits.
345 */
346 enum intr_status_bits {
347 IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
348 IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
349 IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
350 IntrTxComplQLow=0x200000, IntrPCI=0x100000,
351 IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
352 IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
353 IntrNormalSummary=0x8000, IntrTxDone=0x4000,
354 IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
355 IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
356 IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
357 IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
358 IntrNoTxCsum=0x20, IntrTxBadID=0x10,
359 IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
360 IntrTxGfp=0x02, IntrPCIPad=0x01,
361 /* not quite bits */
362 IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
363 IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
364 IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
365 };
366
367 /* Bits in the RxFilterMode register. */
368 enum rx_mode_bits {
369 AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
370 AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30,
371 PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200,
372 WakeupOnGFP=0x0800,
373 };
374
375 /* Bits in the TxMode register */
376 enum tx_mode_bits {
377 MiiSoftReset=0x8000, MIILoopback=0x4000,
378 TxFlowEnable=0x0800, RxFlowEnable=0x0400,
379 PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01,
380 };
381
382 /* Bits in the TxDescCtrl register. */
383 enum tx_ctrl_bits {
384 TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
385 TxDescSpace128=0x30, TxDescSpace256=0x40,
386 TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
387 TxDescType3=0x03, TxDescType4=0x04,
388 TxNoDMACompletion=0x08,
389 TxDescQAddr64bit=0x80, TxDescQAddr32bit=0,
390 TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
391 TxDMABurstSizeShift=8,
392 };
393
394 /* Bits in the RxDescQCtrl register. */
395 enum rx_ctrl_bits {
396 RxBufferLenShift=16, RxMinDescrThreshShift=0,
397 RxPrefetchMode=0x8000, RxVariableQ=0x2000,
398 Rx2048QEntries=0x4000, Rx256QEntries=0,
399 RxDescAddr64bit=0x1000, RxDescAddr32bit=0,
400 RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0,
401 RxDescSpace4=0x000, RxDescSpace8=0x100,
402 RxDescSpace16=0x200, RxDescSpace32=0x300,
403 RxDescSpace64=0x400, RxDescSpace128=0x500,
404 RxConsumerWrEn=0x80,
405 };
406
407 /* Bits in the RxDMACtrl register. */
408 enum rx_dmactrl_bits {
409 RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000,
410 RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000,
411 RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000,
412 RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000,
413 RxChecksumRejectTCPOnly=0x01000000,
414 RxCompletionQ2Enable=0x800000,
415 RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000,
416 RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000,
417 RxDMAQ2NonIP=0x400000,
418 RxUseBackupQueue=0x080000, RxDMACRC=0x040000,
419 RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8,
420 RxBurstSizeShift=0,
421 };
422
423 /* Bits in the RxCompletionAddr register */
424 enum rx_compl_bits {
425 RxComplQAddr64bit=0x80, RxComplQAddr32bit=0,
426 RxComplProducerWrEn=0x40,
427 RxComplType0=0x00, RxComplType1=0x10,
428 RxComplType2=0x20, RxComplType3=0x30,
429 RxComplThreshShift=0,
430 };
431
432 /* Bits in the TxCompletionAddr register */
433 enum tx_compl_bits {
434 TxComplQAddr64bit=0x80, TxComplQAddr32bit=0,
435 TxComplProducerWrEn=0x40,
436 TxComplIntrStatus=0x20,
437 CommonQueueMode=0x10,
438 TxComplThreshShift=0,
439 };
440
441 /* Bits in the GenCtrl register */
442 enum gen_ctrl_bits {
443 RxEnable=0x05, TxEnable=0x0a,
444 RxGFPEnable=0x10, TxGFPEnable=0x20,
445 };
446
447 /* Bits in the IntrTimerCtrl register */
448 enum intr_ctrl_bits {
449 Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100,
450 SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600,
451 IntrLatencyMask=0x1f,
452 };
453
454 /* The Rx and Tx buffer descriptors. */
455 struct starfire_rx_desc {
456 netdrv_addr_t rxaddr;
457 };
458 enum rx_desc_bits {
459 RxDescValid=1, RxDescEndRing=2,
460 };
461
462 /* Completion queue entry. */
463 struct short_rx_done_desc {
464 __le32 status; /* Low 16 bits is length. */
465 };
466 struct basic_rx_done_desc {
467 __le32 status; /* Low 16 bits is length. */
468 __le16 vlanid;
469 __le16 status2;
470 };
471 struct csum_rx_done_desc {
472 __le32 status; /* Low 16 bits is length. */
473 __le16 csum; /* Partial checksum */
474 __le16 status2;
475 };
476 struct full_rx_done_desc {
477 __le32 status; /* Low 16 bits is length. */
478 __le16 status3;
479 __le16 status2;
480 __le16 vlanid;
481 __le16 csum; /* partial checksum */
482 __le32 timestamp;
483 };
484 /* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */
485 #ifdef VLAN_SUPPORT
486 typedef struct full_rx_done_desc rx_done_desc;
487 #define RxComplType RxComplType3
488 #else /* not VLAN_SUPPORT */
489 typedef struct csum_rx_done_desc rx_done_desc;
490 #define RxComplType RxComplType2
491 #endif /* not VLAN_SUPPORT */
492
493 enum rx_done_bits {
494 RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
495 };
496
497 /* Type 1 Tx descriptor. */
498 struct starfire_tx_desc_1 {
499 __le32 status; /* Upper bits are status, lower 16 length. */
500 __le32 addr;
501 };
502
503 /* Type 2 Tx descriptor. */
504 struct starfire_tx_desc_2 {
505 __le32 status; /* Upper bits are status, lower 16 length. */
506 __le32 reserved;
507 __le64 addr;
508 };
509
510 #ifdef ADDR_64BITS
511 typedef struct starfire_tx_desc_2 starfire_tx_desc;
512 #define TX_DESC_TYPE TxDescType2
513 #else /* not ADDR_64BITS */
514 typedef struct starfire_tx_desc_1 starfire_tx_desc;
515 #define TX_DESC_TYPE TxDescType1
516 #endif /* not ADDR_64BITS */
517 #define TX_DESC_SPACING TxDescSpaceUnlim
518
519 enum tx_desc_bits {
520 TxDescID=0xB0000000,
521 TxCRCEn=0x01000000, TxDescIntr=0x08000000,
522 TxRingWrap=0x04000000, TxCalTCP=0x02000000,
523 };
524 struct tx_done_desc {
525 __le32 status; /* timestamp, index. */
526 #if 0
527 __le32 intrstatus; /* interrupt status */
528 #endif
529 };
530
531 struct rx_ring_info {
532 struct sk_buff *skb;
533 dma_addr_t mapping;
534 };
535 struct tx_ring_info {
536 struct sk_buff *skb;
537 dma_addr_t mapping;
538 unsigned int used_slots;
539 };
540
541 #define PHY_CNT 2
542 struct netdev_private {
543 /* Descriptor rings first for alignment. */
544 struct starfire_rx_desc *rx_ring;
545 starfire_tx_desc *tx_ring;
546 dma_addr_t rx_ring_dma;
547 dma_addr_t tx_ring_dma;
548 /* The addresses of rx/tx-in-place skbuffs. */
549 struct rx_ring_info rx_info[RX_RING_SIZE];
550 struct tx_ring_info tx_info[TX_RING_SIZE];
551 /* Pointers to completion queues (full pages). */
552 rx_done_desc *rx_done_q;
553 dma_addr_t rx_done_q_dma;
554 unsigned int rx_done;
555 struct tx_done_desc *tx_done_q;
556 dma_addr_t tx_done_q_dma;
557 unsigned int tx_done;
558 struct napi_struct napi;
559 struct net_device *dev;
560 struct pci_dev *pci_dev;
561 #ifdef VLAN_SUPPORT
562 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
563 #endif
564 void *queue_mem;
565 dma_addr_t queue_mem_dma;
566 size_t queue_mem_size;
567
568 /* Frequently used values: keep some adjacent for cache effect. */
569 spinlock_t lock;
570 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
571 unsigned int cur_tx, dirty_tx, reap_tx;
572 unsigned int rx_buf_sz; /* Based on MTU+slack. */
573 /* These values keep track of the transceiver/media in use. */
574 int speed100; /* Set if speed == 100MBit. */
575 u32 tx_mode;
576 u32 intr_timer_ctrl;
577 u8 tx_threshold;
578 /* MII transceiver section. */
579 struct mii_if_info mii_if; /* MII lib hooks/info */
580 int phy_cnt; /* MII device addresses. */
581 unsigned char phys[PHY_CNT]; /* MII device addresses. */
582 void __iomem *base;
583 };
584
585
586 static int mdio_read(struct net_device *dev, int phy_id, int location);
587 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
588 static int netdev_open(struct net_device *dev);
589 static void check_duplex(struct net_device *dev);
590 static void tx_timeout(struct net_device *dev);
591 static void init_ring(struct net_device *dev);
592 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
593 static irqreturn_t intr_handler(int irq, void *dev_instance);
594 static void netdev_error(struct net_device *dev, int intr_status);
595 static int __netdev_rx(struct net_device *dev, int *quota);
596 static int netdev_poll(struct napi_struct *napi, int budget);
597 static void refill_rx_ring(struct net_device *dev);
598 static void netdev_error(struct net_device *dev, int intr_status);
599 static void set_rx_mode(struct net_device *dev);
600 static struct net_device_stats *get_stats(struct net_device *dev);
601 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
602 static int netdev_close(struct net_device *dev);
603 static void netdev_media_change(struct net_device *dev);
604 static const struct ethtool_ops ethtool_ops;
605
606
607 #ifdef VLAN_SUPPORT
608 static int netdev_vlan_rx_add_vid(struct net_device *dev,
609 __be16 proto, u16 vid)
610 {
611 struct netdev_private *np = netdev_priv(dev);
612
613 spin_lock(&np->lock);
614 if (debug > 1)
615 printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
616 set_bit(vid, np->active_vlans);
617 set_rx_mode(dev);
618 spin_unlock(&np->lock);
619
620 return 0;
621 }
622
623 static int netdev_vlan_rx_kill_vid(struct net_device *dev,
624 __be16 proto, u16 vid)
625 {
626 struct netdev_private *np = netdev_priv(dev);
627
628 spin_lock(&np->lock);
629 if (debug > 1)
630 printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
631 clear_bit(vid, np->active_vlans);
632 set_rx_mode(dev);
633 spin_unlock(&np->lock);
634
635 return 0;
636 }
637 #endif /* VLAN_SUPPORT */
638
639
640 static const struct net_device_ops netdev_ops = {
641 .ndo_open = netdev_open,
642 .ndo_stop = netdev_close,
643 .ndo_start_xmit = start_tx,
644 .ndo_tx_timeout = tx_timeout,
645 .ndo_get_stats = get_stats,
646 .ndo_set_rx_mode = set_rx_mode,
647 .ndo_do_ioctl = netdev_ioctl,
648 .ndo_set_mac_address = eth_mac_addr,
649 .ndo_validate_addr = eth_validate_addr,
650 #ifdef VLAN_SUPPORT
651 .ndo_vlan_rx_add_vid = netdev_vlan_rx_add_vid,
652 .ndo_vlan_rx_kill_vid = netdev_vlan_rx_kill_vid,
653 #endif
654 };
655
656 static int starfire_init_one(struct pci_dev *pdev,
657 const struct pci_device_id *ent)
658 {
659 struct device *d = &pdev->dev;
660 struct netdev_private *np;
661 int i, irq, chip_idx = ent->driver_data;
662 struct net_device *dev;
663 long ioaddr;
664 void __iomem *base;
665 int drv_flags, io_size;
666 int boguscnt;
667
668 /* when built into the kernel, we only print version if device is found */
669 #ifndef MODULE
670 static int printed_version;
671 if (!printed_version++)
672 printk(version);
673 #endif
674
675 if (pci_enable_device (pdev))
676 return -EIO;
677
678 ioaddr = pci_resource_start(pdev, 0);
679 io_size = pci_resource_len(pdev, 0);
680 if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
681 dev_err(d, "no PCI MEM resources, aborting\n");
682 return -ENODEV;
683 }
684
685 dev = alloc_etherdev(sizeof(*np));
686 if (!dev)
687 return -ENOMEM;
688
689 SET_NETDEV_DEV(dev, &pdev->dev);
690
691 irq = pdev->irq;
692
693 if (pci_request_regions (pdev, DRV_NAME)) {
694 dev_err(d, "cannot reserve PCI resources, aborting\n");
695 goto err_out_free_netdev;
696 }
697
698 base = ioremap(ioaddr, io_size);
699 if (!base) {
700 dev_err(d, "cannot remap %#x @ %#lx, aborting\n",
701 io_size, ioaddr);
702 goto err_out_free_res;
703 }
704
705 pci_set_master(pdev);
706
707 /* enable MWI -- it vastly improves Rx performance on sparc64 */
708 pci_try_set_mwi(pdev);
709
710 #ifdef ZEROCOPY
711 /* Starfire can do TCP/UDP checksumming */
712 if (enable_hw_cksum)
713 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
714 #endif /* ZEROCOPY */
715
716 #ifdef VLAN_SUPPORT
717 dev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
718 #endif /* VLAN_SUPPORT */
719 #ifdef ADDR_64BITS
720 dev->features |= NETIF_F_HIGHDMA;
721 #endif /* ADDR_64BITS */
722
723 /* Serial EEPROM reads are hidden by the hardware. */
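/* The six address bytes appear to be stored at EEPROMCtrl + 15..20 in
   reverse order, hence the "20 - i" indexing below. */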
724 for (i = 0; i < 6; i++)
725 dev->dev_addr[i] = readb(base + EEPROMCtrl + 20 - i);
726
727 #if ! defined(final_version) /* Dump the EEPROM contents during development. */
728 if (debug > 4)
729 for (i = 0; i < 0x20; i++)
730 printk("%2.2x%s",
731 (unsigned int)readb(base + EEPROMCtrl + i),
732 i % 16 != 15 ? " " : "\n");
733 #endif
734
735 /* Issue soft reset */
736 writel(MiiSoftReset, base + TxMode);
737 udelay(1000);
738 writel(0, base + TxMode);
739
740 /* Reset the chip to erase previous misconfiguration. */
741 writel(1, base + PCIDeviceConfig);
742 boguscnt = 1000;
743 while (--boguscnt > 0) {
744 udelay(10);
745 if ((readl(base + PCIDeviceConfig) & 1) == 0)
746 break;
747 }
748 if (boguscnt == 0)
749 printk("%s: chipset reset never completed!\n", dev->name);
750 /* wait a little longer */
751 udelay(1000);
752
753 np = netdev_priv(dev);
754 np->dev = dev;
755 np->base = base;
756 spin_lock_init(&np->lock);
757 pci_set_drvdata(pdev, dev);
758
759 np->pci_dev = pdev;
760
761 np->mii_if.dev = dev;
762 np->mii_if.mdio_read = mdio_read;
763 np->mii_if.mdio_write = mdio_write;
764 np->mii_if.phy_id_mask = 0x1f;
765 np->mii_if.reg_num_mask = 0x1f;
766
767 drv_flags = netdrv_tbl[chip_idx].drv_flags;
768
769 np->speed100 = 1;
770
771 /* timer resolution is 128 * 0.8us */
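/* intr_latency is evidently given in microseconds; one timer tick is
   128 * 0.8us = 102.4us, so (intr_latency * 10) / 1024 converts
   microseconds to ticks before masking. */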
772 np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
773 Timer10X | EnableIntrMasking;
774
775 if (small_frames > 0) {
776 np->intr_timer_ctrl |= SmallFrameBypass;
777 switch (small_frames) {
778 case 1 ... 64:
779 np->intr_timer_ctrl |= SmallFrame64;
780 break;
781 case 65 ... 128:
782 np->intr_timer_ctrl |= SmallFrame128;
783 break;
784 case 129 ... 256:
785 np->intr_timer_ctrl |= SmallFrame256;
786 break;
787 default:
788 np->intr_timer_ctrl |= SmallFrame512;
789 if (small_frames > 512)
790 printk("Adjusting small_frames down to 512\n");
791 break;
792 }
793 }
794
795 dev->netdev_ops = &netdev_ops;
796 dev->watchdog_timeo = TX_TIMEOUT;
797 dev->ethtool_ops = &ethtool_ops;
798
799 netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);
800
801 if (mtu)
802 dev->mtu = mtu;
803
804 if (register_netdev(dev))
805 goto err_out_cleardev;
806
807 printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
808 dev->name, netdrv_tbl[chip_idx].name, base,
809 dev->dev_addr, irq);
810
811 if (drv_flags & CanHaveMII) {
812 int phy, phy_idx = 0;
813 int mii_status;
814 for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
815 mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
816 mdelay(100);
817 boguscnt = 1000;
818 while (--boguscnt > 0)
819 if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
820 break;
821 if (boguscnt == 0) {
822 printk("%s: PHY#%d reset never completed!\n", dev->name, phy);
823 continue;
824 }
825 mii_status = mdio_read(dev, phy, MII_BMSR);
826 if (mii_status != 0) {
827 np->phys[phy_idx++] = phy;
828 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
829 printk(KERN_INFO "%s: MII PHY found at address %d, status "
830 "%#4.4x advertising %#4.4x.\n",
831 dev->name, phy, mii_status, np->mii_if.advertising);
832 /* there can be only one PHY on-board */
833 break;
834 }
835 }
836 np->phy_cnt = phy_idx;
837 if (np->phy_cnt > 0)
838 np->mii_if.phy_id = np->phys[0];
839 else
840 memset(&np->mii_if, 0, sizeof(np->mii_if));
841 }
842
843 printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
844 dev->name, enable_hw_cksum ? "enabled" : "disabled");
845 return 0;
846
847 err_out_cleardev:
848 iounmap(base);
849 err_out_free_res:
850 pci_release_regions (pdev);
851 err_out_free_netdev:
852 free_netdev(dev);
853 return -ENODEV;
854 }
855
856
857 /* Read the MII Management Data I/O (MDIO) interfaces. */
858 static int mdio_read(struct net_device *dev, int phy_id, int location)
859 {
860 struct netdev_private *np = netdev_priv(dev);
861 void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
862 int result, boguscnt=1000;
863 /* ??? Should we add a busy-wait here? */
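/* The top two status bits appear to form a valid/busy pair: spin until
   they read 10 (bit 31 set, bit 30 clear), i.e. the result is valid and
   the MII engine is idle, or until the bogus counter expires. */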
864 do {
865 result = readl(mdio_addr);
866 } while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
867 if (boguscnt == 0)
868 return 0;
869 if ((result & 0xffff) == 0xffff)
870 return 0;
871 return result & 0xffff;
872 }
873
874
875 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
876 {
877 struct netdev_private *np = netdev_priv(dev);
878 void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
879 writel(value, mdio_addr);
880 /* The busy-wait will occur before a read. */
881 }
882
883
884 static int netdev_open(struct net_device *dev)
885 {
886 const struct firmware *fw_rx, *fw_tx;
887 const __be32 *fw_rx_data, *fw_tx_data;
888 struct netdev_private *np = netdev_priv(dev);
889 void __iomem *ioaddr = np->base;
890 const int irq = np->pci_dev->irq;
891 int i, retval;
892 size_t tx_size, rx_size;
893 size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;
894
895 /* Do we ever need to reset the chip??? */
896
897 retval = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
898 if (retval)
899 return retval;
900
901 /* Disable the Rx and Tx, and reset the chip. */
902 writel(0, ioaddr + GenCtrl);
903 writel(1, ioaddr + PCIDeviceConfig);
904 if (debug > 1)
905 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
906 dev->name, irq);
907
908 /* Allocate the various queues. */
909 if (!np->queue_mem) {
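/* All four queues are carved out of one coherent DMA allocation. Each
   region except the last is rounded up to a QUEUE_ALIGN (256-byte)
   boundary so that every queue base stays suitably aligned. */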
910 tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
911 rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
912 tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
913 rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
914 np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
915 np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
916 if (np->queue_mem == NULL) {
917 free_irq(irq, dev);
918 return -ENOMEM;
919 }
920
921 np->tx_done_q = np->queue_mem;
922 np->tx_done_q_dma = np->queue_mem_dma;
923 np->rx_done_q = (void *) np->tx_done_q + tx_done_q_size;
924 np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
925 np->tx_ring = (void *) np->rx_done_q + rx_done_q_size;
926 np->tx_ring_dma = np->rx_done_q_dma + rx_done_q_size;
927 np->rx_ring = (void *) np->tx_ring + tx_ring_size;
928 np->rx_ring_dma = np->tx_ring_dma + tx_ring_size;
929 }
930
931 /* Start with no carrier, it gets adjusted later */
932 netif_carrier_off(dev);
933 init_ring(dev);
934 /* Set the size of the Rx buffers. */
935 writel((np->rx_buf_sz << RxBufferLenShift) |
936 (0 << RxMinDescrThreshShift) |
937 RxPrefetchMode | RxVariableQ |
938 RX_Q_ENTRIES |
939 RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE |
940 RxDescSpace4,
941 ioaddr + RxDescQCtrl);
942
943 /* Set up the Rx DMA controller. */
944 writel(RxChecksumIgnore |
945 (0 << RxEarlyIntThreshShift) |
946 (6 << RxHighPrioThreshShift) |
947 ((DMA_BURST_SIZE / 32) << RxBurstSizeShift),
948 ioaddr + RxDMACtrl);
949
950 /* Set Tx descriptor */
951 writel((2 << TxHiPriFIFOThreshShift) |
952 (0 << TxPadLenShift) |
953 ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) |
954 TX_DESC_Q_ADDR_SIZE |
955 TX_DESC_SPACING | TX_DESC_TYPE,
956 ioaddr + TxDescCtrl);
957
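/* Write the high 32 bits of the queue base addresses. Shifting by 16
   twice (instead of by 32 once) stays well-defined C even when
   dma_addr_t is only 32 bits wide, in which case these registers
   simply get zero. */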
958 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
959 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
960 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
961 writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
962 writel(np->tx_ring_dma, ioaddr + TxRingPtr);
963
964 writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
965 writel(np->rx_done_q_dma |
966 RxComplType |
967 (0 << RxComplThreshShift),
968 ioaddr + RxCompletionAddr);
969
970 if (debug > 1)
971 printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);
972
973 /* Fill both the Tx SA register and the Rx perfect filter. */
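/* The Tx station address register is written byte-reversed; each of the
   16 perfect-filter entries occupies 16 bytes, holding the MAC as three
   big-endian halfwords at offsets 0, 4 and 8. Entries 1..15 are
   preloaded with our own address. */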
974 for (i = 0; i < 6; i++)
975 writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);
976 /* The first entry is special because it bypasses the VLAN filter.
977 Don't use it. */
978 writew(0, ioaddr + PerfFilterTable);
979 writew(0, ioaddr + PerfFilterTable + 4);
980 writew(0, ioaddr + PerfFilterTable + 8);
981 for (i = 1; i < 16; i++) {
982 __be16 *eaddrs = (__be16 *)dev->dev_addr;
983 void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
984 writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4;
985 writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4;
986 writew(be16_to_cpu(eaddrs[0]), setup_frm); setup_frm += 8;
987 }
988
989 /* Initialize other registers. */
990 /* Configure the PCI bus bursts and FIFO thresholds. */
991 np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable; /* modified when link is up. */
992 writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
993 udelay(1000);
994 writel(np->tx_mode, ioaddr + TxMode);
995 np->tx_threshold = 4;
996 writel(np->tx_threshold, ioaddr + TxThreshold);
997
998 writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);
999
1000 napi_enable(&np->napi);
1001
1002 netif_start_queue(dev);
1003
1004 if (debug > 1)
1005 printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
1006 set_rx_mode(dev);
1007
1008 np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
1009 check_duplex(dev);
1010
1011 /* Enable GPIO interrupts on link change */
1012 writel(0x0f00ff00, ioaddr + GPIOCtrl);
1013
1014 /* Set the interrupt mask */
1015 writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
1016 IntrTxDMADone | IntrStatsMax | IntrLinkChange |
1017 IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
1018 ioaddr + IntrEnable);
1019 /* Enable PCI interrupts. */
1020 writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
1021 ioaddr + PCIDeviceConfig);
1022
1023 #ifdef VLAN_SUPPORT
1024 /* Set VLAN type to 802.1q */
1025 writel(ETH_P_8021Q, ioaddr + VlanType);
1026 #endif /* VLAN_SUPPORT */
1027
1028 retval = request_firmware(&fw_rx, FIRMWARE_RX, &np->pci_dev->dev);
1029 if (retval) {
1030 printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
1031 FIRMWARE_RX);
1032 goto out_init;
1033 }
1034 if (fw_rx->size % 4) {
1035 printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
1036 fw_rx->size, FIRMWARE_RX);
1037 retval = -EINVAL;
1038 goto out_rx;
1039 }
1040 retval = request_firmware(&fw_tx, FIRMWARE_TX, &np->pci_dev->dev);
1041 if (retval) {
1042 printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
1043 FIRMWARE_TX);
1044 goto out_rx;
1045 }
1046 if (fw_tx->size % 4) {
1047 printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
1048 fw_tx->size, FIRMWARE_TX);
1049 retval = -EINVAL;
1050 goto out_tx;
1051 }
1052 fw_rx_data = (const __be32 *)&fw_rx->data[0];
1053 fw_tx_data = (const __be32 *)&fw_tx->data[0];
1054 rx_size = fw_rx->size / 4;
1055 tx_size = fw_tx->size / 4;
1056
1057 /* Load Rx/Tx firmware into the frame processors */
1058 for (i = 0; i < rx_size; i++)
1059 writel(be32_to_cpup(&fw_rx_data[i]), ioaddr + RxGfpMem + i * 4);
1060 for (i = 0; i < tx_size; i++)
1061 writel(be32_to_cpup(&fw_tx_data[i]), ioaddr + TxGfpMem + i * 4);
1062 if (enable_hw_cksum)
1063 /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
1064 writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl);
1065 else
1066 /* Enable the Rx and Tx units only. */
1067 writel(TxEnable|RxEnable, ioaddr + GenCtrl);
1068
1069 if (debug > 1)
1070 printk(KERN_DEBUG "%s: Done netdev_open().\n",
1071 dev->name);
1072
1073 out_tx:
1074 release_firmware(fw_tx);
1075 out_rx:
1076 release_firmware(fw_rx);
1077 out_init:
1078 if (retval)
1079 netdev_close(dev);
1080 return retval;
1081 }
1082
1083
1084 static void check_duplex(struct net_device *dev)
1085 {
1086 struct netdev_private *np = netdev_priv(dev);
1087 u16 reg0;
1088 int silly_count = 1000;
1089
1090 mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
1091 mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
1092 udelay(500);
1093 while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
1094 /* do nothing */;
1095 if (!silly_count) {
1096 printk("%s: MII reset failed!\n", dev->name);
1097 return;
1098 }
1099
1100 reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1101
1102 if (!np->mii_if.force_media) {
1103 reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
1104 } else {
1105 reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
1106 if (np->speed100)
1107 reg0 |= BMCR_SPEED100;
1108 if (np->mii_if.full_duplex)
1109 reg0 |= BMCR_FULLDPLX;
1110 printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
1111 dev->name,
1112 np->speed100 ? "100" : "10",
1113 np->mii_if.full_duplex ? "full" : "half");
1114 }
1115 mdio_write(dev, np->phys[0], MII_BMCR, reg0);
1116 }
1117
1118
1119 static void tx_timeout(struct net_device *dev)
1120 {
1121 struct netdev_private *np = netdev_priv(dev);
1122 void __iomem *ioaddr = np->base;
1123 int old_debug;
1124
1125 printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
1126 "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));
1127
1128 /* Perhaps we should reinitialize the hardware here. */
1129
1130 /*
1131 * Stop and restart the interface.
1132 * Cheat and increase the debug level temporarily.
1133 */
1134 old_debug = debug;
1135 debug = 2;
1136 netdev_close(dev);
1137 netdev_open(dev);
1138 debug = old_debug;
1139
1140 /* Trigger an immediate transmit demand. */
1141
1142 netif_trans_update(dev); /* prevent tx timeout */
1143 dev->stats.tx_errors++;
1144 netif_wake_queue(dev);
1145 }
1146
1147
1148 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1149 static void init_ring(struct net_device *dev)
1150 {
1151 struct netdev_private *np = netdev_priv(dev);
1152 int i;
1153
1154 np->cur_rx = np->cur_tx = np->reap_tx = 0;
1155 np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;
1156
1157 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1158
1159 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1160 for (i = 0; i < RX_RING_SIZE; i++) {
1161 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1162 np->rx_info[i].skb = skb;
1163 if (skb == NULL)
1164 break;
1165 np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1166 /* Grrr, we cannot offset to correctly align the IP header. */
1167 np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
1168 }
1169 writew(i - 1, np->base + RxDescQIdx);
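/* If an allocation failed above, the deliberate unsigned wrap below
   makes cur_rx - dirty_rx equal the number of still-empty ring slots,
   so refill_rx_ring() will retry them later. */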
1170 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1171
1172 /* Clear the remainder of the Rx buffer ring. */
1173 for ( ; i < RX_RING_SIZE; i++) {
1174 np->rx_ring[i].rxaddr = 0;
1175 np->rx_info[i].skb = NULL;
1176 np->rx_info[i].mapping = 0;
1177 }
1178 /* Mark the last entry as wrapping the ring. */
1179 np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);
1180
1181 /* Clear the completion rings. */
1182 for (i = 0; i < DONE_Q_SIZE; i++) {
1183 np->rx_done_q[i].status = 0;
1184 np->tx_done_q[i].status = 0;
1185 }
1186
1187 for (i = 0; i < TX_RING_SIZE; i++)
1188 memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
1189 }
1190
1191
1192 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1193 {
1194 struct netdev_private *np = netdev_priv(dev);
1195 unsigned int entry;
1196 u32 status;
1197 int i;
1198
1199 /*
1200 * be cautious here, wrapping the queue has weird semantics
1201 * and we may not have enough slots even when it seems we do.
1202 */
1203 if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
1204 netif_stop_queue(dev);
1205 return NETDEV_TX_BUSY;
1206 }
1207
1208 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
1209 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1210 if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
1211 return NETDEV_TX_OK;
1212 }
1213 #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
1214
1215 entry = np->cur_tx % TX_RING_SIZE;
1216 for (i = 0; i < skb_num_frags(skb); i++) {
1217 int wrap_ring = 0;
1218 status = TxDescID;
1219
1220 if (i == 0) {
1221 np->tx_info[entry].skb = skb;
1222 status |= TxCRCEn;
1223 if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
1224 status |= TxRingWrap;
1225 wrap_ring = 1;
1226 }
1227 if (np->reap_tx) {
1228 status |= TxDescIntr;
1229 np->reap_tx = 0;
1230 }
1231 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1232 status |= TxCalTCP;
1233 dev->stats.tx_compressed++;
1234 }
1235 status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);
1236
1237 np->tx_info[entry].mapping =
1238 pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
1239 } else {
1240 const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
1241 status |= skb_frag_size(this_frag);
1242 np->tx_info[entry].mapping =
1243 pci_map_single(np->pci_dev,
1244 skb_frag_address(this_frag),
1245 skb_frag_size(this_frag),
1246 PCI_DMA_TODEVICE);
1247 }
1248
1249 np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
1250 np->tx_ring[entry].status = cpu_to_le32(status);
1251 if (debug > 3)
1252 printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n",
1253 dev->name, np->cur_tx, np->dirty_tx,
1254 entry, status);
1255 if (wrap_ring) {
1256 np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
1257 np->cur_tx += np->tx_info[entry].used_slots;
1258 entry = 0;
1259 } else {
1260 np->tx_info[entry].used_slots = 1;
1261 np->cur_tx += np->tx_info[entry].used_slots;
1262 entry++;
1263 }
1264 /* scavenge the tx descriptors twice per TX_RING_SIZE */
1265 if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
1266 np->reap_tx = 1;
1267 }
1268
1269 /* Non-x86: explicitly flush descriptor cache lines here. */
1270 /* Ensure all descriptors are written back before the transmit is
1271 initiated. - Jes */
1272 wmb();
1273
1274 /* Update the producer index. */
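/* The producer index register seems to count in 8-byte units, hence
   the scaling by sizeof(starfire_tx_desc) / 8. */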
1275 writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx);
1276
1277 /* 4 is arbitrary, but should be ok */
1278 if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
1279 netif_stop_queue(dev);
1280
1281 return NETDEV_TX_OK;
1282 }
1283
1284
1285 /* The interrupt handler does all of the Rx thread work and cleans up
1286 after the Tx thread. */
1287 static irqreturn_t intr_handler(int irq, void *dev_instance)
1288 {
1289 struct net_device *dev = dev_instance;
1290 struct netdev_private *np = netdev_priv(dev);
1291 void __iomem *ioaddr = np->base;
1292 int boguscnt = max_interrupt_work;
1293 int consumer;
1294 int tx_status;
1295 int handled = 0;
1296
1297 do {
1298 u32 intr_status = readl(ioaddr + IntrClear);
1299
1300 if (debug > 4)
1301 printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n",
1302 dev->name, intr_status);
1303
1304 if (intr_status == 0 || intr_status == (u32) -1)
1305 break;
1306
1307 handled = 1;
1308
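/* Rx work is deferred to NAPI: schedule the poll and mask the Rx
   interrupt sources so this handler is not re-entered for Rx.
   netdev_poll() re-enables them once it calls napi_complete(). */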
1309 if (intr_status & (IntrRxDone | IntrRxEmpty)) {
1310 u32 enable;
1311
1312 if (likely(napi_schedule_prep(&np->napi))) {
1313 __napi_schedule(&np->napi);
1314 enable = readl(ioaddr + IntrEnable);
1315 enable &= ~(IntrRxDone | IntrRxEmpty);
1316 writel(enable, ioaddr + IntrEnable);
1317 /* flush PCI posting buffers */
1318 readl(ioaddr + IntrEnable);
1319 } else {
1320 /* Paranoia check */
1321 enable = readl(ioaddr + IntrEnable);
1322 if (enable & (IntrRxDone | IntrRxEmpty)) {
1323 printk(KERN_INFO
1324 "%s: interrupt while in poll!\n",
1325 dev->name);
1326 enable &= ~(IntrRxDone | IntrRxEmpty);
1327 writel(enable, ioaddr + IntrEnable);
1328 }
1329 }
1330 }
1331
1332 /* Scavenge the skbuff list based on the Tx-done queue.
1333 There are redundant checks here that may be cleaned up
1334 after the driver has proven to be reliable. */
1335 consumer = readl(ioaddr + TxConsumerIdx);
1336 if (debug > 3)
1337 printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
1338 dev->name, consumer);
1339
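/* The top three bits of the completion status select its type:
   0xa... merely counts a transmitted packet, while 0x8... carries the
   byte offset (low 15 bits) of a descriptor whose buffers can now be
   unmapped and whose skb can be freed. */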
1340 while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
1341 if (debug > 3)
1342 printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
1343 dev->name, np->dirty_tx, np->tx_done, tx_status);
1344 if ((tx_status & 0xe0000000) == 0xa0000000) {
1345 dev->stats.tx_packets++;
1346 } else if ((tx_status & 0xe0000000) == 0x80000000) {
1347 u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
1348 struct sk_buff *skb = np->tx_info[entry].skb;
1349 np->tx_info[entry].skb = NULL;
1350 pci_unmap_single(np->pci_dev,
1351 np->tx_info[entry].mapping,
1352 skb_first_frag_len(skb),
1353 PCI_DMA_TODEVICE);
1354 np->tx_info[entry].mapping = 0;
1355 np->dirty_tx += np->tx_info[entry].used_slots;
1356 entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
1357 {
1358 int i;
1359 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1360 pci_unmap_single(np->pci_dev,
1361 np->tx_info[entry].mapping,
1362 skb_frag_size(&skb_shinfo(skb)->frags[i]),
1363 PCI_DMA_TODEVICE);
1364 np->dirty_tx++;
1365 entry++;
1366 }
1367 }
1368
1369 dev_kfree_skb_irq(skb);
1370 }
1371 np->tx_done_q[np->tx_done].status = 0;
1372 np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
1373 }
1374 writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
1375
1376 if (netif_queue_stopped(dev) &&
1377 (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
1378 /* The ring is no longer full, wake the queue. */
1379 netif_wake_queue(dev);
1380 }
1381
1382 /* Stats overflow */
1383 if (intr_status & IntrStatsMax)
1384 get_stats(dev);
1385
1386 /* Media change interrupt. */
1387 if (intr_status & IntrLinkChange)
1388 netdev_media_change(dev);
1389
1390 /* Abnormal error summary/uncommon events handlers. */
1391 if (intr_status & IntrAbnormalSummary)
1392 netdev_error(dev, intr_status);
1393
1394 if (--boguscnt < 0) {
1395 if (debug > 1)
1396 printk(KERN_WARNING "%s: Too much work at interrupt, "
1397 "status=%#8.8x.\n",
1398 dev->name, intr_status);
1399 break;
1400 }
1401 } while (1);
1402
1403 if (debug > 4)
1404 printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n",
1405 dev->name, (int) readl(ioaddr + IntrStatus));
1406 return IRQ_RETVAL(handled);
1407 }
1408
1409
1410 /*
1411 * This routine is logically part of the interrupt/poll handler, but separated
1412 * for clarity and better register allocation.
1413 */
1414 static int __netdev_rx(struct net_device *dev, int *quota)
1415 {
1416 struct netdev_private *np = netdev_priv(dev);
1417 u32 desc_status;
1418 int retcode = 0;
1419
1420 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1421 while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
1422 struct sk_buff *skb;
1423 u16 pkt_len;
1424 int entry;
1425 rx_done_desc *desc = &np->rx_done_q[np->rx_done];
1426
1427 if (debug > 4)
1428 printk(KERN_DEBUG " netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
1429 if (!(desc_status & RxOK)) {
1430 /* There was an error. */
1431 if (debug > 2)
1432 printk(KERN_DEBUG " netdev_rx() Rx error was %#8.8x.\n", desc_status);
1433 dev->stats.rx_errors++;
1434 if (desc_status & RxFIFOErr)
1435 dev->stats.rx_fifo_errors++;
1436 goto next_rx;
1437 }
1438
1439 if (*quota <= 0) { /* out of rx quota */
1440 retcode = 1;
1441 goto out;
1442 }
1443 (*quota)--;
1444
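/* The completion status packs the packet length into its low 16 bits
   and the Rx ring index into bits 16..26, hence the truncating
   assignment and the 0x7ff mask below. */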
1445 pkt_len = desc_status; /* Implicitly Truncate */
1446 entry = (desc_status >> 16) & 0x7ff;
1447
1448 if (debug > 4)
1449 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota);
1450 /* Check if the packet is long enough to accept without copying
1451 to a minimally-sized skbuff. */
1452 if (pkt_len < rx_copybreak &&
1453 (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1454 skb_reserve(skb, 2); /* 16 byte align the IP header */
1455 pci_dma_sync_single_for_cpu(np->pci_dev,
1456 np->rx_info[entry].mapping,
1457 pkt_len, PCI_DMA_FROMDEVICE);
1458 skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
1459 pci_dma_sync_single_for_device(np->pci_dev,
1460 np->rx_info[entry].mapping,
1461 pkt_len, PCI_DMA_FROMDEVICE);
1462 skb_put(skb, pkt_len);
1463 } else {
1464 pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1465 skb = np->rx_info[entry].skb;
1466 skb_put(skb, pkt_len);
1467 np->rx_info[entry].skb = NULL;
1468 np->rx_info[entry].mapping = 0;
1469 }
1470 #ifndef final_version /* Remove after testing. */
1471 /* You will want this info for the initial debug. */
1472 if (debug > 5) {
1473 printk(KERN_DEBUG " Rx data %pM %pM %2.2x%2.2x.\n",
1474 skb->data, skb->data + 6,
1475 skb->data[12], skb->data[13]);
1476 }
1477 #endif
1478
1479 skb->protocol = eth_type_trans(skb, dev);
1480 #ifdef VLAN_SUPPORT
1481 if (debug > 4)
1482 printk(KERN_DEBUG " netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
1483 #endif
1484 if (le16_to_cpu(desc->status2) & 0x0100) {
1485 skb->ip_summed = CHECKSUM_UNNECESSARY;
1486 dev->stats.rx_compressed++;
1487 }
1488 /*
1489 * This feature doesn't seem to be working, at least
1490 * with the two firmware versions I have. If the GFP sees
1491 * an IP fragment, it either ignores it completely, or reports
1492 * "bad checksum" on it.
1493 *
1494 * Maybe I missed something -- corrections are welcome.
1495 * Until then, the printk stays. :-) -Ion
1496 */
1497 else if (le16_to_cpu(desc->status2) & 0x0040) {
1498 skb->ip_summed = CHECKSUM_COMPLETE;
1499 skb->csum = le16_to_cpu(desc->csum);
1500 printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
1501 }
1502 #ifdef VLAN_SUPPORT
1503 if (le16_to_cpu(desc->status2) & 0x0200) {
1504 u16 vlid = le16_to_cpu(desc->vlanid);
1505
1506 if (debug > 4) {
1507 printk(KERN_DEBUG " netdev_rx() vlanid = %d\n",
1508 vlid);
1509 }
1510 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlid);
1511 }
1512 #endif /* VLAN_SUPPORT */
1513 netif_receive_skb(skb);
1514 dev->stats.rx_packets++;
1515
1516 next_rx:
1517 np->cur_rx++;
1518 desc->status = 0;
1519 np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
1520 }
1521
1522 if (*quota == 0) { /* out of rx quota */
1523 retcode = 1;
1524 goto out;
1525 }
1526 writew(np->rx_done, np->base + CompletionQConsumerIdx);
1527
1528 out:
1529 refill_rx_ring(dev);
1530 if (debug > 5)
1531 printk(KERN_DEBUG " exiting netdev_rx(): %d, status of %d was %#8.8x.\n",
1532 retcode, np->rx_done, desc_status);
1533 return retcode;
1534 }
1535
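/* NAPI poll: drain Rx completions until the quota runs out or the chip
   stops asserting Rx events, then re-enable the Rx interrupt sources
   that intr_handler() masked. Returns the number of packets processed,
   per the NAPI contract. */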
1536 static int netdev_poll(struct napi_struct *napi, int budget)
1537 {
1538 struct netdev_private *np = container_of(napi, struct netdev_private, napi);
1539 struct net_device *dev = np->dev;
1540 u32 intr_status;
1541 void __iomem *ioaddr = np->base;
1542 int quota = budget;
1543
1544 do {
1545 writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear);
1546
1547 if (__netdev_rx(dev, &quota))
1548 goto out;
1549
1550 intr_status = readl(ioaddr + IntrStatus);
1551 } while (intr_status & (IntrRxDone | IntrRxEmpty));
1552
1553 napi_complete(napi);
1554 intr_status = readl(ioaddr + IntrEnable);
1555 intr_status |= IntrRxDone | IntrRxEmpty;
1556 writel(intr_status, ioaddr + IntrEnable);
1557
1558 out:
1559 if (debug > 5)
1560 printk(KERN_DEBUG " exiting netdev_poll(): %d.\n",
1561 budget - quota);
1562
1563 /* Restart Rx engine if stopped. */
1564 return budget - quota;
1565 }
1566
1567 static void refill_rx_ring(struct net_device *dev)
1568 {
1569 struct netdev_private *np = netdev_priv(dev);
1570 struct sk_buff *skb;
1571 int entry = -1;
1572
1573 /* Refill the Rx ring buffers. */
1574 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1575 entry = np->dirty_rx % RX_RING_SIZE;
1576 if (np->rx_info[entry].skb == NULL) {
1577 skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1578 np->rx_info[entry].skb = skb;
1579 if (skb == NULL)
1580 break; /* Better luck next round. */
1581 np->rx_info[entry].mapping =
1582 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1583 np->rx_ring[entry].rxaddr =
1584 cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
1585 }
1586 if (entry == RX_RING_SIZE - 1)
1587 np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
1588 }
1589 if (entry >= 0)
1590 writew(entry, np->base + RxDescQIdx);
1591 }
1592
1593
1594 static void netdev_media_change(struct net_device *dev)
1595 {
1596 struct netdev_private *np = netdev_priv(dev);
1597 void __iomem *ioaddr = np->base;
1598 u16 reg0, reg1, reg4, reg5;
1599 u32 new_tx_mode;
1600 u32 new_intr_timer_ctrl;
1601
1602 /* reset status first */
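/* BMSR bits are latched, so the first read clears any stale link
   indication and the reads below reflect the current state. */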
1603 mdio_read(dev, np->phys[0], MII_BMCR);
1604 mdio_read(dev, np->phys[0], MII_BMSR);
1605
1606 reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1607 reg1 = mdio_read(dev, np->phys[0], MII_BMSR);
1608
1609 if (reg1 & BMSR_LSTATUS) {
1610 /* link is up */
1611 if (reg0 & BMCR_ANENABLE) {
1612 /* autonegotiation is enabled */
1613 reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
1614 reg5 = mdio_read(dev, np->phys[0], MII_LPA);
1615 if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
1616 np->speed100 = 1;
1617 np->mii_if.full_duplex = 1;
1618 } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
1619 np->speed100 = 1;
1620 np->mii_if.full_duplex = 0;
1621 } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
1622 np->speed100 = 0;
1623 np->mii_if.full_duplex = 1;
1624 } else {
1625 np->speed100 = 0;
1626 np->mii_if.full_duplex = 0;
1627 }
1628 } else {
1629 /* autonegotiation is disabled */
1630 if (reg0 & BMCR_SPEED100)
1631 np->speed100 = 1;
1632 else
1633 np->speed100 = 0;
1634 if (reg0 & BMCR_FULLDPLX)
1635 np->mii_if.full_duplex = 1;
1636 else
1637 np->mii_if.full_duplex = 0;
1638 }
1639 netif_carrier_on(dev);
1640 printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
1641 dev->name,
1642 np->speed100 ? "100" : "10",
1643 np->mii_if.full_duplex ? "full" : "half");
1644
1645 new_tx_mode = np->tx_mode & ~FullDuplex; /* duplex setting */
1646 if (np->mii_if.full_duplex)
1647 new_tx_mode |= FullDuplex;
1648 if (np->tx_mode != new_tx_mode) {
1649 np->tx_mode = new_tx_mode;
1650 writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
1651 udelay(1000);
1652 writel(np->tx_mode, ioaddr + TxMode);
1653 }
1654
1655 new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
1656 if (np->speed100)
1657 new_intr_timer_ctrl |= Timer10X;
1658 if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
1659 np->intr_timer_ctrl = new_intr_timer_ctrl;
1660 writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl);
1661 }
1662 } else {
1663 netif_carrier_off(dev);
1664 printk(KERN_DEBUG "%s: Link is down\n", dev->name);
1665 }
1666 }
1667
1668
1669 static void netdev_error(struct net_device *dev, int intr_status)
1670 {
1671 struct netdev_private *np = netdev_priv(dev);
1672
1673 /* Came close to underrunning the Tx FIFO, increase threshold. */
1674 if (intr_status & IntrTxDataLow) {
1675 if (np->tx_threshold <= PKT_BUF_SZ / 16) {
1676 writel(++np->tx_threshold, np->base + TxThreshold);
1677 printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n",
1678 dev->name, np->tx_threshold * 16);
1679 } else
1680 printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
1681 }
1682 if (intr_status & IntrRxGFPDead) {
1683 dev->stats.rx_fifo_errors++;
1684 dev->stats.rx_errors++;
1685 }
1686 if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
1687 dev->stats.tx_fifo_errors++;
1688 dev->stats.tx_errors++;
1689 }
1690 if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
1691 printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
1692 dev->name, intr_status);
1693 }
1694
1695
1696 static struct net_device_stats *get_stats(struct net_device *dev)
1697 {
1698 struct netdev_private *np = netdev_priv(dev);
1699 void __iomem *ioaddr = np->base;
1700
1701 /* This adapter architecture needs no SMP locks. */
1702 dev->stats.tx_bytes = readl(ioaddr + 0x57010);
1703 dev->stats.rx_bytes = readl(ioaddr + 0x57044);
1704 dev->stats.tx_packets = readl(ioaddr + 0x57000);
1705 dev->stats.tx_aborted_errors =
1706 readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
1707 dev->stats.tx_window_errors = readl(ioaddr + 0x57018);
1708 dev->stats.collisions =
1709 readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
1710
1711 /* The chip only needs to report frames it silently dropped. */
1712 dev->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
1713 writew(0, ioaddr + RxDMAStatus);
1714 dev->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
1715 dev->stats.rx_frame_errors = readl(ioaddr + 0x57040);
1716 dev->stats.rx_length_errors = readl(ioaddr + 0x57058);
1717 dev->stats.rx_missed_errors = readl(ioaddr + 0x5707C);
1718
1719 return &dev->stats;
1720 }
1721
1722 #ifdef VLAN_SUPPORT
1723 static u32 set_vlan_mode(struct netdev_private *np)
1724 {
1725 u32 ret = VlanMode;
1726 u16 vid;
1727 void __iomem *filter_addr = np->base + HashTable + 8;
1728 int vlan_count = 0;
1729
1730 for_each_set_bit(vid, np->active_vlans, VLAN_N_VID) {
1731 if (vlan_count == 32)
1732 break;
1733 writew(vid, filter_addr);
1734 filter_addr += 16;
1735 vlan_count++;
1736 }
1737 if (vlan_count < 32) { /* every active VLAN fit in the table */
1738 ret |= PerfectFilterVlan;
1739 while (vlan_count < 32) {
1740 writew(0, filter_addr);
1741 filter_addr += 16;
1742 vlan_count++;
1743 }
1744 }
1745 return ret;
1746 }
1747 #endif /* VLAN_SUPPORT */
1748
1749 static void set_rx_mode(struct net_device *dev)
1750 {
1751 struct netdev_private *np = netdev_priv(dev);
1752 void __iomem *ioaddr = np->base;
1753 u32 rx_mode = MinVLANPrio;
1754 struct netdev_hw_addr *ha;
1755 int i;
1756
1757 #ifdef VLAN_SUPPORT
1758 rx_mode |= set_vlan_mode(np);
1759 #endif /* VLAN_SUPPORT */
1760
1761 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1762 rx_mode |= AcceptAll;
1763 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1764 (dev->flags & IFF_ALLMULTI)) {
1765 /* Too many to match, or accept all multicasts. */
1766 rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
1767 } else if (netdev_mc_count(dev) <= 14) {
1768 /* Use the 16 element perfect filter, skip first two entries. */
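/* Entry 0 bypasses the VLAN filter and entry 1 presumably keeps the
   station address, which is why multicast entries start at index 2;
   leftover entries are refilled with our own address below. */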
1769 void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1770 __be16 *eaddrs;
1771 netdev_for_each_mc_addr(ha, dev) {
1772 eaddrs = (__be16 *) ha->addr;
1773 writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
1774 writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1775 writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
1776 }
1777 eaddrs = (__be16 *)dev->dev_addr;
1778 i = netdev_mc_count(dev) + 2;
1779 while (i++ < 16) {
1780 writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
1781 writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1782 writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
1783 }
1784 rx_mode |= AcceptBroadcast|PerfectFilter;
1785 } else {
1786 /* Must use a multicast hash table. */
1787 void __iomem *filter_addr;
1788 __be16 *eaddrs;
1789 __le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */
1790
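/* The 512-bit hash table is assembled in memory as 32 little-endian
   16-bit words; casting to __le32 lets each update OR a bit into a pair
   of adjacent words at once. The finished table is flushed to 32 chip
   registers spaced 16 bytes apart in the final loop below. */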
1791 memset(mc_filter, 0, sizeof(mc_filter));
1792 netdev_for_each_mc_addr(ha, dev) {
1793 /* The chip uses the upper 9 CRC bits
1794 as index into the hash table */
1795 int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23;
1796 __le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1];
1797
1798 *fptr |= cpu_to_le32(1 << (bit_nr & 31));
1799 }
1800 /* Clear the perfect filter list, skip first two entries. */
1801 filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1802 eaddrs = (__be16 *)dev->dev_addr;
1803 for (i = 2; i < 16; i++) {
1804 writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
1805 writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1806 writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
1807 }
1808 for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr += 16, i++)
1809 writew(mc_filter[i], filter_addr);
1810 rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter;
1811 }
1812 writel(rx_mode, ioaddr + RxFilterMode);
1813 }
1814
1815 static int check_if_running(struct net_device *dev)
1816 {
1817 if (!netif_running(dev))
1818 return -EINVAL;
1819 return 0;
1820 }
1821
1822 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1823 {
1824 struct netdev_private *np = netdev_priv(dev);
1825 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1826 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1827 strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1828 }
1829
1830 static int get_link_ksettings(struct net_device *dev,
1831 struct ethtool_link_ksettings *cmd)
1832 {
1833 struct netdev_private *np = netdev_priv(dev);
1834 spin_lock_irq(&np->lock);
1835 mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
1836 spin_unlock_irq(&np->lock);
1837 return 0;
1838 }
1839
1840 static int set_link_ksettings(struct net_device *dev,
1841 const struct ethtool_link_ksettings *cmd)
1842 {
1843 struct netdev_private *np = netdev_priv(dev);
1844 int res;
1845 spin_lock_irq(&np->lock);
1846 res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
1847 spin_unlock_irq(&np->lock);
1848 check_duplex(dev);
1849 return res;
1850 }
1851
1852 static int nway_reset(struct net_device *dev)
1853 {
1854 struct netdev_private *np = netdev_priv(dev);
1855 return mii_nway_restart(&np->mii_if);
1856 }
1857
1858 static u32 get_link(struct net_device *dev)
1859 {
1860 struct netdev_private *np = netdev_priv(dev);
1861 return mii_link_ok(&np->mii_if);
1862 }
1863
1864 static u32 get_msglevel(struct net_device *dev)
1865 {
1866 return debug;
1867 }
1868
1869 static void set_msglevel(struct net_device *dev, u32 val)
1870 {
1871 debug = val;
1872 }
1873
1874 static const struct ethtool_ops ethtool_ops = {
1875 .begin = check_if_running,
1876 .get_drvinfo = get_drvinfo,
1877 .nway_reset = nway_reset,
1878 .get_link = get_link,
1879 .get_msglevel = get_msglevel,
1880 .set_msglevel = set_msglevel,
1881 .get_link_ksettings = get_link_ksettings,
1882 .set_link_ksettings = set_link_ksettings,
1883 };
1884
1885 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1886 {
1887 struct netdev_private *np = netdev_priv(dev);
1888 struct mii_ioctl_data *data = if_mii(rq);
1889 int rc;
1890
1891 if (!netif_running(dev))
1892 return -EINVAL;
1893
1894 spin_lock_irq(&np->lock);
1895 rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
1896 spin_unlock_irq(&np->lock);
1897
1898 if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
1899 check_duplex(dev);
1900
1901 return rc;
1902 }
1903
1904 static int netdev_close(struct net_device *dev)
1905 {
1906 struct netdev_private *np = netdev_priv(dev);
1907 void __iomem *ioaddr = np->base;
1908 int i;
1909
1910 netif_stop_queue(dev);
1911
1912 napi_disable(&np->napi);
1913
1914 if (debug > 1) {
1915 printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
1916 dev->name, (int) readl(ioaddr + IntrStatus));
1917 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1918 dev->name, np->cur_tx, np->dirty_tx,
1919 np->cur_rx, np->dirty_rx);
1920 }
1921
1922 /* Disable interrupts by clearing the interrupt mask. */
1923 writel(0, ioaddr + IntrEnable);
1924
1925 /* Stop the chip's Tx and Rx processes. */
1926 writel(0, ioaddr + GenCtrl);
1927 readl(ioaddr + GenCtrl);
1928
1929 if (debug > 5) {
1930 printk(KERN_DEBUG" Tx ring at %#llx:\n",
1931 (long long) np->tx_ring_dma);
1932 for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
1933 printk(KERN_DEBUG " #%d desc. %#8.8x %#llx -> %#8.8x.\n",
1934 i, le32_to_cpu(np->tx_ring[i].status),
1935 (long long) dma_to_cpu(np->tx_ring[i].addr),
1936 le32_to_cpu(np->tx_done_q[i].status));
1937 printk(KERN_DEBUG " Rx ring at %#llx -> %p:\n",
1938 (long long) np->rx_ring_dma, np->rx_done_q);
1939 if (np->rx_done_q)
1940 for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
1941 printk(KERN_DEBUG " #%d desc. %#llx -> %#8.8x\n",
1942 i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
1943 }
1944 }
1945
1946 free_irq(np->pci_dev->irq, dev);
1947
1948 /* Free all the skbuffs in the Rx queue. */
1949 for (i = 0; i < RX_RING_SIZE; i++) {
1950 np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
1951 if (np->rx_info[i].skb != NULL) {
1952 pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1953 dev_kfree_skb(np->rx_info[i].skb);
1954 }
1955 np->rx_info[i].skb = NULL;
1956 np->rx_info[i].mapping = 0;
1957 }
1958 for (i = 0; i < TX_RING_SIZE; i++) {
1959 struct sk_buff *skb = np->tx_info[i].skb;
1960 if (skb == NULL)
1961 continue;
1962 pci_unmap_single(np->pci_dev,
1963 np->tx_info[i].mapping,
1964 skb_first_frag_len(skb), PCI_DMA_TODEVICE);
1965 np->tx_info[i].mapping = 0;
1966 dev_kfree_skb(skb);
1967 np->tx_info[i].skb = NULL;
1968 }
1969
1970 return 0;
1971 }
1972
1973 #ifdef CONFIG_PM
1974 static int starfire_suspend(struct pci_dev *pdev, pm_message_t state)
1975 {
1976 struct net_device *dev = pci_get_drvdata(pdev);
1977
1978 if (netif_running(dev)) {
1979 netif_device_detach(dev);
1980 netdev_close(dev);
1981 }
1982
1983 pci_save_state(pdev);
1984 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1985
1986 return 0;
1987 }
1988
1989 static int starfire_resume(struct pci_dev *pdev)
1990 {
1991 struct net_device *dev = pci_get_drvdata(pdev);
1992
1993 pci_set_power_state(pdev, PCI_D0);
1994 pci_restore_state(pdev);
1995
1996 if (netif_running(dev)) {
1997 netdev_open(dev);
1998 netif_device_attach(dev);
1999 }
2000
2001 return 0;
2002 }
2003 #endif /* CONFIG_PM */
2004
2005
2006 static void starfire_remove_one(struct pci_dev *pdev)
2007 {
2008 struct net_device *dev = pci_get_drvdata(pdev);
2009 struct netdev_private *np = netdev_priv(dev);
2010
2011 BUG_ON(!dev);
2012
2013 unregister_netdev(dev);
2014
2015 if (np->queue_mem)
2016 pci_free_consistent(pdev, np->queue_mem_size, np->queue_mem, np->queue_mem_dma);
2017
2018
2019 /* XXX: add wakeup code -- requires firmware for MagicPacket */
2020 pci_set_power_state(pdev, PCI_D3hot); /* go to sleep in D3 mode */
2021 pci_disable_device(pdev);
2022
2023 iounmap(np->base);
2024 pci_release_regions(pdev);
2025
2026 free_netdev(dev); /* Will also free np!! */
2027 }
2028
2029
2030 static struct pci_driver starfire_driver = {
2031 .name = DRV_NAME,
2032 .probe = starfire_init_one,
2033 .remove = starfire_remove_one,
2034 #ifdef CONFIG_PM
2035 .suspend = starfire_suspend,
2036 .resume = starfire_resume,
2037 #endif /* CONFIG_PM */
2038 .id_table = starfire_pci_tbl,
2039 };
2040
2041
2042 static int __init starfire_init (void)
2043 {
2044 /* when a module, this is printed whether or not devices are found in probe */
2045 #ifdef MODULE
2046 printk(version);
2047
2048 printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
2049 #endif
2050
2051 BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(netdrv_addr_t));
2052
2053 return pci_register_driver(&starfire_driver);
2054 }
2055
2056
2057 static void __exit starfire_cleanup (void)
2058 {
2059 pci_unregister_driver (&starfire_driver);
2060 }
2061
2062
2063 module_init(starfire_init);
2064 module_exit(starfire_cleanup);
2065
2066
2067 /*
2068 * Local variables:
2069 * c-basic-offset: 8
2070 * tab-width: 8
2071 * End:
2072 */
2073
2074
2075
2076
2077
2078 /* LDV_COMMENT_BEGIN_MAIN */
2079 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
2080
2081 /*###########################################################################*/
2082
2083 /*############## Driver Environment Generator 0.2 output ####################*/
2084
2085 /*###########################################################################*/
2086
2087
2088
2089 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests whether all kernel resources are correctly released by the driver before it is unloaded. */
2090 void ldv_check_final_state(void);
2091
2092 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests that a return value is correct. */
2093 void ldv_check_return_value(int res);
2094
2095 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests that the return value of the probe() function is correct. */
2096 void ldv_check_return_value_probe(int res);
2097
2098 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
2099 void ldv_initialize(void);
2100
2101 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
2102 void ldv_handler_precall(void);
2103
2105 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
2105 int nondet_int(void);
2106
2107 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
2108 int LDV_IN_INTERRUPT;
2109
2110 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
2111 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
2112
2113
2114
2115 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
2116 /*============================= VARIABLE DECLARATION PART =============================*/
2117 /** STRUCT: struct type: net_device_ops, struct name: netdev_ops **/
2118 /* content: static int netdev_open(struct net_device *dev)*/
2119 /* LDV_COMMENT_BEGIN_PREP */
2120 #define DRV_NAME "starfire"
2121 #define DRV_VERSION "2.1"
2122 #define DRV_RELDATE "July 6, 2008"
2123 #define HAS_BROKEN_FIRMWARE
2124 #ifdef HAS_BROKEN_FIRMWARE
2125 #define PADDING_MASK 3
2126 #endif
2127 #define ZEROCOPY
2128 #if IS_ENABLED(CONFIG_VLAN_8021Q)
2129 #define VLAN_SUPPORT
2130 #endif
2131 #define PKT_BUF_SZ 1536
2132 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
2133 #else
2134 #endif
2135 #ifdef __sparc__
2136 #define DMA_BURST_SIZE 64
2137 #else
2138 #define DMA_BURST_SIZE 128
2139 #endif
2140 #define RX_RING_SIZE 256
2141 #define TX_RING_SIZE 32
2142 #define DONE_Q_SIZE 1024
2143 #define QUEUE_ALIGN 256
2144 #if RX_RING_SIZE > 256
2145 #define RX_Q_ENTRIES Rx2048QEntries
2146 #else
2147 #define RX_Q_ENTRIES Rx256QEntries
2148 #endif
2149 #define TX_TIMEOUT (2 * HZ)
2150 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2151 #define ADDR_64BITS
2152 #define netdrv_addr_t __le64
2153 #define cpu_to_dma(x) cpu_to_le64(x)
2154 #define dma_to_cpu(x) le64_to_cpu(x)
2155 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
2156 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
2157 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
2158 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
2159 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
2160 #else
2161 #define netdrv_addr_t __le32
2162 #define cpu_to_dma(x) cpu_to_le32(x)
2163 #define dma_to_cpu(x) le32_to_cpu(x)
2164 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
2165 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
2166 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
2167 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
2168 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
2169 #endif
2170 #define skb_first_frag_len(skb) skb_headlen(skb)
2171 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
2172 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
2173 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
2174 #ifdef VLAN_SUPPORT
2175 #define RxComplType RxComplType3
2176 #else
2177 #define RxComplType RxComplType2
2178 #endif
2179 #ifdef ADDR_64BITS
2180 #define TX_DESC_TYPE TxDescType2
2181 #else
2182 #define TX_DESC_TYPE TxDescType1
2183 #endif
2184 #define TX_DESC_SPACING TxDescSpaceUnlim
2185 #if 0
2186 #endif
2187 #define PHY_CNT 2
2188 #ifdef VLAN_SUPPORT
2189 #endif
2190 #ifdef VLAN_SUPPORT
2191 #endif
2192 #ifdef VLAN_SUPPORT
2193 #endif
2194 #ifndef MODULE
2195 #endif
2196 #ifdef ZEROCOPY
2197 #endif
2198 #ifdef VLAN_SUPPORT
2199 #endif
2200 #ifdef ADDR_64BITS
2201 #endif
2202 #if ! defined(final_version)
2203 #endif
2204 /* LDV_COMMENT_END_PREP */
2205 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "netdev_open" */
2206 struct net_device * var_group1;
2207 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "netdev_open" */
2208 static int res_netdev_open_5;
2209 /* LDV_COMMENT_BEGIN_PREP */
2210 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
2211 #endif
2212 #ifndef final_version
2213 #endif
2214 #ifdef VLAN_SUPPORT
2215 #endif
2216 #ifdef VLAN_SUPPORT
2217 #endif
2218 #ifdef VLAN_SUPPORT
2219 #endif
2220 #ifdef VLAN_SUPPORT
2221 #endif
2222 #ifdef CONFIG_PM
2223 #endif
2224 #ifdef CONFIG_PM
2225 #endif
2226 #ifdef MODULE
2227 #endif
2228 /* LDV_COMMENT_END_PREP */
2229 /* content: static int netdev_close(struct net_device *dev)*/
2230 /* LDV_COMMENT_BEGIN_PREP */
2231 #define DRV_NAME "starfire"
2232 #define DRV_VERSION "2.1"
2233 #define DRV_RELDATE "July 6, 2008"
2234 #define HAS_BROKEN_FIRMWARE
2235 #ifdef HAS_BROKEN_FIRMWARE
2236 #define PADDING_MASK 3
2237 #endif
2238 #define ZEROCOPY
2239 #if IS_ENABLED(CONFIG_VLAN_8021Q)
2240 #define VLAN_SUPPORT
2241 #endif
2242 #define PKT_BUF_SZ 1536
2243 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
2244 #else
2245 #endif
2246 #ifdef __sparc__
2247 #define DMA_BURST_SIZE 64
2248 #else
2249 #define DMA_BURST_SIZE 128
2250 #endif
2251 #define RX_RING_SIZE 256
2252 #define TX_RING_SIZE 32
2253 #define DONE_Q_SIZE 1024
2254 #define QUEUE_ALIGN 256
2255 #if RX_RING_SIZE > 256
2256 #define RX_Q_ENTRIES Rx2048QEntries
2257 #else
2258 #define RX_Q_ENTRIES Rx256QEntries
2259 #endif
2260 #define TX_TIMEOUT (2 * HZ)
2261 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2262 #define ADDR_64BITS
2263 #define netdrv_addr_t __le64
2264 #define cpu_to_dma(x) cpu_to_le64(x)
2265 #define dma_to_cpu(x) le64_to_cpu(x)
2266 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
2267 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
2268 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
2269 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
2270 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
2271 #else
2272 #define netdrv_addr_t __le32
2273 #define cpu_to_dma(x) cpu_to_le32(x)
2274 #define dma_to_cpu(x) le32_to_cpu(x)
2275 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
2276 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
2277 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
2278 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
2279 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
2280 #endif
2281 #define skb_first_frag_len(skb) skb_headlen(skb)
2282 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
2283 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
2284 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
2285 #ifdef VLAN_SUPPORT
2286 #define RxComplType RxComplType3
2287 #else
2288 #define RxComplType RxComplType2
2289 #endif
2290 #ifdef ADDR_64BITS
2291 #define TX_DESC_TYPE TxDescType2
2292 #else
2293 #define TX_DESC_TYPE TxDescType1
2294 #endif
2295 #define TX_DESC_SPACING TxDescSpaceUnlim
2296 #if 0
2297 #endif
2298 #define PHY_CNT 2
2299 #ifdef VLAN_SUPPORT
2300 #endif
2301 #ifdef VLAN_SUPPORT
2302 #endif
2303 #ifdef VLAN_SUPPORT
2304 #endif
2305 #ifndef MODULE
2306 #endif
2307 #ifdef ZEROCOPY
2308 #endif
2309 #ifdef VLAN_SUPPORT
2310 #endif
2311 #ifdef ADDR_64BITS
2312 #endif
2313 #if ! defined(final_version)
2314 #endif
2315 #ifdef VLAN_SUPPORT
2316 #endif
2317 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
2318 #endif
2319 #ifndef final_version
2320 #endif
2321 #ifdef VLAN_SUPPORT
2322 #endif
2323 #ifdef VLAN_SUPPORT
2324 #endif
2325 #ifdef VLAN_SUPPORT
2326 #endif
2327 #ifdef VLAN_SUPPORT
2328 #endif
2329 /* LDV_COMMENT_END_PREP */
2330 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "netdev_close" */
2331 static int res_netdev_close_28;
2332 /* LDV_COMMENT_BEGIN_PREP */
2333 #ifdef CONFIG_PM
2334 #endif
2335 #ifdef CONFIG_PM
2336 #endif
2337 #ifdef MODULE
2338 #endif
2339 /* LDV_COMMENT_END_PREP */
2340 /* content: static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)*/
2341 /* LDV_COMMENT_BEGIN_PREP */
2342 #define DRV_NAME "starfire"
2343 #define DRV_VERSION "2.1"
2344 #define DRV_RELDATE "July 6, 2008"
2345 #define HAS_BROKEN_FIRMWARE
2346 #ifdef HAS_BROKEN_FIRMWARE
2347 #define PADDING_MASK 3
2348 #endif
2349 #define ZEROCOPY
2350 #if IS_ENABLED(CONFIG_VLAN_8021Q)
2351 #define VLAN_SUPPORT
2352 #endif
2353 #define PKT_BUF_SZ 1536
2354 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
2355 #else
2356 #endif
2357 #ifdef __sparc__
2358 #define DMA_BURST_SIZE 64
2359 #else
2360 #define DMA_BURST_SIZE 128
2361 #endif
2362 #define RX_RING_SIZE 256
2363 #define TX_RING_SIZE 32
2364 #define DONE_Q_SIZE 1024
2365 #define QUEUE_ALIGN 256
2366 #if RX_RING_SIZE > 256
2367 #define RX_Q_ENTRIES Rx2048QEntries
2368 #else
2369 #define RX_Q_ENTRIES Rx256QEntries
2370 #endif
2371 #define TX_TIMEOUT (2 * HZ)
2372 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2373 #define ADDR_64BITS
2374 #define netdrv_addr_t __le64
2375 #define cpu_to_dma(x) cpu_to_le64(x)
2376 #define dma_to_cpu(x) le64_to_cpu(x)
2377 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
2378 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
2379 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
2380 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
2381 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
2382 #else
2383 #define netdrv_addr_t __le32
2384 #define cpu_to_dma(x) cpu_to_le32(x)
2385 #define dma_to_cpu(x) le32_to_cpu(x)
2386 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
2387 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
2388 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
2389 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
2390 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
2391 #endif
2392 #define skb_first_frag_len(skb) skb_headlen(skb)
2393 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
2394 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
2395 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
2396 #ifdef VLAN_SUPPORT
2397 #define RxComplType RxComplType3
2398 #else
2399 #define RxComplType RxComplType2
2400 #endif
2401 #ifdef ADDR_64BITS
2402 #define TX_DESC_TYPE TxDescType2
2403 #else
2404 #define TX_DESC_TYPE TxDescType1
2405 #endif
2406 #define TX_DESC_SPACING TxDescSpaceUnlim
2407 #if 0
2408 #endif
2409 #define PHY_CNT 2
2410 #ifdef VLAN_SUPPORT
2411 #endif
2412 #ifdef VLAN_SUPPORT
2413 #endif
2414 #ifdef VLAN_SUPPORT
2415 #endif
2416 #ifndef MODULE
2417 #endif
2418 #ifdef ZEROCOPY
2419 #endif
2420 #ifdef VLAN_SUPPORT
2421 #endif
2422 #ifdef ADDR_64BITS
2423 #endif
2424 #if ! defined(final_version)
2425 #endif
2426 #ifdef VLAN_SUPPORT
2427 #endif
2428 /* LDV_COMMENT_END_PREP */
2429 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "start_tx" */
2430 struct sk_buff * var_group2;
2431 /* LDV_COMMENT_BEGIN_PREP */
2432 #ifndef final_version
2433 #endif
2434 #ifdef VLAN_SUPPORT
2435 #endif
2436 #ifdef VLAN_SUPPORT
2437 #endif
2438 #ifdef VLAN_SUPPORT
2439 #endif
2440 #ifdef VLAN_SUPPORT
2441 #endif
2442 #ifdef CONFIG_PM
2443 #endif
2444 #ifdef CONFIG_PM
2445 #endif
2446 #ifdef MODULE
2447 #endif
2448 /* LDV_COMMENT_END_PREP */
2449 /* content: static void tx_timeout(struct net_device *dev)*/
2450 /* LDV_COMMENT_BEGIN_PREP */
/* [ preprocessor prologue omitted: identical to the DRV_NAME .. PHY_CNT macro block and empty #ifdef/#endif guards expanded above ] */
2537 /* LDV_COMMENT_END_PREP */
2538 /* LDV_COMMENT_BEGIN_PREP */
2539 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
2540 #endif
2541 #ifndef final_version
2542 #endif
2543 #ifdef VLAN_SUPPORT
2544 #endif
2545 #ifdef VLAN_SUPPORT
2546 #endif
2547 #ifdef VLAN_SUPPORT
2548 #endif
2549 #ifdef VLAN_SUPPORT
2550 #endif
2551 #ifdef CONFIG_PM
2552 #endif
2553 #ifdef CONFIG_PM
2554 #endif
2555 #ifdef MODULE
2556 #endif
2557 /* LDV_COMMENT_END_PREP */
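/* For orientation, a generic sketch of a netdev watchdog handler with the
   signature recorded above; the body is an assumption, not trace content. */
static void tx_timeout(struct net_device *dev)
{
        /* Count the failure, reset or kick the MAC (elided), and wake
         * the queue so the stack can retry transmission. */
        dev->stats.tx_errors++;
        netif_wake_queue(dev);
}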
2558 /* content: static struct net_device_stats *get_stats(struct net_device *dev)*/
2559 /* LDV_COMMENT_BEGIN_PREP */
/* [ preprocessor prologue omitted: identical to the DRV_NAME .. PHY_CNT macro block and empty #ifdef/#endif guards expanded above ] */
2654 /* LDV_COMMENT_END_PREP */
2655 /* LDV_COMMENT_BEGIN_PREP */
2656 #ifdef VLAN_SUPPORT
2657 #endif
2658 #ifdef VLAN_SUPPORT
2659 #endif
2660 #ifdef CONFIG_PM
2661 #endif
2662 #ifdef CONFIG_PM
2663 #endif
2664 #ifdef MODULE
2665 #endif
2666 /* LDV_COMMENT_END_PREP */
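/* Minimal sketch: a get_stats handler usually folds any hardware counters
   into the software statistics block and returns it. The hardware readout
   is elided; only the signature comes from the trace. */
static struct net_device_stats *get_stats(struct net_device *dev)
{
        /* read MAC drop/error counters into dev->stats here */
        return &dev->stats;
}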
2667 /* content: static void set_rx_mode(struct net_device *dev)*/
2668 /* LDV_COMMENT_BEGIN_PREP */
/* [ preprocessor prologue omitted: identical to the DRV_NAME .. PHY_CNT macro block and empty #ifdef/#endif guards expanded above ] */
2765 /* LDV_COMMENT_END_PREP */
2766 /* LDV_COMMENT_BEGIN_PREP */
2767 #ifdef CONFIG_PM
2768 #endif
2769 #ifdef CONFIG_PM
2770 #endif
2771 #ifdef MODULE
2772 #endif
2773 /* LDV_COMMENT_END_PREP */
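/* The conventional decision tree for an rx-mode handler, sketched under the
   assumption that the chip offers promiscuous and all-multicast modes plus a
   multicast filter; every register write is elided. */
static void set_rx_mode(struct net_device *dev)
{
        if (dev->flags & IFF_PROMISC) {
                /* accept every packet */
        } else if (dev->flags & IFF_ALLMULTI) {
                /* accept all multicast */
        } else {
                /* load the hardware filter from the addresses walked
                 * with netdev_for_each_mc_addr(ha, dev) */
        }
}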
2774 /* content: static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)*/
2775 /* LDV_COMMENT_BEGIN_PREP */
/* [ preprocessor prologue omitted: identical to the DRV_NAME .. PHY_CNT macro block and empty #ifdef/#endif guards expanded above ] */
2874 /* LDV_COMMENT_END_PREP */
2875 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "netdev_ioctl" */
2876 struct ifreq * var_group3;
2877 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "netdev_ioctl" */
2878 int var_netdev_ioctl_27_p2;
2879 /* LDV_COMMENT_BEGIN_PREP */
2880 #ifdef CONFIG_PM
2881 #endif
2882 #ifdef CONFIG_PM
2883 #endif
2884 #ifdef MODULE
2885 #endif
2886 /* LDV_COMMENT_END_PREP */
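/* MII-managed NICs commonly forward SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG to
   the kernel mii library; a sketch under that assumption (the mii_if member
   and the netdev_private type are assumed, not read from the trace). */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct netdev_private *np = netdev_priv(dev);  /* assumed priv type */

        if (!netif_running(dev))
                return -EINVAL;
        return generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
}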
2887 /* content: static int netdev_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)*/
2888 /* LDV_COMMENT_BEGIN_PREP */
/* [ preprocessor prologue omitted: identical to the DRV_NAME .. PHY_CNT macro block and empty #ifdef/#endif guards expanded above ] */
2959 #ifdef VLAN_SUPPORT
2960 /* LDV_COMMENT_END_PREP */
2961 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "netdev_vlan_rx_add_vid" */
2962 __be16 var_netdev_vlan_rx_add_vid_0_p1;
2963 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "netdev_vlan_rx_add_vid" */
2964 u16 var_netdev_vlan_rx_add_vid_0_p2;
2965 /* LDV_COMMENT_BEGIN_PREP */
2966 #endif
2967 #ifdef VLAN_SUPPORT
2968 #endif
2969 #ifndef MODULE
2970 #endif
2971 #ifdef ZEROCOPY
2972 #endif
2973 #ifdef VLAN_SUPPORT
2974 #endif
2975 #ifdef ADDR_64BITS
2976 #endif
2977 #if ! defined(final_version)
2978 #endif
2979 #ifdef VLAN_SUPPORT
2980 #endif
2981 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
2982 #endif
2983 #ifndef final_version
2984 #endif
2985 #ifdef VLAN_SUPPORT
2986 #endif
2987 #ifdef VLAN_SUPPORT
2988 #endif
2989 #ifdef VLAN_SUPPORT
2990 #endif
2991 #ifdef VLAN_SUPPORT
2992 #endif
2993 #ifdef CONFIG_PM
2994 #endif
2995 #ifdef CONFIG_PM
2996 #endif
2997 #ifdef MODULE
2998 #endif
2999 /* LDV_COMMENT_END_PREP */
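/* Sketch assuming the driver tracks VLANs in a per-device bitmap and then
   reloads the hardware filter; active_vlans is an assumed field, and the
   filter reload is elided. */
static int netdev_vlan_rx_add_vid(struct net_device *dev,
                                  __be16 proto, u16 vid)
{
        struct netdev_private *np = netdev_priv(dev);  /* assumed priv type */

        set_bit(vid, np->active_vlans);  /* assumed VLAN_N_VID-sized bitmap */
        /* re-program the receive VLAN filter here */
        return 0;
}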
3000 /* content: static int netdev_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)*/
3001 /* LDV_COMMENT_BEGIN_PREP */
/* [ preprocessor prologue omitted: identical to the DRV_NAME .. PHY_CNT macro block and empty #ifdef/#endif guards expanded above ] */
3072 #ifdef VLAN_SUPPORT
3073 /* LDV_COMMENT_END_PREP */
3074 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "netdev_vlan_rx_kill_vid" */
3075 __be16 var_netdev_vlan_rx_kill_vid_1_p1;
3076 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "netdev_vlan_rx_kill_vid" */
3077 u16 var_netdev_vlan_rx_kill_vid_1_p2;
3078 /* LDV_COMMENT_BEGIN_PREP */
3079 #endif
3080 #ifdef VLAN_SUPPORT
3081 #endif
3082 #ifndef MODULE
3083 #endif
3084 #ifdef ZEROCOPY
3085 #endif
3086 #ifdef VLAN_SUPPORT
3087 #endif
3088 #ifdef ADDR_64BITS
3089 #endif
3090 #if ! defined(final_version)
3091 #endif
3092 #ifdef VLAN_SUPPORT
3093 #endif
3094 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
3095 #endif
3096 #ifndef final_version
3097 #endif
3098 #ifdef VLAN_SUPPORT
3099 #endif
3100 #ifdef VLAN_SUPPORT
3101 #endif
3102 #ifdef VLAN_SUPPORT
3103 #endif
3104 #ifdef VLAN_SUPPORT
3105 #endif
3106 #ifdef CONFIG_PM
3107 #endif
3108 #ifdef CONFIG_PM
3109 #endif
3110 #ifdef MODULE
3111 #endif
3112 /* LDV_COMMENT_END_PREP */
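/* The symmetric removal path, under the same bitmap assumption as the
   add_vid sketch above. */
static int netdev_vlan_rx_kill_vid(struct net_device *dev,
                                   __be16 proto, u16 vid)
{
        struct netdev_private *np = netdev_priv(dev);  /* assumed priv type */

        clear_bit(vid, np->active_vlans);
        /* re-program the receive VLAN filter here */
        return 0;
}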
3113
3114 /** STRUCT: struct type: ethtool_ops, struct name: ethtool_ops **/
3115 /* content: static int check_if_running(struct net_device *dev)*/
3116 /* LDV_COMMENT_BEGIN_PREP */
/* [ preprocessor prologue omitted: identical to the DRV_NAME .. PHY_CNT macro block and empty #ifdef/#endif guards expanded above ] */
3215 /* LDV_COMMENT_END_PREP */
3216 /* LDV_COMMENT_BEGIN_PREP */
3217 #ifdef CONFIG_PM
3218 #endif
3219 #ifdef CONFIG_PM
3220 #endif
3221 #ifdef MODULE
3222 #endif
3223 /* LDV_COMMENT_END_PREP */
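/* An ethtool ->begin hook of this shape conventionally just refuses access
   while the interface is down; a one-line sketch. */
static int check_if_running(struct net_device *dev)
{
        return netif_running(dev) ? 0 : -EINVAL;
}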
3224 /* content: static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)*/
3225 /* LDV_COMMENT_BEGIN_PREP */
/* [ preprocessor prologue omitted: identical to the DRV_NAME .. PHY_CNT macro block and empty #ifdef/#endif guards expanded above ] */
3324 /* LDV_COMMENT_END_PREP */
3325 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "get_drvinfo" */
3326 struct ethtool_drvinfo * var_group4;
3327 /* LDV_COMMENT_BEGIN_PREP */
3328 #ifdef CONFIG_PM
3329 #endif
3330 #ifdef CONFIG_PM
3331 #endif
3332 #ifdef MODULE
3333 #endif
3334 /* LDV_COMMENT_END_PREP */
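/* Typical get_drvinfo body: copy the identification macros defined in the
   prologue into the ethtool buffer (the bus_info line assumes a PCI device
   reachable from the private struct, so it is left as a comment). */
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        /* strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(...)); */
}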
3335 /* content: static int nway_reset(struct net_device *dev)*/
3336 /* LDV_COMMENT_BEGIN_PREP */
/* [ preprocessor prologue omitted: identical to the DRV_NAME .. PHY_CNT macro block and empty #ifdef/#endif guards expanded above ] */
3435 /* LDV_COMMENT_END_PREP */
3436 /* LDV_COMMENT_BEGIN_PREP */
3437 #ifdef CONFIG_PM
3438 #endif
3439 #ifdef CONFIG_PM
3440 #endif
3441 #ifdef MODULE
3442 #endif
3443 /* LDV_COMMENT_END_PREP */
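/* With an mii_if_info in the private struct (the same assumption as in the
   ioctl sketch), nway_reset reduces to one library call. */
static int nway_reset(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);  /* assumed priv type */

        return mii_nway_restart(&np->mii_if);  /* restart autonegotiation */
}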
3444 /* content: static u32 get_link(struct net_device *dev)*/
3445 /* LDV_COMMENT_BEGIN_PREP */
/* [ preprocessor prologue omitted: identical to the DRV_NAME .. PHY_CNT macro block and empty #ifdef/#endif guards expanded above ] */
3544 /* LDV_COMMENT_END_PREP */
3545 /* LDV_COMMENT_BEGIN_PREP */
3546 #ifdef CONFIG_PM
3547 #endif
3548 #ifdef CONFIG_PM
3549 #endif
3550 #ifdef MODULE
3551 #endif
3552 /* LDV_COMMENT_END_PREP */
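/* Under the same assumption, link state comes straight from the mii library. */
static u32 get_link(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);  /* assumed priv type */

        return mii_link_ok(&np->mii_if);  /* nonzero when the link is up */
}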
3553 /* content: static u32 get_msglevel(struct net_device *dev)*/
3554 /* LDV_COMMENT_BEGIN_PREP */
/* [ preprocessor prologue omitted: identical to the DRV_NAME .. PHY_CNT macro block and empty #ifdef/#endif guards expanded above ] */
3653 /* LDV_COMMENT_END_PREP */
3654 /* LDV_COMMENT_BEGIN_PREP */
3655 #ifdef CONFIG_PM
3656 #endif
3657 #ifdef CONFIG_PM
3658 #endif
3659 #ifdef MODULE
3660 #endif
3661 /* LDV_COMMENT_END_PREP */
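/* msglevel handlers are conventionally trivial accessors of a private
   msg_enable field (assumed here, not read from the trace). */
static u32 get_msglevel(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);  /* assumed priv type */

        return np->msg_enable;
}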
3662 /* content: static void set_msglevel(struct net_device *dev, u32 val)*/
3663 /* LDV_COMMENT_BEGIN_PREP */
/* [ preprocessor prologue omitted: identical to the DRV_NAME .. PHY_CNT macro block and empty #ifdef/#endif guards expanded above ] */
3762 /* LDV_COMMENT_END_PREP */
3763 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "set_msglevel" */
3764 u32 var_set_msglevel_26_p1;
3765 /* LDV_COMMENT_BEGIN_PREP */
3766 #ifdef CONFIG_PM
3767 #endif
3768 #ifdef CONFIG_PM
3769 #endif
3770 #ifdef MODULE
3771 #endif
3772 /* LDV_COMMENT_END_PREP */
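/* The matching setter for the assumed msg_enable field. */
static void set_msglevel(struct net_device *dev, u32 val)
{
        struct netdev_private *np = netdev_priv(dev);  /* assumed priv type */

        np->msg_enable = val;
}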
3773 /* content: static int get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd)*/
3774 /* LDV_COMMENT_BEGIN_PREP */
/* [ preprocessor prologue omitted: identical to the DRV_NAME .. PHY_CNT macro block and empty #ifdef/#endif guards expanded above ] */
3873 /* LDV_COMMENT_END_PREP */
3874 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "get_link_ksettings" */
3875 struct ethtool_link_ksettings * var_group5;
3876 /* LDV_COMMENT_BEGIN_PREP */
3877 #ifdef CONFIG_PM
3878 #endif
3879 #ifdef CONFIG_PM
3880 #endif
3881 #ifdef MODULE
3882 #endif
3883 /* LDV_COMMENT_END_PREP */
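/* Sketch: mii-backed drivers can delegate link-ksettings queries to the mii
   library; any locking around the PHY access is elided. */
static int get_link_ksettings(struct net_device *dev,
                              struct ethtool_link_ksettings *cmd)
{
        struct netdev_private *np = netdev_priv(dev);  /* assumed priv type */

        mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
        return 0;
}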
3884 /* content: static int set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd)*/
3885 /* LDV_COMMENT_BEGIN_PREP */
/* [ preprocessor prologue omitted: identical to the DRV_NAME .. PHY_CNT macro block and empty #ifdef/#endif guards expanded above ] */
3984 /* LDV_COMMENT_END_PREP */
3985 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "set_link_ksettings" */
3986 const struct ethtool_link_ksettings * var_set_link_ksettings_22_p1;
3987 /* LDV_COMMENT_BEGIN_PREP */
3988 #ifdef CONFIG_PM
3989 #endif
3990 #ifdef CONFIG_PM
3991 #endif
3992 #ifdef MODULE
3993 #endif
3994 /* LDV_COMMENT_END_PREP */
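/* The symmetric setter, again delegated to the mii library under the same
   assumptions. */
static int set_link_ksettings(struct net_device *dev,
                              const struct ethtool_link_ksettings *cmd)
{
        struct netdev_private *np = netdev_priv(dev);  /* assumed priv type */

        return mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
}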
3995
3996 /** STRUCT: struct type: pci_driver, struct name: starfire_driver **/
3997 /* content: static int starfire_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)*/
3998 /* LDV_COMMENT_BEGIN_PREP */
/* [ preprocessor prologue omitted: identical to the DRV_NAME .. PHY_CNT macro block and empty #ifdef/#endif guards expanded above ] */
4073 /* LDV_COMMENT_END_PREP */
4074 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "starfire_init_one" */
4075 struct pci_dev * var_group6;
4076 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "starfire_init_one" */
4077 const struct pci_device_id * var_starfire_init_one_2_p1;
4078 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "starfire_init_one" */
4079 static int res_starfire_init_one_2;
4080 /* LDV_COMMENT_BEGIN_PREP */
4081 #ifdef VLAN_SUPPORT
4082 #endif
4083 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
4084 #endif
4085 #ifndef final_version
4086 #endif
4087 #ifdef VLAN_SUPPORT
4088 #endif
4089 #ifdef VLAN_SUPPORT
4090 #endif
4091 #ifdef VLAN_SUPPORT
4092 #endif
4093 #ifdef VLAN_SUPPORT
4094 #endif
4095 #ifdef CONFIG_PM
4096 #endif
4097 #ifdef CONFIG_PM
4098 #endif
4099 #ifdef MODULE
4100 #endif
4101 /* LDV_COMMENT_END_PREP */
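/* The canonical PCI probe skeleton for a NIC of this kind; BAR mapping,
   MAC readout, and error unwinding are elided, and the private-struct size
   is an assumption. */
static int starfire_init_one(struct pci_dev *pdev,
                             const struct pci_device_id *ent)
{
        struct net_device *dev;
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;
        dev = alloc_etherdev(sizeof(struct netdev_private));
        if (!dev)
                return -ENOMEM;
        /* map BARs, install netdev_ops/ethtool_ops, read the MAC address */
        pci_set_drvdata(pdev, dev);
        return register_netdev(dev);
}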
4102 /* content: static void starfire_remove_one(struct pci_dev *pdev)*/
4103 /* LDV_COMMENT_BEGIN_PREP */
/* [ preprocessor prologue omitted: identical to the DRV_NAME .. PHY_CNT macro block and empty #ifdef/#endif guards expanded above ] */
4204 /* LDV_COMMENT_END_PREP */
4205 /* LDV_COMMENT_BEGIN_PREP */
4206 #ifdef CONFIG_PM
4207 #endif
4208 #ifdef MODULE
4209 #endif
4210 /* LDV_COMMENT_END_PREP */
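/* Teardown mirrors probe; a sketch of the usual ordering (ring/firmware
   release and iounmap elided). */
static void starfire_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        unregister_netdev(dev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        free_netdev(dev);
}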
4211 /* content: static int starfire_suspend(struct pci_dev *pdev, pm_message_t state)*/
4212 /* LDV_COMMENT_BEGIN_PREP */
/* [ preprocessor prologue omitted: identical to the DRV_NAME .. PHY_CNT macro block and empty #ifdef/#endif guards expanded above ] */
4311 #ifdef CONFIG_PM
4312 /* LDV_COMMENT_END_PREP */
4313 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "starfire_suspend" */
4314 pm_message_t var_starfire_suspend_29_p1;
4315 /* LDV_COMMENT_BEGIN_PREP */
4316 #endif
4317 #ifdef CONFIG_PM
4318 #endif
4319 #ifdef MODULE
4320 #endif
4321 /* LDV_COMMENT_END_PREP */
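/* A sketch of the legacy PCI suspend pattern matching the signature above;
   the interface shutdown step is left as a comment because the driver's
   close path is not visible in the trace. */
static int starfire_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (netif_running(dev)) {
                /* run the driver's close path to quiesce DMA */
        }
        netif_device_detach(dev);
        pci_save_state(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}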
4322 /* content: static int starfire_resume(struct pci_dev *pdev)*/
4323 /* LDV_COMMENT_BEGIN_PREP */
/* [ preprocessor prologue omitted: identical to the DRV_NAME .. PHY_CNT macro block and empty #ifdef/#endif guards expanded above ] */
4422 #ifdef CONFIG_PM
4423 /* LDV_COMMENT_END_PREP */
4424 /* LDV_COMMENT_BEGIN_PREP */
4425 #endif
4426 #ifdef CONFIG_PM
4427 #endif
4428 #ifdef MODULE
4429 #endif
4430 /* LDV_COMMENT_END_PREP */
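/* The CONFIG_PM pair above (starfire_suspend/starfire_resume) would be
   driven like the cases below; a sketch of the assumed pattern, where
   var_group_pci is a hypothetical name for the pci_dev argument that is
   declared elsewhere in the trace:

       ldv_handler_precall();
       starfire_suspend(var_group_pci, var_starfire_suspend_29_p1);
       ldv_handler_precall();
       starfire_resume(var_group_pci);

   var_starfire_suspend_29_p1 is left unconstrained, so the verifier
   explores every possible pm_message_t value. */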
4431
4432 /** CALLBACK SECTION request_irq **/
4433 /* content: static irqreturn_t intr_handler(int irq, void *dev_instance)*/
4434 /* LDV_COMMENT_BEGIN_PREP */
/* driver preamble macros (DRV_NAME "starfire" through PHY_CNT) and empty conditional markers, identical to the block shown above for starfire_resume */
4523 /* LDV_COMMENT_END_PREP */
4524 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "intr_handler" */
4525 int var_intr_handler_10_p0;
4526 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "intr_handler" */
4527 void * var_intr_handler_10_p1;
4528 /* LDV_COMMENT_BEGIN_PREP */
4529 #ifndef final_version
4530 #endif
4531 #ifdef VLAN_SUPPORT
4532 #endif
4533 #ifdef VLAN_SUPPORT
4534 #endif
4535 #ifdef VLAN_SUPPORT
4536 #endif
4537 #ifdef VLAN_SUPPORT
4538 #endif
4539 #ifdef CONFIG_PM
4540 #endif
4541 #ifdef CONFIG_PM
4542 #endif
4543 #ifdef MODULE
4544 #endif
4545 /* LDV_COMMENT_END_PREP */
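/* The two unconstrained variables above are the arguments for the
   request_irq callback. A sketch of the assumed invocation pattern
   (the actual call appears later in the trace; the value 2 for
   interrupt context is an assumption based on the usual LDV model):

       LDV_IN_INTERRUPT = 2;
       intr_handler(var_intr_handler_10_p0, var_intr_handler_10_p1);
       LDV_IN_INTERRUPT = 1;
*/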
4546
4547
4548
4549
4550 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
4551 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
4552 /*============================= VARIABLE INITIALIZING PART =============================*/
4553 LDV_IN_INTERRUPT=1;
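/* LDV_IN_INTERRUPT starts at 1, which marks ordinary process context;
   the model raises it only while an interrupt handler is being
   simulated (an assumption based on the usual LDV convention). */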
4554
4555
4556
4557
4558 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
4559 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
4560 /*============================= FUNCTION CALL SECTION =============================*/
4561 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
4562 ldv_initialize();
4563
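/* ldv_initialize(), ldv_handler_precall() and ldv_check_return_value()
   are stubs supplied by the verification model, not driver code. A
   minimal sketch of what such stubs typically look like (an assumption,
   not the actual LDV model sources):

       void ldv_initialize(void) { ... reset model state ... }
       void ldv_handler_precall(void) { ... per-call bookkeeping ... }
       void ldv_check_return_value(int v) { ... expose v to the checked property ... }
*/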
4564 /** INIT: init_type: ST_MODULE_INIT **/
4565 /* content: static int __init starfire_init (void)*/
4566 /* LDV_COMMENT_BEGIN_PREP */
/* driver preamble macros (DRV_NAME "starfire" through PHY_CNT) and empty conditional markers, identical to the block shown above for starfire_resume */
4669 /* LDV_COMMENT_END_PREP */
4670 /* LDV_COMMENT_FUNCTION_CALL The kernel calls the driver's init function after the module is loaded into the kernel. This function is declared as "MODULE_INIT(function name)". */
4671 ldv_handler_precall();
4672 if(starfire_init())
4673 goto ldv_final;
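/* A non-zero result from starfire_init() aborts the scenario: the
   kernel would refuse to load the module, so no callbacks may run. */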
4674 int ldv_s_netdev_ops_net_device_ops = 0;
4675
4676
4677
4678
4679 int ldv_s_starfire_driver_pci_driver = 0;
4680
4681
4682
4683
4684
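/* The loop below is the heart of the environment model. nondet_int()
   returns an unconstrained value, so the verifier explores every
   interleaving of callback invocations. The two ldv_s_* counters are
   small state machines that order the calls: for netdev_ops, 0 means
   the device is closed (only ndo_open is enabled) and 1 means it is
   open (only ndo_stop is enabled). The loop may only exit once both
   counters are back to 0, i.e. everything opened has been closed. */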
4685 while( nondet_int()
4686 || !(ldv_s_netdev_ops_net_device_ops == 0)
4687 || !(ldv_s_starfire_driver_pci_driver == 0)
4688 ) {
4689
4690 switch(nondet_int()) {
4691
4692 case 0: {
4693
4694 /** STRUCT: struct type: net_device_ops, struct name: netdev_ops **/
4695 if(ldv_s_netdev_ops_net_device_ops==0) {
4696
4697 /* content: static int netdev_open(struct net_device *dev)*/
4698 /* LDV_COMMENT_BEGIN_PREP */
/* driver preamble macros (DRV_NAME "starfire" through PHY_CNT) and empty conditional markers, identical to the block shown above for starfire_resume */
4783 /* LDV_COMMENT_END_PREP */
4784 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_open" from driver structure with callbacks "netdev_ops". Standard function test for a correct return result. */
4785 ldv_handler_precall();
4786 res_netdev_open_5 = netdev_open( var_group1);
4787 ldv_check_return_value(res_netdev_open_5);
4788 if(res_netdev_open_5 < 0)
4789 goto ldv_module_exit;
4790 /* LDV_COMMENT_BEGIN_PREP */
4791 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
4792 #endif
4793 #ifndef final_version
4794 #endif
4795 #ifdef VLAN_SUPPORT
4796 #endif
4797 #ifdef VLAN_SUPPORT
4798 #endif
4799 #ifdef VLAN_SUPPORT
4800 #endif
4801 #ifdef VLAN_SUPPORT
4802 #endif
4803 #ifdef CONFIG_PM
4804 #endif
4805 #ifdef CONFIG_PM
4806 #endif
4807 #ifdef MODULE
4808 #endif
4809 /* LDV_COMMENT_END_PREP */
4810 ldv_s_netdev_ops_net_device_ops++;
4811
4812 }
4813
4814 }
4815
4816 break;
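/* A successful ndo_open advances the counter, enabling ndo_stop in
   case 1; a failure exits through ldv_module_exit instead. */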
4817 case 1: {
4818
4819 /** STRUCT: struct type: net_device_ops, struct name: netdev_ops **/
4820 if(ldv_s_netdev_ops_net_device_ops==1) {
4821
4822 /* content: static int netdev_close(struct net_device *dev)*/
4823 /* LDV_COMMENT_BEGIN_PREP */
/* driver preamble macros (DRV_NAME "starfire" through PHY_CNT) and empty conditional markers, identical to the block shown above for starfire_resume */
4922 /* LDV_COMMENT_END_PREP */
4923 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_stop" from driver structure with callbacks "netdev_ops". Standard function test for a correct return result. */
4924 ldv_handler_precall();
4925 res_netdev_close_28 = netdev_close( var_group1);
4926 ldv_check_return_value(res_netdev_close_28);
4927 if(res_netdev_close_28)
4928 goto ldv_module_exit;
4929 /* LDV_COMMENT_BEGIN_PREP */
4930 #ifdef CONFIG_PM
4931 #endif
4932 #ifdef CONFIG_PM
4933 #endif
4934 #ifdef MODULE
4935 #endif
4936 /* LDV_COMMENT_END_PREP */
4937 ldv_s_netdev_ops_net_device_ops=0;
4938
4939 }
4940
4941 }
4942
4943 break;
4944 case 2: {
4945
4946 /** STRUCT: struct type: net_device_ops, struct name: netdev_ops **/
4947
4948
4949 /* content: static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)*/
4950 /* LDV_COMMENT_BEGIN_PREP */
/* driver preamble macros (DRV_NAME "starfire" through PHY_CNT) and empty conditional markers, identical to the block shown above for starfire_resume */
5037 /* LDV_COMMENT_END_PREP */
5038 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_start_xmit" from driver structure with callbacks "netdev_ops" */
5039 ldv_handler_precall();
5040 start_tx( var_group2, var_group1);
5041 /* LDV_COMMENT_BEGIN_PREP */
5042 #ifndef final_version
5043 #endif
5044 #ifdef VLAN_SUPPORT
5045 #endif
5046 #ifdef VLAN_SUPPORT
5047 #endif
5048 #ifdef VLAN_SUPPORT
5049 #endif
5050 #ifdef VLAN_SUPPORT
5051 #endif
5052 #ifdef CONFIG_PM
5053 #endif
5054 #ifdef CONFIG_PM
5055 #endif
5056 #ifdef MODULE
5057 #endif
5058 /* LDV_COMMENT_END_PREP */
5059
5060
5061
5062
5063 }
5064
5065 break;
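/* Unlike ndo_open/ndo_stop, start_tx is invoked without a guard on the
   open/closed counter, and its netdev_tx_t result is ignored: transmit
   attempts do not gate the state machine. */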
5066 case 3: {
5067
5068 /** STRUCT: struct type: net_device_ops, struct name: netdev_ops **/
5069
5070
5071 /* content: static void tx_timeout(struct net_device *dev)*/
5072 /* LDV_COMMENT_BEGIN_PREP */
/* driver preamble macros (DRV_NAME "starfire" through PHY_CNT) and empty conditional markers, identical to the block shown above for starfire_resume */
5159 /* LDV_COMMENT_END_PREP */
5160 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_tx_timeout" from driver structure with callbacks "netdev_ops" */
5161 ldv_handler_precall();
5162 tx_timeout( var_group1);
5163 /* LDV_COMMENT_BEGIN_PREP */
5164 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
5165 #endif
5166 #ifndef final_version
5167 #endif
5168 #ifdef VLAN_SUPPORT
5169 #endif
5170 #ifdef VLAN_SUPPORT
5171 #endif
5172 #ifdef VLAN_SUPPORT
5173 #endif
5174 #ifdef VLAN_SUPPORT
5175 #endif
5176 #ifdef CONFIG_PM
5177 #endif
5178 #ifdef CONFIG_PM
5179 #endif
5180 #ifdef MODULE
5181 #endif
5182 /* LDV_COMMENT_END_PREP */
5183
5184
5185
5186
5187 }
5188
5189 break;
5190 case 4: {
5191
5192 /** STRUCT: struct type: net_device_ops, struct name: netdev_ops **/
5193
5194
5195 /* content: static struct net_device_stats *get_stats(struct net_device *dev)*/
5196 /* LDV_COMMENT_BEGIN_PREP */
/* driver preamble macros (DRV_NAME "starfire" through PHY_CNT) and empty conditional markers, identical to the block shown above for starfire_resume */
5291 /* LDV_COMMENT_END_PREP */
5292 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_get_stats" from driver structure with callbacks "netdev_ops" */
5293 ldv_handler_precall();
5294 get_stats( var_group1);
5295 /* LDV_COMMENT_BEGIN_PREP */
5296 #ifdef VLAN_SUPPORT
5297 #endif
5298 #ifdef VLAN_SUPPORT
5299 #endif
5300 #ifdef CONFIG_PM
5301 #endif
5302 #ifdef CONFIG_PM
5303 #endif
5304 #ifdef MODULE
5305 #endif
5306 /* LDV_COMMENT_END_PREP */
5307
5308
5309
5310
5311 }
5312
5313 break;
5314 case 5: {
5315
5316 /** STRUCT: struct type: net_device_ops, struct name: netdev_ops **/
5317
5318
5319 /* content: static void set_rx_mode(struct net_device *dev)*/
5320 /* LDV_COMMENT_BEGIN_PREP */
/* driver preamble macros (DRV_NAME "starfire" through PHY_CNT) and empty conditional markers, identical to the block shown above for starfire_resume */
5417 /* LDV_COMMENT_END_PREP */
5418 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_set_rx_mode" from driver structure with callbacks "netdev_ops" */
5419 ldv_handler_precall();
5420 set_rx_mode( var_group1);
5421 /* LDV_COMMENT_BEGIN_PREP */
5422 #ifdef CONFIG_PM
5423 #endif
5424 #ifdef CONFIG_PM
5425 #endif
5426 #ifdef MODULE
5427 #endif
5428 /* LDV_COMMENT_END_PREP */
5429
5430
5431
5432
5433 }
5434
5435 break;
5436 case 6: {
5437
5438 /** STRUCT: struct type: net_device_ops, struct name: netdev_ops **/
5439
5440
5441 /* content: static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)*/
5442 /* LDV_COMMENT_BEGIN_PREP */
/* driver preamble macros (DRV_NAME "starfire" through PHY_CNT) and empty conditional markers, identical to the block shown above for starfire_resume */
5541 /* LDV_COMMENT_END_PREP */
5542 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_do_ioctl" from driver structure with callbacks "netdev_ops" */
5543 ldv_handler_precall();
5544 netdev_ioctl( var_group1, var_group3, var_netdev_ioctl_27_p2);
5545 /* LDV_COMMENT_BEGIN_PREP */
5546 #ifdef CONFIG_PM
5547 #endif
5548 #ifdef CONFIG_PM
5549 #endif
5550 #ifdef MODULE
5551 #endif
5552 /* LDV_COMMENT_END_PREP */
5553
5554
5555
5556
5557 }
5558
5559 break;
5560 case 7: {
5561
5562 /** STRUCT: struct type: net_device_ops, struct name: netdev_ops **/
5563
5564
5565 /* content: static int netdev_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)*/
5566 /* LDV_COMMENT_BEGIN_PREP */
/* driver preamble macros, identical to the starfire_resume block above; the #ifdef VLAN_SUPPORT kept below stays open because this callback is compiled only with VLAN support */
5637 #ifdef VLAN_SUPPORT
5638 /* LDV_COMMENT_END_PREP */
5639 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_vlan_rx_add_vid" from driver structure with callbacks "netdev_ops" */
5640 ldv_handler_precall();
5641 netdev_vlan_rx_add_vid( var_group1, var_netdev_vlan_rx_add_vid_0_p1, var_netdev_vlan_rx_add_vid_0_p2);
5642 /* LDV_COMMENT_BEGIN_PREP */
5643 #endif
5644 #ifdef VLAN_SUPPORT
5645 #endif
5646 #ifndef MODULE
5647 #endif
5648 #ifdef ZEROCOPY
5649 #endif
5650 #ifdef VLAN_SUPPORT
5651 #endif
5652 #ifdef ADDR_64BITS
5653 #endif
5654 #if ! defined(final_version)
5655 #endif
5656 #ifdef VLAN_SUPPORT
5657 #endif
5658 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
5659 #endif
5660 #ifndef final_version
5661 #endif
5662 #ifdef VLAN_SUPPORT
5663 #endif
5664 #ifdef VLAN_SUPPORT
5665 #endif
5666 #ifdef VLAN_SUPPORT
5667 #endif
5668 #ifdef VLAN_SUPPORT
5669 #endif
5670 #ifdef CONFIG_PM
5671 #endif
5672 #ifdef CONFIG_PM
5673 #endif
5674 #ifdef MODULE
5675 #endif
5676 /* LDV_COMMENT_END_PREP */
5677
5678
5679
5680
5681 }
5682
5683 break;
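/* The VLAN callbacks exist only when VLAN_SUPPORT is defined (i.e.
   CONFIG_VLAN_8021Q is enabled), which is why their preprocessor
   context ends inside an open #ifdef VLAN_SUPPORT. */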
5684 case 8: {
5685
5686 /** STRUCT: struct type: net_device_ops, struct name: netdev_ops **/
5687
5688
5689 /* content: static int netdev_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)*/
5690 /* LDV_COMMENT_BEGIN_PREP */
/* driver preamble macros, identical to the starfire_resume block above; the #ifdef VLAN_SUPPORT kept below stays open because this callback is compiled only with VLAN support */
5761 #ifdef VLAN_SUPPORT
5762 /* LDV_COMMENT_END_PREP */
5763 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_vlan_rx_kill_vid" from driver structure with callbacks "netdev_ops" */
5764 ldv_handler_precall();
5765 netdev_vlan_rx_kill_vid( var_group1, var_netdev_vlan_rx_kill_vid_1_p1, var_netdev_vlan_rx_kill_vid_1_p2);
5766 /* LDV_COMMENT_BEGIN_PREP */
5767 #endif
5768 #ifdef VLAN_SUPPORT
5769 #endif
5770 #ifndef MODULE
5771 #endif
5772 #ifdef ZEROCOPY
5773 #endif
5774 #ifdef VLAN_SUPPORT
5775 #endif
5776 #ifdef ADDR_64BITS
5777 #endif
5778 #if ! defined(final_version)
5779 #endif
5780 #ifdef VLAN_SUPPORT
5781 #endif
5782 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
5783 #endif
5784 #ifndef final_version
5785 #endif
5786 #ifdef VLAN_SUPPORT
5787 #endif
5788 #ifdef VLAN_SUPPORT
5789 #endif
5790 #ifdef VLAN_SUPPORT
5791 #endif
5792 #ifdef VLAN_SUPPORT
5793 #endif
5794 #ifdef CONFIG_PM
5795 #endif
5796 #ifdef CONFIG_PM
5797 #endif
5798 #ifdef MODULE
5799 #endif
5800 /* LDV_COMMENT_END_PREP */
5801
5802
5803
5804
5805 }
5806
5807 break;
5808 case 9: {
5809
5810 /** STRUCT: struct type: ethtool_ops, struct name: ethtool_ops **/
5811
5812
5813 /* content: static int check_if_running(struct net_device *dev)*/
5814 /* LDV_COMMENT_BEGIN_PREP */
/* driver preamble macros (DRV_NAME "starfire" through PHY_CNT) and empty conditional markers, identical to the block shown above for starfire_resume */
5913 /* LDV_COMMENT_END_PREP */
5914 /* LDV_COMMENT_FUNCTION_CALL Function from field "begin" from driver structure with callbacks "ethtool_ops" */
5915 ldv_handler_precall();
5916 check_if_running( var_group1);
5917 /* LDV_COMMENT_BEGIN_PREP */
5918 #ifdef CONFIG_PM
5919 #endif
5920 #ifdef CONFIG_PM
5921 #endif
5922 #ifdef MODULE
5923 #endif
5924 /* LDV_COMMENT_END_PREP */
5925
5926
5927
5928
5929 }
5930
5931 break;
5932 case 10: {
5933
5934 /** STRUCT: struct type: ethtool_ops, struct name: ethtool_ops **/
5935
5936
5937 /* content: static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)*/
5938 /* LDV_COMMENT_BEGIN_PREP */
/* driver preamble macros (DRV_NAME "starfire" through PHY_CNT) and empty conditional markers, identical to the block shown above for starfire_resume */
6037 /* LDV_COMMENT_END_PREP */
6038 /* LDV_COMMENT_FUNCTION_CALL Function from field "get_drvinfo" from driver structure with callbacks "ethtool_ops" */
6039 ldv_handler_precall();
6040 get_drvinfo( var_group1, var_group4);
6041 /* LDV_COMMENT_BEGIN_PREP */
6042 #ifdef CONFIG_PM
6043 #endif
6044 #ifdef CONFIG_PM
6045 #endif
6046 #ifdef MODULE
6047 #endif
6048 /* LDV_COMMENT_END_PREP */
6049
6050
6051
6052
6053 }
6054
6055 break;
6056 case 11: {
6057
6058 /** STRUCT: struct type: ethtool_ops, struct name: ethtool_ops **/
6059
6060
6061 /* content: static int nway_reset(struct net_device *dev)*/
6062 /* LDV_COMMENT_BEGIN_PREP */
/* driver preamble macros (DRV_NAME "starfire" through PHY_CNT) and empty conditional markers, identical to the block shown above for starfire_resume */
6161 /* LDV_COMMENT_END_PREP */
6162 /* LDV_COMMENT_FUNCTION_CALL Function from field "nway_reset" from driver structure with callbacks "ethtool_ops" */
6163 ldv_handler_precall();
6164 nway_reset( var_group1);
6165 /* LDV_COMMENT_BEGIN_PREP */
6166 #ifdef CONFIG_PM
6167 #endif
6168 #ifdef CONFIG_PM
6169 #endif
6170 #ifdef MODULE
6171 #endif
6172 /* LDV_COMMENT_END_PREP */
6173
6174
6175
6176
6177 }
6178
6179 break;
6180 case 12: {
6181
6182 /** STRUCT: struct type: ethtool_ops, struct name: ethtool_ops **/
6183
6184
6185 /* content: static u32 get_link(struct net_device *dev)*/
6186 /* LDV_COMMENT_BEGIN_PREP */
/* driver preamble macros (DRV_NAME "starfire" through PHY_CNT) and empty conditional markers, identical to the block shown above for starfire_resume */
6285 /* LDV_COMMENT_END_PREP */
6286 /* LDV_COMMENT_FUNCTION_CALL Function from field "get_link" from driver structure with callbacks "ethtool_ops" */
6287 ldv_handler_precall();
6288 get_link( var_group1);
6289 /* LDV_COMMENT_BEGIN_PREP */
6290 #ifdef CONFIG_PM
6291 #endif
6292 #ifdef CONFIG_PM
6293 #endif
6294 #ifdef MODULE
6295 #endif
6296 /* LDV_COMMENT_END_PREP */
6297
6298
6299
6300
6301 }
6302
6303 break;
6304 case 13: {
6305
6306 /** STRUCT: struct type: ethtool_ops, struct name: ethtool_ops **/
6307
6308
6309 /* content: static u32 get_msglevel(struct net_device *dev)*/
6310 /* LDV_COMMENT_BEGIN_PREP */
/* driver preamble macros (DRV_NAME "starfire" through PHY_CNT) and empty conditional markers, identical to the block shown above for starfire_resume */
6409 /* LDV_COMMENT_END_PREP */
6410 /* LDV_COMMENT_FUNCTION_CALL Function from field "get_msglevel" from driver structure with callbacks "ethtool_ops" */
6411 ldv_handler_precall();
6412 get_msglevel( var_group1);
6413 /* LDV_COMMENT_BEGIN_PREP */
6414 #ifdef CONFIG_PM
6415 #endif
6416 #ifdef CONFIG_PM
6417 #endif
6418 #ifdef MODULE
6419 #endif
6420 /* LDV_COMMENT_END_PREP */
6421
6422
6423
6424
6425 }
6426
6427 break;
6428 case 14: {
6429
6430 /** STRUCT: struct type: ethtool_ops, struct name: ethtool_ops **/
6431
6432
6433 /* content: static void set_msglevel(struct net_device *dev, u32 val)*/
6434 /* LDV_COMMENT_BEGIN_PREP */
/* driver preamble macros (DRV_NAME "starfire" through PHY_CNT) and empty conditional markers, identical to the block shown above for starfire_resume */
6533 /* LDV_COMMENT_END_PREP */
6534 /* LDV_COMMENT_FUNCTION_CALL Function from field "set_msglevel" from driver structure with callbacks "ethtool_ops" */
6535 ldv_handler_precall();
6536 set_msglevel( var_group1, var_set_msglevel_26_p1);
6537 /* LDV_COMMENT_BEGIN_PREP */
6538 #ifdef CONFIG_PM
6539 #endif
6540 #ifdef CONFIG_PM
6541 #endif
6542 #ifdef MODULE
6543 #endif
6544 /* LDV_COMMENT_END_PREP */
6545
6546
6547
6548
6549 }
6550
6551 break;
6552 case 15: {
6553
6554 /** STRUCT: struct type: ethtool_ops, struct name: ethtool_ops **/
6555
6556
6557 /* content: static int get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd)*/
6558 /* LDV_COMMENT_BEGIN_PREP */
6559 #define DRV_NAME "starfire"
6560 #define DRV_VERSION "2.1"
6561 #define DRV_RELDATE "July 6, 2008"
6562 #define HAS_BROKEN_FIRMWARE
6563 #ifdef HAS_BROKEN_FIRMWARE
6564 #define PADDING_MASK 3
6565 #endif
6566 #define ZEROCOPY
6567 #if IS_ENABLED(CONFIG_VLAN_8021Q)
6568 #define VLAN_SUPPORT
6569 #endif
6570 #define PKT_BUF_SZ 1536
6571 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
6572 #else
6573 #endif
6574 #ifdef __sparc__
6575 #define DMA_BURST_SIZE 64
6576 #else
6577 #define DMA_BURST_SIZE 128
6578 #endif
6579 #define RX_RING_SIZE 256
6580 #define TX_RING_SIZE 32
6581 #define DONE_Q_SIZE 1024
6582 #define QUEUE_ALIGN 256
6583 #if RX_RING_SIZE > 256
6584 #define RX_Q_ENTRIES Rx2048QEntries
6585 #else
6586 #define RX_Q_ENTRIES Rx256QEntries
6587 #endif
6588 #define TX_TIMEOUT (2 * HZ)
6589 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
6590 #define ADDR_64BITS
6591 #define netdrv_addr_t __le64
6592 #define cpu_to_dma(x) cpu_to_le64(x)
6593 #define dma_to_cpu(x) le64_to_cpu(x)
6594 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
6595 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
6596 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
6597 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
6598 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
6599 #else
6600 #define netdrv_addr_t __le32
6601 #define cpu_to_dma(x) cpu_to_le32(x)
6602 #define dma_to_cpu(x) le32_to_cpu(x)
6603 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
6604 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
6605 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
6606 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
6607 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
6608 #endif
6609 #define skb_first_frag_len(skb) skb_headlen(skb)
6610 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
6611 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
6612 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
6613 #ifdef VLAN_SUPPORT
6614 #define RxComplType RxComplType3
6615 #else
6616 #define RxComplType RxComplType2
6617 #endif
6618 #ifdef ADDR_64BITS
6619 #define TX_DESC_TYPE TxDescType2
6620 #else
6621 #define TX_DESC_TYPE TxDescType1
6622 #endif
6623 #define TX_DESC_SPACING TxDescSpaceUnlim
6624 #if 0
6625 #endif
6626 #define PHY_CNT 2
6627 #ifdef VLAN_SUPPORT
6628 #endif
6629 #ifdef VLAN_SUPPORT
6630 #endif
6631 #ifdef VLAN_SUPPORT
6632 #endif
6633 #ifndef MODULE
6634 #endif
6635 #ifdef ZEROCOPY
6636 #endif
6637 #ifdef VLAN_SUPPORT
6638 #endif
6639 #ifdef ADDR_64BITS
6640 #endif
6641 #if ! defined(final_version)
6642 #endif
6643 #ifdef VLAN_SUPPORT
6644 #endif
6645 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
6646 #endif
6647 #ifndef final_version
6648 #endif
6649 #ifdef VLAN_SUPPORT
6650 #endif
6651 #ifdef VLAN_SUPPORT
6652 #endif
6653 #ifdef VLAN_SUPPORT
6654 #endif
6655 #ifdef VLAN_SUPPORT
6656 #endif
6657 /* LDV_COMMENT_END_PREP */
6658 /* LDV_COMMENT_FUNCTION_CALL Function from field "get_link_ksettings" from driver structure with callbacks "ethtool_ops" */
6659 ldv_handler_precall();
6660 get_link_ksettings( var_group1, var_group5);
6661 /* LDV_COMMENT_BEGIN_PREP */
6662 #ifdef CONFIG_PM
6663 #endif
6664 #ifdef CONFIG_PM
6665 #endif
6666 #ifdef MODULE
6667 #endif
6668 /* LDV_COMMENT_END_PREP */
6669
6670
6671
6672
6673 }
6674
6675 break;
6676 case 16: {
6677
6678 /** STRUCT: struct type: ethtool_ops, struct name: ethtool_ops **/
6679
6680
6681 /* content: static int set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd)*/
6682 /* LDV_COMMENT_BEGIN_PREP */
6683 #define DRV_NAME "starfire"
6684 #define DRV_VERSION "2.1"
6685 #define DRV_RELDATE "July 6, 2008"
6686 #define HAS_BROKEN_FIRMWARE
6687 #ifdef HAS_BROKEN_FIRMWARE
6688 #define PADDING_MASK 3
6689 #endif
6690 #define ZEROCOPY
6691 #if IS_ENABLED(CONFIG_VLAN_8021Q)
6692 #define VLAN_SUPPORT
6693 #endif
6694 #define PKT_BUF_SZ 1536
6695 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
6696 #else
6697 #endif
6698 #ifdef __sparc__
6699 #define DMA_BURST_SIZE 64
6700 #else
6701 #define DMA_BURST_SIZE 128
6702 #endif
6703 #define RX_RING_SIZE 256
6704 #define TX_RING_SIZE 32
6705 #define DONE_Q_SIZE 1024
6706 #define QUEUE_ALIGN 256
6707 #if RX_RING_SIZE > 256
6708 #define RX_Q_ENTRIES Rx2048QEntries
6709 #else
6710 #define RX_Q_ENTRIES Rx256QEntries
6711 #endif
6712 #define TX_TIMEOUT (2 * HZ)
6713 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
6714 #define ADDR_64BITS
6715 #define netdrv_addr_t __le64
6716 #define cpu_to_dma(x) cpu_to_le64(x)
6717 #define dma_to_cpu(x) le64_to_cpu(x)
6718 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
6719 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
6720 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
6721 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
6722 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
6723 #else
6724 #define netdrv_addr_t __le32
6725 #define cpu_to_dma(x) cpu_to_le32(x)
6726 #define dma_to_cpu(x) le32_to_cpu(x)
6727 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
6728 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
6729 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
6730 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
6731 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
6732 #endif
6733 #define skb_first_frag_len(skb) skb_headlen(skb)
6734 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
6735 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
6736 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
6737 #ifdef VLAN_SUPPORT
6738 #define RxComplType RxComplType3
6739 #else
6740 #define RxComplType RxComplType2
6741 #endif
6742 #ifdef ADDR_64BITS
6743 #define TX_DESC_TYPE TxDescType2
6744 #else
6745 #define TX_DESC_TYPE TxDescType1
6746 #endif
6747 #define TX_DESC_SPACING TxDescSpaceUnlim
6748 #if 0
6749 #endif
6750 #define PHY_CNT 2
6751 #ifdef VLAN_SUPPORT
6752 #endif
6753 #ifdef VLAN_SUPPORT
6754 #endif
6755 #ifdef VLAN_SUPPORT
6756 #endif
6757 #ifndef MODULE
6758 #endif
6759 #ifdef ZEROCOPY
6760 #endif
6761 #ifdef VLAN_SUPPORT
6762 #endif
6763 #ifdef ADDR_64BITS
6764 #endif
6765 #if ! defined(final_version)
6766 #endif
6767 #ifdef VLAN_SUPPORT
6768 #endif
6769 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
6770 #endif
6771 #ifndef final_version
6772 #endif
6773 #ifdef VLAN_SUPPORT
6774 #endif
6775 #ifdef VLAN_SUPPORT
6776 #endif
6777 #ifdef VLAN_SUPPORT
6778 #endif
6779 #ifdef VLAN_SUPPORT
6780 #endif
6781 /* LDV_COMMENT_END_PREP */
6782 /* LDV_COMMENT_FUNCTION_CALL Function from field "set_link_ksettings" from driver structure with callbacks "ethtool_ops" */
6783 ldv_handler_precall();
6784 set_link_ksettings( var_group1, var_set_link_ksettings_22_p1);
6785 /* LDV_COMMENT_BEGIN_PREP */
6786 #ifdef CONFIG_PM
6787 #endif
6788 #ifdef CONFIG_PM
6789 #endif
6790 #ifdef MODULE
6791 #endif
6792 /* LDV_COMMENT_END_PREP */
6793
6794
6795
6796
6797 }
6798
6799 break;
6800 case 17: {
6801
6802 /** STRUCT: struct type: pci_driver, struct name: starfire_driver **/
6803 if(ldv_s_starfire_driver_pci_driver==0) {
6804
6805 /* content: static int starfire_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)*/
6806 /* LDV_COMMENT_BEGIN_PREP */
6807 #define DRV_NAME "starfire"
6808 #define DRV_VERSION "2.1"
6809 #define DRV_RELDATE "July 6, 2008"
6810 #define HAS_BROKEN_FIRMWARE
6811 #ifdef HAS_BROKEN_FIRMWARE
6812 #define PADDING_MASK 3
6813 #endif
6814 #define ZEROCOPY
6815 #if IS_ENABLED(CONFIG_VLAN_8021Q)
6816 #define VLAN_SUPPORT
6817 #endif
6818 #define PKT_BUF_SZ 1536
6819 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
6820 #else
6821 #endif
6822 #ifdef __sparc__
6823 #define DMA_BURST_SIZE 64
6824 #else
6825 #define DMA_BURST_SIZE 128
6826 #endif
6827 #define RX_RING_SIZE 256
6828 #define TX_RING_SIZE 32
6829 #define DONE_Q_SIZE 1024
6830 #define QUEUE_ALIGN 256
6831 #if RX_RING_SIZE > 256
6832 #define RX_Q_ENTRIES Rx2048QEntries
6833 #else
6834 #define RX_Q_ENTRIES Rx256QEntries
6835 #endif
6836 #define TX_TIMEOUT (2 * HZ)
6837 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
6838 #define ADDR_64BITS
6839 #define netdrv_addr_t __le64
6840 #define cpu_to_dma(x) cpu_to_le64(x)
6841 #define dma_to_cpu(x) le64_to_cpu(x)
6842 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
6843 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
6844 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
6845 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
6846 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
6847 #else
6848 #define netdrv_addr_t __le32
6849 #define cpu_to_dma(x) cpu_to_le32(x)
6850 #define dma_to_cpu(x) le32_to_cpu(x)
6851 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
6852 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
6853 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
6854 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
6855 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
6856 #endif
6857 #define skb_first_frag_len(skb) skb_headlen(skb)
6858 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
6859 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
6860 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
6861 #ifdef VLAN_SUPPORT
6862 #define RxComplType RxComplType3
6863 #else
6864 #define RxComplType RxComplType2
6865 #endif
6866 #ifdef ADDR_64BITS
6867 #define TX_DESC_TYPE TxDescType2
6868 #else
6869 #define TX_DESC_TYPE TxDescType1
6870 #endif
6871 #define TX_DESC_SPACING TxDescSpaceUnlim
6872 #if 0
6873 #endif
6874 #define PHY_CNT 2
6875 #ifdef VLAN_SUPPORT
6876 #endif
6877 #ifdef VLAN_SUPPORT
6878 #endif
6879 #ifdef VLAN_SUPPORT
6880 #endif
6881 /* LDV_COMMENT_END_PREP */
6882         /* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "starfire_driver". Standard function test for correct return result. */
6883 res_starfire_init_one_2 = starfire_init_one( var_group6, var_starfire_init_one_2_p1);
6884 ldv_check_return_value(res_starfire_init_one_2);
6885 ldv_check_return_value_probe(res_starfire_init_one_2);
6886 if(res_starfire_init_one_2)
6887 goto ldv_module_exit;
6888 /* LDV_COMMENT_BEGIN_PREP */
6889 #ifdef VLAN_SUPPORT
6890 #endif
6891 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
6892 #endif
6893 #ifndef final_version
6894 #endif
6895 #ifdef VLAN_SUPPORT
6896 #endif
6897 #ifdef VLAN_SUPPORT
6898 #endif
6899 #ifdef VLAN_SUPPORT
6900 #endif
6901 #ifdef VLAN_SUPPORT
6902 #endif
6903 #ifdef CONFIG_PM
6904 #endif
6905 #ifdef CONFIG_PM
6906 #endif
6907 #ifdef MODULE
6908 #endif
6909 /* LDV_COMMENT_END_PREP */
6910 ldv_s_starfire_driver_pci_driver++;
6911
6912 }
6913
6914 }
6915
6916 break;
6917 case 18: {
6918
6919 /** STRUCT: struct type: pci_driver, struct name: starfire_driver **/
6920 if(ldv_s_starfire_driver_pci_driver==1) {
6921
6922 /* content: static void starfire_remove_one(struct pci_dev *pdev)*/
6923 /* LDV_COMMENT_BEGIN_PREP */
6924 #define DRV_NAME "starfire"
6925 #define DRV_VERSION "2.1"
6926 #define DRV_RELDATE "July 6, 2008"
6927 #define HAS_BROKEN_FIRMWARE
6928 #ifdef HAS_BROKEN_FIRMWARE
6929 #define PADDING_MASK 3
6930 #endif
6931 #define ZEROCOPY
6932 #if IS_ENABLED(CONFIG_VLAN_8021Q)
6933 #define VLAN_SUPPORT
6934 #endif
6935 #define PKT_BUF_SZ 1536
6936 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
6937 #else
6938 #endif
6939 #ifdef __sparc__
6940 #define DMA_BURST_SIZE 64
6941 #else
6942 #define DMA_BURST_SIZE 128
6943 #endif
6944 #define RX_RING_SIZE 256
6945 #define TX_RING_SIZE 32
6946 #define DONE_Q_SIZE 1024
6947 #define QUEUE_ALIGN 256
6948 #if RX_RING_SIZE > 256
6949 #define RX_Q_ENTRIES Rx2048QEntries
6950 #else
6951 #define RX_Q_ENTRIES Rx256QEntries
6952 #endif
6953 #define TX_TIMEOUT (2 * HZ)
6954 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
6955 #define ADDR_64BITS
6956 #define netdrv_addr_t __le64
6957 #define cpu_to_dma(x) cpu_to_le64(x)
6958 #define dma_to_cpu(x) le64_to_cpu(x)
6959 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
6960 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
6961 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
6962 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
6963 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
6964 #else
6965 #define netdrv_addr_t __le32
6966 #define cpu_to_dma(x) cpu_to_le32(x)
6967 #define dma_to_cpu(x) le32_to_cpu(x)
6968 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
6969 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
6970 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
6971 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
6972 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
6973 #endif
6974 #define skb_first_frag_len(skb) skb_headlen(skb)
6975 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
6976 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
6977 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
6978 #ifdef VLAN_SUPPORT
6979 #define RxComplType RxComplType3
6980 #else
6981 #define RxComplType RxComplType2
6982 #endif
6983 #ifdef ADDR_64BITS
6984 #define TX_DESC_TYPE TxDescType2
6985 #else
6986 #define TX_DESC_TYPE TxDescType1
6987 #endif
6988 #define TX_DESC_SPACING TxDescSpaceUnlim
6989 #if 0
6990 #endif
6991 #define PHY_CNT 2
6992 #ifdef VLAN_SUPPORT
6993 #endif
6994 #ifdef VLAN_SUPPORT
6995 #endif
6996 #ifdef VLAN_SUPPORT
6997 #endif
6998 #ifndef MODULE
6999 #endif
7000 #ifdef ZEROCOPY
7001 #endif
7002 #ifdef VLAN_SUPPORT
7003 #endif
7004 #ifdef ADDR_64BITS
7005 #endif
7006 #if ! defined(final_version)
7007 #endif
7008 #ifdef VLAN_SUPPORT
7009 #endif
7010 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
7011 #endif
7012 #ifndef final_version
7013 #endif
7014 #ifdef VLAN_SUPPORT
7015 #endif
7016 #ifdef VLAN_SUPPORT
7017 #endif
7018 #ifdef VLAN_SUPPORT
7019 #endif
7020 #ifdef VLAN_SUPPORT
7021 #endif
7022 #ifdef CONFIG_PM
7023 #endif
7024 /* LDV_COMMENT_END_PREP */
7025 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "starfire_driver" */
7026 ldv_handler_precall();
7027 starfire_remove_one( var_group6);
7028 /* LDV_COMMENT_BEGIN_PREP */
7029 #ifdef CONFIG_PM
7030 #endif
7031 #ifdef MODULE
7032 #endif
7033 /* LDV_COMMENT_END_PREP */
7034 ldv_s_starfire_driver_pci_driver=0;
7035
7036 }
7037
7038 }
7039
7040 break;
7041 case 19: {
7042
7043 /** STRUCT: struct type: pci_driver, struct name: starfire_driver **/
7044
7045
7046 /* content: static int starfire_suspend(struct pci_dev *pdev, pm_message_t state)*/
7047 /* LDV_COMMENT_BEGIN_PREP */
7048 #define DRV_NAME "starfire"
7049 #define DRV_VERSION "2.1"
7050 #define DRV_RELDATE "July 6, 2008"
7051 #define HAS_BROKEN_FIRMWARE
7052 #ifdef HAS_BROKEN_FIRMWARE
7053 #define PADDING_MASK 3
7054 #endif
7055 #define ZEROCOPY
7056 #if IS_ENABLED(CONFIG_VLAN_8021Q)
7057 #define VLAN_SUPPORT
7058 #endif
7059 #define PKT_BUF_SZ 1536
7060 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
7061 #else
7062 #endif
7063 #ifdef __sparc__
7064 #define DMA_BURST_SIZE 64
7065 #else
7066 #define DMA_BURST_SIZE 128
7067 #endif
7068 #define RX_RING_SIZE 256
7069 #define TX_RING_SIZE 32
7070 #define DONE_Q_SIZE 1024
7071 #define QUEUE_ALIGN 256
7072 #if RX_RING_SIZE > 256
7073 #define RX_Q_ENTRIES Rx2048QEntries
7074 #else
7075 #define RX_Q_ENTRIES Rx256QEntries
7076 #endif
7077 #define TX_TIMEOUT (2 * HZ)
7078 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
7079 #define ADDR_64BITS
7080 #define netdrv_addr_t __le64
7081 #define cpu_to_dma(x) cpu_to_le64(x)
7082 #define dma_to_cpu(x) le64_to_cpu(x)
7083 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
7084 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
7085 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
7086 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
7087 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
7088 #else
7089 #define netdrv_addr_t __le32
7090 #define cpu_to_dma(x) cpu_to_le32(x)
7091 #define dma_to_cpu(x) le32_to_cpu(x)
7092 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
7093 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
7094 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
7095 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
7096 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
7097 #endif
7098 #define skb_first_frag_len(skb) skb_headlen(skb)
7099 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
7100 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
7101 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
7102 #ifdef VLAN_SUPPORT
7103 #define RxComplType RxComplType3
7104 #else
7105 #define RxComplType RxComplType2
7106 #endif
7107 #ifdef ADDR_64BITS
7108 #define TX_DESC_TYPE TxDescType2
7109 #else
7110 #define TX_DESC_TYPE TxDescType1
7111 #endif
7112 #define TX_DESC_SPACING TxDescSpaceUnlim
7113 #if 0
7114 #endif
7115 #define PHY_CNT 2
7116 #ifdef VLAN_SUPPORT
7117 #endif
7118 #ifdef VLAN_SUPPORT
7119 #endif
7120 #ifdef VLAN_SUPPORT
7121 #endif
7122 #ifndef MODULE
7123 #endif
7124 #ifdef ZEROCOPY
7125 #endif
7126 #ifdef VLAN_SUPPORT
7127 #endif
7128 #ifdef ADDR_64BITS
7129 #endif
7130 #if ! defined(final_version)
7131 #endif
7132 #ifdef VLAN_SUPPORT
7133 #endif
7134 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
7135 #endif
7136 #ifndef final_version
7137 #endif
7138 #ifdef VLAN_SUPPORT
7139 #endif
7140 #ifdef VLAN_SUPPORT
7141 #endif
7142 #ifdef VLAN_SUPPORT
7143 #endif
7144 #ifdef VLAN_SUPPORT
7145 #endif
7146 #ifdef CONFIG_PM
7147 /* LDV_COMMENT_END_PREP */
7148 /* LDV_COMMENT_FUNCTION_CALL Function from field "suspend" from driver structure with callbacks "starfire_driver" */
7149 ldv_handler_precall();
7150 starfire_suspend( var_group6, var_starfire_suspend_29_p1);
7151 /* LDV_COMMENT_BEGIN_PREP */
7152 #endif
7153 #ifdef CONFIG_PM
7154 #endif
7155 #ifdef MODULE
7156 #endif
7157 /* LDV_COMMENT_END_PREP */
7158
7159
7160
7161
7162 }
7163
7164 break;
7165 case 20: {
7166
7167 /** STRUCT: struct type: pci_driver, struct name: starfire_driver **/
7168
7169
7170 /* content: static int starfire_resume(struct pci_dev *pdev)*/
7171 /* LDV_COMMENT_BEGIN_PREP */
7172 #define DRV_NAME "starfire"
7173 #define DRV_VERSION "2.1"
7174 #define DRV_RELDATE "July 6, 2008"
7175 #define HAS_BROKEN_FIRMWARE
7176 #ifdef HAS_BROKEN_FIRMWARE
7177 #define PADDING_MASK 3
7178 #endif
7179 #define ZEROCOPY
7180 #if IS_ENABLED(CONFIG_VLAN_8021Q)
7181 #define VLAN_SUPPORT
7182 #endif
7183 #define PKT_BUF_SZ 1536
7184 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
7185 #else
7186 #endif
7187 #ifdef __sparc__
7188 #define DMA_BURST_SIZE 64
7189 #else
7190 #define DMA_BURST_SIZE 128
7191 #endif
7192 #define RX_RING_SIZE 256
7193 #define TX_RING_SIZE 32
7194 #define DONE_Q_SIZE 1024
7195 #define QUEUE_ALIGN 256
7196 #if RX_RING_SIZE > 256
7197 #define RX_Q_ENTRIES Rx2048QEntries
7198 #else
7199 #define RX_Q_ENTRIES Rx256QEntries
7200 #endif
7201 #define TX_TIMEOUT (2 * HZ)
7202 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
7203 #define ADDR_64BITS
7204 #define netdrv_addr_t __le64
7205 #define cpu_to_dma(x) cpu_to_le64(x)
7206 #define dma_to_cpu(x) le64_to_cpu(x)
7207 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
7208 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
7209 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
7210 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
7211 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
7212 #else
7213 #define netdrv_addr_t __le32
7214 #define cpu_to_dma(x) cpu_to_le32(x)
7215 #define dma_to_cpu(x) le32_to_cpu(x)
7216 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
7217 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
7218 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
7219 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
7220 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
7221 #endif
7222 #define skb_first_frag_len(skb) skb_headlen(skb)
7223 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
7224 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
7225 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
7226 #ifdef VLAN_SUPPORT
7227 #define RxComplType RxComplType3
7228 #else
7229 #define RxComplType RxComplType2
7230 #endif
7231 #ifdef ADDR_64BITS
7232 #define TX_DESC_TYPE TxDescType2
7233 #else
7234 #define TX_DESC_TYPE TxDescType1
7235 #endif
7236 #define TX_DESC_SPACING TxDescSpaceUnlim
7237 #if 0
7238 #endif
7239 #define PHY_CNT 2
7240 #ifdef VLAN_SUPPORT
7241 #endif
7242 #ifdef VLAN_SUPPORT
7243 #endif
7244 #ifdef VLAN_SUPPORT
7245 #endif
7246 #ifndef MODULE
7247 #endif
7248 #ifdef ZEROCOPY
7249 #endif
7250 #ifdef VLAN_SUPPORT
7251 #endif
7252 #ifdef ADDR_64BITS
7253 #endif
7254 #if ! defined(final_version)
7255 #endif
7256 #ifdef VLAN_SUPPORT
7257 #endif
7258 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
7259 #endif
7260 #ifndef final_version
7261 #endif
7262 #ifdef VLAN_SUPPORT
7263 #endif
7264 #ifdef VLAN_SUPPORT
7265 #endif
7266 #ifdef VLAN_SUPPORT
7267 #endif
7268 #ifdef VLAN_SUPPORT
7269 #endif
7270 #ifdef CONFIG_PM
7271 /* LDV_COMMENT_END_PREP */
7272 /* LDV_COMMENT_FUNCTION_CALL Function from field "resume" from driver structure with callbacks "starfire_driver" */
7273 ldv_handler_precall();
7274 starfire_resume( var_group6);
7275 /* LDV_COMMENT_BEGIN_PREP */
7276 #endif
7277 #ifdef CONFIG_PM
7278 #endif
7279 #ifdef MODULE
7280 #endif
7281 /* LDV_COMMENT_END_PREP */
7282
7283
7284
7285
7286 }
7287
7288 break;
7289 case 21: {
7290
7291 /** CALLBACK SECTION request_irq **/
7292 LDV_IN_INTERRUPT=2;
7293
7294 /* content: static irqreturn_t intr_handler(int irq, void *dev_instance)*/
7295 /* LDV_COMMENT_BEGIN_PREP */
7296 #define DRV_NAME "starfire"
7297 #define DRV_VERSION "2.1"
7298 #define DRV_RELDATE "July 6, 2008"
7299 #define HAS_BROKEN_FIRMWARE
7300 #ifdef HAS_BROKEN_FIRMWARE
7301 #define PADDING_MASK 3
7302 #endif
7303 #define ZEROCOPY
7304 #if IS_ENABLED(CONFIG_VLAN_8021Q)
7305 #define VLAN_SUPPORT
7306 #endif
7307 #define PKT_BUF_SZ 1536
7308 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
7309 #else
7310 #endif
7311 #ifdef __sparc__
7312 #define DMA_BURST_SIZE 64
7313 #else
7314 #define DMA_BURST_SIZE 128
7315 #endif
7316 #define RX_RING_SIZE 256
7317 #define TX_RING_SIZE 32
7318 #define DONE_Q_SIZE 1024
7319 #define QUEUE_ALIGN 256
7320 #if RX_RING_SIZE > 256
7321 #define RX_Q_ENTRIES Rx2048QEntries
7322 #else
7323 #define RX_Q_ENTRIES Rx256QEntries
7324 #endif
7325 #define TX_TIMEOUT (2 * HZ)
7326 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
7327 #define ADDR_64BITS
7328 #define netdrv_addr_t __le64
7329 #define cpu_to_dma(x) cpu_to_le64(x)
7330 #define dma_to_cpu(x) le64_to_cpu(x)
7331 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
7332 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
7333 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
7334 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
7335 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
7336 #else
7337 #define netdrv_addr_t __le32
7338 #define cpu_to_dma(x) cpu_to_le32(x)
7339 #define dma_to_cpu(x) le32_to_cpu(x)
7340 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
7341 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
7342 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
7343 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
7344 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
7345 #endif
7346 #define skb_first_frag_len(skb) skb_headlen(skb)
7347 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
7348 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
7349 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
7350 #ifdef VLAN_SUPPORT
7351 #define RxComplType RxComplType3
7352 #else
7353 #define RxComplType RxComplType2
7354 #endif
7355 #ifdef ADDR_64BITS
7356 #define TX_DESC_TYPE TxDescType2
7357 #else
7358 #define TX_DESC_TYPE TxDescType1
7359 #endif
7360 #define TX_DESC_SPACING TxDescSpaceUnlim
7361 #if 0
7362 #endif
7363 #define PHY_CNT 2
7364 #ifdef VLAN_SUPPORT
7365 #endif
7366 #ifdef VLAN_SUPPORT
7367 #endif
7368 #ifdef VLAN_SUPPORT
7369 #endif
7370 #ifndef MODULE
7371 #endif
7372 #ifdef ZEROCOPY
7373 #endif
7374 #ifdef VLAN_SUPPORT
7375 #endif
7376 #ifdef ADDR_64BITS
7377 #endif
7378 #if ! defined(final_version)
7379 #endif
7380 #ifdef VLAN_SUPPORT
7381 #endif
7382 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
7383 #endif
7384 /* LDV_COMMENT_END_PREP */
7385 /* LDV_COMMENT_FUNCTION_CALL */
7386 ldv_handler_precall();
7387 intr_handler( var_intr_handler_10_p0, var_intr_handler_10_p1);
7388 /* LDV_COMMENT_BEGIN_PREP */
7389 #ifndef final_version
7390 #endif
7391 #ifdef VLAN_SUPPORT
7392 #endif
7393 #ifdef VLAN_SUPPORT
7394 #endif
7395 #ifdef VLAN_SUPPORT
7396 #endif
7397 #ifdef VLAN_SUPPORT
7398 #endif
7399 #ifdef CONFIG_PM
7400 #endif
7401 #ifdef CONFIG_PM
7402 #endif
7403 #ifdef MODULE
7404 #endif
7405 /* LDV_COMMENT_END_PREP */
7406 LDV_IN_INTERRUPT=1;
7407
7408
7409
7410 }
7411
7412 break;
7413 default: break;
7414
7415 }
7416
7417 }
7418
7419 ldv_module_exit:
7420
7421 /** INIT: init_type: ST_MODULE_EXIT **/
7422 /* content: static void __exit starfire_cleanup (void)*/
7423 /* LDV_COMMENT_BEGIN_PREP */
7424 #define DRV_NAME "starfire"
7425 #define DRV_VERSION "2.1"
7426 #define DRV_RELDATE "July 6, 2008"
7427 #define HAS_BROKEN_FIRMWARE
7428 #ifdef HAS_BROKEN_FIRMWARE
7429 #define PADDING_MASK 3
7430 #endif
7431 #define ZEROCOPY
7432 #if IS_ENABLED(CONFIG_VLAN_8021Q)
7433 #define VLAN_SUPPORT
7434 #endif
7435 #define PKT_BUF_SZ 1536
7436 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
7437 #else
7438 #endif
7439 #ifdef __sparc__
7440 #define DMA_BURST_SIZE 64
7441 #else
7442 #define DMA_BURST_SIZE 128
7443 #endif
7444 #define RX_RING_SIZE 256
7445 #define TX_RING_SIZE 32
7446 #define DONE_Q_SIZE 1024
7447 #define QUEUE_ALIGN 256
7448 #if RX_RING_SIZE > 256
7449 #define RX_Q_ENTRIES Rx2048QEntries
7450 #else
7451 #define RX_Q_ENTRIES Rx256QEntries
7452 #endif
7453 #define TX_TIMEOUT (2 * HZ)
7454 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
7455 #define ADDR_64BITS
7456 #define netdrv_addr_t __le64
7457 #define cpu_to_dma(x) cpu_to_le64(x)
7458 #define dma_to_cpu(x) le64_to_cpu(x)
7459 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
7460 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
7461 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
7462 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
7463 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
7464 #else
7465 #define netdrv_addr_t __le32
7466 #define cpu_to_dma(x) cpu_to_le32(x)
7467 #define dma_to_cpu(x) le32_to_cpu(x)
7468 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
7469 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
7470 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
7471 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
7472 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
7473 #endif
7474 #define skb_first_frag_len(skb) skb_headlen(skb)
7475 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
7476 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
7477 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
7478 #ifdef VLAN_SUPPORT
7479 #define RxComplType RxComplType3
7480 #else
7481 #define RxComplType RxComplType2
7482 #endif
7483 #ifdef ADDR_64BITS
7484 #define TX_DESC_TYPE TxDescType2
7485 #else
7486 #define TX_DESC_TYPE TxDescType1
7487 #endif
7488 #define TX_DESC_SPACING TxDescSpaceUnlim
7489 #if 0
7490 #endif
7491 #define PHY_CNT 2
7492 #ifdef VLAN_SUPPORT
7493 #endif
7494 #ifdef VLAN_SUPPORT
7495 #endif
7496 #ifdef VLAN_SUPPORT
7497 #endif
7498 #ifndef MODULE
7499 #endif
7500 #ifdef ZEROCOPY
7501 #endif
7502 #ifdef VLAN_SUPPORT
7503 #endif
7504 #ifdef ADDR_64BITS
7505 #endif
7506 #if ! defined(final_version)
7507 #endif
7508 #ifdef VLAN_SUPPORT
7509 #endif
7510 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
7511 #endif
7512 #ifndef final_version
7513 #endif
7514 #ifdef VLAN_SUPPORT
7515 #endif
7516 #ifdef VLAN_SUPPORT
7517 #endif
7518 #ifdef VLAN_SUPPORT
7519 #endif
7520 #ifdef VLAN_SUPPORT
7521 #endif
7522 #ifdef CONFIG_PM
7523 #endif
7524 #ifdef CONFIG_PM
7525 #endif
7526 #ifdef MODULE
7527 #endif
7528 /* LDV_COMMENT_END_PREP */
7529 /* LDV_COMMENT_FUNCTION_CALL The kernel calls the driver release function before the driver is unloaded. This function is declared with module_exit(function name). */
7530 ldv_handler_precall();
7531 starfire_cleanup();
7532
7533 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver is unloaded. */
7534 ldv_final: ldv_check_final_state();
7535
7536 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
7537 return;
7538
7539 }
7540 #endif
7541
7542 /* LDV_COMMENT_END_MAIN */
7543
7544 #line 10 "/home/ldvuser/ldv/ref_launches/work/current--X--drivers--X--defaultlinux-4.10-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.10-rc1.tar.xz/csd_deg_dscv/12798/dscv_tempdir/dscv/ri/331_1a/drivers/net/ethernet/adaptec/starfire.o.c.prepared" 1
2 #include <verifier/rcv.h>
3 #include <kernel-model/ERR.inc>
4
5 int LDV_DMA_MAP_CALLS = 0;
6
7 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_map_page') maps page */
8 void ldv_dma_map_page(void) {
9 /* LDV_COMMENT_ASSERT Check that the previous dma_mapping call was checked */
10 ldv_assert(LDV_DMA_MAP_CALLS == 0);
11 /* LDV_COMMENT_CHANGE_STATE Increase dma_mapping counter */
12 LDV_DMA_MAP_CALLS++;
13 }
14
15 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_mapping_error') unmaps page */
16 void ldv_dma_mapping_error(void) {
17 /* LDV_COMMENT_ASSERT There must be an unchecked dma_mapping call to verify */
18 ldv_assert(LDV_DMA_MAP_CALLS != 0);
19 /* LDV_COMMENT_CHANGE_STATE Decrease dma_mapping counter: the pending mapping has now been checked */
20 LDV_DMA_MAP_CALLS--;
21 }
22
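/* A hedged usage sketch (not part of the generated model) of how the counter
   above encodes the rule "every DMA mapping must be checked before the next
   one is created". ldv_dma_map_page() stands in for dma_map_page() and
   friends; ldv_dma_mapping_error() stands in for dma_mapping_error(). */
static void ldv_example_checked_mapping(void)
{
	ldv_dma_map_page();      /* LDV_DMA_MAP_CALLS: 0 -> 1 */
	ldv_dma_mapping_error(); /* 1 -> 0: the mapping was checked */
}

static void ldv_example_unchecked_mapping(void)
{
	ldv_dma_map_page();      /* 0 -> 1 */
	ldv_dma_map_page();      /* ldv_assert fires: previous mapping never checked */
}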
23 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that all module reference counters have their initial values at the end */
24 void ldv_check_final_state(void) {
25 /* LDV_COMMENT_ASSERT All incremented module reference counters should be decremented before module unloading */
26 ldv_assert(LDV_DMA_MAP_CALLS == 0);
27 }
1 #ifndef _LDV_RCV_H_
2 #define _LDV_RCV_H_
3
4 /* If expr evaluates to zero, ldv_assert() causes the program to reach the error
5 label like the standard assert(). */
6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
7
8 /* The error label wrapper. It is used because some static verifiers (like
9 BLAST) do not accept multiple error labels throughout a program. */
10 static inline void ldv_error(void)
11 {
12 LDV_ERROR: goto LDV_ERROR;
13 }
14
15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
16 avoided by verifiers. */
17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
18
19 /* Infinite loop that causes verifiers to skip such paths. */
20 static inline void ldv_stop(void) {
21 LDV_STOP: goto LDV_STOP;
22 }
23
24 /* Special nondeterministic functions. */
25 int ldv_undef_int(void);
26 void *ldv_undef_ptr(void);
27 unsigned long ldv_undef_ulong(void);
28 long ldv_undef_long(void);
29 /* Return nondeterministic negative integer number. */
30 static inline int ldv_undef_int_negative(void)
31 {
32 int ret = ldv_undef_int();
33
34 ldv_assume(ret < 0);
35
36 return ret;
37 }
38 /* Return nondeterministic nonpositive integer number. */
39 static inline int ldv_undef_int_nonpositive(void)
40 {
41 int ret = ldv_undef_int();
42
43 ldv_assume(ret <= 0);
44
45 return ret;
46 }
47
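/* A hedged sketch (names here are illustrative, not from this header) of how
   these stubs are typically used: ldv_undef_int() makes the verifier explore
   every possible return value, and ldv_assume() prunes values that cannot
   occur, e.g. a status code that is either success or a negative errno. */
static inline int ldv_example_status(void)
{
	int ret = ldv_undef_int();

	/* keep only the ret == 0 (success) and ret < 0 (failure) paths */
	ldv_assume(ret <= 0);

	return ret;
}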
48 /* Add an explicit model for the __builtin_expect GCC built-in. Without the
49 model, its return value would be treated as nondeterministic by verifiers. */
50 static inline long __builtin_expect(long exp, long c)
51 {
52 return exp;
53 }
54
55 /* This function causes the program to exit abnormally. GCC implements this
56 function by using a target-dependent mechanism (such as intentionally executing
57 an illegal instruction) or by calling abort. The mechanism used may vary from
58 release to release so you should not rely on any particular implementation.
59 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
60 static inline void __builtin_trap(void)
61 {
62 ldv_assert(0);
63 }
64
65 /* The constant is for simulating an error of ldv_undef_ptr() function. */
66 #define LDV_PTR_MAX 2012
67
68 #endif /* _LDV_RCV_H_ */
1 /*
2 * device.h - generic, centralized driver model
3 *
4 * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
5 * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
6 * Copyright (c) 2008-2009 Novell Inc.
7 *
8 * This file is released under the GPLv2
9 *
10 * See Documentation/driver-model/ for more information.
11 */
12
13 #ifndef _DEVICE_H_
14 #define _DEVICE_H_
15
16 #include <linux/ioport.h>
17 #include <linux/kobject.h>
18 #include <linux/klist.h>
19 #include <linux/list.h>
20 #include <linux/lockdep.h>
21 #include <linux/compiler.h>
22 #include <linux/types.h>
23 #include <linux/mutex.h>
24 #include <linux/pinctrl/devinfo.h>
25 #include <linux/pm.h>
26 #include <linux/atomic.h>
27 #include <linux/ratelimit.h>
28 #include <linux/uidgid.h>
29 #include <linux/gfp.h>
30 #include <asm/device.h>
31
32 struct device;
33 struct device_private;
34 struct device_driver;
35 struct driver_private;
36 struct module;
37 struct class;
38 struct subsys_private;
39 struct bus_type;
40 struct device_node;
41 struct fwnode_handle;
42 struct iommu_ops;
43 struct iommu_group;
44 struct iommu_fwspec;
45
46 struct bus_attribute {
47 struct attribute attr;
48 ssize_t (*show)(struct bus_type *bus, char *buf);
49 ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
50 };
51
52 #define BUS_ATTR(_name, _mode, _show, _store) \
53 struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store)
54 #define BUS_ATTR_RW(_name) \
55 struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
56 #define BUS_ATTR_RO(_name) \
57 struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
58
59 extern int __must_check bus_create_file(struct bus_type *,
60 struct bus_attribute *);
61 extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
62
63 /**
64 * struct bus_type - The bus type of the device
65 *
66 * @name: The name of the bus.
67 * @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id).
68 * @dev_root: Default device to use as the parent.
69 * @dev_attrs: Default attributes of the devices on the bus.
70 * @bus_groups: Default attributes of the bus.
71 * @dev_groups: Default attributes of the devices on the bus.
72 * @drv_groups: Default attributes of the device drivers on the bus.
73 * @match: Called, perhaps multiple times, whenever a new device or driver
74 * is added for this bus. It should return a positive value if the
75 * given device can be handled by the given driver and zero
76 *		otherwise. It may also return an error code if it cannot
77 *		determine whether the driver supports the device. In case of
78 * -EPROBE_DEFER it will queue the device for deferred probing.
79 * @uevent:	Called when a device is added, removed, or on other events
80 *		that generate uevents, to add environment variables.
81 * @probe:	Called when a new device or driver is added to this bus; invokes
82 *		the matching driver's probe to initialize the matched device.
83 * @remove:	Called when a device is removed from this bus.
84 * @shutdown: Called at shut-down time to quiesce the device.
85 *
86 * @online: Called to put the device back online (after offlining it).
87 * @offline: Called to put the device offline for hot-removal. May fail.
88 *
89 * @suspend: Called when a device on this bus wants to go to sleep mode.
90 * @resume: Called to bring a device on this bus out of sleep mode.
91 * @pm: Power management operations of this bus, callback the specific
92 * device driver's pm-ops.
93 * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
94 * driver implementations to a bus and allow the driver to do
95 * bus-specific setup
96 * @p: The private data of the driver core, only the driver core can
97 * touch this.
98 * @lock_key: Lock class key for use by the lock validator
99 *
100 * A bus is a channel between the processor and one or more devices. For the
101 * purposes of the device model, all devices are connected via a bus, even if
102 * it is an internal, virtual, "platform" bus. Buses can plug into each other.
103 * A USB controller is usually a PCI device, for example. The device model
104 * represents the actual connections between buses and the devices they control.
105 * A bus is represented by the bus_type structure. It contains the name, the
106 * default attributes, the bus' methods, PM operations, and the driver core's
107 * private data.
108 */
109 struct bus_type {
110 const char *name;
111 const char *dev_name;
112 struct device *dev_root;
113 struct device_attribute *dev_attrs; /* use dev_groups instead */
114 const struct attribute_group **bus_groups;
115 const struct attribute_group **dev_groups;
116 const struct attribute_group **drv_groups;
117
118 int (*match)(struct device *dev, struct device_driver *drv);
119 int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
120 int (*probe)(struct device *dev);
121 int (*remove)(struct device *dev);
122 void (*shutdown)(struct device *dev);
123
124 int (*online)(struct device *dev);
125 int (*offline)(struct device *dev);
126
127 int (*suspend)(struct device *dev, pm_message_t state);
128 int (*resume)(struct device *dev);
129
130 const struct dev_pm_ops *pm;
131
132 const struct iommu_ops *iommu_ops;
133
134 struct subsys_private *p;
135 struct lock_class_key lock_key;
136 };
137
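/* A hedged sketch (not part of this header) of registering a minimal bus;
   the "foo" names are hypothetical and the usual <linux/init.h> and
   <linux/string.h> includes are assumed. match() returns nonzero when the
   driver can handle the device, as documented above. */
static int foo_bus_match(struct device *dev, struct device_driver *drv)
{
	/* a trivial policy: bind when the device and driver names agree */
	return strcmp(dev_name(dev), drv->name) == 0;
}

static struct bus_type foo_bus_type = {
	.name  = "foo",
	.match = foo_bus_match,
};

static int __init foo_bus_init(void)
{
	return bus_register(&foo_bus_type); /* paired with bus_unregister() */
}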
138 extern int __must_check bus_register(struct bus_type *bus);
139
140 extern void bus_unregister(struct bus_type *bus);
141
142 extern int __must_check bus_rescan_devices(struct bus_type *bus);
143
144 /* iterator helpers for buses */
145 struct subsys_dev_iter {
146 struct klist_iter ki;
147 const struct device_type *type;
148 };
149 void subsys_dev_iter_init(struct subsys_dev_iter *iter,
150 struct bus_type *subsys,
151 struct device *start,
152 const struct device_type *type);
153 struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
154 void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
155
156 int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
157 int (*fn)(struct device *dev, void *data));
158 struct device *bus_find_device(struct bus_type *bus, struct device *start,
159 void *data,
160 int (*match)(struct device *dev, void *data));
161 struct device *bus_find_device_by_name(struct bus_type *bus,
162 struct device *start,
163 const char *name);
164 struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id,
165 struct device *hint);
166 int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
167 void *data, int (*fn)(struct device_driver *, void *));
168 void bus_sort_breadthfirst(struct bus_type *bus,
169 int (*compare)(const struct device *a,
170 const struct device *b));
171 /*
172 * Bus notifiers: Get notified of addition/removal of devices
173 * and binding/unbinding of drivers to devices.
174 * In the long run, it should be a replacement for the platform
175 * notify hooks.
176 */
177 struct notifier_block;
178
179 extern int bus_register_notifier(struct bus_type *bus,
180 struct notifier_block *nb);
181 extern int bus_unregister_notifier(struct bus_type *bus,
182 struct notifier_block *nb);
183
184 /* All notifiers below get called with the target struct device *
185 * as an argument. Note that those functions are likely to be called
186 * with the device lock held in the core, so be careful.
187 */
188 #define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */
189 #define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device to be removed */
190 #define BUS_NOTIFY_REMOVED_DEVICE 0x00000003 /* device removed */
191 #define BUS_NOTIFY_BIND_DRIVER 0x00000004 /* driver about to be
192 bound */
193 #define BUS_NOTIFY_BOUND_DRIVER 0x00000005 /* driver bound to device */
194 #define BUS_NOTIFY_UNBIND_DRIVER 0x00000006 /* driver about to be
195 unbound */
196 #define BUS_NOTIFY_UNBOUND_DRIVER 0x00000007 /* driver is unbound
197 from the device */
198 #define BUS_NOTIFY_DRIVER_NOT_BOUND 0x00000008 /* driver fails to be bound */
199
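/* A hedged sketch (hypothetical "foo" names, assuming <linux/notifier.h>) of
   a bus notifier: notifier_call receives one of the BUS_NOTIFY_* actions
   above and the affected device as data. */
static int foo_bus_notify(struct notifier_block *nb, unsigned long action,
			  void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE)
		dev_info(dev, "device added to bus\n");

	return NOTIFY_DONE;
}

static struct notifier_block foo_bus_nb = {
	.notifier_call = foo_bus_notify,
};
/* registered with bus_register_notifier(&foo_bus_type, &foo_bus_nb) */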
200 extern struct kset *bus_get_kset(struct bus_type *bus);
201 extern struct klist *bus_get_device_klist(struct bus_type *bus);
202
203 /**
204 * enum probe_type - device driver probe type to try
205 * Device drivers may opt in for special handling of their
206 * respective probe routines. This tells the core what to
207 * expect and prefer.
208 *
209 * @PROBE_DEFAULT_STRATEGY: Used by drivers that work equally well
210 * whether probed synchronously or asynchronously.
211 * @PROBE_PREFER_ASYNCHRONOUS: Drivers for "slow" devices whose
212 *	probing order is not essential for booting the system may
213 * opt into executing their probes asynchronously.
214 * @PROBE_FORCE_SYNCHRONOUS: Use this to annotate drivers that need
215 * their probe routines to run synchronously with driver and
216 * device registration (with the exception of -EPROBE_DEFER
217 * handling - re-probing always ends up being done asynchronously).
218 *
219 * Note that the end goal is to switch the kernel to use asynchronous
220 * probing by default, so annotating drivers with
221 * %PROBE_PREFER_ASYNCHRONOUS is a temporary measure that allows us
222 * to speed up boot process while we are validating the rest of the
223 * drivers.
224 */
225 enum probe_type {
226 PROBE_DEFAULT_STRATEGY,
227 PROBE_PREFER_ASYNCHRONOUS,
228 PROBE_FORCE_SYNCHRONOUS,
229 };
230
231 /**
232 * struct device_driver - The basic device driver structure
233 * @name: Name of the device driver.
234 * @bus: The bus which the device of this driver belongs to.
235 * @owner: The module owner.
236 * @mod_name: Used for built-in modules.
237 * @suppress_bind_attrs: Disables bind/unbind via sysfs.
238 * @probe_type: Type of the probe (synchronous or asynchronous) to use.
239 * @of_match_table: The open firmware table.
240 * @acpi_match_table: The ACPI match table.
241 * @probe: Called to query the existence of a specific device,
242 * whether this driver can work with it, and bind the driver
243 * to a specific device.
244 * @remove: Called when the device is removed from the system to
245 * unbind a device from this driver.
246 * @shutdown: Called at shut-down time to quiesce the device.
247 * @suspend: Called to put the device to sleep mode. Usually to a
248 * low power state.
249 * @resume: Called to bring a device from sleep mode.
250 * @groups: Default attributes that get created by the driver core
251 * automatically.
252 * @pm: Power management operations of the device which matched
253 * this driver.
254 * @p: Driver core's private data, no one other than the driver
255 * core can touch this.
256 *
257 * The device driver-model tracks all of the drivers known to the system.
258 * The main reason for this tracking is to enable the driver core to match
259 * up drivers with new devices. Once drivers are known objects within the
260 * system, however, a number of other things become possible. Device drivers
261 * can export information and configuration variables that are independent
262 * of any specific device.
263 */
264 struct device_driver {
265 const char *name;
266 struct bus_type *bus;
267
268 struct module *owner;
269 const char *mod_name; /* used for built-in modules */
270
271 bool suppress_bind_attrs; /* disables bind/unbind via sysfs */
272 enum probe_type probe_type;
273
274 const struct of_device_id *of_match_table;
275 const struct acpi_device_id *acpi_match_table;
276
277 int (*probe) (struct device *dev);
278 int (*remove) (struct device *dev);
279 void (*shutdown) (struct device *dev);
280 int (*suspend) (struct device *dev, pm_message_t state);
281 int (*resume) (struct device *dev);
282 const struct attribute_group **groups;
283
284 const struct dev_pm_ops *pm;
285
286 struct driver_private *p;
287 };
288
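/* A hedged sketch (hypothetical "foo" names; THIS_MODULE assumes
   <linux/module.h>) of filling in and registering a bare device_driver.
   Bus-specific wrappers such as pci_driver embed this structure instead of
   using it directly. */
extern struct bus_type foo_bus_type; /* e.g. registered as sketched above */

static int foo_probe(struct device *dev)
{
	return 0; /* 0 binds the driver to the device */
}

static struct device_driver foo_driver = {
	.name       = "foo",
	.bus        = &foo_bus_type,
	.owner      = THIS_MODULE,
	.probe      = foo_probe,
	.probe_type = PROBE_PREFER_ASYNCHRONOUS,
};
/* driver_register(&foo_driver); ... driver_unregister(&foo_driver); */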
289
290 extern int __must_check driver_register(struct device_driver *drv);
291 extern void driver_unregister(struct device_driver *drv);
292
293 extern struct device_driver *driver_find(const char *name,
294 struct bus_type *bus);
295 extern int driver_probe_done(void);
296 extern void wait_for_device_probe(void);
297
298
299 /* sysfs interface for exporting driver attributes */
300
301 struct driver_attribute {
302 struct attribute attr;
303 ssize_t (*show)(struct device_driver *driver, char *buf);
304 ssize_t (*store)(struct device_driver *driver, const char *buf,
305 size_t count);
306 };
307
308 #define DRIVER_ATTR(_name, _mode, _show, _store) \
309 struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store)
310 #define DRIVER_ATTR_RW(_name) \
311 struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
312 #define DRIVER_ATTR_RO(_name) \
313 struct driver_attribute driver_attr_##_name = __ATTR_RO(_name)
314 #define DRIVER_ATTR_WO(_name) \
315 struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)
316
317 extern int __must_check driver_create_file(struct device_driver *driver,
318 const struct driver_attribute *attr);
319 extern void driver_remove_file(struct device_driver *driver,
320 const struct driver_attribute *attr);
321
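/* A hedged sketch of a read-only driver attribute: DRIVER_ATTR_RO(version)
   expects a version_show() with exactly this signature and creates
   driver_attr_version. The "2.1" string and foo_driver are illustrative. */
static ssize_t version_show(struct device_driver *drv, char *buf)
{
	return sprintf(buf, "2.1\n");
}
static DRIVER_ATTR_RO(version);
/* exposed via driver_create_file(&foo_driver, &driver_attr_version) */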
322 extern int __must_check driver_for_each_device(struct device_driver *drv,
323 struct device *start,
324 void *data,
325 int (*fn)(struct device *dev,
326 void *));
327 struct device *driver_find_device(struct device_driver *drv,
328 struct device *start, void *data,
329 int (*match)(struct device *dev, void *data));
330
331 /**
332 * struct subsys_interface - interfaces to device functions
333 * @name: name of the device function
334 * @subsys: subsystem of the devices to attach to
335 * @node: the list of functions registered at the subsystem
336 * @add_dev: device hookup to device function handler
337 * @remove_dev: device hookup to device function handler
338 *
339 * Simple interfaces attached to a subsystem. Multiple interfaces can
340 * attach to a subsystem and its devices. Unlike drivers, they do not
341 * exclusively claim or control devices. Interfaces usually represent
342 * a specific functionality of a subsystem/class of devices.
343 */
344 struct subsys_interface {
345 const char *name;
346 struct bus_type *subsys;
347 struct list_head node;
348 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
349 void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
350 };
351
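/* A hedged sketch (hypothetical "foo" names) of a subsys_interface:
   add_dev()/remove_dev() run for every device already on, or later added to,
   the subsystem, without exclusively claiming any device. */
extern struct bus_type foo_bus_type;

static int foo_add_dev(struct device *dev, struct subsys_interface *sif)
{
	dev_info(dev, "noticed by foo interface\n");
	return 0;
}

static struct subsys_interface foo_sif = {
	.name    = "foo",
	.subsys  = &foo_bus_type,
	.add_dev = foo_add_dev,
};
/* subsys_interface_register(&foo_sif); */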
352 int subsys_interface_register(struct subsys_interface *sif);
353 void subsys_interface_unregister(struct subsys_interface *sif);
354
355 int subsys_system_register(struct bus_type *subsys,
356 const struct attribute_group **groups);
357 int subsys_virtual_register(struct bus_type *subsys,
358 const struct attribute_group **groups);
359
360 /**
361 * struct class - device classes
362 * @name: Name of the class.
363 * @owner: The module owner.
364 * @class_attrs: Default attributes of this class.
365 * @class_groups: Default attributes of this class.
366 * @dev_groups: Default attributes of the devices that belong to the class.
367 * @dev_kobj: The kobject that represents this class and links it into the hierarchy.
368 * @dev_uevent:	Called when a device is added to or removed from this class,
369 *			or on other events that generate uevents, to add
370 *			environment variables.
371 * @devnode: Callback to provide the devtmpfs.
372 * @class_release: Called to release this class.
373 * @dev_release: Called to release the device.
374 * @suspend: Used to put the device to sleep mode, usually to a low power
375 * state.
376 * @resume: Used to bring the device from the sleep mode.
377 * @ns_type:	Callbacks so sysfs can determine namespaces.
378 * @namespace:	Namespace of the devices that belong to this class.
379 * @pm: The default device power management operations of this class.
380 * @p: The private data of the driver core, no one other than the
381 * driver core can touch this.
382 *
383 * A class is a higher-level view of a device that abstracts out low-level
384 * implementation details. Drivers may see a SCSI disk or an ATA disk, but,
385 * at the class level, they are all simply disks. Classes allow user space
386 * to work with devices based on what they do, rather than how they are
387 * connected or how they work.
388 */
389 struct class {
390 const char *name;
391 struct module *owner;
392
393 struct class_attribute *class_attrs;
394 const struct attribute_group **class_groups;
395 const struct attribute_group **dev_groups;
396 struct kobject *dev_kobj;
397
398 int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
399 char *(*devnode)(struct device *dev, umode_t *mode);
400
401 void (*class_release)(struct class *class);
402 void (*dev_release)(struct device *dev);
403
404 int (*suspend)(struct device *dev, pm_message_t state);
405 int (*resume)(struct device *dev);
406
407 const struct kobj_ns_type_operations *ns_type;
408 const void *(*namespace)(struct device *dev);
409
410 const struct dev_pm_ops *pm;
411
412 struct subsys_private *p;
413 };
414
415 struct class_dev_iter {
416 struct klist_iter ki;
417 const struct device_type *type;
418 };
419
420 extern struct kobject *sysfs_dev_block_kobj;
421 extern struct kobject *sysfs_dev_char_kobj;
422 extern int __must_check __class_register(struct class *class,
423 struct lock_class_key *key);
424 extern void class_unregister(struct class *class);
425
426 /* This is a #define to keep the compiler from merging different
427 * instances of the __key variable */
428 #define class_register(class) \
429 ({ \
430 static struct lock_class_key __key; \
431 __class_register(class, &__key); \
432 })
433
434 struct class_compat;
435 struct class_compat *class_compat_register(const char *name);
436 void class_compat_unregister(struct class_compat *cls);
437 int class_compat_create_link(struct class_compat *cls, struct device *dev,
438 struct device *device_link);
439 void class_compat_remove_link(struct class_compat *cls, struct device *dev,
440 struct device *device_link);
441
442 extern void class_dev_iter_init(struct class_dev_iter *iter,
443 struct class *class,
444 struct device *start,
445 const struct device_type *type);
446 extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
447 extern void class_dev_iter_exit(struct class_dev_iter *iter);
448
449 extern int class_for_each_device(struct class *class, struct device *start,
450 void *data,
451 int (*fn)(struct device *dev, void *data));
452 extern struct device *class_find_device(struct class *class,
453 struct device *start, const void *data,
454 int (*match)(struct device *, const void *));
455
456 struct class_attribute {
457 struct attribute attr;
458 ssize_t (*show)(struct class *class, struct class_attribute *attr,
459 char *buf);
460 ssize_t (*store)(struct class *class, struct class_attribute *attr,
461 const char *buf, size_t count);
462 };
463
464 #define CLASS_ATTR(_name, _mode, _show, _store) \
465 struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store)
466 #define CLASS_ATTR_RW(_name) \
467 struct class_attribute class_attr_##_name = __ATTR_RW(_name)
468 #define CLASS_ATTR_RO(_name) \
469 struct class_attribute class_attr_##_name = __ATTR_RO(_name)
470 #define CLASS_ATTR_WO(_name) \
471 struct class_attribute class_attr_##_name = __ATTR_WO(_name)
472
473 extern int __must_check class_create_file_ns(struct class *class,
474 const struct class_attribute *attr,
475 const void *ns);
476 extern void class_remove_file_ns(struct class *class,
477 const struct class_attribute *attr,
478 const void *ns);
479
480 static inline int __must_check class_create_file(struct class *class,
481 const struct class_attribute *attr)
482 {
483 return class_create_file_ns(class, attr, NULL);
484 }
485
486 static inline void class_remove_file(struct class *class,
487 const struct class_attribute *attr)
488 {
489 return class_remove_file_ns(class, attr, NULL);
490 }
491
492 /* Simple class attribute that is just a static string */
493 struct class_attribute_string {
494 struct class_attribute attr;
495 char *str;
496 };
497
498 /* Currently only read-only attributes are supported */
499 #define _CLASS_ATTR_STRING(_name, _mode, _str) \
500 { __ATTR(_name, _mode, show_class_attr_string, NULL), _str }
501 #define CLASS_ATTR_STRING(_name, _mode, _str) \
502 struct class_attribute_string class_attr_##_name = \
503 _CLASS_ATTR_STRING(_name, _mode, _str)
504
505 extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr,
506 char *buf);
507
508 struct class_interface {
509 struct list_head node;
510 struct class *class;
511
512 int (*add_dev) (struct device *, struct class_interface *);
513 void (*remove_dev) (struct device *, struct class_interface *);
514 };
515
516 extern int __must_check class_interface_register(struct class_interface *);
517 extern void class_interface_unregister(struct class_interface *);
518
519 extern struct class * __must_check __class_create(struct module *owner,
520 const char *name,
521 struct lock_class_key *key);
522 extern void class_destroy(struct class *cls);
523
524 /* This is a #define to keep the compiler from merging different
525 * instances of the __key variable */
526 #define class_create(owner, name) \
527 ({ \
528 static struct lock_class_key __key; \
529 __class_create(owner, name, &__key); \
530 })
531
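/* A hedged sketch of the class_create()/class_destroy() pairing
   (hypothetical "foo" names; IS_ERR/PTR_ERR assume <linux/err.h>). */
static struct class *foo_class;

static int __init foo_class_init(void)
{
	foo_class = class_create(THIS_MODULE, "foo");
	if (IS_ERR(foo_class))
		return PTR_ERR(foo_class);
	return 0;
}

static void __exit foo_class_exit(void)
{
	class_destroy(foo_class);
}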
532 /*
533  * The type of device that "struct device" is embedded in. A class
534 * or bus can contain devices of different types
535 * like "partitions" and "disks", "mouse" and "event".
536 * This identifies the device type and carries type-specific
537 * information, equivalent to the kobj_type of a kobject.
538 * If "name" is specified, the uevent will contain it in
539 * the DEVTYPE variable.
540 */
541 struct device_type {
542 const char *name;
543 const struct attribute_group **groups;
544 int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
545 char *(*devnode)(struct device *dev, umode_t *mode,
546 kuid_t *uid, kgid_t *gid);
547 void (*release)(struct device *dev);
548
549 const struct dev_pm_ops *pm;
550 };
551
552 /* interface for exporting device attributes */
553 struct device_attribute {
554 struct attribute attr;
555 ssize_t (*show)(struct device *dev, struct device_attribute *attr,
556 char *buf);
557 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
558 const char *buf, size_t count);
559 };
560
561 struct dev_ext_attribute {
562 struct device_attribute attr;
563 void *var;
564 };
565
566 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
567 char *buf);
568 ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr,
569 const char *buf, size_t count);
570 ssize_t device_show_int(struct device *dev, struct device_attribute *attr,
571 char *buf);
572 ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
573 const char *buf, size_t count);
574 ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
575 char *buf);
576 ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
577 const char *buf, size_t count);
578
579 #define DEVICE_ATTR(_name, _mode, _show, _store) \
580 struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
581 #define DEVICE_ATTR_RW(_name) \
582 struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
583 #define DEVICE_ATTR_RO(_name) \
584 struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
585 #define DEVICE_ATTR_WO(_name) \
586 struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
587 #define DEVICE_ULONG_ATTR(_name, _mode, _var) \
588 struct dev_ext_attribute dev_attr_##_name = \
589 { __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
590 #define DEVICE_INT_ATTR(_name, _mode, _var) \
591 struct dev_ext_attribute dev_attr_##_name = \
592 { __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }
593 #define DEVICE_BOOL_ATTR(_name, _mode, _var) \
594 struct dev_ext_attribute dev_attr_##_name = \
595 { __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }
596 #define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
597 struct device_attribute dev_attr_##_name = \
598 __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
599
600 extern int device_create_file(struct device *device,
601 const struct device_attribute *entry);
602 extern void device_remove_file(struct device *dev,
603 const struct device_attribute *attr);
604 extern bool device_remove_file_self(struct device *dev,
605 const struct device_attribute *attr);
606 extern int __must_check device_create_bin_file(struct device *dev,
607 const struct bin_attribute *attr);
608 extern void device_remove_bin_file(struct device *dev,
609 const struct bin_attribute *attr);
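
To illustrate the show/store interface and the DEVICE_ATTR_RO() shorthand above, here is a hedged sketch of a read-only attribute backed by hypothetical driver state (struct example_priv is not from this trace); it would be registered with device_create_file() or through an attribute group:

struct example_priv {
        int channel;
};

static ssize_t channel_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct example_priv *priv = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", priv->channel);
}
static DEVICE_ATTR_RO(channel);         /* emits dev_attr_channel */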
610
611 /* device resource management */
612 typedef void (*dr_release_t)(struct device *dev, void *res);
613 typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
614
615 #ifdef CONFIG_DEBUG_DEVRES
616 extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
617 int nid, const char *name) __malloc;
618 #define devres_alloc(release, size, gfp) \
619 __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
620 #define devres_alloc_node(release, size, gfp, nid) \
621 __devres_alloc_node(release, size, gfp, nid, #release)
622 #else
623 extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
624 int nid) __malloc;
625 static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
626 {
627 return devres_alloc_node(release, size, gfp, NUMA_NO_NODE);
628 }
629 #endif
630
631 extern void devres_for_each_res(struct device *dev, dr_release_t release,
632 dr_match_t match, void *match_data,
633 void (*fn)(struct device *, void *, void *),
634 void *data);
635 extern void devres_free(void *res);
636 extern void devres_add(struct device *dev, void *res);
637 extern void *devres_find(struct device *dev, dr_release_t release,
638 dr_match_t match, void *match_data);
639 extern void *devres_get(struct device *dev, void *new_res,
640 dr_match_t match, void *match_data);
641 extern void *devres_remove(struct device *dev, dr_release_t release,
642 dr_match_t match, void *match_data);
643 extern int devres_destroy(struct device *dev, dr_release_t release,
644 dr_match_t match, void *match_data);
645 extern int devres_release(struct device *dev, dr_release_t release,
646 dr_match_t match, void *match_data);
647
648 /* devres group */
649 extern void * __must_check devres_open_group(struct device *dev, void *id,
650 gfp_t gfp);
651 extern void devres_close_group(struct device *dev, void *id);
652 extern void devres_remove_group(struct device *dev, void *id);
653 extern int devres_release_group(struct device *dev, void *id);
654
655 /* managed devm_k*alloc/kfree for device drivers */
656 extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc;
657 extern __printf(3, 0)
658 char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
659 va_list ap) __malloc;
660 extern __printf(3, 4)
661 char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) __malloc;
662 static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
663 {
664 return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
665 }
666 static inline void *devm_kmalloc_array(struct device *dev,
667 size_t n, size_t size, gfp_t flags)
668 {
669 if (size != 0 && n > SIZE_MAX / size)
670 return NULL;
671 return devm_kmalloc(dev, n * size, flags);
672 }
673 static inline void *devm_kcalloc(struct device *dev,
674 size_t n, size_t size, gfp_t flags)
675 {
676 return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
677 }
678 extern void devm_kfree(struct device *dev, void *p);
679 extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
680 extern void *devm_kmemdup(struct device *dev, const void *src, size_t len,
681 gfp_t gfp);
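
A sketch of the managed allocators in a probe path, reusing the hypothetical struct example_priv from the attribute sketch above: memory from devm_kzalloc() is released automatically when the driver detaches, so no explicit devm_kfree() is needed on the error or remove paths.

static int example_probe(struct device *dev)
{
        struct example_priv *priv;

        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        dev_set_drvdata(dev, priv);
        return 0;       /* priv is freed by devres on detach */
}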
682
683 extern unsigned long devm_get_free_pages(struct device *dev,
684 gfp_t gfp_mask, unsigned int order);
685 extern void devm_free_pages(struct device *dev, unsigned long addr);
686
687 void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
688
689 /* allows adding/removing a custom action to the devres stack */
690 int devm_add_action(struct device *dev, void (*action)(void *), void *data);
691 void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
692
693 static inline int devm_add_action_or_reset(struct device *dev,
694 void (*action)(void *), void *data)
695 {
696 int ret;
697
698 ret = devm_add_action(dev, action, data);
699 if (ret)
700 action(data);
701
702 return ret;
703 }
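
Illustrative use of the helper above, assuming a setup step with a matching teardown (both example_* functions are hypothetical): on registration failure the action runs immediately, so the caller can simply propagate the error.

static void example_teardown(void *data)
{
        /* reverse whatever example_setup() did with "data" */
}

static int example_setup(struct device *dev, void *ctx)
{
        /* ... perform the setup that example_teardown() undoes ... */
        return devm_add_action_or_reset(dev, example_teardown, ctx);
}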
704
705 /**
706 * devm_alloc_percpu - Resource-managed alloc_percpu
707 * @dev: Device to allocate per-cpu memory for
708 * @type: Type to allocate per-cpu memory for
709 *
710 * Managed alloc_percpu. Per-cpu memory allocated with this function is
711 * automatically freed on driver detach.
712 *
713 * RETURNS:
714 * Pointer to allocated memory on success, NULL on failure.
715 */
716 #define devm_alloc_percpu(dev, type) \
717 ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \
718 __alignof__(type)))
719
720 void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
721 size_t align);
722 void devm_free_percpu(struct device *dev, void __percpu *pdata);
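
A minimal sketch of devm_alloc_percpu() per the kernel-doc above (struct example_stats is hypothetical): per-cpu counters that devres frees on driver detach.

struct example_stats {
        u64 packets;
};

static int example_stats_probe(struct device *dev)
{
        struct example_stats __percpu *stats;

        stats = devm_alloc_percpu(dev, struct example_stats);
        if (!stats)
                return -ENOMEM;

        this_cpu_inc(stats->packets);   /* preemption-safe increment */
        return 0;
}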
723
724 struct device_dma_parameters {
725 /*
726 * a low level driver may set these to teach IOMMU code about
727 * sg limitations.
728 */
729 unsigned int max_segment_size;
730 unsigned long segment_boundary_mask;
731 };
732
733 /**
734 * enum device_link_state - Device link states.
735 * @DL_STATE_NONE: The presence of the drivers is not being tracked.
736 * @DL_STATE_DORMANT: None of the supplier/consumer drivers is present.
737 * @DL_STATE_AVAILABLE: The supplier driver is present, but the consumer is not.
738 * @DL_STATE_CONSUMER_PROBE: The consumer is probing (supplier driver present).
739 * @DL_STATE_ACTIVE: Both the supplier and consumer drivers are present.
740 * @DL_STATE_SUPPLIER_UNBIND: The supplier driver is unbinding.
741 */
742 enum device_link_state {
743 DL_STATE_NONE = -1,
744 DL_STATE_DORMANT = 0,
745 DL_STATE_AVAILABLE,
746 DL_STATE_CONSUMER_PROBE,
747 DL_STATE_ACTIVE,
748 DL_STATE_SUPPLIER_UNBIND,
749 };
750
751 /*
752 * Device link flags.
753 *
754 * STATELESS: The core won't track the presence of supplier/consumer drivers.
755 * AUTOREMOVE: Remove this link automatically on consumer driver unbind.
756 * PM_RUNTIME: If set, the runtime PM framework will use this link.
757 * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
758 */
759 #define DL_FLAG_STATELESS BIT(0)
760 #define DL_FLAG_AUTOREMOVE BIT(1)
761 #define DL_FLAG_PM_RUNTIME BIT(2)
762 #define DL_FLAG_RPM_ACTIVE BIT(3)
763
764 /**
765 * struct device_link - Device link representation.
766 * @supplier: The device on the supplier end of the link.
767 * @s_node: Hook to the supplier device's list of links to consumers.
768 * @consumer: The device on the consumer end of the link.
769 * @c_node: Hook to the consumer device's list of links to suppliers.
770 * @status: The state of the link (with respect to the presence of drivers).
771 * @flags: Link flags.
772 * @rpm_active: Whether or not the consumer device is runtime-PM-active.
773 * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
774 */
775 struct device_link {
776 struct device *supplier;
777 struct list_head s_node;
778 struct device *consumer;
779 struct list_head c_node;
780 enum device_link_state status;
781 u32 flags;
782 bool rpm_active;
783 #ifdef CONFIG_SRCU
784 struct rcu_head rcu_head;
785 #endif
786 };
787
788 /**
789 * enum dl_dev_state - Device driver presence tracking information.
790 * @DL_DEV_NO_DRIVER: There is no driver attached to the device.
791 * @DL_DEV_PROBING: A driver is probing.
792 * @DL_DEV_DRIVER_BOUND: The driver has been bound to the device.
793 * @DL_DEV_UNBINDING: The driver is unbinding from the device.
794 */
795 enum dl_dev_state {
796 DL_DEV_NO_DRIVER = 0,
797 DL_DEV_PROBING,
798 DL_DEV_DRIVER_BOUND,
799 DL_DEV_UNBINDING,
800 };
801
802 /**
803 * struct dev_links_info - Device data related to device links.
804 * @suppliers: List of links to supplier devices.
805 * @consumers: List of links to consumer devices.
806 * @status: Driver status information.
807 */
808 struct dev_links_info {
809 struct list_head suppliers;
810 struct list_head consumers;
811 enum dl_dev_state status;
812 };
813
814 /**
815 * struct device - The basic device structure
816 * @parent: The device's "parent" device, the device to which it is attached.
817 * In most cases, a parent device is some sort of bus or host
818 controller. If parent is NULL, the device is a top-level device,
819 * which is not usually what you want.
820 * @p: Holds the private data of the driver core portions of the device.
821 * See the comment of the struct device_private for detail.
822 * @kobj: A top-level, abstract class from which other classes are derived.
823 * @init_name: Initial name of the device.
824 * @type: The type of device.
825 * This identifies the device type and carries type-specific
826 * information.
827 * @mutex: Mutex to synchronize calls to its driver.
828 * @bus: Type of bus device is on.
829 * @driver: Which driver has allocated this device.
830 * @platform_data: Platform data specific to the device.
831 * Example: For devices on custom boards, as typical of embedded
832 * and SOC based hardware, Linux often uses platform_data to point
833 * to board-specific structures describing devices and how they
834 * are wired. That can include what ports are available, chip
835 * variants, which GPIO pins act in what additional roles, and so
836 * on. This shrinks the "Board Support Packages" (BSPs) and
837 * minimizes board-specific #ifdefs in drivers.
838 * @driver_data: Private pointer for driver specific info.
839 * @links: Links to suppliers and consumers of this device.
840 * @power: For device power management.
841 * See Documentation/power/admin-guide/devices.rst for details.
842 * @pm_domain: Provide callbacks that are executed during system suspend,
843 * hibernation, system resume and during runtime PM transitions
844 * along with subsystem-level and driver-level callbacks.
845 * @pins: For device pin management.
846 * See Documentation/pinctrl.txt for details.
847 * @msi_list: Hosts MSI descriptors
848 * @msi_domain: The generic MSI domain this device is using.
849 * @numa_node: NUMA node this device is close to.
850 * @dma_mask: Dma mask (if dma'ble device).
851 * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mappings, as not all
852 * hardware supports 64-bit addresses for consistent allocations
853 * such as descriptors.
854 * @dma_pfn_offset: offset of the DMA memory range relative to RAM
855 * @dma_parms: A low level driver may set these to teach IOMMU code about
856 * segment limitations.
857 * @dma_pools: Dma pools (if dma'ble device).
858 * @dma_mem: Internal for coherent mem override.
859 * @cma_area: Contiguous memory area for dma allocations
860 * @archdata: For arch-specific additions.
861 * @of_node: Associated device tree node.
862 * @fwnode: Associated device node supplied by platform firmware.
863 * @devt: For creating the sysfs "dev".
864 * @id: device instance
865 * @devres_lock: Spinlock to protect the resource of the device.
866 * @devres_head: The resources list of the device.
867 * @knode_class: The node used to add the device to the class list.
868 * @class: The class of the device.
869 * @groups: Optional attribute groups.
870 * @release: Callback to free the device after all references have
871 * gone away. This should be set by the allocator of the
872 * device (i.e. the bus driver that discovered the device).
873 * @iommu_group: IOMMU group the device belongs to.
874 * @iommu_fwspec: IOMMU-specific properties supplied by firmware.
875 *
876 * @offline_disabled: If set, the device is permanently online.
877 * @offline: Set after successful invocation of bus type's .offline().
878 *
879 * At the lowest level, every device in a Linux system is represented by an
880 * instance of struct device. The device structure contains the information
881 * that the device model core needs to model the system. Most subsystems,
882 * however, track additional information about the devices they host. As a
883 * result, it is rare for devices to be represented by bare device structures;
884 * instead, that structure, like kobject structures, is usually embedded within
885 * a higher-level representation of the device.
886 */
887 struct device {
888 struct device *parent;
889
890 struct device_private *p;
891
892 struct kobject kobj;
893 const char *init_name; /* initial name of the device */
894 const struct device_type *type;
895
896 struct mutex mutex; /* mutex to synchronize calls to
897 * its driver.
898 */
899
900 struct bus_type *bus; /* type of bus device is on */
901 struct device_driver *driver; /* which driver has allocated this
902 device */
903 void *platform_data; /* Platform specific data, device
904 core doesn't touch it */
905 void *driver_data; /* Driver data, set and get with
906 dev_set/get_drvdata */
907 struct dev_links_info links;
908 struct dev_pm_info power;
909 struct dev_pm_domain *pm_domain;
910
911 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
912 struct irq_domain *msi_domain;
913 #endif
914 #ifdef CONFIG_PINCTRL
915 struct dev_pin_info *pins;
916 #endif
917 #ifdef CONFIG_GENERIC_MSI_IRQ
918 struct list_head msi_list;
919 #endif
920
921 #ifdef CONFIG_NUMA
922 int numa_node; /* NUMA node this device is close to */
923 #endif
924 u64 *dma_mask; /* dma mask (if dma'able device) */
925 u64 coherent_dma_mask;/* Like dma_mask, but for
926 alloc_coherent mappings as
927 not all hardware supports
928 64 bit addresses for consistent
929 allocations such as descriptors. */
930 unsigned long dma_pfn_offset;
931
932 struct device_dma_parameters *dma_parms;
933
934 struct list_head dma_pools; /* dma pools (if dma'ble) */
935
936 struct dma_coherent_mem *dma_mem; /* internal for coherent mem
937 override */
938 #ifdef CONFIG_DMA_CMA
939 struct cma *cma_area; /* contiguous memory area for dma
940 allocations */
941 #endif
942 /* arch specific additions */
943 struct dev_archdata archdata;
944
945 struct device_node *of_node; /* associated device tree node */
946 struct fwnode_handle *fwnode; /* firmware device node */
947
948 dev_t devt; /* dev_t, creates the sysfs "dev" */
949 u32 id; /* device instance */
950
951 spinlock_t devres_lock;
952 struct list_head devres_head;
953
954 struct klist_node knode_class;
955 struct class *class;
956 const struct attribute_group **groups; /* optional groups */
957
958 void (*release)(struct device *dev);
959 struct iommu_group *iommu_group;
960 struct iommu_fwspec *iommu_fwspec;
961
962 bool offline_disabled:1;
963 bool offline:1;
964 };
965
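As the comment above describes, struct device is normally embedded in a larger subsystem object; a hedged sketch of the usual container_of() pattern (struct example_widget is illustrative):

struct example_widget {
        struct device dev;      /* embedded core structure */
        int channel;
};

static inline struct example_widget *to_example_widget(struct device *d)
{
        return container_of(d, struct example_widget, dev);
}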
966 static inline struct device *kobj_to_dev(struct kobject *kobj)
967 {
968 return container_of(kobj, struct device, kobj);
969 }
970
971 /* Get the wakeup routines, which depend on struct device */
972 #include <linux/pm_wakeup.h>
973
974 static inline const char *dev_name(const struct device *dev)
975 {
976 /* Use the init name until the kobject becomes available */
977 if (dev->init_name)
978 return dev->init_name;
979
980 return kobject_name(&dev->kobj);
981 }
982
983 extern __printf(2, 3)
984 int dev_set_name(struct device *dev, const char *name, ...);
985
986 #ifdef CONFIG_NUMA
987 static inline int dev_to_node(struct device *dev)
988 {
989 return dev->numa_node;
990 }
991 static inline void set_dev_node(struct device *dev, int node)
992 {
993 dev->numa_node = node;
994 }
995 #else
996 static inline int dev_to_node(struct device *dev)
997 {
998 return -1;
999 }
1000 static inline void set_dev_node(struct device *dev, int node)
1001 {
1002 }
1003 #endif
1004
1005 static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
1006 {
1007 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
1008 return dev->msi_domain;
1009 #else
1010 return NULL;
1011 #endif
1012 }
1013
1014 static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
1015 {
1016 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
1017 dev->msi_domain = d;
1018 #endif
1019 }
1020
1021 static inline void *dev_get_drvdata(const struct device *dev)
1022 {
1023 return dev->driver_data;
1024 }
1025
1026 static inline void dev_set_drvdata(struct device *dev, void *data)
1027 {
1028 dev->driver_data = data;
1029 }
1030
1031 static inline struct pm_subsys_data *dev_to_psd(struct device *dev)
1032 {
1033 return dev ? dev->power.subsys_data : NULL;
1034 }
1035
1036 static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
1037 {
1038 return dev->kobj.uevent_suppress;
1039 }
1040
1041 static inline void dev_set_uevent_suppress(struct device *dev, int val)
1042 {
1043 dev->kobj.uevent_suppress = val;
1044 }
1045
1046 static inline int device_is_registered(struct device *dev)
1047 {
1048 return dev->kobj.state_in_sysfs;
1049 }
1050
1051 static inline void device_enable_async_suspend(struct device *dev)
1052 {
1053 if (!dev->power.is_prepared)
1054 dev->power.async_suspend = true;
1055 }
1056
1057 static inline void device_disable_async_suspend(struct device *dev)
1058 {
1059 if (!dev->power.is_prepared)
1060 dev->power.async_suspend = false;
1061 }
1062
1063 static inline bool device_async_suspend_enabled(struct device *dev)
1064 {
1065 return !!dev->power.async_suspend;
1066 }
1067
1068 static inline void dev_pm_syscore_device(struct device *dev, bool val)
1069 {
1070 #ifdef CONFIG_PM_SLEEP
1071 dev->power.syscore = val;
1072 #endif
1073 }
1074
1075 static inline void device_lock(struct device *dev)
1076 {
1077 mutex_lock(&dev->mutex);
1078 }
1079
1080 static inline int device_lock_interruptible(struct device *dev)
1081 {
1082 return mutex_lock_interruptible(&dev->mutex);
1083 }
1084
1085 static inline int device_trylock(struct device *dev)
1086 {
1087 return mutex_trylock(&dev->mutex);
1088 }
1089
1090 static inline void device_unlock(struct device *dev)
1091 {
1092 mutex_unlock(&dev->mutex);
1093 }
1094
1095 static inline void device_lock_assert(struct device *dev)
1096 {
1097 lockdep_assert_held(&dev->mutex);
1098 }
1099
1100 static inline struct device_node *dev_of_node(struct device *dev)
1101 {
1102 if (!IS_ENABLED(CONFIG_OF))
1103 return NULL;
1104 return dev->of_node;
1105 }
1106
1107 void driver_init(void);
1108
1109 /*
1110 * High level routines for use by the bus drivers
1111 */
1112 extern int __must_check device_register(struct device *dev);
1113 extern void device_unregister(struct device *dev);
1114 extern void device_initialize(struct device *dev);
1115 extern int __must_check device_add(struct device *dev);
1116 extern void device_del(struct device *dev);
1117 extern int device_for_each_child(struct device *dev, void *data,
1118 int (*fn)(struct device *dev, void *data));
1119 extern int device_for_each_child_reverse(struct device *dev, void *data,
1120 int (*fn)(struct device *dev, void *data));
1121 extern struct device *device_find_child(struct device *dev, void *data,
1122 int (*match)(struct device *dev, void *data));
1123 extern int device_rename(struct device *dev, const char *new_name);
1124 extern int device_move(struct device *dev, struct device *new_parent,
1125 enum dpm_order dpm_order);
1126 extern const char *device_get_devnode(struct device *dev,
1127 umode_t *mode, kuid_t *uid, kgid_t *gid,
1128 const char **tmp);
1129
1130 static inline bool device_supports_offline(struct device *dev)
1131 {
1132 return dev->bus && dev->bus->offline && dev->bus->online;
1133 }
1134
1135 extern void lock_device_hotplug(void);
1136 extern void unlock_device_hotplug(void);
1137 extern int lock_device_hotplug_sysfs(void);
1138 extern int device_offline(struct device *dev);
1139 extern int device_online(struct device *dev);
1140 extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
1141 extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
1142
1143 /*
1144 * Root device objects for grouping under /sys/devices
1145 */
1146 extern struct device *__root_device_register(const char *name,
1147 struct module *owner);
1148
1149 /* This is a macro to avoid include problems with THIS_MODULE */
1150 #define root_device_register(name) \
1151 __root_device_register(name, THIS_MODULE)
1152
1153 extern void root_device_unregister(struct device *root);
1154
1155 static inline void *dev_get_platdata(const struct device *dev)
1156 {
1157 return dev->platform_data;
1158 }
1159
1160 /*
1161 * Manual binding of a device to driver. See drivers/base/bus.c
1162 * for information on use.
1163 */
1164 extern int __must_check device_bind_driver(struct device *dev);
1165 extern void device_release_driver(struct device *dev);
1166 extern int __must_check device_attach(struct device *dev);
1167 extern int __must_check driver_attach(struct device_driver *drv);
1168 extern void device_initial_probe(struct device *dev);
1169 extern int __must_check device_reprobe(struct device *dev);
1170
1171 extern bool device_is_bound(struct device *dev);
1172
1173 /*
1174 * Easy functions for dynamically creating devices on the fly
1175 */
1176 extern __printf(5, 0)
1177 struct device *device_create_vargs(struct class *cls, struct device *parent,
1178 dev_t devt, void *drvdata,
1179 const char *fmt, va_list vargs);
1180 extern __printf(5, 6)
1181 struct device *device_create(struct class *cls, struct device *parent,
1182 dev_t devt, void *drvdata,
1183 const char *fmt, ...);
1184 extern __printf(6, 7)
1185 struct device *device_create_with_groups(struct class *cls,
1186 struct device *parent, dev_t devt, void *drvdata,
1187 const struct attribute_group **groups,
1188 const char *fmt, ...);
1189 extern void device_destroy(struct class *cls, dev_t devt);
1190
1191 /*
1192 * Platform "fixup" functions - allow the platform to have its say
1193 * about devices and actions that the general device layer doesn't
1194 * know about.
1195 */
1196 /* Notify platform of device discovery */
1197 extern int (*platform_notify)(struct device *dev);
1198
1199 extern int (*platform_notify_remove)(struct device *dev);
1200
1201
1202 /*
1203 * get_device - atomically increment the reference count for the device.
1204 *
1205 */
1206 extern struct device *get_device(struct device *dev);
1207 extern void put_device(struct device *dev);
1208
1209 #ifdef CONFIG_DEVTMPFS
1210 extern int devtmpfs_create_node(struct device *dev);
1211 extern int devtmpfs_delete_node(struct device *dev);
1212 extern int devtmpfs_mount(const char *mntdir);
1213 #else
1214 static inline int devtmpfs_create_node(struct device *dev) { return 0; }
1215 static inline int devtmpfs_delete_node(struct device *dev) { return 0; }
1216 static inline int devtmpfs_mount(const char *mountpoint) { return 0; }
1217 #endif
1218
1219 /* drivers/base/power/shutdown.c */
1220 extern void device_shutdown(void);
1221
1222 /* debugging and troubleshooting/diagnostic helpers. */
1223 extern const char *dev_driver_string(const struct device *dev);
1224
1225 /* Device links interface. */
1226 struct device_link *device_link_add(struct device *consumer,
1227 struct device *supplier, u32 flags);
1228 void device_link_del(struct device_link *link);
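
A sketch of creating a managed link with the flags defined earlier (the consumer/supplier pointers are assumed to come from the caller's probe context); device_link_add() returns NULL on failure:

static int example_link_devices(struct device *consumer,
                                struct device *supplier)
{
        struct device_link *link;

        link = device_link_add(consumer, supplier,
                               DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
        if (!link)
                return -ENODEV;
        return 0;
}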
1229
1230 #ifdef CONFIG_PRINTK
1231
1232 extern __printf(3, 0)
1233 int dev_vprintk_emit(int level, const struct device *dev,
1234 const char *fmt, va_list args);
1235 extern __printf(3, 4)
1236 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
1237
1238 extern __printf(3, 4)
1239 void dev_printk(const char *level, const struct device *dev,
1240 const char *fmt, ...);
1241 extern __printf(2, 3)
1242 void dev_emerg(const struct device *dev, const char *fmt, ...);
1243 extern __printf(2, 3)
1244 void dev_alert(const struct device *dev, const char *fmt, ...);
1245 extern __printf(2, 3)
1246 void dev_crit(const struct device *dev, const char *fmt, ...);
1247 extern __printf(2, 3)
1248 void dev_err(const struct device *dev, const char *fmt, ...);
1249 extern __printf(2, 3)
1250 void dev_warn(const struct device *dev, const char *fmt, ...);
1251 extern __printf(2, 3)
1252 void dev_notice(const struct device *dev, const char *fmt, ...);
1253 extern __printf(2, 3)
1254 void _dev_info(const struct device *dev, const char *fmt, ...);
1255
1256 #else
1257
1258 static inline __printf(3, 0)
1259 int dev_vprintk_emit(int level, const struct device *dev,
1260 const char *fmt, va_list args)
1261 { return 0; }
1262 static inline __printf(3, 4)
1263 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
1264 { return 0; }
1265
1266 static inline void __dev_printk(const char *level, const struct device *dev,
1267 struct va_format *vaf)
1268 {}
1269 static inline __printf(3, 4)
1270 void dev_printk(const char *level, const struct device *dev,
1271 const char *fmt, ...)
1272 {}
1273
1274 static inline __printf(2, 3)
1275 void dev_emerg(const struct device *dev, const char *fmt, ...)
1276 {}
1277 static inline __printf(2, 3)
1278 void dev_crit(const struct device *dev, const char *fmt, ...)
1279 {}
1280 static inline __printf(2, 3)
1281 void dev_alert(const struct device *dev, const char *fmt, ...)
1282 {}
1283 static inline __printf(2, 3)
1284 void dev_err(const struct device *dev, const char *fmt, ...)
1285 {}
1286 static inline __printf(2, 3)
1287 void dev_warn(const struct device *dev, const char *fmt, ...)
1288 {}
1289 static inline __printf(2, 3)
1290 void dev_notice(const struct device *dev, const char *fmt, ...)
1291 {}
1292 static inline __printf(2, 3)
1293 void _dev_info(const struct device *dev, const char *fmt, ...)
1294 {}
1295
1296 #endif
1297
1298 /*
1299 * Hack-around for existing non-printk uses of dev_info
1300 *
1301 * Note that the definition of dev_info below is actually _dev_info
1302 * and a macro is used to avoid redefining dev_info
1303 */
1304
1305 #define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg)
1306
1307 #if defined(CONFIG_DYNAMIC_DEBUG)
1308 #define dev_dbg(dev, format, ...) \
1309 do { \
1310 dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
1311 } while (0)
1312 #elif defined(DEBUG)
1313 #define dev_dbg(dev, format, arg...) \
1314 dev_printk(KERN_DEBUG, dev, format, ##arg)
1315 #else
1316 #define dev_dbg(dev, format, arg...) \
1317 ({ \
1318 if (0) \
1319 dev_printk(KERN_DEBUG, dev, format, ##arg); \
1320 })
1321 #endif
1322
1323 #ifdef CONFIG_PRINTK
1324 #define dev_level_once(dev_level, dev, fmt, ...) \
1325 do { \
1326 static bool __print_once __read_mostly; \
1327 \
1328 if (!__print_once) { \
1329 __print_once = true; \
1330 dev_level(dev, fmt, ##__VA_ARGS__); \
1331 } \
1332 } while (0)
1333 #else
1334 #define dev_level_once(dev_level, dev, fmt, ...) \
1335 do { \
1336 if (0) \
1337 dev_level(dev, fmt, ##__VA_ARGS__); \
1338 } while (0)
1339 #endif
1340
1341 #define dev_emerg_once(dev, fmt, ...) \
1342 dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__)
1343 #define dev_alert_once(dev, fmt, ...) \
1344 dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__)
1345 #define dev_crit_once(dev, fmt, ...) \
1346 dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__)
1347 #define dev_err_once(dev, fmt, ...) \
1348 dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__)
1349 #define dev_warn_once(dev, fmt, ...) \
1350 dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__)
1351 #define dev_notice_once(dev, fmt, ...) \
1352 dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__)
1353 #define dev_info_once(dev, fmt, ...) \
1354 dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
1355 #define dev_dbg_once(dev, fmt, ...) \
1356 dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__)
1357
1358 #define dev_level_ratelimited(dev_level, dev, fmt, ...) \
1359 do { \
1360 static DEFINE_RATELIMIT_STATE(_rs, \
1361 DEFAULT_RATELIMIT_INTERVAL, \
1362 DEFAULT_RATELIMIT_BURST); \
1363 if (__ratelimit(&_rs)) \
1364 dev_level(dev, fmt, ##__VA_ARGS__); \
1365 } while (0)
1366
1367 #define dev_emerg_ratelimited(dev, fmt, ...) \
1368 dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__)
1369 #define dev_alert_ratelimited(dev, fmt, ...) \
1370 dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__)
1371 #define dev_crit_ratelimited(dev, fmt, ...) \
1372 dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__)
1373 #define dev_err_ratelimited(dev, fmt, ...) \
1374 dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__)
1375 #define dev_warn_ratelimited(dev, fmt, ...) \
1376 dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__)
1377 #define dev_notice_ratelimited(dev, fmt, ...) \
1378 dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
1379 #define dev_info_ratelimited(dev, fmt, ...) \
1380 dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
1381 #if defined(CONFIG_DYNAMIC_DEBUG)
1382 /* descriptor check is first to prevent flooding with "callbacks suppressed" */
1383 #define dev_dbg_ratelimited(dev, fmt, ...) \
1384 do { \
1385 static DEFINE_RATELIMIT_STATE(_rs, \
1386 DEFAULT_RATELIMIT_INTERVAL, \
1387 DEFAULT_RATELIMIT_BURST); \
1388 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
1389 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
1390 __ratelimit(&_rs)) \
1391 __dynamic_dev_dbg(&descriptor, dev, fmt, \
1392 ##__VA_ARGS__); \
1393 } while (0)
1394 #elif defined(DEBUG)
1395 #define dev_dbg_ratelimited(dev, fmt, ...) \
1396 do { \
1397 static DEFINE_RATELIMIT_STATE(_rs, \
1398 DEFAULT_RATELIMIT_INTERVAL, \
1399 DEFAULT_RATELIMIT_BURST); \
1400 if (__ratelimit(&_rs)) \
1401 dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
1402 } while (0)
1403 #else
1404 #define dev_dbg_ratelimited(dev, fmt, ...) \
1405 do { \
1406 if (0) \
1407 dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
1408 } while (0)
1409 #endif
1410
1411 #ifdef VERBOSE_DEBUG
1412 #define dev_vdbg dev_dbg
1413 #else
1414 #define dev_vdbg(dev, format, arg...) \
1415 ({ \
1416 if (0) \
1417 dev_printk(KERN_DEBUG, dev, format, ##arg); \
1418 })
1419 #endif
1420
1421 /*
1422 * dev_WARN*() acts like dev_printk(), but with the key difference of
1423 * using WARN/WARN_ONCE to include file/line information and a backtrace.
1424 */
1425 #define dev_WARN(dev, format, arg...) \
1426 WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg)
1427
1428 #define dev_WARN_ONCE(dev, condition, format, arg...) \
1429 WARN_ONCE(condition, "%s %s: " format, \
1430 dev_driver_string(dev), dev_name(dev), ## arg)
1431
1432 /* Create alias, so the module can be autoloaded. */
1433 #define MODULE_ALIAS_CHARDEV(major,minor) \
1434 MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
1435 #define MODULE_ALIAS_CHARDEV_MAJOR(major) \
1436 MODULE_ALIAS("char-major-" __stringify(major) "-*")
1437
1438 #ifdef CONFIG_SYSFS_DEPRECATED
1439 extern long sysfs_deprecated;
1440 #else
1441 #define sysfs_deprecated 0
1442 #endif
1443
1444 /**
1445 * module_driver() - Helper macro for drivers that don't do anything
1446 * special in module init/exit. This eliminates a lot of boilerplate.
1447 * Each module may only use this macro once, and calling it replaces
1448 * module_init() and module_exit().
1449 *
1450 * @__driver: driver name
1451 * @__register: register function for this driver type
1452 * @__unregister: unregister function for this driver type
1453 * @...: Additional arguments to be passed to __register and __unregister.
1454 *
1455 * Use this macro to construct bus specific macros for registering
1456 * drivers, and do not use it on its own.
1457 */
1458 #define module_driver(__driver, __register, __unregister, ...) \
1459 static int __init __driver##_init(void) \
1460 { \
1461 return __register(&(__driver) , ##__VA_ARGS__); \
1462 } \
1463 module_init(__driver##_init); \
1464 static void __exit __driver##_exit(void) \
1465 { \
1466 __unregister(&(__driver) , ##__VA_ARGS__); \
1467 } \
1468 module_exit(__driver##_exit);
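
For instance, the platform bus wraps this macro to build its own registration helper (as in mainline platform_device.h):

#define module_platform_driver(__platform_driver) \
        module_driver(__platform_driver, platform_driver_register, \
                        platform_driver_unregister)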
1469
1470 /**
1471 * builtin_driver() - Helper macro for drivers that don't do anything
1472 * special in init and have no exit. This eliminates some boilerplate.
1473 * Each driver may only use this macro once, and calling it replaces
1474 * device_initcall (or in some cases, the legacy __initcall). This is
1475 * meant to be a direct parallel of module_driver() above but without
1476 * the __exit stuff that is not used for builtin cases.
1477 *
1478 * @__driver: driver name
1479 * @__register: register function for this driver type
1480 * @...: Additional arguments to be passed to __register
1481 *
1482 * Use this macro to construct bus specific macros for registering
1483 * drivers, and do not use it on its own.
1484 */
1485 #define builtin_driver(__driver, __register, ...) \
1486 static int __init __driver##_init(void) \
1487 { \
1488 return __register(&(__driver) , ##__VA_ARGS__); \
1489 } \
1490 device_initcall(__driver##_init);
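
The builtin counterpart on the platform bus follows the same pattern (as in mainline platform_device.h):

#define builtin_platform_driver(__platform_driver) \
        builtin_driver(__platform_driver, platform_driver_register)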
1491
1492 #endif /* _DEVICE_H_ */
1 #ifndef _LINUX_DMA_MAPPING_H
2 #define _LINUX_DMA_MAPPING_H
3
4 #include <linux/sizes.h>
5 #include <linux/string.h>
6 #include <linux/device.h>
7 #include <linux/err.h>
8 #include <linux/dma-debug.h>
9 #include <linux/dma-direction.h>
10 #include <linux/scatterlist.h>
11 #include <linux/kmemcheck.h>
12 #include <linux/bug.h>
13
14 /**
15 * List of possible attributes associated with a DMA mapping. The semantics
16 * of each attribute should be defined in Documentation/DMA-attributes.txt.
17 *
18 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
19 * forces all pending DMA writes to complete.
20 */
21 #define DMA_ATTR_WRITE_BARRIER (1UL << 0)
22 /*
23 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
24 * may be weakly ordered, that is that reads and writes may pass each other.
25 */
26 #define DMA_ATTR_WEAK_ORDERING (1UL << 1)
27 /*
28 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
29 * buffered to improve performance.
30 */
31 #define DMA_ATTR_WRITE_COMBINE (1UL << 2)
32 /*
33 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
34 * consistent or non-consistent memory as it sees fit.
35 */
36 #define DMA_ATTR_NON_CONSISTENT (1UL << 3)
37 /*
38 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
39 * virtual mapping for the allocated buffer.
40 */
41 #define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4)
42 /*
43 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
44 * the CPU cache for the given buffer assuming that it has been already
45 * transferred to 'device' domain.
46 */
47 #define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5)
48 /*
49 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
50 * in physical memory.
51 */
52 #define DMA_ATTR_FORCE_CONTIGUOUS (1UL << 6)
53 /*
54 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
55 * that it's probably not worth the time to try to allocate memory in a way
56 * that gives better TLB efficiency.
57 */
58 #define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7)
59 /*
60 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
61 * allocation failure reports (similarly to __GFP_NOWARN).
62 */
63 #define DMA_ATTR_NO_WARN (1UL << 8)
64
65 /*
66 * A dma_addr_t can hold any valid DMA or bus address for the platform.
67 * It can be given to a device to use as a DMA source or target. A CPU cannot
68 * reference a dma_addr_t directly because there may be translation between
69 * its physical address space and the bus address space.
70 */
71 struct dma_map_ops {
72 void* (*alloc)(struct device *dev, size_t size,
73 dma_addr_t *dma_handle, gfp_t gfp,
74 unsigned long attrs);
75 void (*free)(struct device *dev, size_t size,
76 void *vaddr, dma_addr_t dma_handle,
77 unsigned long attrs);
78 int (*mmap)(struct device *, struct vm_area_struct *,
79 void *, dma_addr_t, size_t,
80 unsigned long attrs);
81
82 int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
83 dma_addr_t, size_t, unsigned long attrs);
84
85 dma_addr_t (*map_page)(struct device *dev, struct page *page,
86 unsigned long offset, size_t size,
87 enum dma_data_direction dir,
88 unsigned long attrs);
89 void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
90 size_t size, enum dma_data_direction dir,
91 unsigned long attrs);
92 /*
93 * map_sg returns 0 on error and a value > 0 on success.
94 * It should never return a value < 0.
95 */
96 int (*map_sg)(struct device *dev, struct scatterlist *sg,
97 int nents, enum dma_data_direction dir,
98 unsigned long attrs);
99 void (*unmap_sg)(struct device *dev,
100 struct scatterlist *sg, int nents,
101 enum dma_data_direction dir,
102 unsigned long attrs);
103 dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
104 size_t size, enum dma_data_direction dir,
105 unsigned long attrs);
106 void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
107 size_t size, enum dma_data_direction dir,
108 unsigned long attrs);
109 void (*sync_single_for_cpu)(struct device *dev,
110 dma_addr_t dma_handle, size_t size,
111 enum dma_data_direction dir);
112 void (*sync_single_for_device)(struct device *dev,
113 dma_addr_t dma_handle, size_t size,
114 enum dma_data_direction dir);
115 void (*sync_sg_for_cpu)(struct device *dev,
116 struct scatterlist *sg, int nents,
117 enum dma_data_direction dir);
118 void (*sync_sg_for_device)(struct device *dev,
119 struct scatterlist *sg, int nents,
120 enum dma_data_direction dir);
121 int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
122 int (*dma_supported)(struct device *dev, u64 mask);
123 int (*set_dma_mask)(struct device *dev, u64 mask);
124 #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
125 u64 (*get_required_mask)(struct device *dev);
126 #endif
127 int is_phys;
128 };
129
130 extern struct dma_map_ops dma_noop_ops;
131
132 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
133
134 #define DMA_MASK_NONE 0x0ULL
135
136 static inline int valid_dma_direction(int dma_direction)
137 {
138 return ((dma_direction == DMA_BIDIRECTIONAL) ||
139 (dma_direction == DMA_TO_DEVICE) ||
140 (dma_direction == DMA_FROM_DEVICE));
141 }
142
143 static inline int is_device_dma_capable(struct device *dev)
144 {
145 return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
146 }
147
148 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
149 /*
150 * These three functions are only for the DMA allocator.
151 * Don't use them in device drivers.
152 */
153 int dma_alloc_from_coherent(struct device *dev, ssize_t size,
154 dma_addr_t *dma_handle, void **ret);
155 int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
156
157 int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
158 void *cpu_addr, size_t size, int *ret);
159 #else
160 #define dma_alloc_from_coherent(dev, size, handle, ret) (0)
161 #define dma_release_from_coherent(dev, order, vaddr) (0)
162 #define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
163 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
164
165 #ifdef CONFIG_HAS_DMA
166 #include <asm/dma-mapping.h>
167 #else
168 /*
169 * Define the dma api to allow compilation but not linking of
170 * dma dependent code. Code that depends on the dma-mapping
171 * API needs to set 'depends on HAS_DMA' in its Kconfig
172 */
173 extern struct dma_map_ops bad_dma_ops;
174 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
175 {
176 return &bad_dma_ops;
177 }
178 #endif
179
180 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
181 size_t size,
182 enum dma_data_direction dir,
183 unsigned long attrs)
184 {
185 struct dma_map_ops *ops = get_dma_ops(dev);
186 dma_addr_t addr;
187
188 kmemcheck_mark_initialized(ptr, size);
189 BUG_ON(!valid_dma_direction(dir));
190 addr = ops->map_page(dev, virt_to_page(ptr),
191 offset_in_page(ptr), size,
192 dir, attrs);
193 debug_dma_map_page(dev, virt_to_page(ptr),
194 offset_in_page(ptr), size,
195 dir, addr, true);
196 return addr;
197 }
198
199 static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
200 size_t size,
201 enum dma_data_direction dir,
202 unsigned long attrs)
203 {
204 struct dma_map_ops *ops = get_dma_ops(dev);
205
206 BUG_ON(!valid_dma_direction(dir));
207 if (ops->unmap_page)
208 ops->unmap_page(dev, addr, size, dir, attrs);
209 debug_dma_unmap_page(dev, addr, size, dir, true);
210 }
211
212 /*
213 * dma_map_sg_attrs returns 0 on error and > 0 on success.
214 * It should never return a value < 0.
215 */
216 static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
217 int nents, enum dma_data_direction dir,
218 unsigned long attrs)
219 {
220 struct dma_map_ops *ops = get_dma_ops(dev);
221 int i, ents;
222 struct scatterlist *s;
223
224 for_each_sg(sg, s, nents, i)
225 kmemcheck_mark_initialized(sg_virt(s), s->length);
226 BUG_ON(!valid_dma_direction(dir));
227 ents = ops->map_sg(dev, sg, nents, dir, attrs);
228 BUG_ON(ents < 0);
229 debug_dma_map_sg(dev, sg, nents, ents, dir);
230
231 return ents;
232 }
233
234 static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
235 int nents, enum dma_data_direction dir,
236 unsigned long attrs)
237 {
238 struct dma_map_ops *ops = get_dma_ops(dev);
239
240 BUG_ON(!valid_dma_direction(dir));
241 debug_dma_unmap_sg(dev, sg, nents, dir);
242 if (ops->unmap_sg)
243 ops->unmap_sg(dev, sg, nents, dir, attrs);
244 }
245
246 static inline dma_addr_t dma_map_page_attrs(struct device *dev,
247 struct page *page,
248 size_t offset, size_t size,
249 enum dma_data_direction dir,
250 unsigned long attrs)
251 {
252 struct dma_map_ops *ops = get_dma_ops(dev);
253 dma_addr_t addr;
254
255 kmemcheck_mark_initialized(page_address(page) + offset, size);
256 BUG_ON(!valid_dma_direction(dir));
257 addr = ops->map_page(dev, page, offset, size, dir, attrs);
258 debug_dma_map_page(dev, page, offset, size, dir, addr, false);
259
260 return addr;
261 }
262
263 static inline void dma_unmap_page_attrs(struct device *dev,
264 dma_addr_t addr, size_t size,
265 enum dma_data_direction dir,
266 unsigned long attrs)
267 {
268 struct dma_map_ops *ops = get_dma_ops(dev);
269
270 BUG_ON(!valid_dma_direction(dir));
271 if (ops->unmap_page)
272 ops->unmap_page(dev, addr, size, dir, attrs);
273 debug_dma_unmap_page(dev, addr, size, dir, false);
274 }
275
276 static inline dma_addr_t dma_map_resource(struct device *dev,
277 phys_addr_t phys_addr,
278 size_t size,
279 enum dma_data_direction dir,
280 unsigned long attrs)
281 {
282 struct dma_map_ops *ops = get_dma_ops(dev);
283 dma_addr_t addr;
284
285 BUG_ON(!valid_dma_direction(dir));
286
287 /* Don't allow RAM to be mapped */
288 BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));
289
290 addr = phys_addr;
291 if (ops->map_resource)
292 addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
293
294 debug_dma_map_resource(dev, phys_addr, size, dir, addr);
295
296 return addr;
297 }
298
299 static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
300 size_t size, enum dma_data_direction dir,
301 unsigned long attrs)
302 {
303 struct dma_map_ops *ops = get_dma_ops(dev);
304
305 BUG_ON(!valid_dma_direction(dir));
306 if (ops->unmap_resource)
307 ops->unmap_resource(dev, addr, size, dir, attrs);
308 debug_dma_unmap_resource(dev, addr, size, dir);
309 }
310
311 static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
312 size_t size,
313 enum dma_data_direction dir)
314 {
315 struct dma_map_ops *ops = get_dma_ops(dev);
316
317 BUG_ON(!valid_dma_direction(dir));
318 if (ops->sync_single_for_cpu)
319 ops->sync_single_for_cpu(dev, addr, size, dir);
320 debug_dma_sync_single_for_cpu(dev, addr, size, dir);
321 }
322
323 static inline void dma_sync_single_for_device(struct device *dev,
324 dma_addr_t addr, size_t size,
325 enum dma_data_direction dir)
326 {
327 struct dma_map_ops *ops = get_dma_ops(dev);
328
329 BUG_ON(!valid_dma_direction(dir));
330 if (ops->sync_single_for_device)
331 ops->sync_single_for_device(dev, addr, size, dir);
332 debug_dma_sync_single_for_device(dev, addr, size, dir);
333 }
334
335 static inline void dma_sync_single_range_for_cpu(struct device *dev,
336 dma_addr_t addr,
337 unsigned long offset,
338 size_t size,
339 enum dma_data_direction dir)
340 {
341 const struct dma_map_ops *ops = get_dma_ops(dev);
342
343 BUG_ON(!valid_dma_direction(dir));
344 if (ops->sync_single_for_cpu)
345 ops->sync_single_for_cpu(dev, addr + offset, size, dir);
346 debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
347 }
348
349 static inline void dma_sync_single_range_for_device(struct device *dev,
350 dma_addr_t addr,
351 unsigned long offset,
352 size_t size,
353 enum dma_data_direction dir)
354 {
355 const struct dma_map_ops *ops = get_dma_ops(dev);
356
357 BUG_ON(!valid_dma_direction(dir));
358 if (ops->sync_single_for_device)
359 ops->sync_single_for_device(dev, addr + offset, size, dir);
360 debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
361 }
362
363 static inline void
364 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
365 int nelems, enum dma_data_direction dir)
366 {
367 struct dma_map_ops *ops = get_dma_ops(dev);
368
369 BUG_ON(!valid_dma_direction(dir));
370 if (ops->sync_sg_for_cpu)
371 ops->sync_sg_for_cpu(dev, sg, nelems, dir);
372 debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
373 }
374
375 static inline void
376 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
377 int nelems, enum dma_data_direction dir)
378 {
379 struct dma_map_ops *ops = get_dma_ops(dev);
380
381 BUG_ON(!valid_dma_direction(dir));
382 if (ops->sync_sg_for_device)
383 ops->sync_sg_for_device(dev, sg, nelems, dir);
384 debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
385
386 }
387
388 #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
389 #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
390 #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
391 #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
392 #define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
393 #define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
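
A hedged sketch of the streaming wrappers above in a transmit path (example_dma_to_device() and the device-programming step are hypothetical):

static int example_dma_to_device(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, handle))
                return -ENOMEM;

        /* ... program the device with "handle" and await completion ... */

        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        return 0;
}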
394
395 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
396 void *cpu_addr, dma_addr_t dma_addr, size_t size);
397
398 void *dma_common_contiguous_remap(struct page *page, size_t size,
399 unsigned long vm_flags,
400 pgprot_t prot, const void *caller);
401
402 void *dma_common_pages_remap(struct page **pages, size_t size,
403 unsigned long vm_flags, pgprot_t prot,
404 const void *caller);
405 void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
406
407 /**
408 * dma_mmap_attrs - map a coherent DMA allocation into user space
409 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
410 * @vma: vm_area_struct describing requested user mapping
411 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
412 * @handle: device-view address returned from dma_alloc_attrs
413 * @size: size of memory originally requested in dma_alloc_attrs
414 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
415 *
416 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
417 * into user space. The coherent DMA buffer must not be freed by the
418 * driver until the user space mapping has been released.
419 */
420 static inline int
421 dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
422 dma_addr_t dma_addr, size_t size, unsigned long attrs)
423 {
424 struct dma_map_ops *ops = get_dma_ops(dev);
425 BUG_ON(!ops);
426 if (ops->mmap)
427 return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
428 return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
429 }
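
A sketch of dma_mmap_attrs() from a driver's mmap() file operation (struct example_buf and its fields are hypothetical, assumed to record an earlier dma_alloc_attrs() result):

struct example_buf {
        struct device *dev;
        void *cpu_addr;
        dma_addr_t dma_addr;
        size_t size;
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct example_buf *buf = file->private_data;

        return dma_mmap_attrs(buf->dev, vma, buf->cpu_addr,
                              buf->dma_addr, buf->size, 0);
}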
430
431 #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
432
433 int
434 dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
435 void *cpu_addr, dma_addr_t dma_addr, size_t size);
436
437 static inline int
438 dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
439 dma_addr_t dma_addr, size_t size,
440 unsigned long attrs)
441 {
442 struct dma_map_ops *ops = get_dma_ops(dev);
443 BUG_ON(!ops);
444 if (ops->get_sgtable)
445 return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
446 attrs);
447 return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
448 }
449
450 #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
451
452 #ifndef arch_dma_alloc_attrs
453 #define arch_dma_alloc_attrs(dev, flag) (true)
454 #endif
455
456 static inline void *dma_alloc_attrs(struct device *dev, size_t size,
457 dma_addr_t *dma_handle, gfp_t flag,
458 unsigned long attrs)
459 {
460 struct dma_map_ops *ops = get_dma_ops(dev);
461 void *cpu_addr;
462
463 BUG_ON(!ops);
464
465 if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
466 return cpu_addr;
467
468 if (!arch_dma_alloc_attrs(&dev, &flag))
469 return NULL;
470 if (!ops->alloc)
471 return NULL;
472
473 cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
474 debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
475 return cpu_addr;
476 }
477
478 static inline void dma_free_attrs(struct device *dev, size_t size,
479 void *cpu_addr, dma_addr_t dma_handle,
480 unsigned long attrs)
481 {
482 struct dma_map_ops *ops = get_dma_ops(dev);
483
484 BUG_ON(!ops);
485 WARN_ON(irqs_disabled());
486
487 if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
488 return;
489
490 if (!ops->free || !cpu_addr)
491 return;
492
493 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
494 ops->free(dev, size, cpu_addr, dma_handle, attrs);
495 }
496
497 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
498 dma_addr_t *dma_handle, gfp_t flag)
499 {
500 return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
501 }
502
503 static inline void dma_free_coherent(struct device *dev, size_t size,
504 void *cpu_addr, dma_addr_t dma_handle)
505 {
506 return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
507 }
508
509 static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
510 dma_addr_t *dma_handle, gfp_t gfp)
511 {
512 return dma_alloc_attrs(dev, size, dma_handle, gfp,
513 DMA_ATTR_NON_CONSISTENT);
514 }
515
516 static inline void dma_free_noncoherent(struct device *dev, size_t size,
517 void *cpu_addr, dma_addr_t dma_handle)
518 {
519 dma_free_attrs(dev, size, cpu_addr, dma_handle,
520 DMA_ATTR_NON_CONSISTENT);
521 }
522
523 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
524 {
525 debug_dma_mapping_error(dev, dma_addr);
526
527 if (get_dma_ops(dev)->mapping_error)
528 return get_dma_ops(dev)->mapping_error(dev, dma_addr);
529
530 #ifdef DMA_ERROR_CODE
531 return dma_addr == DMA_ERROR_CODE;
532 #else
533 return 0;
534 #endif
535 }
536
537 #ifndef HAVE_ARCH_DMA_SUPPORTED
538 static inline int dma_supported(struct device *dev, u64 mask)
539 {
540 struct dma_map_ops *ops = get_dma_ops(dev);
541
542 if (!ops)
543 return 0;
544 if (!ops->dma_supported)
545 return 1;
546 return ops->dma_supported(dev, mask);
547 }
548 #endif
549
550 #ifndef HAVE_ARCH_DMA_SET_MASK
551 static inline int dma_set_mask(struct device *dev, u64 mask)
552 {
553 struct dma_map_ops *ops = get_dma_ops(dev);
554
555 if (ops->set_dma_mask)
556 return ops->set_dma_mask(dev, mask);
557
558 if (!dev->dma_mask || !dma_supported(dev, mask))
559 return -EIO;
560 *dev->dma_mask = mask;
561 return 0;
562 }
563 #endif
564
565 static inline u64 dma_get_mask(struct device *dev)
566 {
567 if (dev && dev->dma_mask && *dev->dma_mask)
568 return *dev->dma_mask;
569 return DMA_BIT_MASK(32);
570 }
571
572 #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
573 int dma_set_coherent_mask(struct device *dev, u64 mask);
574 #else
575 static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
576 {
577 if (!dma_supported(dev, mask))
578 return -EIO;
579 dev->coherent_dma_mask = mask;
580 return 0;
581 }
582 #endif
583
584 /*
585 * Set both the DMA mask and the coherent DMA mask to the same thing.
586 * Note that we don't check the return value from dma_set_coherent_mask()
587 * as the DMA API guarantees that the coherent DMA mask can be set to
588 * the same or smaller than the streaming DMA mask.
589 */
590 static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
591 {
592 int rc = dma_set_mask(dev, mask);
593 if (rc == 0)
594 dma_set_coherent_mask(dev, mask);
595 return rc;
596 }
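
Typical probe-time use of the helper above, sketched as a 64-bit attempt with a 32-bit fallback:

static int example_init_dma_masks(struct device *dev)
{
        if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
                return 0;               /* full 64-bit DMA available */
        return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}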
597
598 /*
599 * Similar to the above, except it deals with the case where the device
600 * does not have dev->dma_mask appropriately setup.
601 */
602 static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
603 {
604 dev->dma_mask = &dev->coherent_dma_mask;
605 return dma_set_mask_and_coherent(dev, mask);
606 }
607
608 extern u64 dma_get_required_mask(struct device *dev);
609
610 #ifndef arch_setup_dma_ops
611 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
612 u64 size, const struct iommu_ops *iommu,
613 bool coherent) { }
614 #endif
615
616 #ifndef arch_teardown_dma_ops
617 static inline void arch_teardown_dma_ops(struct device *dev) { }
618 #endif
619
620 static inline unsigned int dma_get_max_seg_size(struct device *dev)
621 {
622 if (dev->dma_parms && dev->dma_parms->max_segment_size)
623 return dev->dma_parms->max_segment_size;
624 return SZ_64K;
625 }
626
627 static inline unsigned int dma_set_max_seg_size(struct device *dev,
628 unsigned int size)
629 {
630 if (dev->dma_parms) {
631 dev->dma_parms->max_segment_size = size;
632 return 0;
633 }
634 return -EIO;
635 }
636
637 static inline unsigned long dma_get_seg_boundary(struct device *dev)
638 {
639 if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
640 return dev->dma_parms->segment_boundary_mask;
641 return DMA_BIT_MASK(32);
642 }
643
644 static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
645 {
646 if (dev->dma_parms) {
647 dev->dma_parms->segment_boundary_mask = mask;
648 return 0;
649 }
650 return -EIO;
651 }
652
653 #ifndef dma_max_pfn
654 static inline unsigned long dma_max_pfn(struct device *dev)
655 {
656 return *dev->dma_mask >> PAGE_SHIFT;
657 }
658 #endif
659
660 static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
661 dma_addr_t *dma_handle, gfp_t flag)
662 {
663 void *ret = dma_alloc_coherent(dev, size, dma_handle,
664 flag | __GFP_ZERO);
665 return ret;
666 }
667
668 #ifdef CONFIG_HAS_DMA
669 static inline int dma_get_cache_alignment(void)
670 {
671 #ifdef ARCH_DMA_MINALIGN
672 return ARCH_DMA_MINALIGN;
673 #endif
674 return 1;
675 }
676 #endif
677
678 /* flags for the coherent memory api */
679 #define DMA_MEMORY_MAP 0x01
680 #define DMA_MEMORY_IO 0x02
681 #define DMA_MEMORY_INCLUDES_CHILDREN 0x04
682 #define DMA_MEMORY_EXCLUSIVE 0x08
683
684 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
685 int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
686 dma_addr_t device_addr, size_t size, int flags);
687 void dma_release_declared_memory(struct device *dev);
688 void *dma_mark_declared_memory_occupied(struct device *dev,
689 dma_addr_t device_addr, size_t size);
690 #else
691 static inline int
692 dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
693 dma_addr_t device_addr, size_t size, int flags)
694 {
695 return 0;
696 }
697
698 static inline void
699 dma_release_declared_memory(struct device *dev)
700 {
701 }
702
703 static inline void *
704 dma_mark_declared_memory_occupied(struct device *dev,
705 dma_addr_t device_addr, size_t size)
706 {
707 return ERR_PTR(-EBUSY);
708 }
709 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
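/*
 * Illustrative sketch of declaring a device-local coherent region at
 * probe time; the foo_* name and the bus/physical addresses are
 * hypothetical. In this kernel era the call returns the granted
 * DMA_MEMORY_* flags on success and 0 on failure (as the stub above
 * also does).
 */
static int foo_declare_coherent(struct device *dev)
{
	int flags = dma_declare_coherent_memory(dev, 0x90000000, 0x90000000,
						SZ_1M,
						DMA_MEMORY_MAP |
						DMA_MEMORY_EXCLUSIVE);
	if (!flags)
		return -ENOMEM;
	return 0;	/* dma_alloc_coherent() is now served from the region */
}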
710
711 /*
712 * Managed DMA API
713 */
714 extern void *dmam_alloc_coherent(struct device *dev, size_t size,
715 dma_addr_t *dma_handle, gfp_t gfp);
716 extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
717 dma_addr_t dma_handle);
718 extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
719 dma_addr_t *dma_handle, gfp_t gfp);
720 extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
721 dma_addr_t dma_handle);
722 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
723 extern int dmam_declare_coherent_memory(struct device *dev,
724 phys_addr_t phys_addr,
725 dma_addr_t device_addr, size_t size,
726 int flags);
727 extern void dmam_release_declared_memory(struct device *dev);
728 #else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
729 static inline int dmam_declare_coherent_memory(struct device *dev,
730 phys_addr_t phys_addr, dma_addr_t device_addr,
731 size_t size, int flags)
732 {
733 return 0;
734 }
735
736 static inline void dmam_release_declared_memory(struct device *dev)
737 {
738 }
739 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
740
741 static inline void *dma_alloc_wc(struct device *dev, size_t size,
742 dma_addr_t *dma_addr, gfp_t gfp)
743 {
744 return dma_alloc_attrs(dev, size, dma_addr, gfp,
745 DMA_ATTR_WRITE_COMBINE);
746 }
747 #ifndef dma_alloc_writecombine
748 #define dma_alloc_writecombine dma_alloc_wc
749 #endif
750
751 static inline void dma_free_wc(struct device *dev, size_t size,
752 void *cpu_addr, dma_addr_t dma_addr)
753 {
754 return dma_free_attrs(dev, size, cpu_addr, dma_addr,
755 DMA_ATTR_WRITE_COMBINE);
756 }
757 #ifndef dma_free_writecombine
758 #define dma_free_writecombine dma_free_wc
759 #endif
760
761 static inline int dma_mmap_wc(struct device *dev,
762 struct vm_area_struct *vma,
763 void *cpu_addr, dma_addr_t dma_addr,
764 size_t size)
765 {
766 return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
767 DMA_ATTR_WRITE_COMBINE);
768 }
769 #ifndef dma_mmap_writecombine
770 #define dma_mmap_writecombine dma_mmap_wc
771 #endif
772
773 #if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
774 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
775 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
776 #define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
777 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
778 #define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
779 #define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
780 #else
781 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
782 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
783 #define dma_unmap_addr(PTR, ADDR_NAME) (0)
784 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
785 #define dma_unmap_len(PTR, LEN_NAME) (0)
786 #define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
787 #endif
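/*
 * Illustrative sketch of the dma_unmap_* state macros: the fields and
 * accesses compile away when neither CONFIG_NEED_DMA_MAP_STATE nor
 * CONFIG_DMA_API_DEBUG is set. The foo_* names are hypothetical.
 */
struct foo_tx_buf {
	void *vaddr;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(len);
};

static void foo_save_mapping(struct foo_tx_buf *b, dma_addr_t addr, u32 len)
{
	dma_unmap_addr_set(b, mapping, addr);
	dma_unmap_len_set(b, len, len);
}

static void foo_unmap(struct device *dev, struct foo_tx_buf *b)
{
	dma_unmap_single(dev, dma_unmap_addr(b, mapping),
			 dma_unmap_len(b, len), DMA_TO_DEVICE);
}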
788
789 #endif 1 /* interrupt.h */
2 #ifndef _LINUX_INTERRUPT_H
3 #define _LINUX_INTERRUPT_H
4
5 #include <linux/kernel.h>
6 #include <linux/linkage.h>
7 #include <linux/bitops.h>
8 #include <linux/preempt.h>
9 #include <linux/cpumask.h>
10 #include <linux/irqreturn.h>
11 #include <linux/irqnr.h>
12 #include <linux/hardirq.h>
13 #include <linux/irqflags.h>
14 #include <linux/hrtimer.h>
15 #include <linux/kref.h>
16 #include <linux/workqueue.h>
17
18 #include <linux/atomic.h>
19 #include <asm/ptrace.h>
20 #include <asm/irq.h>
21
22 /*
23 * These correspond to the IORESOURCE_IRQ_* defines in
24 * linux/ioport.h to select the interrupt line behaviour. When
25 * requesting an interrupt without specifying an IRQF_TRIGGER, the
26 * setting should be assumed to be "as already configured", which
27 * may be as per machine or firmware initialisation.
28 */
29 #define IRQF_TRIGGER_NONE 0x00000000
30 #define IRQF_TRIGGER_RISING 0x00000001
31 #define IRQF_TRIGGER_FALLING 0x00000002
32 #define IRQF_TRIGGER_HIGH 0x00000004
33 #define IRQF_TRIGGER_LOW 0x00000008
34 #define IRQF_TRIGGER_MASK (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
35 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
36 #define IRQF_TRIGGER_PROBE 0x00000010
37
38 /*
39 * These flags are used only by the kernel as part of the
40 * irq handling routines.
41 *
42 * IRQF_SHARED - allow sharing the irq among several devices
43 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
44 * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
45 * IRQF_PERCPU - Interrupt is per cpu
46 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
47 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
48 * registered first in a shared interrupt is considered for
49 * performance reasons)
50 * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler finishes.
51 * Used by threaded interrupts which need to keep the
52 * irq line disabled until the threaded handler has been run.
53 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
54 * that this interrupt will wake the system from a suspended
55 * state. See Documentation/power/suspend-and-interrupts.txt
56 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
57 * IRQF_NO_THREAD - Interrupt cannot be threaded
58 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
59 * resume time.
60 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
61 * interrupt handler after suspending interrupts. For system
62 * wakeup devices, users need to implement wakeup detection in
63 * their interrupt handlers.
64 */
65 #define IRQF_SHARED 0x00000080
66 #define IRQF_PROBE_SHARED 0x00000100
67 #define __IRQF_TIMER 0x00000200
68 #define IRQF_PERCPU 0x00000400
69 #define IRQF_NOBALANCING 0x00000800
70 #define IRQF_IRQPOLL 0x00001000
71 #define IRQF_ONESHOT 0x00002000
72 #define IRQF_NO_SUSPEND 0x00004000
73 #define IRQF_FORCE_RESUME 0x00008000
74 #define IRQF_NO_THREAD 0x00010000
75 #define IRQF_EARLY_RESUME 0x00020000
76 #define IRQF_COND_SUSPEND 0x00040000
77
78 #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
79
80 /*
81 * These values can be returned by request_any_context_irq() and
82 * describe the context the interrupt will be run in.
83 *
84 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
85 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
86 */
87 enum {
88 IRQC_IS_HARDIRQ = 0,
89 IRQC_IS_NESTED,
90 };
91
92 typedef irqreturn_t (*irq_handler_t)(int, void *);
93
94 /**
95 * struct irqaction - per interrupt action descriptor
96 * @handler: interrupt handler function
97 * @name: name of the device
98 * @dev_id: cookie to identify the device
99 * @percpu_dev_id: cookie to identify the device
100 * @next: pointer to the next irqaction for shared interrupts
101 * @irq: interrupt number
102 * @flags: flags (see IRQF_* above)
103 * @thread_fn: interrupt handler function for threaded interrupts
104 * @thread: thread pointer for threaded interrupts
105 * @secondary: pointer to secondary irqaction (force threading)
106 * @thread_flags: flags related to @thread
107 * @thread_mask: bitmask for keeping track of @thread activity
108 * @dir: pointer to the proc/irq/NN/name entry
109 */
110 struct irqaction {
111 irq_handler_t handler;
112 void *dev_id;
113 void __percpu *percpu_dev_id;
114 struct irqaction *next;
115 irq_handler_t thread_fn;
116 struct task_struct *thread;
117 struct irqaction *secondary;
118 unsigned int irq;
119 unsigned int flags;
120 unsigned long thread_flags;
121 unsigned long thread_mask;
122 const char *name;
123 struct proc_dir_entry *dir;
124 } ____cacheline_internodealigned_in_smp;
125
126 extern irqreturn_t no_action(int cpl, void *dev_id);
127
128 /*
129 * If a (PCI) device interrupt is not connected we set dev->irq to
130 * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
131 * can distinguish that case from other error returns.
132 *
133 * 0x80000000 is guaranteed to be outside the available range of interrupts
134 * and easy to distinguish from other possible incorrect values.
135 */
136 #define IRQ_NOTCONNECTED (1U << 31)
137
138 extern int __must_check
139 request_threaded_irq(unsigned int irq, irq_handler_t handler,
140 irq_handler_t thread_fn,
141 unsigned long flags, const char *name, void *dev);
142
143 static inline int __must_check
144 request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
145 const char *name, void *dev)
146 {
147 return request_threaded_irq(irq, handler, NULL, flags, name, dev);
148 }
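/*
 * Illustrative sketch of requesting a threaded interrupt with the
 * IRQF_* flags above; the foo_* names are hypothetical.
 */
static irqreturn_t foo_quick_check(int irq, void *dev_id)
{
	/* hardirq context: just decide whether the thread must run */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	/* process context; the irq line stays masked (IRQF_ONESHOT) */
	return IRQ_HANDLED;
}

/* typically from probe():
 *	err = request_threaded_irq(irq, foo_quick_check, foo_thread_fn,
 *				   IRQF_ONESHOT, "foo", foo);
 */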
149
150 extern int __must_check
151 request_any_context_irq(unsigned int irq, irq_handler_t handler,
152 unsigned long flags, const char *name, void *dev_id);
153
154 extern int __must_check
155 request_percpu_irq(unsigned int irq, irq_handler_t handler,
156 const char *devname, void __percpu *percpu_dev_id);
157
158 extern void free_irq(unsigned int, void *);
159 extern void free_percpu_irq(unsigned int, void __percpu *);
160
161 struct device;
162
163 extern int __must_check
164 devm_request_threaded_irq(struct device *dev, unsigned int irq,
165 irq_handler_t handler, irq_handler_t thread_fn,
166 unsigned long irqflags, const char *devname,
167 void *dev_id);
168
169 static inline int __must_check
170 devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
171 unsigned long irqflags, const char *devname, void *dev_id)
172 {
173 return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
174 devname, dev_id);
175 }
176
177 extern int __must_check
178 devm_request_any_context_irq(struct device *dev, unsigned int irq,
179 irq_handler_t handler, unsigned long irqflags,
180 const char *devname, void *dev_id);
181
182 extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
183
184 /*
185 * On lockdep we don't want to enable hardirqs in hardirq
186 * context. Use local_irq_enable_in_hardirq() to annotate
187 * kernel code that has to do this nevertheless (pretty much
188 * the only valid case is for old/broken hardware that is
189 * insanely slow).
190 *
191 * NOTE: in theory this might break fragile code that relies
192 * on hardirq delivery - in practice we don't seem to have such
193 * places left. So the only effect should be slightly increased
194 * irqs-off latencies.
195 */
196 #ifdef CONFIG_LOCKDEP
197 # define local_irq_enable_in_hardirq() do { } while (0)
198 #else
199 # define local_irq_enable_in_hardirq() local_irq_enable()
200 #endif
201
202 extern void disable_irq_nosync(unsigned int irq);
203 extern bool disable_hardirq(unsigned int irq);
204 extern void disable_irq(unsigned int irq);
205 extern void disable_percpu_irq(unsigned int irq);
206 extern void enable_irq(unsigned int irq);
207 extern void enable_percpu_irq(unsigned int irq, unsigned int type);
208 extern bool irq_percpu_is_enabled(unsigned int irq);
209 extern void irq_wake_thread(unsigned int irq, void *dev_id);
210
211 /* The following three functions are for the core kernel use only. */
212 extern void suspend_device_irqs(void);
213 extern void resume_device_irqs(void);
214
215 /**
216 * struct irq_affinity_notify - context for notification of IRQ affinity changes
217 * @irq: Interrupt to which notification applies
218 * @kref: Reference count, for internal use
219 * @work: Work item, for internal use
220 * @notify: Function to be called on change. This will be
221 * called in process context.
222 * @release: Function to be called on release. This will be
223 * called in process context. Once registered, the
224 * structure must only be freed when this function is
225 * called or later.
226 */
227 struct irq_affinity_notify {
228 unsigned int irq;
229 struct kref kref;
230 struct work_struct work;
231 void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
232 void (*release)(struct kref *ref);
233 };
234
235 /**
236 * struct irq_affinity - Description for automatic irq affinity assignments
237 * @pre_vectors: Don't apply affinity to @pre_vectors at beginning of
238 * the MSI(-X) vector space
239 * @post_vectors: Don't apply affinity to @post_vectors at end of
240 * the MSI(-X) vector space
241 */
242 struct irq_affinity {
243 int pre_vectors;
244 int post_vectors;
245 };
246
247 #if defined(CONFIG_SMP)
248
249 extern cpumask_var_t irq_default_affinity;
250
251 /* Internal implementation. Use the helpers below */
252 extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
253 bool force);
254
255 /**
256 * irq_set_affinity - Set the irq affinity of a given irq
257 * @irq: Interrupt to set affinity
258 * @cpumask: cpumask
259 *
260 * Fails if cpumask does not contain an online CPU
261 */
262 static inline int
263 irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
264 {
265 return __irq_set_affinity(irq, cpumask, false);
266 }
267
268 /**
269 * irq_force_affinity - Force the irq affinity of a given irq
270 * @irq: Interrupt to set affinity
271 * @cpumask: cpumask
272 *
273 * Same as irq_set_affinity, but without checking the mask against
274 * online cpus.
275 *
276 * Solely for low-level CPU hotplug code, where we need to make
277 * per-CPU interrupts affine before the CPU comes online.
278 */
279 static inline int
280 irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
281 {
282 return __irq_set_affinity(irq, cpumask, true);
283 }
284
285 extern int irq_can_set_affinity(unsigned int irq);
286 extern int irq_select_affinity(unsigned int irq);
287
288 extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
289
290 extern int
291 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
292
293 struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
294 int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd);
295
296 #else /* CONFIG_SMP */
297
298 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
299 {
300 return -EINVAL;
301 }
302
303 static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
304 {
305 return 0;
306 }
307
308 static inline int irq_can_set_affinity(unsigned int irq)
309 {
310 return 0;
311 }
312
313 static inline int irq_select_affinity(unsigned int irq) { return 0; }
314
315 static inline int irq_set_affinity_hint(unsigned int irq,
316 const struct cpumask *m)
317 {
318 return -EINVAL;
319 }
320
321 static inline int
322 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
323 {
324 return 0;
325 }
326
327 static inline struct cpumask *
328 irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
329 {
330 return NULL;
331 }
332
333 static inline int
334 irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd)
335 {
336 return maxvec;
337 }
338
339 #endif /* CONFIG_SMP */
340
341 /*
342 * Special lockdep variants of irq disabling/enabling.
343 * These should be used for locking constructs where it is
344 * known that a particular irq context is disabled and is the
345 * only irq-context user of a lock, so that it is safe to take
346 * the lock in the irq-disabled section without disabling
347 * hardirqs.
348 *
349 * On !CONFIG_LOCKDEP they are equivalent to the normal
350 * irq disable/enable methods.
351 */
352 static inline void disable_irq_nosync_lockdep(unsigned int irq)
353 {
354 disable_irq_nosync(irq);
355 #ifdef CONFIG_LOCKDEP
356 local_irq_disable();
357 #endif
358 }
359
360 static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
361 {
362 disable_irq_nosync(irq);
363 #ifdef CONFIG_LOCKDEP
364 local_irq_save(*flags);
365 #endif
366 }
367
368 static inline void disable_irq_lockdep(unsigned int irq)
369 {
370 disable_irq(irq);
371 #ifdef CONFIG_LOCKDEP
372 local_irq_disable();
373 #endif
374 }
375
376 static inline void enable_irq_lockdep(unsigned int irq)
377 {
378 #ifdef CONFIG_LOCKDEP
379 local_irq_enable();
380 #endif
381 enable_irq(irq);
382 }
383
384 static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
385 {
386 #ifdef CONFIG_LOCKDEP
387 local_irq_restore(*flags);
388 #endif
389 enable_irq(irq);
390 }
391
392 /* IRQ wakeup (PM) control: */
393 extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
394
395 static inline int enable_irq_wake(unsigned int irq)
396 {
397 return irq_set_irq_wake(irq, 1);
398 }
399
400 static inline int disable_irq_wake(unsigned int irq)
401 {
402 return irq_set_irq_wake(irq, 0);
403 }
404
405 /*
406 * irq_get_irqchip_state/irq_set_irqchip_state specific flags
407 */
408 enum irqchip_irq_state {
409 IRQCHIP_STATE_PENDING, /* Is interrupt pending? */
410 IRQCHIP_STATE_ACTIVE, /* Is interrupt in progress? */
411 IRQCHIP_STATE_MASKED, /* Is interrupt masked? */
412 IRQCHIP_STATE_LINE_LEVEL, /* Is IRQ line high? */
413 };
414
415 extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
416 bool *state);
417 extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
418 bool state);
419
420 #ifdef CONFIG_IRQ_FORCED_THREADING
421 extern bool force_irqthreads;
422 #else
423 #define force_irqthreads (0)
424 #endif
425
426 #ifndef __ARCH_SET_SOFTIRQ_PENDING
427 #define set_softirq_pending(x) (local_softirq_pending() = (x))
428 #define or_softirq_pending(x) (local_softirq_pending() |= (x))
429 #endif
430
431 /* Some architectures might implement lazy enabling/disabling of
432 * interrupts. In some cases, such as stop_machine, we might want
433 * to ensure that after a local_irq_disable(), interrupts have
434 * really been disabled in hardware. Such architectures need to
435 * implement the following hook.
436 */
437 #ifndef hard_irq_disable
438 #define hard_irq_disable() do { } while(0)
439 #endif
440
441 /* Please avoid allocating new softirqs unless you really need
442 high-frequency threaded job scheduling. For almost all purposes
443 tasklets are more than enough; e.g. all serial device BHs et
444 al. should be converted to tasklets, not to softirqs.
445 */
446
447 enum
448 {
449 HI_SOFTIRQ=0,
450 TIMER_SOFTIRQ,
451 NET_TX_SOFTIRQ,
452 NET_RX_SOFTIRQ,
453 BLOCK_SOFTIRQ,
454 IRQ_POLL_SOFTIRQ,
455 TASKLET_SOFTIRQ,
456 SCHED_SOFTIRQ,
457 HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
458 numbering. Sigh! */
459 RCU_SOFTIRQ, /* Preferably RCU should always be the last softirq */
460
461 NR_SOFTIRQS
462 };
463
464 #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
465
466 /* Map softirq index to softirq name. Update 'softirq_to_name' in
467 * kernel/softirq.c when adding a new softirq.
468 */
469 extern const char * const softirq_to_name[NR_SOFTIRQS];
470
471 /* softirq mask and active fields moved to irq_cpustat_t in
472 * asm/hardirq.h to get better cache usage. KAO
473 */
474
475 struct softirq_action
476 {
477 void (*action)(struct softirq_action *);
478 };
479
480 asmlinkage void do_softirq(void);
481 asmlinkage void __do_softirq(void);
482
483 #ifdef __ARCH_HAS_DO_SOFTIRQ
484 void do_softirq_own_stack(void);
485 #else
486 static inline void do_softirq_own_stack(void)
487 {
488 __do_softirq();
489 }
490 #endif
491
492 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
493 extern void softirq_init(void);
494 extern void __raise_softirq_irqoff(unsigned int nr);
495
496 extern void raise_softirq_irqoff(unsigned int nr);
497 extern void raise_softirq(unsigned int nr);
498
499 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
500
501 static inline struct task_struct *this_cpu_ksoftirqd(void)
502 {
503 return this_cpu_read(ksoftirqd);
504 }
505
506 /* Tasklets --- multithreaded analogue of BHs.
507
508 What distinguishes them from generic softirqs: a tasklet
509 runs on only one CPU at a time.
510
511 What distinguishes them from BHs: different tasklets
512 may run simultaneously on different CPUs.
513
514 Properties:
515 * If tasklet_schedule() is called, the tasklet is guaranteed
516 to be executed on some cpu at least once after this.
517 * If the tasklet is already scheduled, but its execution has not yet
518 started, it will be executed only once.
519 * If this tasklet is already running on another CPU (or schedule is called
520 from the tasklet itself), it is rescheduled for later.
521 * A tasklet is strictly serialized wrt itself, but not
522 wrt other tasklets. If the client needs inter-tasklet synchronization,
523 it must provide it with spinlocks.
524 */
525
526 struct tasklet_struct
527 {
528 struct tasklet_struct *next;
529 unsigned long state;
530 atomic_t count;
531 void (*func)(unsigned long);
532 unsigned long data;
533 };
534
535 #define DECLARE_TASKLET(name, func, data) \
536 struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
537
538 #define DECLARE_TASKLET_DISABLED(name, func, data) \
539 struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
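/*
 * Illustrative sketch of declaring and scheduling a tasklet from an
 * interrupt handler; the foo_* names are hypothetical.
 */
static void foo_do_work(unsigned long data)
{
	/* softirq context, serialized against itself */
}

static DECLARE_TASKLET(foo_tasklet, foo_do_work, 0);

static irqreturn_t foo_isr(int irq, void *dev_id)
{
	tasklet_schedule(&foo_tasklet);	/* no-op if already scheduled */
	return IRQ_HANDLED;
}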
540
541
542 enum
543 {
544 TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
545 TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
546 };
547
548 #ifdef CONFIG_SMP
549 static inline int tasklet_trylock(struct tasklet_struct *t)
550 {
551 return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
552 }
553
554 static inline void tasklet_unlock(struct tasklet_struct *t)
555 {
556 smp_mb__before_atomic();
557 clear_bit(TASKLET_STATE_RUN, &(t)->state);
558 }
559
560 static inline void tasklet_unlock_wait(struct tasklet_struct *t)
561 {
562 while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
563 }
564 #else
565 #define tasklet_trylock(t) 1
566 #define tasklet_unlock_wait(t) do { } while (0)
567 #define tasklet_unlock(t) do { } while (0)
568 #endif
569
570 extern void __tasklet_schedule(struct tasklet_struct *t);
571
572 static inline void tasklet_schedule(struct tasklet_struct *t)
573 {
574 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
575 __tasklet_schedule(t);
576 }
577
578 extern void __tasklet_hi_schedule(struct tasklet_struct *t);
579
580 static inline void tasklet_hi_schedule(struct tasklet_struct *t)
581 {
582 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
583 __tasklet_hi_schedule(t);
584 }
585
586 extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
587
588 /*
589 * This version avoids touching any other tasklets. Needed for kmemcheck
590 * in order not to take any page faults while enqueueing this tasklet;
591 * consider VERY carefully whether you really need this or
592 * tasklet_hi_schedule()...
593 */
594 static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
595 {
596 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
597 __tasklet_hi_schedule_first(t);
598 }
599
600
601 static inline void tasklet_disable_nosync(struct tasklet_struct *t)
602 {
603 atomic_inc(&t->count);
604 smp_mb__after_atomic();
605 }
606
607 static inline void tasklet_disable(struct tasklet_struct *t)
608 {
609 tasklet_disable_nosync(t);
610 tasklet_unlock_wait(t);
611 smp_mb();
612 }
613
614 static inline void tasklet_enable(struct tasklet_struct *t)
615 {
616 smp_mb__before_atomic();
617 atomic_dec(&t->count);
618 }
619
620 extern void tasklet_kill(struct tasklet_struct *t);
621 extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
622 extern void tasklet_init(struct tasklet_struct *t,
623 void (*func)(unsigned long), unsigned long data);
624
625 struct tasklet_hrtimer {
626 struct hrtimer timer;
627 struct tasklet_struct tasklet;
628 enum hrtimer_restart (*function)(struct hrtimer *);
629 };
630
631 extern void
632 tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
633 enum hrtimer_restart (*function)(struct hrtimer *),
634 clockid_t which_clock, enum hrtimer_mode mode);
635
636 static inline
637 void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
638 const enum hrtimer_mode mode)
639 {
640 hrtimer_start(&ttimer->timer, time, mode);
641 }
642
643 static inline
644 void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
645 {
646 hrtimer_cancel(&ttimer->timer);
647 tasklet_kill(&ttimer->tasklet);
648 }
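/*
 * Illustrative sketch: a tasklet_hrtimer defers the timer callback from
 * hardirq to softirq context; the foo_* names are hypothetical.
 */
static enum hrtimer_restart foo_timer_fn(struct hrtimer *t)
{
	/* invoked via the tasklet, i.e. in softirq context */
	return HRTIMER_NORESTART;
}

/* setup and arming, e.g. from probe():
 *	tasklet_hrtimer_init(&ttimer, foo_timer_fn,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&ttimer, ms_to_ktime(10), HRTIMER_MODE_REL);
 */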
649
650 /*
651 * Autoprobing for irqs:
652 *
653 * probe_irq_on() and probe_irq_off() provide robust primitives
654 * for accurate IRQ probing during kernel initialization. They are
655 * reasonably simple to use, are not "fooled" by spurious interrupts,
656 * and, unlike other attempts at IRQ probing, they do not get hung on
657 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
658 *
659 * For reasonably foolproof probing, use them as follows:
660 *
661 * 1. clear and/or mask the device's internal interrupt.
662 * 2. sti();
663 * 3. irqs = probe_irq_on(); // "take over" all unassigned idle IRQs
664 * 4. enable the device and cause it to trigger an interrupt.
665 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
666 * 6. irq = probe_irq_off(irqs); // get IRQ number, 0=none, negative=multiple
667 * 7. service the device to clear its pending interrupt.
668 * 8. loop again if paranoia is required.
669 *
670 * probe_irq_on() returns a mask of allocated irqs.
671 *
672 * probe_irq_off() takes the mask as a parameter,
673 * and returns the irq number which occurred,
674 * or zero if none occurred, or a negative irq number
675 * if more than one irq occurred.
676 */
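/*
 * Illustrative sketch of the probing sequence described above;
 * foo_trigger_irq() and foo_ack_irq() stand in for hypothetical
 * device-specific helpers.
 */
static int foo_probe_irq(void)
{
	unsigned long mask;
	int irq;

	mask = probe_irq_on();		/* take over unassigned idle IRQs */
	foo_trigger_irq();		/* make the device raise its interrupt */
	mdelay(20);			/* give the interrupt time to arrive */
	irq = probe_irq_off(mask);	/* 0 = none, negative = multiple */
	foo_ack_irq();			/* clear the device's pending interrupt */
	return irq;
}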
677
678 #if !defined(CONFIG_GENERIC_IRQ_PROBE)
679 static inline unsigned long probe_irq_on(void)
680 {
681 return 0;
682 }
683 static inline int probe_irq_off(unsigned long val)
684 {
685 return 0;
686 }
687 static inline unsigned int probe_irq_mask(unsigned long val)
688 {
689 return 0;
690 }
691 #else
692 extern unsigned long probe_irq_on(void); /* returns 0 on failure */
693 extern int probe_irq_off(unsigned long); /* returns 0 or negative on failure */
694 extern unsigned int probe_irq_mask(unsigned long); /* returns mask of ISA interrupts */
695 #endif
696
697 #ifdef CONFIG_PROC_FS
698 /* Initialize /proc/irq/ */
699 extern void init_irq_proc(void);
700 #else
701 static inline void init_irq_proc(void)
702 {
703 }
704 #endif
705
706 struct seq_file;
707 int show_interrupts(struct seq_file *p, void *v);
708 int arch_show_interrupts(struct seq_file *p, int prec);
709
710 extern int early_irq_init(void);
711 extern int arch_probe_nr_irqs(void);
712 extern int arch_early_irq_init(void);
713
714 #if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
715 /*
716 * We want to know which function is an entrypoint of a hardirq or a softirq.
717 */
718 #define __irq_entry __attribute__((__section__(".irqentry.text")))
719 #define __softirq_entry \
720 __attribute__((__section__(".softirqentry.text")))
721
722 /* Limits of hardirq entrypoints */
723 extern char __irqentry_text_start[];
724 extern char __irqentry_text_end[];
725 /* Limits of softirq entrypoints */
726 extern char __softirqentry_text_start[];
727 extern char __softirqentry_text_end[];
728
729 #else
730 #define __irq_entry
731 #define __softirq_entry
732 #endif
733
734 #endif 1 #ifndef LINUX_KMEMCHECK_H
2 #define LINUX_KMEMCHECK_H
3
4 #include <linux/mm_types.h>
5 #include <linux/types.h>
6
7 #ifdef CONFIG_KMEMCHECK
8 extern int kmemcheck_enabled;
9
10 /* The slab-related functions. */
11 void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
12 void kmemcheck_free_shadow(struct page *page, int order);
13 void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
14 size_t size);
15 void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
16
17 void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
18 gfp_t gfpflags);
19
20 void kmemcheck_show_pages(struct page *p, unsigned int n);
21 void kmemcheck_hide_pages(struct page *p, unsigned int n);
22
23 bool kmemcheck_page_is_tracked(struct page *p);
24
25 void kmemcheck_mark_unallocated(void *address, unsigned int n);
26 void kmemcheck_mark_uninitialized(void *address, unsigned int n);
27 void kmemcheck_mark_initialized(void *address, unsigned int n);
28 void kmemcheck_mark_freed(void *address, unsigned int n);
29
30 void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
31 void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
32 void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
33
34 int kmemcheck_show_addr(unsigned long address);
35 int kmemcheck_hide_addr(unsigned long address);
36
37 bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
38
39 /*
40 * Bitfield annotations
41 *
42 * How to use: If you have a struct using bitfields, for example
43 *
44 * struct a {
45 * int x:8, y:8;
46 * };
47 *
48 * then this should be rewritten as
49 *
50 * struct a {
51 * kmemcheck_bitfield_begin(flags);
52 * int x:8, y:8;
53 * kmemcheck_bitfield_end(flags);
54 * };
55 *
56 * Now the "flags_begin" and "flags_end" members may be used to refer to the
57 * beginning and end, respectively, of the bitfield (and things like
58 * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
59 * fields should be annotated:
60 *
61 * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
62 * kmemcheck_annotate_bitfield(a, flags);
63 */
64 #define kmemcheck_bitfield_begin(name) \
65 int name##_begin[0];
66
67 #define kmemcheck_bitfield_end(name) \
68 int name##_end[0];
69
70 #define kmemcheck_annotate_bitfield(ptr, name) \
71 do { \
72 int _n; \
73 \
74 if (!ptr) \
75 break; \
76 \
77 _n = (long) &((ptr)->name##_end) \
78 - (long) &((ptr)->name##_begin); \
79 BUILD_BUG_ON(_n < 0); \
80 \
81 kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
82 } while (0)
83
84 #define kmemcheck_annotate_variable(var) \
85 do { \
86 kmemcheck_mark_initialized(&(var), sizeof(var)); \
87 } while (0)
88
89 #else
90 #define kmemcheck_enabled 0
91
92 static inline void
93 kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
94 {
95 }
96
97 static inline void
98 kmemcheck_free_shadow(struct page *page, int order)
99 {
100 }
101
102 static inline void
103 kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
104 size_t size)
105 {
106 }
107
108 static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
109 size_t size)
110 {
111 }
112
113 static inline void kmemcheck_pagealloc_alloc(struct page *p,
114 unsigned int order, gfp_t gfpflags)
115 {
116 }
117
118 static inline bool kmemcheck_page_is_tracked(struct page *p)
119 {
120 return false;
121 }
122
123 static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
124 {
125 }
126
127 static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
128 {
129 }
130
131 static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
132 {
133 }
134
135 static inline void kmemcheck_mark_freed(void *address, unsigned int n)
136 {
137 }
138
139 static inline void kmemcheck_mark_unallocated_pages(struct page *p,
140 unsigned int n)
141 {
142 }
143
144 static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
145 unsigned int n)
146 {
147 }
148
149 static inline void kmemcheck_mark_initialized_pages(struct page *p,
150 unsigned int n)
151 {
152 }
153
154 static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
155 {
156 return true;
157 }
158
159 #define kmemcheck_bitfield_begin(name)
160 #define kmemcheck_bitfield_end(name)
161 #define kmemcheck_annotate_bitfield(ptr, name) \
162 do { \
163 } while (0)
164
165 #define kmemcheck_annotate_variable(var) \
166 do { \
167 } while (0)
168
169 #endif /* CONFIG_KMEMCHECK */
170
171 #endif /* LINUX_KMEMCHECK_H */ 1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Definitions for the Interfaces handler.
7 *
8 * Version: @(#)dev.h 1.0.10 08/12/93
9 *
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Corey Minyard <wf-rch!minyard@relay.EU.net>
13 * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
14 * Alan Cox, <alan@lxorguk.ukuu.org.uk>
15 * Bjorn Ekwall. <bj0rn@blox.se>
16 * Pekka Riikonen <priikone@poseidon.pspt.fi>
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 *
23 * Moved to /usr/include/linux for NET3
24 */
25 #ifndef _LINUX_NETDEVICE_H
26 #define _LINUX_NETDEVICE_H
27
28 #include <linux/timer.h>
29 #include <linux/bug.h>
30 #include <linux/delay.h>
31 #include <linux/atomic.h>
32 #include <linux/prefetch.h>
33 #include <asm/cache.h>
34 #include <asm/byteorder.h>
35
36 #include <linux/percpu.h>
37 #include <linux/rculist.h>
38 #include <linux/dmaengine.h>
39 #include <linux/workqueue.h>
40 #include <linux/dynamic_queue_limits.h>
41
42 #include <linux/ethtool.h>
43 #include <net/net_namespace.h>
44 #include <net/dsa.h>
45 #ifdef CONFIG_DCB
46 #include <net/dcbnl.h>
47 #endif
48 #include <net/netprio_cgroup.h>
49
50 #include <linux/netdev_features.h>
51 #include <linux/neighbour.h>
52 #include <uapi/linux/netdevice.h>
53 #include <uapi/linux/if_bonding.h>
54 #include <uapi/linux/pkt_cls.h>
55 #include <linux/hashtable.h>
56
57 struct netpoll_info;
58 struct device;
59 struct phy_device;
60 /* 802.11 specific */
61 struct wireless_dev;
62 /* 802.15.4 specific */
63 struct wpan_dev;
64 struct mpls_dev;
65 /* UDP Tunnel offloads */
66 struct udp_tunnel_info;
67 struct bpf_prog;
68
69 void netdev_set_default_ethtool_ops(struct net_device *dev,
70 const struct ethtool_ops *ops);
71
72 /* Backlog congestion levels */
73 #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
74 #define NET_RX_DROP 1 /* packet dropped */
75
76 /*
77 * Transmit return codes: transmit return codes originate from three different
78 * namespaces:
79 *
80 * - qdisc return codes
81 * - driver transmit return codes
82 * - errno values
83 *
84 * Drivers are allowed to return any one of those in their hard_start_xmit()
85 * function. Real network devices commonly used with qdiscs should only return
86 * the driver transmit return codes though - when qdiscs are used, the actual
87 * transmission happens asynchronously, so the value is not propagated to
88 * higher layers. Virtual network devices transmit synchronously; in this case
89 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
90 * others are propagated to higher layers.
91 */
92
93 /* qdisc ->enqueue() return codes. */
94 #define NET_XMIT_SUCCESS 0x00
95 #define NET_XMIT_DROP 0x01 /* skb dropped */
96 #define NET_XMIT_CN 0x02 /* congestion notification */
97 #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */
98
99 /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
100 * indicates that the device will soon be dropping packets, or already drops
101 * some packets of the same priority, prompting us to send less aggressively. */
102 #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))
103 #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
104
105 /* Driver transmit return codes */
106 #define NETDEV_TX_MASK 0xf0
107
108 enum netdev_tx {
109 __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */
110 NETDEV_TX_OK = 0x00, /* driver took care of packet */
111 NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/
112 };
113 typedef enum netdev_tx netdev_tx_t;
114
115 /*
116 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
117 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
118 */
119 static inline bool dev_xmit_complete(int rc)
120 {
121 /*
122 * Positive cases with an skb consumed by a driver:
123 * - successful transmission (rc == NETDEV_TX_OK)
124 * - error while transmitting (rc < 0)
125 * - error while queueing to a different device (rc & NET_XMIT_MASK)
126 */
127 if (likely(rc < NET_XMIT_MASK))
128 return true;
129
130 return false;
131 }
132
133 /*
134 * Compute the worst-case header length according to the protocols
135 * used.
136 */
137
138 #if defined(CONFIG_HYPERV_NET)
139 # define LL_MAX_HEADER 128
140 #elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
141 # if defined(CONFIG_MAC80211_MESH)
142 # define LL_MAX_HEADER 128
143 # else
144 # define LL_MAX_HEADER 96
145 # endif
146 #else
147 # define LL_MAX_HEADER 32
148 #endif
149
150 #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
151 !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
152 #define MAX_HEADER LL_MAX_HEADER
153 #else
154 #define MAX_HEADER (LL_MAX_HEADER + 48)
155 #endif
156
157 /*
158 * Old network device statistics. Fields are native words
159 * (unsigned long) so they can be read and written atomically.
160 */
161
162 struct net_device_stats {
163 unsigned long rx_packets;
164 unsigned long tx_packets;
165 unsigned long rx_bytes;
166 unsigned long tx_bytes;
167 unsigned long rx_errors;
168 unsigned long tx_errors;
169 unsigned long rx_dropped;
170 unsigned long tx_dropped;
171 unsigned long multicast;
172 unsigned long collisions;
173 unsigned long rx_length_errors;
174 unsigned long rx_over_errors;
175 unsigned long rx_crc_errors;
176 unsigned long rx_frame_errors;
177 unsigned long rx_fifo_errors;
178 unsigned long rx_missed_errors;
179 unsigned long tx_aborted_errors;
180 unsigned long tx_carrier_errors;
181 unsigned long tx_fifo_errors;
182 unsigned long tx_heartbeat_errors;
183 unsigned long tx_window_errors;
184 unsigned long rx_compressed;
185 unsigned long tx_compressed;
186 };
187
188
189 #include <linux/cache.h>
190 #include <linux/skbuff.h>
191
192 #ifdef CONFIG_RPS
193 #include <linux/static_key.h>
194 extern struct static_key rps_needed;
195 extern struct static_key rfs_needed;
196 #endif
197
198 struct neighbour;
199 struct neigh_parms;
200 struct sk_buff;
201
202 struct netdev_hw_addr {
203 struct list_head list;
204 unsigned char addr[MAX_ADDR_LEN];
205 unsigned char type;
206 #define NETDEV_HW_ADDR_T_LAN 1
207 #define NETDEV_HW_ADDR_T_SAN 2
208 #define NETDEV_HW_ADDR_T_SLAVE 3
209 #define NETDEV_HW_ADDR_T_UNICAST 4
210 #define NETDEV_HW_ADDR_T_MULTICAST 5
211 bool global_use;
212 int sync_cnt;
213 int refcount;
214 int synced;
215 struct rcu_head rcu_head;
216 };
217
218 struct netdev_hw_addr_list {
219 struct list_head list;
220 int count;
221 };
222
223 #define netdev_hw_addr_list_count(l) ((l)->count)
224 #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
225 #define netdev_hw_addr_list_for_each(ha, l) \
226 list_for_each_entry(ha, &(l)->list, list)
227
228 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
229 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
230 #define netdev_for_each_uc_addr(ha, dev) \
231 netdev_hw_addr_list_for_each(ha, &(dev)->uc)
232
233 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
234 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
235 #define netdev_for_each_mc_addr(ha, dev) \
236 netdev_hw_addr_list_for_each(ha, &(dev)->mc)
237
238 struct hh_cache {
239 u16 hh_len;
240 u16 __pad;
241 seqlock_t hh_lock;
242
243 /* cached hardware header; allow for machine alignment needs. */
244 #define HH_DATA_MOD 16
245 #define HH_DATA_OFF(__len) \
246 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
247 #define HH_DATA_ALIGN(__len) \
248 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
249 unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
250 };
251
252 /* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
253 * Alternative is:
254 * dev->hard_header_len ? (dev->hard_header_len +
255 * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
256 *
257 * We could use other alignment values, but we must maintain the
258 * relationship HH alignment <= LL alignment.
259 */
260 #define LL_RESERVED_SPACE(dev) \
261 ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
262 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
263 ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
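/*
 * Illustrative sketch of the usual consumer of LL_RESERVED_SPACE():
 * reserving link-layer headroom when building an outgoing skb. The
 * foo_* name is hypothetical.
 */
static struct sk_buff *foo_build_skb(struct net_device *dev, unsigned int len)
{
	struct sk_buff *skb = alloc_skb(len + LL_RESERVED_SPACE(dev),
					GFP_ATOMIC);
	if (!skb)
		return NULL;
	skb_reserve(skb, LL_RESERVED_SPACE(dev));	/* room for hard header */
	return skb;
}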
264
265 struct header_ops {
266 int (*create) (struct sk_buff *skb, struct net_device *dev,
267 unsigned short type, const void *daddr,
268 const void *saddr, unsigned int len);
269 int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
270 int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
271 void (*cache_update)(struct hh_cache *hh,
272 const struct net_device *dev,
273 const unsigned char *haddr);
274 bool (*validate)(const char *ll_header, unsigned int len);
275 };
276
277 /* These flag bits are private to the generic network queueing
278 * layer; they may not be explicitly referenced by any other
279 * code.
280 */
281
282 enum netdev_state_t {
283 __LINK_STATE_START,
284 __LINK_STATE_PRESENT,
285 __LINK_STATE_NOCARRIER,
286 __LINK_STATE_LINKWATCH_PENDING,
287 __LINK_STATE_DORMANT,
288 };
289
290
291 /*
292 * This structure holds boot-time configured netdevice settings. They
293 * are then used in the device probing.
294 */
295 struct netdev_boot_setup {
296 char name[IFNAMSIZ];
297 struct ifmap map;
298 };
299 #define NETDEV_BOOT_SETUP_MAX 8
300
301 int __init netdev_boot_setup(char *str);
302
303 /*
304 * Structure for NAPI scheduling similar to tasklet but with weighting
305 */
306 struct napi_struct {
307 /* The poll_list must only be managed by the entity which
308 * changes the state of the NAPI_STATE_SCHED bit. This means
309 * whoever atomically sets that bit can add this napi_struct
310 * to the per-CPU poll_list, and whoever clears that bit
311 * can remove from the list right before clearing the bit.
312 */
313 struct list_head poll_list;
314
315 unsigned long state;
316 int weight;
317 unsigned int gro_count;
318 int (*poll)(struct napi_struct *, int);
319 #ifdef CONFIG_NETPOLL
320 int poll_owner;
321 #endif
322 struct net_device *dev;
323 struct sk_buff *gro_list;
324 struct sk_buff *skb;
325 struct hrtimer timer;
326 struct list_head dev_list;
327 struct hlist_node napi_hash_node;
328 unsigned int napi_id;
329 };
330
331 enum {
332 NAPI_STATE_SCHED, /* Poll is scheduled */
333 NAPI_STATE_DISABLE, /* Disable pending */
334 NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
335 NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */
336 NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
337 NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */
338 };
339
340 enum {
341 NAPIF_STATE_SCHED = (1UL << NAPI_STATE_SCHED),
342 NAPIF_STATE_DISABLE = (1UL << NAPI_STATE_DISABLE),
343 NAPIF_STATE_NPSVC = (1UL << NAPI_STATE_NPSVC),
344 NAPIF_STATE_HASHED = (1UL << NAPI_STATE_HASHED),
345 NAPIF_STATE_NO_BUSY_POLL = (1UL << NAPI_STATE_NO_BUSY_POLL),
346 NAPIF_STATE_IN_BUSY_POLL = (1UL << NAPI_STATE_IN_BUSY_POLL),
347 };
348
349 enum gro_result {
350 GRO_MERGED,
351 GRO_MERGED_FREE,
352 GRO_HELD,
353 GRO_NORMAL,
354 GRO_DROP,
355 };
356 typedef enum gro_result gro_result_t;
357
358 /*
359 * enum rx_handler_result - Possible return values for rx_handlers.
360 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
361 * further.
362 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
363 * case skb->dev was changed by rx_handler.
364 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
365 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
366 *
367 * rx_handlers are functions called from inside __netif_receive_skb(), to do
368 * special processing of the skb, prior to delivery to protocol handlers.
369 *
370 * Currently, a net_device can only have a single rx_handler registered. Trying
371 * to register a second rx_handler will return -EBUSY.
372 *
373 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
374 * To unregister a rx_handler on a net_device, use
375 * netdev_rx_handler_unregister().
376 *
377 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
378 * do with the skb.
379 *
380 * If the rx_handler consumed the skb in some way, it should return
381 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
382 * the skb to be delivered in some other way.
383 *
384 * If the rx_handler changed skb->dev, to divert the skb to another
385 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
386 * new device will be called if it exists.
387 *
388 * If the rx_handler decides the skb should be ignored, it should return
389 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
390 * are registered on exact device (ptype->dev == skb->dev).
391 *
392 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
393 * delivered, it should return RX_HANDLER_PASS.
394 *
395 * A device without a registered rx_handler will behave as if rx_handler
396 * returned RX_HANDLER_PASS.
397 */
398
399 enum rx_handler_result {
400 RX_HANDLER_CONSUMED,
401 RX_HANDLER_ANOTHER,
402 RX_HANDLER_EXACT,
403 RX_HANDLER_PASS,
404 };
405 typedef enum rx_handler_result rx_handler_result_t;
406 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
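/*
 * Illustrative sketch of a minimal rx_handler that lets every packet
 * continue normal delivery; the foo_* name is hypothetical.
 */
static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
{
	/* inspect or redirect *pskb here; this sketch leaves it untouched */
	return RX_HANDLER_PASS;
}

/* registration, with rtnl_lock() held:
 *	err = netdev_rx_handler_register(dev, foo_handle_frame, NULL);
 */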
407
408 void __napi_schedule(struct napi_struct *n);
409 void __napi_schedule_irqoff(struct napi_struct *n);
410
411 static inline bool napi_disable_pending(struct napi_struct *n)
412 {
413 return test_bit(NAPI_STATE_DISABLE, &n->state);
414 }
415
416 /**
417 * napi_schedule_prep - check if NAPI can be scheduled
418 * @n: NAPI context
419 *
420 * Test if NAPI routine is already running, and if not mark
421 * it as running. This is used as a condition variable to
422 * insure only one NAPI poll instance runs. We also make
423 * sure there is no pending NAPI disable.
424 */
425 static inline bool napi_schedule_prep(struct napi_struct *n)
426 {
427 return !napi_disable_pending(n) &&
428 !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
429 }
430
431 /**
432 * napi_schedule - schedule NAPI poll
433 * @n: NAPI context
434 *
435 * Schedule NAPI poll routine to be called if it is not already
436 * running.
437 */
438 static inline void napi_schedule(struct napi_struct *n)
439 {
440 if (napi_schedule_prep(n))
441 __napi_schedule(n);
442 }
443
444 /**
445 * napi_schedule_irqoff - schedule NAPI poll
446 * @n: NAPI context
447 *
448 * Variant of napi_schedule(), assuming hard irqs are masked.
449 */
450 static inline void napi_schedule_irqoff(struct napi_struct *n)
451 {
452 if (napi_schedule_prep(n))
453 __napi_schedule_irqoff(n);
454 }
455
456 /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
457 static inline bool napi_reschedule(struct napi_struct *napi)
458 {
459 if (napi_schedule_prep(napi)) {
460 __napi_schedule(napi);
461 return true;
462 }
463 return false;
464 }
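/*
 * Illustrative sketch of the canonical NAPI pattern: the ISR quiesces
 * the device and schedules the poll routine. The foo_* names, including
 * foo_disable_device_irqs(), are hypothetical.
 */
struct foo_priv {
	struct napi_struct napi;
	/* ... device state ... */
};

static irqreturn_t foo_napi_isr(int irq, void *data)
{
	struct foo_priv *priv = data;

	foo_disable_device_irqs(priv);	/* stop further device interrupts */
	napi_schedule(&priv->napi);	/* poll callback runs in softirq */
	return IRQ_HANDLED;
}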
465
466 bool __napi_complete(struct napi_struct *n);
467 bool napi_complete_done(struct napi_struct *n, int work_done);
468 /**
469 * napi_complete - NAPI processing complete
470 * @n: NAPI context
471 *
472 * Mark NAPI processing as complete.
473 * Consider using napi_complete_done() instead.
474 * Return false if device should avoid rearming interrupts.
475 */
476 static inline bool napi_complete(struct napi_struct *n)
477 {
478 return napi_complete_done(n, 0);
479 }
480
481 /**
482 * napi_hash_del - remove a NAPI from global table
483 * @napi: NAPI context
484 *
485 * Warning: caller must observe RCU grace period
486 * before freeing memory containing @napi, if
487 * this function returns true.
488 * Note: core networking stack automatically calls it
489 * from netif_napi_del().
490 * Drivers might want to call this helper to combine all
491 * the needed RCU grace periods into a single one.
492 */
493 bool napi_hash_del(struct napi_struct *napi);
494
495 /**
496 * napi_disable - prevent NAPI from scheduling
497 * @n: NAPI context
498 *
499 * Stop NAPI from being scheduled on this context.
500 * Waits until any outstanding processing completes.
501 */
502 void napi_disable(struct napi_struct *n);
503
504 /**
505 * napi_enable - enable NAPI scheduling
506 * @n: NAPI context
507 *
508 * Resume scheduling of NAPI on this context.
509 * Must be paired with napi_disable.
510 */
511 static inline void napi_enable(struct napi_struct *n)
512 {
513 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
514 smp_mb__before_atomic();
515 clear_bit(NAPI_STATE_SCHED, &n->state);
516 clear_bit(NAPI_STATE_NPSVC, &n->state);
517 }
518
519 /**
520 * napi_synchronize - wait until NAPI is not running
521 * @n: NAPI context
522 *
523 * Wait until NAPI is done being scheduled on this context.
524 * Waits until any outstanding processing completes but
525 * does not disable future activations.
526 */
527 static inline void napi_synchronize(const struct napi_struct *n)
528 {
529 if (IS_ENABLED(CONFIG_SMP))
530 while (test_bit(NAPI_STATE_SCHED, &n->state))
531 msleep(1);
532 else
533 barrier();
534 }
535
536 enum netdev_queue_state_t {
537 __QUEUE_STATE_DRV_XOFF,
538 __QUEUE_STATE_STACK_XOFF,
539 __QUEUE_STATE_FROZEN,
540 };
541
542 #define QUEUE_STATE_DRV_XOFF (1 << __QUEUE_STATE_DRV_XOFF)
543 #define QUEUE_STATE_STACK_XOFF (1 << __QUEUE_STATE_STACK_XOFF)
544 #define QUEUE_STATE_FROZEN (1 << __QUEUE_STATE_FROZEN)
545
546 #define QUEUE_STATE_ANY_XOFF (QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
547 #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
548 QUEUE_STATE_FROZEN)
549 #define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
550 QUEUE_STATE_FROZEN)
551
552 /*
553 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
554 * netif_tx_* functions below are used to manipulate this flag. The
555 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
556 * queue independently. The netif_xmit_*stopped functions below are called
557 * to check if the queue has been stopped by the driver or stack (either
558 * of the XOFF bits are set in the state). Drivers should not need to call
559 * netif_xmit*stopped functions; they should only be using netif_tx_*.
560 */
561
562 struct netdev_queue {
563 /*
564 * read-mostly part
565 */
566 struct net_device *dev;
567 struct Qdisc __rcu *qdisc;
568 struct Qdisc *qdisc_sleeping;
569 #ifdef CONFIG_SYSFS
570 struct kobject kobj;
571 #endif
572 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
573 int numa_node;
574 #endif
575 unsigned long tx_maxrate;
576 /*
577 * Number of TX timeouts for this queue
578 * (/sys/class/net/DEV/Q/trans_timeout)
579 */
580 unsigned long trans_timeout;
581 /*
582 * write-mostly part
583 */
584 spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
585 int xmit_lock_owner;
586 /*
587 * Time (in jiffies) of last Tx
588 */
589 unsigned long trans_start;
590
591 unsigned long state;
592
593 #ifdef CONFIG_BQL
594 struct dql dql;
595 #endif
596 } ____cacheline_aligned_in_smp;
597
598 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
599 {
600 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
601 return q->numa_node;
602 #else
603 return NUMA_NO_NODE;
604 #endif
605 }
606
607 static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
608 {
609 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
610 q->numa_node = node;
611 #endif
612 }
613
614 #ifdef CONFIG_RPS
615 /*
616 * This structure holds an RPS map which can be of variable length. The
617 * map is an array of CPUs.
618 */
619 struct rps_map {
620 unsigned int len;
621 struct rcu_head rcu;
622 u16 cpus[0];
623 };
624 #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
625
626 /*
627 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
628 * tail pointer for that CPU's input queue at the time of last enqueue, and
629 * a hardware filter index.
630 */
631 struct rps_dev_flow {
632 u16 cpu;
633 u16 filter;
634 unsigned int last_qtail;
635 };
636 #define RPS_NO_FILTER 0xffff
637
638 /*
639 * The rps_dev_flow_table structure contains a table of flow mappings.
640 */
641 struct rps_dev_flow_table {
642 unsigned int mask;
643 struct rcu_head rcu;
644 struct rps_dev_flow flows[0];
645 };
646 #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
647 ((_num) * sizeof(struct rps_dev_flow)))
648
649 /*
650 * The rps_sock_flow_table contains mappings of flows to the last CPU
651 * on which they were processed by the application (set in recvmsg).
652 * Each entry is a 32bit value. Upper part is the high-order bits
653 * of flow hash, lower part is CPU number.
654 * rps_cpu_mask is used to partition the space, depending on number of
655 * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
656 * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
657 * meaning we use 32-6=26 bits for the hash.
658 */
659 struct rps_sock_flow_table {
660 u32 mask;
661
662 u32 ents[0] ____cacheline_aligned_in_smp;
663 };
664 #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
665
666 #define RPS_NO_CPU 0xffff
667
668 extern u32 rps_cpu_mask;
669 extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
670
671 static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
672 u32 hash)
673 {
674 if (table && hash) {
675 unsigned int index = hash & table->mask;
676 u32 val = hash & ~rps_cpu_mask;
677
678 /* We only give a hint, preemption can change CPU under us */
679 val |= raw_smp_processor_id();
680
681 if (table->ents[index] != val)
682 table->ents[index] = val;
683 }
684 }
685
686 #ifdef CONFIG_RFS_ACCEL
687 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
688 u16 filter_id);
689 #endif
690 #endif /* CONFIG_RPS */
691
692 /* This structure contains an instance of an RX queue. */
693 struct netdev_rx_queue {
694 #ifdef CONFIG_RPS
695 struct rps_map __rcu *rps_map;
696 struct rps_dev_flow_table __rcu *rps_flow_table;
697 #endif
698 struct kobject kobj;
699 struct net_device *dev;
700 } ____cacheline_aligned_in_smp;
701
702 /*
703 * RX queue sysfs structures and functions.
704 */
705 struct rx_queue_attribute {
706 struct attribute attr;
707 ssize_t (*show)(struct netdev_rx_queue *queue,
708 struct rx_queue_attribute *attr, char *buf);
709 ssize_t (*store)(struct netdev_rx_queue *queue,
710 struct rx_queue_attribute *attr, const char *buf, size_t len);
711 };
712
713 #ifdef CONFIG_XPS
714 /*
715 * This structure holds an XPS map which can be of variable length. The
716 * map is an array of queues.
717 */
718 struct xps_map {
719 unsigned int len;
720 unsigned int alloc_len;
721 struct rcu_head rcu;
722 u16 queues[0];
723 };
724 #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
725 #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
726 - sizeof(struct xps_map)) / sizeof(u16))
727
728 /*
729 * This structure holds all XPS maps for device. Maps are indexed by CPU.
730 */
731 struct xps_dev_maps {
732 struct rcu_head rcu;
733 struct xps_map __rcu *cpu_map[0];
734 };
735 #define XPS_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \
736 (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))
737 #endif /* CONFIG_XPS */
738
739 #define TC_MAX_QUEUE 16
740 #define TC_BITMASK 15
741 /* HW offloaded queuing disciplines txq count and offset maps */
742 struct netdev_tc_txq {
743 u16 count;
744 u16 offset;
745 };
746
747 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
748 /*
749 * This structure is to hold information about the device
750 * configured to run FCoE protocol stack.
751 */
752 struct netdev_fcoe_hbainfo {
753 char manufacturer[64];
754 char serial_number[64];
755 char hardware_version[64];
756 char driver_version[64];
757 char optionrom_version[64];
758 char firmware_version[64];
759 char model[256];
760 char model_description[256];
761 };
762 #endif
763
764 #define MAX_PHYS_ITEM_ID_LEN 32
765
766 /* This structure holds a unique identifier to identify some
767 * physical item (port for example) used by a netdevice.
768 */
769 struct netdev_phys_item_id {
770 unsigned char id[MAX_PHYS_ITEM_ID_LEN];
771 unsigned char id_len;
772 };
773
774 static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
775 struct netdev_phys_item_id *b)
776 {
777 return a->id_len == b->id_len &&
778 memcmp(a->id, b->id, a->id_len) == 0;
779 }
780
781 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
782 struct sk_buff *skb);
783
784 /* These structures hold the attributes of qdisc and classifiers
785 * that are being passed to the netdevice through the setup_tc op.
786 */
787 enum {
788 TC_SETUP_MQPRIO,
789 TC_SETUP_CLSU32,
790 TC_SETUP_CLSFLOWER,
791 TC_SETUP_MATCHALL,
792 TC_SETUP_CLSBPF,
793 };
794
795 struct tc_cls_u32_offload;
796
797 struct tc_to_netdev {
798 unsigned int type;
799 union {
800 u8 tc;
801 struct tc_cls_u32_offload *cls_u32;
802 struct tc_cls_flower_offload *cls_flower;
803 struct tc_cls_matchall_offload *cls_mall;
804 struct tc_cls_bpf_offload *cls_bpf;
805 };
806 bool egress_dev;
807 };
808
809 /* These structures hold the attributes of xdp state that are being passed
810 * to the netdevice through the xdp op.
811 */
812 enum xdp_netdev_command {
813 /* Set or clear a bpf program used in the earliest stages of packet
814 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
815 * is responsible for calling bpf_prog_put on any old progs that are
816 * stored. In case of error, the callee need not release the new prog
817 * reference, but on success it takes ownership and must bpf_prog_put
818 * when it is no longer used.
819 */
820 XDP_SETUP_PROG,
821 /* Check if a bpf program is set on the device. The callee should
822 * set prog_attached to true if a program is currently attached and running.
823 */
824 XDP_QUERY_PROG,
825 };
826
827 struct netdev_xdp {
828 enum xdp_netdev_command command;
829 union {
830 /* XDP_SETUP_PROG */
831 struct bpf_prog *prog;
832 /* XDP_QUERY_PROG */
833 bool prog_attached;
834 };
835 };
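/* Illustrative sketch (not part of the traced source): the switch a driver's
 * ndo_xdp implementation typically performs. 'struct example_priv' and its
 * 'xdp_prog' field are hypothetical driver-private details. */
static int example_ndo_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
	struct example_priv *priv = netdev_priv(dev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		if (priv->xdp_prog)
			bpf_prog_put(priv->xdp_prog);	/* drop the old prog */
		priv->xdp_prog = xdp->prog;	/* take ownership of the new one */
		return 0;
	case XDP_QUERY_PROG:
		xdp->prog_attached = !!priv->xdp_prog;
		return 0;
	default:
		return -EINVAL;
	}
}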
836
837 /*
838 * This structure defines the management hooks for network devices.
839 * The following hooks can be defined; unless noted otherwise, they are
840 * optional and can be filled with a null pointer.
841 *
842 * int (*ndo_init)(struct net_device *dev);
843 * This function is called once when a network device is registered.
844 * The network device can use this for any late stage initialization
845 * or semantic validation. It can fail with an error code which will
846 * be propagated back to register_netdev.
847 *
848 * void (*ndo_uninit)(struct net_device *dev);
849 * This function is called when device is unregistered or when registration
850 * fails. It is not called if init fails.
851 *
852 * int (*ndo_open)(struct net_device *dev);
853 * This function is called when a network device transitions to the up
854 * state.
855 *
856 * int (*ndo_stop)(struct net_device *dev);
857 * This function is called when a network device transitions to the down
858 * state.
859 *
860 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
861 * struct net_device *dev);
862 * Called when a packet needs to be transmitted.
863 * Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop
864 * the queue before that can happen; it's for obsolete devices and weird
865 * corner cases, but the stack really does a non-trivial amount
866 * of useless work if you return NETDEV_TX_BUSY.
867 * Required; cannot be NULL.
868 *
869 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
870 * netdev_features_t features);
871 * Adjusts the requested feature flags according to device-specific
872 * constraints, and returns the resulting flags. Must not modify
873 * the device state.
874 *
875 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
876 * void *accel_priv, select_queue_fallback_t fallback);
877 * Called to decide which queue to use when device supports multiple
878 * transmit queues.
879 *
880 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
881 * This function is called to allow the device receiver to make
882 * changes to the configuration when multicast or promiscuous mode is enabled.
883 *
884 * void (*ndo_set_rx_mode)(struct net_device *dev);
885 * This function is called when the device changes its address list filtering.
886 * If driver handles unicast address filtering, it should set
887 * IFF_UNICAST_FLT in its priv_flags.
888 *
889 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
890 * This function is called when the Media Access Control address
891 * needs to be changed. If this interface is not defined, the
892 * MAC address cannot be changed.
893 *
894 * int (*ndo_validate_addr)(struct net_device *dev);
895 * Test if Media Access Control address is valid for the device.
896 *
897 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
898 * Called when a user requests an ioctl which can't be handled by
899 * the generic interface code. If not defined, ioctls return a
900 * "not supported" error code.
901 *
902 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
903 * Used to set a network device's bus interface parameters. This interface
904 * is retained for legacy reasons; new devices should use the bus
905 * interface (PCI) for low level management.
906 *
907 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
908 * Called when a user wants to change the Maximum Transfer Unit
909 * of a device. If not defined, any request to change MTU will
910 * return an error.
911 *
912 * void (*ndo_tx_timeout)(struct net_device *dev);
913 * Callback used when the transmitter has not made any progress
914 * for dev->watchdog ticks.
915 *
916 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
917 * struct rtnl_link_stats64 *storage);
918 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
919 * Called when a user wants to get the network device usage
920 * statistics. Drivers must do one of the following:
921 * 1. Define @ndo_get_stats64 to fill in a zero-initialised
922 * rtnl_link_stats64 structure passed by the caller.
923 * 2. Define @ndo_get_stats to update a net_device_stats structure
924 * (which should normally be dev->stats) and return a pointer to
925 * it. The structure may be changed asynchronously only if each
926 * field is written atomically.
927 * 3. Update dev->stats asynchronously and atomically, and define
928 * neither operation.
929 *
930 * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
931 * Return true if this device supports offload stats of this attr_id.
932 *
933 * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
934 * void *attr_data)
935 * Get statistics for offload operations by attr_id. Write it into the
936 * attr_data pointer.
937 *
938 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
939 * If the device supports VLAN filtering, this function is called when a
940 * VLAN id is registered.
941 *
942 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
943 * If the device supports VLAN filtering, this function is called when a
944 * VLAN id is unregistered.
945 *
946 * void (*ndo_poll_controller)(struct net_device *dev);
947 *
948 * SR-IOV management functions.
949 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
950 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
951 * u8 qos, __be16 proto);
952 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
953 * int max_tx_rate);
954 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
955 * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
956 * int (*ndo_get_vf_config)(struct net_device *dev,
957 * int vf, struct ifla_vf_info *ivf);
958 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
959 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
960 * struct nlattr *port[]);
961 *
962 * Enable or disable the VF ability to query its RSS Redirection Table and
963 * Hash Key. This is needed since on some devices the VF shares this information
964 * with the PF and querying it may introduce a theoretical security risk.
965 * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
966 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
967 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
968 * Called to set up 'tc' traffic classes in the net device. This
969 * is always called from the stack with the rtnl lock held and netif tx
970 * queues stopped. This allows the netdevice to perform queue management
971 * safely.
972 *
973 * Fibre Channel over Ethernet (FCoE) offload functions.
974 * int (*ndo_fcoe_enable)(struct net_device *dev);
975 * Called when the FCoE protocol stack wants to start using LLD for FCoE
976 * so the underlying device can perform whatever needed configuration or
977 * initialization to support acceleration of FCoE traffic.
978 *
979 * int (*ndo_fcoe_disable)(struct net_device *dev);
980 * Called when the FCoE protocol stack wants to stop using LLD for FCoE
981 * so the underlying device can perform whatever needed clean-ups to
982 * stop supporting acceleration of FCoE traffic.
983 *
984 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
985 * struct scatterlist *sgl, unsigned int sgc);
986 * Called when the FCoE Initiator wants to initialize an I/O that
987 * is a possible candidate for Direct Data Placement (DDP). The LLD can
988 * perform necessary setup and returns 1 to indicate the device is set up
989 * successfully to perform DDP on this I/O, otherwise this returns 0.
990 *
991 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
992 * Called when the FCoE Initiator/Target is done with the DDPed I/O as
993 * indicated by the FC exchange id 'xid', so the underlying device can
994 * clean up and reuse resources for later DDP requests.
995 *
996 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
997 * struct scatterlist *sgl, unsigned int sgc);
998 * Called when the FCoE Target wants to initialize an I/O that
999 * is a possible candidate for Direct Data Placement (DDP). The LLD can
1000 * perform necessary setup and returns 1 to indicate the device is set up
1001 * successfully to perform DDP on this I/O, otherwise this returns 0.
1002 *
1003 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1004 * struct netdev_fcoe_hbainfo *hbainfo);
1005 * Called when the FCoE Protocol stack wants information on the underlying
1006 * device. This information is utilized by the FCoE protocol stack to
1007 * register attributes with the Fibre Channel management service as per the
1008 * FC-GS Fabric Device Management Information (FDMI) specification.
1009 *
1010 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
1011 * Called when the underlying device wants to override default World Wide
1012 * Name (WWN) generation mechanism in FCoE protocol stack to pass its own
1013 * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
1014 * protocol stack to use.
1015 *
1016 * RFS acceleration.
1017 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
1018 * u16 rxq_index, u32 flow_id);
1019 * Set hardware filter for RFS. rxq_index is the target queue index;
1020 * flow_id is a flow ID to be passed to rps_may_expire_flow() later.
1021 * Return the filter ID on success, or a negative error code.
1022 *
1023 * Slave management functions (for bridge, bonding, etc).
1024 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
1025 * Called to make another netdev an underling.
1026 *
1027 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
1028 * Called to release previously enslaved netdev.
1029 *
1030 * Feature/offload setting functions.
1031 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
1032 * Called to update device configuration to new features. Passed
1033 * feature set might be less than what was returned by ndo_fix_features().
1034 * Must return >0 or -errno if it changed dev->features itself.
1035 *
1036 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
1037 * struct net_device *dev,
1038 * const unsigned char *addr, u16 vid, u16 flags)
1039 * Adds an FDB entry to dev for addr.
1040 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
1041 * struct net_device *dev,
1042 * const unsigned char *addr, u16 vid)
1043 * Deletes the FDB entry from dev corresponding to addr.
1044 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
1045 * struct net_device *dev, struct net_device *filter_dev,
1046 * int *idx)
1047 * Used to add FDB entries to dump requests. Implementers should add
1048 * entries to skb and update idx with the number of entries.
1049 *
1050 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
1051 * u16 flags)
1052 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
1053 * struct net_device *dev, u32 filter_mask,
1054 * int nlflags)
1055 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
1056 * u16 flags);
1057 *
1058 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
1059 * Called to change device carrier. Soft-devices (like dummy, team, etc)
1060 * which do not represent real hardware may define this to allow their
1061 * userspace components to manage their virtual carrier state. Devices
1062 * that determine carrier state from physical hardware properties (e.g.
1063 * network cables) or protocol-dependent mechanisms (e.g.
1064 * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
1065 *
1066 * int (*ndo_get_phys_port_id)(struct net_device *dev,
1067 * struct netdev_phys_item_id *ppid);
1068 * Called to get the ID of the physical port of this device. If the driver
1069 * does not implement this, it is assumed that the hw is not able to have
1070 * multiple net devices on a single physical port.
1071 *
1072 * void (*ndo_udp_tunnel_add)(struct net_device *dev,
1073 * struct udp_tunnel_info *ti);
1074 * Called by UDP tunnel to notify a driver about the UDP port and socket
1075 * address family that a UDP tunnel is listening to. It is called only
1076 * when a new port starts listening. The operation is protected by the
1077 * RTNL.
1078 *
1079 * void (*ndo_udp_tunnel_del)(struct net_device *dev,
1080 * struct udp_tunnel_info *ti);
1081 * Called by UDP tunnel to notify the driver about a UDP port and socket
1082 * address family that the UDP tunnel is not listening to anymore. The
1083 * operation is protected by the RTNL.
1084 *
1085 * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1086 * struct net_device *dev)
1087 * Called by upper layer devices to accelerate switching or other
1088 * station functionality into hardware. 'pdev' is the lowerdev
1089 * to use for the offload and 'dev' is the net device that will
1090 * back the offload. Returns a pointer to the private structure
1091 * the upper layer will maintain.
1092 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
1093 * Called by upper layer device to delete the station created
1094 * by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
1095 * the station and priv is the structure returned by the add
1096 * operation.
1097 * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb,
1098 * struct net_device *dev,
1099 * void *priv);
1100 * Callback to use for xmit over the accelerated station. This
1101 * is used in place of ndo_start_xmit on accelerated net
1102 * devices.
1103 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
1104 * struct net_device *dev,
1105 * netdev_features_t features);
1106 * Called by core transmit path to determine if device is capable of
1107 * performing offload operations on a given packet. This is to give
1108 * the device an opportunity to implement any restrictions that cannot
1109 * be otherwise expressed by feature flags. The check is called with
1110 * the set of features that the stack has calculated and it returns
1111 * those the driver believes to be appropriate.
1112 * int (*ndo_set_tx_maxrate)(struct net_device *dev,
1113 * int queue_index, u32 maxrate);
1114 * Called when a user wants to set a max-rate limitation of specific
1115 * TX queue.
1116 * int (*ndo_get_iflink)(const struct net_device *dev);
1117 * Called to get the iflink value of this device.
1118 * int (*ndo_change_proto_down)(struct net_device *dev,
1119 * bool proto_down);
1120 * This function is used to pass protocol port error state information
1121 * to the switch driver. The switch driver can react to the proto_down
1122 * by doing a phys down on the associated switch port.
1123 * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
1124 * This function is used to get egress tunnel information for a given skb.
1125 * This is useful for retrieving outer tunnel header parameters while
1126 * sampling packets.
1127 * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
1128 * This function is used to specify the headroom to be reserved when
1129 * allocating an skb during packet reception. Setting an
1130 * appropriate rx headroom value allows avoiding an skb head copy on
1131 * forward. Setting a negative value resets the rx headroom to the
1132 * default value.
1133 * int (*ndo_xdp)(struct net_device *dev, struct netdev_xdp *xdp);
1134 * This function is used to set or query state related to XDP on the
1135 * netdevice. See definition of enum xdp_netdev_command for details.
1136 *
1137 */
1138 struct net_device_ops {
1139 int (*ndo_init)(struct net_device *dev);
1140 void (*ndo_uninit)(struct net_device *dev);
1141 int (*ndo_open)(struct net_device *dev);
1142 int (*ndo_stop)(struct net_device *dev);
1143 netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
1144 struct net_device *dev);
1145 netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
1146 struct net_device *dev,
1147 netdev_features_t features);
1148 u16 (*ndo_select_queue)(struct net_device *dev,
1149 struct sk_buff *skb,
1150 void *accel_priv,
1151 select_queue_fallback_t fallback);
1152 void (*ndo_change_rx_flags)(struct net_device *dev,
1153 int flags);
1154 void (*ndo_set_rx_mode)(struct net_device *dev);
1155 int (*ndo_set_mac_address)(struct net_device *dev,
1156 void *addr);
1157 int (*ndo_validate_addr)(struct net_device *dev);
1158 int (*ndo_do_ioctl)(struct net_device *dev,
1159 struct ifreq *ifr, int cmd);
1160 int (*ndo_set_config)(struct net_device *dev,
1161 struct ifmap *map);
1162 int (*ndo_change_mtu)(struct net_device *dev,
1163 int new_mtu);
1164 int (*ndo_neigh_setup)(struct net_device *dev,
1165 struct neigh_parms *);
1166 void (*ndo_tx_timeout) (struct net_device *dev);
1167
1168 struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
1169 struct rtnl_link_stats64 *storage);
1170 bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
1171 int (*ndo_get_offload_stats)(int attr_id,
1172 const struct net_device *dev,
1173 void *attr_data);
1174 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
1175
1176 int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
1177 __be16 proto, u16 vid);
1178 int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
1179 __be16 proto, u16 vid);
1180 #ifdef CONFIG_NET_POLL_CONTROLLER
1181 void (*ndo_poll_controller)(struct net_device *dev);
1182 int (*ndo_netpoll_setup)(struct net_device *dev,
1183 struct netpoll_info *info);
1184 void (*ndo_netpoll_cleanup)(struct net_device *dev);
1185 #endif
1186 #ifdef CONFIG_NET_RX_BUSY_POLL
1187 int (*ndo_busy_poll)(struct napi_struct *dev);
1188 #endif
1189 int (*ndo_set_vf_mac)(struct net_device *dev,
1190 int queue, u8 *mac);
1191 int (*ndo_set_vf_vlan)(struct net_device *dev,
1192 int queue, u16 vlan,
1193 u8 qos, __be16 proto);
1194 int (*ndo_set_vf_rate)(struct net_device *dev,
1195 int vf, int min_tx_rate,
1196 int max_tx_rate);
1197 int (*ndo_set_vf_spoofchk)(struct net_device *dev,
1198 int vf, bool setting);
1199 int (*ndo_set_vf_trust)(struct net_device *dev,
1200 int vf, bool setting);
1201 int (*ndo_get_vf_config)(struct net_device *dev,
1202 int vf,
1203 struct ifla_vf_info *ivf);
1204 int (*ndo_set_vf_link_state)(struct net_device *dev,
1205 int vf, int link_state);
1206 int (*ndo_get_vf_stats)(struct net_device *dev,
1207 int vf,
1208 struct ifla_vf_stats
1209 *vf_stats);
1210 int (*ndo_set_vf_port)(struct net_device *dev,
1211 int vf,
1212 struct nlattr *port[]);
1213 int (*ndo_get_vf_port)(struct net_device *dev,
1214 int vf, struct sk_buff *skb);
1215 int (*ndo_set_vf_guid)(struct net_device *dev,
1216 int vf, u64 guid,
1217 int guid_type);
1218 int (*ndo_set_vf_rss_query_en)(
1219 struct net_device *dev,
1220 int vf, bool setting);
1221 int (*ndo_setup_tc)(struct net_device *dev,
1222 u32 handle,
1223 __be16 protocol,
1224 struct tc_to_netdev *tc);
1225 #if IS_ENABLED(CONFIG_FCOE)
1226 int (*ndo_fcoe_enable)(struct net_device *dev);
1227 int (*ndo_fcoe_disable)(struct net_device *dev);
1228 int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
1229 u16 xid,
1230 struct scatterlist *sgl,
1231 unsigned int sgc);
1232 int (*ndo_fcoe_ddp_done)(struct net_device *dev,
1233 u16 xid);
1234 int (*ndo_fcoe_ddp_target)(struct net_device *dev,
1235 u16 xid,
1236 struct scatterlist *sgl,
1237 unsigned int sgc);
1238 int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1239 struct netdev_fcoe_hbainfo *hbainfo);
1240 #endif
1241
1242 #if IS_ENABLED(CONFIG_LIBFCOE)
1243 #define NETDEV_FCOE_WWNN 0
1244 #define NETDEV_FCOE_WWPN 1
1245 int (*ndo_fcoe_get_wwn)(struct net_device *dev,
1246 u64 *wwn, int type);
1247 #endif
1248
1249 #ifdef CONFIG_RFS_ACCEL
1250 int (*ndo_rx_flow_steer)(struct net_device *dev,
1251 const struct sk_buff *skb,
1252 u16 rxq_index,
1253 u32 flow_id);
1254 #endif
1255 int (*ndo_add_slave)(struct net_device *dev,
1256 struct net_device *slave_dev);
1257 int (*ndo_del_slave)(struct net_device *dev,
1258 struct net_device *slave_dev);
1259 netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1260 netdev_features_t features);
1261 int (*ndo_set_features)(struct net_device *dev,
1262 netdev_features_t features);
1263 int (*ndo_neigh_construct)(struct net_device *dev,
1264 struct neighbour *n);
1265 void (*ndo_neigh_destroy)(struct net_device *dev,
1266 struct neighbour *n);
1267
1268 int (*ndo_fdb_add)(struct ndmsg *ndm,
1269 struct nlattr *tb[],
1270 struct net_device *dev,
1271 const unsigned char *addr,
1272 u16 vid,
1273 u16 flags);
1274 int (*ndo_fdb_del)(struct ndmsg *ndm,
1275 struct nlattr *tb[],
1276 struct net_device *dev,
1277 const unsigned char *addr,
1278 u16 vid);
1279 int (*ndo_fdb_dump)(struct sk_buff *skb,
1280 struct netlink_callback *cb,
1281 struct net_device *dev,
1282 struct net_device *filter_dev,
1283 int *idx);
1284
1285 int (*ndo_bridge_setlink)(struct net_device *dev,
1286 struct nlmsghdr *nlh,
1287 u16 flags);
1288 int (*ndo_bridge_getlink)(struct sk_buff *skb,
1289 u32 pid, u32 seq,
1290 struct net_device *dev,
1291 u32 filter_mask,
1292 int nlflags);
1293 int (*ndo_bridge_dellink)(struct net_device *dev,
1294 struct nlmsghdr *nlh,
1295 u16 flags);
1296 int (*ndo_change_carrier)(struct net_device *dev,
1297 bool new_carrier);
1298 int (*ndo_get_phys_port_id)(struct net_device *dev,
1299 struct netdev_phys_item_id *ppid);
1300 int (*ndo_get_phys_port_name)(struct net_device *dev,
1301 char *name, size_t len);
1302 void (*ndo_udp_tunnel_add)(struct net_device *dev,
1303 struct udp_tunnel_info *ti);
1304 void (*ndo_udp_tunnel_del)(struct net_device *dev,
1305 struct udp_tunnel_info *ti);
1306 void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1307 struct net_device *dev);
1308 void (*ndo_dfwd_del_station)(struct net_device *pdev,
1309 void *priv);
1310
1311 netdev_tx_t (*ndo_dfwd_start_xmit) (struct sk_buff *skb,
1312 struct net_device *dev,
1313 void *priv);
1314 int (*ndo_get_lock_subclass)(struct net_device *dev);
1315 int (*ndo_set_tx_maxrate)(struct net_device *dev,
1316 int queue_index,
1317 u32 maxrate);
1318 int (*ndo_get_iflink)(const struct net_device *dev);
1319 int (*ndo_change_proto_down)(struct net_device *dev,
1320 bool proto_down);
1321 int (*ndo_fill_metadata_dst)(struct net_device *dev,
1322 struct sk_buff *skb);
1323 void (*ndo_set_rx_headroom)(struct net_device *dev,
1324 int needed_headroom);
1325 int (*ndo_xdp)(struct net_device *dev,
1326 struct netdev_xdp *xdp);
1327 };
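/* Illustrative sketch (not part of the traced source): the minimal ops table a
 * simple Ethernet driver might provide. Only ndo_start_xmit is mandatory; the
 * 'example_*' callbacks are hypothetical, while eth_mac_addr() and
 * eth_validate_addr() are the stock helpers. */
static const struct net_device_ops example_netdev_ops = {
	.ndo_open		= example_open,
	.ndo_stop		= example_stop,
	.ndo_start_xmit		= example_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};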
1328
1329 /**
1330 * enum net_device_priv_flags - &struct net_device priv_flags
1331 *
1332 * These are the &struct net_device priv_flags; they are only set internally
1333 * by drivers and used in the kernel. These flags are invisible to
1334 * userspace; this means that the order of these flags can change
1335 * during any kernel release.
1336 *
1337 * You should have a pretty good reason to be extending these flags.
1338 *
1339 * @IFF_802_1Q_VLAN: 802.1Q VLAN device
1340 * @IFF_EBRIDGE: Ethernet bridging device
1341 * @IFF_BONDING: bonding master or slave
1342 * @IFF_ISATAP: ISATAP interface (RFC4214)
1343 * @IFF_WAN_HDLC: WAN HDLC device
1344 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
1345 * release skb->dst
1346 * @IFF_DONT_BRIDGE: disallow bridging this ether dev
1347 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
1348 * @IFF_MACVLAN_PORT: device used as macvlan port
1349 * @IFF_BRIDGE_PORT: device used as bridge port
1350 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
1351 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
1352 * @IFF_UNICAST_FLT: Supports unicast filtering
1353 * @IFF_TEAM_PORT: device used as team port
1354 * @IFF_SUPP_NOFCS: device supports sending custom FCS
1355 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
1356 * change when it's running
1357 * @IFF_MACVLAN: Macvlan device
1358 * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
1359 * underlying stacked devices
1360 * @IFF_IPVLAN_MASTER: IPvlan master device
1361 * @IFF_IPVLAN_SLAVE: IPvlan slave device
1362 * @IFF_L3MDEV_MASTER: device is an L3 master device
1363 * @IFF_NO_QUEUE: device can run without qdisc attached
1364 * @IFF_OPENVSWITCH: device is an Open vSwitch master
1365 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
1366 * @IFF_TEAM: device is a team device
1367 * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
1368 * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
1369 * entity (i.e. the master device for bridged veth)
1370 * @IFF_MACSEC: device is a MACsec device
1371 */
1372 enum netdev_priv_flags {
1373 IFF_802_1Q_VLAN = 1<<0,
1374 IFF_EBRIDGE = 1<<1,
1375 IFF_BONDING = 1<<2,
1376 IFF_ISATAP = 1<<3,
1377 IFF_WAN_HDLC = 1<<4,
1378 IFF_XMIT_DST_RELEASE = 1<<5,
1379 IFF_DONT_BRIDGE = 1<<6,
1380 IFF_DISABLE_NETPOLL = 1<<7,
1381 IFF_MACVLAN_PORT = 1<<8,
1382 IFF_BRIDGE_PORT = 1<<9,
1383 IFF_OVS_DATAPATH = 1<<10,
1384 IFF_TX_SKB_SHARING = 1<<11,
1385 IFF_UNICAST_FLT = 1<<12,
1386 IFF_TEAM_PORT = 1<<13,
1387 IFF_SUPP_NOFCS = 1<<14,
1388 IFF_LIVE_ADDR_CHANGE = 1<<15,
1389 IFF_MACVLAN = 1<<16,
1390 IFF_XMIT_DST_RELEASE_PERM = 1<<17,
1391 IFF_IPVLAN_MASTER = 1<<18,
1392 IFF_IPVLAN_SLAVE = 1<<19,
1393 IFF_L3MDEV_MASTER = 1<<20,
1394 IFF_NO_QUEUE = 1<<21,
1395 IFF_OPENVSWITCH = 1<<22,
1396 IFF_L3MDEV_SLAVE = 1<<23,
1397 IFF_TEAM = 1<<24,
1398 IFF_RXFH_CONFIGURED = 1<<25,
1399 IFF_PHONY_HEADROOM = 1<<26,
1400 IFF_MACSEC = 1<<27,
1401 };
1402
1403 #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
1404 #define IFF_EBRIDGE IFF_EBRIDGE
1405 #define IFF_BONDING IFF_BONDING
1406 #define IFF_ISATAP IFF_ISATAP
1407 #define IFF_WAN_HDLC IFF_WAN_HDLC
1408 #define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE
1409 #define IFF_DONT_BRIDGE IFF_DONT_BRIDGE
1410 #define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL
1411 #define IFF_MACVLAN_PORT IFF_MACVLAN_PORT
1412 #define IFF_BRIDGE_PORT IFF_BRIDGE_PORT
1413 #define IFF_OVS_DATAPATH IFF_OVS_DATAPATH
1414 #define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING
1415 #define IFF_UNICAST_FLT IFF_UNICAST_FLT
1416 #define IFF_TEAM_PORT IFF_TEAM_PORT
1417 #define IFF_SUPP_NOFCS IFF_SUPP_NOFCS
1418 #define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
1419 #define IFF_MACVLAN IFF_MACVLAN
1420 #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
1421 #define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER
1422 #define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE
1423 #define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER
1424 #define IFF_NO_QUEUE IFF_NO_QUEUE
1425 #define IFF_OPENVSWITCH IFF_OPENVSWITCH
1426 #define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
1427 #define IFF_TEAM IFF_TEAM
1428 #define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
1429 #define IFF_MACSEC IFF_MACSEC
1430
1431 /**
1432 * struct net_device - The DEVICE structure.
1433 * Actually, this whole structure is a big mistake. It mixes I/O
1434 * data with strictly "high-level" data, and it has to know about
1435 * almost every data structure used in the INET module.
1436 *
1437 * @name: This is the first field of the "visible" part of this structure
1438 * (i.e. as seen by users in the "Space.c" file). It is the name
1439 * of the interface.
1440 *
1441 * @name_hlist: Device name hash chain, please keep it close to name[]
1442 * @ifalias: SNMP alias
1443 * @mem_end: Shared memory end
1444 * @mem_start: Shared memory start
1445 * @base_addr: Device I/O address
1446 * @irq: Device IRQ number
1447 *
1448 * @carrier_changes: Stats to monitor carrier on<->off transitions
1449 *
1450 * @state: Generic network queuing layer state, see netdev_state_t
1451 * @dev_list: The global list of network devices
1452 * @napi_list: List entry used for polling NAPI devices
1453 * @unreg_list: List entry when we are unregistering the
1454 * device; see the function unregister_netdev
1455 * @close_list: List entry used when we are closing the device
1456 * @ptype_all: Device-specific packet handlers for all protocols
1457 * @ptype_specific: Device-specific, protocol-specific packet handlers
1458 *
1459 * @adj_list: Directly linked devices, like slaves for bonding
1460 * @features: Currently active device features
1461 * @hw_features: User-changeable features
1462 *
1463 * @wanted_features: User-requested features
1464 * @vlan_features: Mask of features inheritable by VLAN devices
1465 *
1466 * @hw_enc_features: Mask of features inherited by encapsulating devices
1467 * This field indicates what encapsulation
1468 * offloads the hardware is capable of doing,
1469 * and drivers will need to set them appropriately.
1470 *
1471 * @mpls_features: Mask of features inheritable by MPLS
1472 *
1473 * @ifindex: interface index
1474 * @group: The group the device belongs to
1475 *
1476 * @stats: Statistics struct, which was left as a legacy, use
1477 * rtnl_link_stats64 instead
1478 *
1479 * @rx_dropped: Dropped packets by core network,
1480 * do not use this in drivers
1481 * @tx_dropped: Dropped packets by core network,
1482 * do not use this in drivers
1483 * @rx_nohandler: nohandler dropped packets by core network on
1484 * inactive devices, do not use this in drivers
1485 *
1486 * @wireless_handlers: List of functions to handle Wireless Extensions,
1487 * instead of ioctl,
1488 * see <net/iw_handler.h> for details.
1489 * @wireless_data: Instance data managed by the core of wireless extensions
1490 *
1491 * @netdev_ops: Includes several pointers to callbacks,
1492 * if one wants to override the ndo_*() functions
1493 * @ethtool_ops: Management operations
1494 * @ndisc_ops: Includes callbacks for different IPv6 neighbour
1495 * discovery handling. Necessary for e.g. 6LoWPAN.
1496 * @header_ops: Includes callbacks for creating, parsing, caching, etc.
1497 * of Layer 2 headers.
1498 *
1499 * @flags: Interface flags (a la BSD)
1500 * @priv_flags: Like 'flags' but invisible to userspace,
1501 * see if.h for the definitions
1502 * @gflags: Global flags (kept as legacy)
1503 * @padded: How much padding added by alloc_netdev()
1504 * @operstate: RFC2863 operstate
1505 * @link_mode: Mapping policy to operstate
1506 * @if_port: Selectable AUI, TP, ...
1507 * @dma: DMA channel
1508 * @mtu: Interface MTU value
1509 * @min_mtu: Interface Minimum MTU value
1510 * @max_mtu: Interface Maximum MTU value
1511 * @type: Interface hardware type
1512 * @hard_header_len: Maximum hardware header length.
1513 *
1514 * @needed_headroom: Extra headroom the hardware may need, but not in all
1515 * cases can this be guaranteed
1516 * @needed_tailroom: Extra tailroom the hardware may need, but not in all
1517 * cases can this be guaranteed. Some cases also use
1518 * LL_MAX_HEADER instead to allocate the skb
1519 *
1520 * interface address info:
1521 *
1522 * @perm_addr: Permanent hw address
1523 * @addr_assign_type: Hw address assignment type
1524 * @addr_len: Hardware address length
1525 * @neigh_priv_len: Used in neigh_alloc()
1526 * @dev_id: Used to differentiate devices that share
1527 * the same link layer address
1528 * @dev_port: Used to differentiate devices that share
1529 * the same function
1530 * @addr_list_lock: XXX: need comments on this one
1531 * @uc_promisc: Flag indicating that promiscuous mode
1532 * has been enabled due to the need to listen to
1533 * additional unicast addresses in a device that
1534 * does not implement ndo_set_rx_mode()
1535 * @uc: unicast mac addresses
1536 * @mc: multicast mac addresses
1537 * @dev_addrs: list of device hw addresses
1538 * @queues_kset: Group of all Kobjects in the Tx and RX queues
1539 * @promiscuity: Number of times the NIC is told to work in
1540 * promiscuous mode; if it becomes 0 the NIC will
1541 * exit promiscuous mode
1542 * @allmulti: Counter, enables or disables allmulticast mode
1543 *
1544 * @vlan_info: VLAN info
1545 * @dsa_ptr: dsa specific data
1546 * @tipc_ptr: TIPC specific data
1547 * @atalk_ptr: AppleTalk link
1548 * @ip_ptr: IPv4 specific data
1549 * @dn_ptr: DECnet specific data
1550 * @ip6_ptr: IPv6 specific data
1551 * @ax25_ptr: AX.25 specific data
1552 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
1553 *
1554 * @last_rx: Time of last Rx
1555 * @dev_addr: Hw address (before bcast,
1556 * because most packets are unicast)
1557 *
1558 * @_rx: Array of RX queues
1559 * @num_rx_queues: Number of RX queues
1560 * allocated at register_netdev() time
1561 * @real_num_rx_queues: Number of RX queues currently active in device
1562 *
1563 * @rx_handler: handler for received packets
1564 * @rx_handler_data: XXX: need comments on this one
1565 * @ingress_queue: XXX: need comments on this one
1566 * @broadcast: hw bcast address
1567 *
1568 * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
1569 * indexed by RX queue number. Assigned by driver.
1570 * This must only be set if the ndo_rx_flow_steer
1571 * operation is defined
1572 * @index_hlist: Device index hash chain
1573 *
1574 * @_tx: Array of TX queues
1575 * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time
1576 * @real_num_tx_queues: Number of TX queues currently active in device
1577 * @qdisc: Root qdisc from userspace point of view
1578 * @tx_queue_len: Max frames per queue allowed
1579 * @tx_global_lock: XXX: need comments on this one
1580 *
1581 * @xps_maps: XXX: need comments on this one
1582 *
1583 * @watchdog_timeo: Represents the timeout that is used by
1584 * the watchdog (see dev_watchdog())
1585 * @watchdog_timer: List of timers
1586 *
1587 * @pcpu_refcnt: Number of references to this device
1588 * @todo_list: Delayed register/unregister
1589 * @link_watch_list: XXX: need comments on this one
1590 *
1591 * @reg_state: Register/unregister state machine
1592 * @dismantle: Device is going to be freed
1593 * @rtnl_link_state: This enum represents the phases of creating
1594 * a new link
1595 *
1596 * @destructor: Called from unregister,
1597 * can be used to call free_netdev
1598 * @npinfo: XXX: need comments on this one
1599 * @nd_net: Network namespace this network device is inside
1600 *
1601 * @ml_priv: Mid-layer private
1602 * @lstats: Loopback statistics
1603 * @tstats: Tunnel statistics
1604 * @dstats: Dummy statistics
1605 * @vstats: Virtual ethernet statistics
1606 *
1607 * @garp_port: GARP
1608 * @mrp_port: MRP
1609 *
1610 * @dev: Class/net/name entry
1611 * @sysfs_groups: Space for optional device, statistics and wireless
1612 * sysfs groups
1613 *
1614 * @sysfs_rx_queue_group: Space for optional per-rx queue attributes
1615 * @rtnl_link_ops: Rtnl_link_ops
1616 *
1617 * @gso_max_size: Maximum size of generic segmentation offload
1618 * @gso_max_segs: Maximum number of segments that can be passed to the
1619 * NIC for GSO
1620 *
1621 * @dcbnl_ops: Data Center Bridging netlink ops
1622 * @num_tc: Number of traffic classes in the net device
1623 * @tc_to_txq: XXX: need comments on this one
1624 * @prio_tc_map: XXX: need comments on this one
1625 *
1626 * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp
1627 *
1628 * @priomap: XXX: need comments on this one
1629 * @phydev: Physical device may attach itself
1630 * for hardware timestamping
1631 *
1632 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
1633 * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
1634 *
1635 * @proto_down: protocol port state information can be sent to the
1636 * switch driver and used to set the phys state of the
1637 * switch port.
1638 *
1639 * FIXME: cleanup struct net_device such that network protocol info
1640 * moves out.
1641 */
1642
1643 struct net_device {
1644 char name[IFNAMSIZ];
1645 struct hlist_node name_hlist;
1646 char *ifalias;
1647 /*
1648 * I/O specific fields
1649 * FIXME: Merge these and struct ifmap into one
1650 */
1651 unsigned long mem_end;
1652 unsigned long mem_start;
1653 unsigned long base_addr;
1654 int irq;
1655
1656 atomic_t carrier_changes;
1657
1658 /*
1659 * Some hardware also needs these fields (state, dev_list,
1660 * napi_list, unreg_list, close_list) but they are not
1661 * part of the usual set specified in Space.c.
1662 */
1663
1664 unsigned long state;
1665
1666 struct list_head dev_list;
1667 struct list_head napi_list;
1668 struct list_head unreg_list;
1669 struct list_head close_list;
1670 struct list_head ptype_all;
1671 struct list_head ptype_specific;
1672
1673 struct {
1674 struct list_head upper;
1675 struct list_head lower;
1676 } adj_list;
1677
1678 netdev_features_t features;
1679 netdev_features_t hw_features;
1680 netdev_features_t wanted_features;
1681 netdev_features_t vlan_features;
1682 netdev_features_t hw_enc_features;
1683 netdev_features_t mpls_features;
1684 netdev_features_t gso_partial_features;
1685
1686 int ifindex;
1687 int group;
1688
1689 struct net_device_stats stats;
1690
1691 atomic_long_t rx_dropped;
1692 atomic_long_t tx_dropped;
1693 atomic_long_t rx_nohandler;
1694
1695 #ifdef CONFIG_WIRELESS_EXT
1696 const struct iw_handler_def *wireless_handlers;
1697 struct iw_public_data *wireless_data;
1698 #endif
1699 const struct net_device_ops *netdev_ops;
1700 const struct ethtool_ops *ethtool_ops;
1701 #ifdef CONFIG_NET_SWITCHDEV
1702 const struct switchdev_ops *switchdev_ops;
1703 #endif
1704 #ifdef CONFIG_NET_L3_MASTER_DEV
1705 const struct l3mdev_ops *l3mdev_ops;
1706 #endif
1707 #if IS_ENABLED(CONFIG_IPV6)
1708 const struct ndisc_ops *ndisc_ops;
1709 #endif
1710
1711 const struct header_ops *header_ops;
1712
1713 unsigned int flags;
1714 unsigned int priv_flags;
1715
1716 unsigned short gflags;
1717 unsigned short padded;
1718
1719 unsigned char operstate;
1720 unsigned char link_mode;
1721
1722 unsigned char if_port;
1723 unsigned char dma;
1724
1725 unsigned int mtu;
1726 unsigned int min_mtu;
1727 unsigned int max_mtu;
1728 unsigned short type;
1729 unsigned short hard_header_len;
1730
1731 unsigned short needed_headroom;
1732 unsigned short needed_tailroom;
1733
1734 /* Interface address info. */
1735 unsigned char perm_addr[MAX_ADDR_LEN];
1736 unsigned char addr_assign_type;
1737 unsigned char addr_len;
1738 unsigned short neigh_priv_len;
1739 unsigned short dev_id;
1740 unsigned short dev_port;
1741 spinlock_t addr_list_lock;
1742 unsigned char name_assign_type;
1743 bool uc_promisc;
1744 struct netdev_hw_addr_list uc;
1745 struct netdev_hw_addr_list mc;
1746 struct netdev_hw_addr_list dev_addrs;
1747
1748 #ifdef CONFIG_SYSFS
1749 struct kset *queues_kset;
1750 #endif
1751 unsigned int promiscuity;
1752 unsigned int allmulti;
1753
1754
1755 /* Protocol-specific pointers */
1756
1757 #if IS_ENABLED(CONFIG_VLAN_8021Q)
1758 struct vlan_info __rcu *vlan_info;
1759 #endif
1760 #if IS_ENABLED(CONFIG_NET_DSA)
1761 struct dsa_switch_tree *dsa_ptr;
1762 #endif
1763 #if IS_ENABLED(CONFIG_TIPC)
1764 struct tipc_bearer __rcu *tipc_ptr;
1765 #endif
1766 void *atalk_ptr;
1767 struct in_device __rcu *ip_ptr;
1768 struct dn_dev __rcu *dn_ptr;
1769 struct inet6_dev __rcu *ip6_ptr;
1770 void *ax25_ptr;
1771 struct wireless_dev *ieee80211_ptr;
1772 struct wpan_dev *ieee802154_ptr;
1773 #if IS_ENABLED(CONFIG_MPLS_ROUTING)
1774 struct mpls_dev __rcu *mpls_ptr;
1775 #endif
1776
1777 /*
1778 * Cache lines mostly used on receive path (including eth_type_trans())
1779 */
1780 unsigned long last_rx;
1781
1782 /* Interface address info used in eth_type_trans() */
1783 unsigned char *dev_addr;
1784
1785 #ifdef CONFIG_SYSFS
1786 struct netdev_rx_queue *_rx;
1787
1788 unsigned int num_rx_queues;
1789 unsigned int real_num_rx_queues;
1790 #endif
1791
1792 unsigned long gro_flush_timeout;
1793 rx_handler_func_t __rcu *rx_handler;
1794 void __rcu *rx_handler_data;
1795
1796 #ifdef CONFIG_NET_CLS_ACT
1797 struct tcf_proto __rcu *ingress_cl_list;
1798 #endif
1799 struct netdev_queue __rcu *ingress_queue;
1800 #ifdef CONFIG_NETFILTER_INGRESS
1801 struct nf_hook_entry __rcu *nf_hooks_ingress;
1802 #endif
1803
1804 unsigned char broadcast[MAX_ADDR_LEN];
1805 #ifdef CONFIG_RFS_ACCEL
1806 struct cpu_rmap *rx_cpu_rmap;
1807 #endif
1808 struct hlist_node index_hlist;
1809
1810 /*
1811 * Cache lines mostly used on transmit path
1812 */
1813 struct netdev_queue *_tx ____cacheline_aligned_in_smp;
1814 unsigned int num_tx_queues;
1815 unsigned int real_num_tx_queues;
1816 struct Qdisc *qdisc;
1817 #ifdef CONFIG_NET_SCHED
1818 DECLARE_HASHTABLE (qdisc_hash, 4);
1819 #endif
1820 unsigned long tx_queue_len;
1821 spinlock_t tx_global_lock;
1822 int watchdog_timeo;
1823
1824 #ifdef CONFIG_XPS
1825 struct xps_dev_maps __rcu *xps_maps;
1826 #endif
1827 #ifdef CONFIG_NET_CLS_ACT
1828 struct tcf_proto __rcu *egress_cl_list;
1829 #endif
1830
1831 /* These may be needed for future network-power-down code. */
1832 struct timer_list watchdog_timer;
1833
1834 int __percpu *pcpu_refcnt;
1835 struct list_head todo_list;
1836
1837 struct list_head link_watch_list;
1838
1839 enum { NETREG_UNINITIALIZED=0,
1840 NETREG_REGISTERED, /* completed register_netdevice */
1841 NETREG_UNREGISTERING, /* called unregister_netdevice */
1842 NETREG_UNREGISTERED, /* completed unregister todo */
1843 NETREG_RELEASED, /* called free_netdev */
1844 NETREG_DUMMY, /* dummy device for NAPI poll */
1845 } reg_state:8;
1846
1847 bool dismantle;
1848
1849 enum {
1850 RTNL_LINK_INITIALIZED,
1851 RTNL_LINK_INITIALIZING,
1852 } rtnl_link_state:16;
1853
1854 void (*destructor)(struct net_device *dev);
1855
1856 #ifdef CONFIG_NETPOLL
1857 struct netpoll_info __rcu *npinfo;
1858 #endif
1859
1860 possible_net_t nd_net;
1861
1862 /* mid-layer private */
1863 union {
1864 void *ml_priv;
1865 struct pcpu_lstats __percpu *lstats;
1866 struct pcpu_sw_netstats __percpu *tstats;
1867 struct pcpu_dstats __percpu *dstats;
1868 struct pcpu_vstats __percpu *vstats;
1869 };
1870
1871 struct garp_port __rcu *garp_port;
1872 struct mrp_port __rcu *mrp_port;
1873
1874 struct device dev;
1875 const struct attribute_group *sysfs_groups[4];
1876 const struct attribute_group *sysfs_rx_queue_group;
1877
1878 const struct rtnl_link_ops *rtnl_link_ops;
1879
1880 /* for setting kernel sock attribute on TCP connection setup */
1881 #define GSO_MAX_SIZE 65536
1882 unsigned int gso_max_size;
1883 #define GSO_MAX_SEGS 65535
1884 u16 gso_max_segs;
1885
1886 #ifdef CONFIG_DCB
1887 const struct dcbnl_rtnl_ops *dcbnl_ops;
1888 #endif
1889 u8 num_tc;
1890 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
1891 u8 prio_tc_map[TC_BITMASK + 1];
1892
1893 #if IS_ENABLED(CONFIG_FCOE)
1894 unsigned int fcoe_ddp_xid;
1895 #endif
1896 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
1897 struct netprio_map __rcu *priomap;
1898 #endif
1899 struct phy_device *phydev;
1900 struct lock_class_key *qdisc_tx_busylock;
1901 struct lock_class_key *qdisc_running_key;
1902 bool proto_down;
1903 };
1904 #define to_net_dev(d) container_of(d, struct net_device, dev)
1905
1906 #define NETDEV_ALIGN 32
1907
1908 static inline
1909 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
1910 {
1911 return dev->prio_tc_map[prio & TC_BITMASK];
1912 }
1913
1914 static inline
1915 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
1916 {
1917 if (tc >= dev->num_tc)
1918 return -EINVAL;
1919
1920 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
1921 return 0;
1922 }
1923
1924 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
1925 void netdev_reset_tc(struct net_device *dev);
1926 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
1927 int netdev_set_num_tc(struct net_device *dev, u8 num_tc);
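/* Illustrative sketch (not part of the traced source): configuring two traffic
 * classes, each backed by four contiguous TX queues, and splitting the sixteen
 * possible priorities between them with the helpers above. */
static int example_config_tc(struct net_device *dev)
{
	u8 prio;
	int err = netdev_set_num_tc(dev, 2);

	if (err)
		return err;
	netdev_set_tc_queue(dev, 0, 4, 0);	/* tc0 -> queues 0..3 */
	netdev_set_tc_queue(dev, 1, 4, 4);	/* tc1 -> queues 4..7 */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, prio < 8 ? 0 : 1);
	return 0;
}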
1928
1929 static inline
1930 int netdev_get_num_tc(struct net_device *dev)
1931 {
1932 return dev->num_tc;
1933 }
1934
1935 static inline
1936 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
1937 unsigned int index)
1938 {
1939 return &dev->_tx[index];
1940 }
1941
1942 static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
1943 const struct sk_buff *skb)
1944 {
1945 return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
1946 }
1947
1948 static inline void netdev_for_each_tx_queue(struct net_device *dev,
1949 void (*f)(struct net_device *,
1950 struct netdev_queue *,
1951 void *),
1952 void *arg)
1953 {
1954 unsigned int i;
1955
1956 for (i = 0; i < dev->num_tx_queues; i++)
1957 f(dev, &dev->_tx[i], arg);
1958 }
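/* Illustrative sketch (not part of the traced source): a per-queue hook passed
 * to netdev_for_each_tx_queue(); 'example_init_one_txq' is hypothetical. */
static void example_init_one_txq(struct net_device *dev,
				 struct netdev_queue *txq, void *arg)
{
	/* per-queue setup would go here; 'arg' carries caller context */
}

/* at setup time:  netdev_for_each_tx_queue(dev, example_init_one_txq, NULL); */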
1959
1960 #define netdev_lockdep_set_classes(dev) \
1961 { \
1962 static struct lock_class_key qdisc_tx_busylock_key; \
1963 static struct lock_class_key qdisc_running_key; \
1964 static struct lock_class_key qdisc_xmit_lock_key; \
1965 static struct lock_class_key dev_addr_list_lock_key; \
1966 unsigned int i; \
1967 \
1968 (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
1969 (dev)->qdisc_running_key = &qdisc_running_key; \
1970 lockdep_set_class(&(dev)->addr_list_lock, \
1971 &dev_addr_list_lock_key); \
1972 for (i = 0; i < (dev)->num_tx_queues; i++) \
1973 lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
1974 &qdisc_xmit_lock_key); \
1975 }
1976
1977 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
1978 struct sk_buff *skb,
1979 void *accel_priv);
1980
1981 /* Returns the headroom that the master device needs to take into account
1982 * when forwarding to this dev
1983 */
1984 static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
1985 {
1986 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
1987 }
1988
1989 static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
1990 {
1991 if (dev->netdev_ops->ndo_set_rx_headroom)
1992 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
1993 }
1994
1995 /* set the device rx headroom to the dev's default */
1996 static inline void netdev_reset_rx_headroom(struct net_device *dev)
1997 {
1998 netdev_set_rx_headroom(dev, -1);
1999 }
2000
2001 /*
2002 * Net namespace inlines
2003 */
2004 static inline
2005 struct net *dev_net(const struct net_device *dev)
2006 {
2007 return read_pnet(&dev->nd_net);
2008 }
2009
2010 static inline
2011 void dev_net_set(struct net_device *dev, struct net *net)
2012 {
2013 write_pnet(&dev->nd_net, net);
2014 }
2015
2016 static inline bool netdev_uses_dsa(struct net_device *dev)
2017 {
2018 #if IS_ENABLED(CONFIG_NET_DSA)
2019 if (dev->dsa_ptr != NULL)
2020 return dsa_uses_tagged_protocol(dev->dsa_ptr);
2021 #endif
2022 return false;
2023 }
2024
2025 /**
2026 * netdev_priv - access network device private data
2027 * @dev: network device
2028 *
2029 * Get network device private data
2030 */
2031 static inline void *netdev_priv(const struct net_device *dev)
2032 {
2033 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
2034 }
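/* Illustrative sketch (not part of the traced source): the usual pattern of
 * reserving private space at allocation time and reaching it back through
 * netdev_priv(). 'struct example_priv' is hypothetical. */
struct example_priv {
	spinlock_t lock;
	bool link_up;
};

static struct net_device *example_create(void)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct example_priv));
	struct example_priv *priv;

	if (!dev)
		return NULL;
	priv = netdev_priv(dev);
	spin_lock_init(&priv->lock);
	return dev;
}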
2035
2036 /* Set the sysfs physical device reference for the network logical device;
2037 * if set prior to registration, it will cause a symlink during initialization.
2038 */
2039 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
2040
2041 /* Set the sysfs device type for the network logical device to allow
2042 * fine-grained identification of different network device types. For
2043 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
2044 */
2045 #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
2046
2047 /* Default NAPI poll() weight
2048 * Device drivers are strongly advised not to use a bigger value.
2049 */
2050 #define NAPI_POLL_WEIGHT 64
2051
2052 /**
2053 * netif_napi_add - initialize a NAPI context
2054 * @dev: network device
2055 * @napi: NAPI context
2056 * @poll: polling function
2057 * @weight: default weight
2058 *
2059 * netif_napi_add() must be used to initialize a NAPI context prior to calling
2060 * *any* of the other NAPI-related functions.
2061 */
2062 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2063 int (*poll)(struct napi_struct *, int), int weight);
2064
2065 /**
2066 * netif_tx_napi_add - initialize a NAPI context
2067 * @dev: network device
2068 * @napi: NAPI context
2069 * @poll: polling function
2070 * @weight: default weight
2071 *
2072 * This variant of netif_napi_add() should be used from drivers using NAPI
2073 * to exclusively poll a TX queue.
2074 * This avoids adding it to napi_hash[], thus polluting that hash table.
2075 */
2076 static inline void netif_tx_napi_add(struct net_device *dev,
2077 struct napi_struct *napi,
2078 int (*poll)(struct napi_struct *, int),
2079 int weight)
2080 {
2081 set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
2082 netif_napi_add(dev, napi, poll, weight);
2083 }
2084
2085 /**
2086 * netif_napi_del - remove a NAPI context
2087 * @napi: NAPI context
2088 *
2089 * netif_napi_del() removes a NAPI context from the network device NAPI list
2090 */
2091 void netif_napi_del(struct napi_struct *napi);
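/* Illustrative sketch (not part of the traced source): wiring up a NAPI
 * context. 'example_poll' is a hypothetical poll callback; it must honour its
 * 'budget' argument and complete NAPI only when work is exhausted. */
static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... process up to 'budget' RX packets, counting them in work_done ... */
	if (work_done < budget)
		napi_complete(napi);
	return work_done;
}

/* probe:   netif_napi_add(dev, &priv->napi, example_poll, NAPI_POLL_WEIGHT);
 * open:    napi_enable(&priv->napi);
 * remove:  netif_napi_del(&priv->napi);
 */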
2092
2093 struct napi_gro_cb {
2094 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
2095 void *frag0;
2096
2097 /* Length of frag0. */
2098 unsigned int frag0_len;
2099
2100 /* This indicates where we are processing relative to skb->data. */
2101 int data_offset;
2102
2103 /* This is non-zero if the packet cannot be merged with the new skb. */
2104 u16 flush;
2105
2106 /* Save the IP ID here and check when we get to the transport layer */
2107 u16 flush_id;
2108
2109 /* Number of segments aggregated. */
2110 u16 count;
2111
2112 /* Start offset for remote checksum offload */
2113 u16 gro_remcsum_start;
2114
2115 /* jiffies when first packet was created/queued */
2116 unsigned long age;
2117
2118 /* Used in ipv6_gro_receive() and foo-over-udp */
2119 u16 proto;
2120
2121 /* This is non-zero if the packet may be of the same flow. */
2122 u8 same_flow:1;
2123
2124 /* Used in tunnel GRO receive */
2125 u8 encap_mark:1;
2126
2127 /* GRO checksum is valid */
2128 u8 csum_valid:1;
2129
2130 /* Number of checksums via CHECKSUM_UNNECESSARY */
2131 u8 csum_cnt:3;
2132
2133 /* Free the skb? */
2134 u8 free:2;
2135 #define NAPI_GRO_FREE 1
2136 #define NAPI_GRO_FREE_STOLEN_HEAD 2
2137
2138 /* Used in foo-over-udp, set in udp[46]_gro_receive */
2139 u8 is_ipv6:1;
2140
2141 /* Used in GRE, set in fou/gue_gro_receive */
2142 u8 is_fou:1;
2143
2144 /* Used to determine if flush_id can be ignored */
2145 u8 is_atomic:1;
2146
2147 /* Number of gro_receive callbacks this packet already went through */
2148 u8 recursion_counter:4;
2149
2150 /* 1 bit hole */
2151
2152 /* used to support CHECKSUM_COMPLETE for tunneling protocols */
2153 __wsum csum;
2154
2155 /* used in skb_gro_receive() slow path */
2156 struct sk_buff *last;
2157 };
2158
2159 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
2160
2161 #define GRO_RECURSION_LIMIT 15
2162 static inline int gro_recursion_inc_test(struct sk_buff *skb)
2163 {
2164 return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
2165 }
2166
2167 typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
2168 static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
2169 struct sk_buff **head,
2170 struct sk_buff *skb)
2171 {
2172 if (unlikely(gro_recursion_inc_test(skb))) {
2173 NAPI_GRO_CB(skb)->flush |= 1;
2174 return NULL;
2175 }
2176
2177 return cb(head, skb);
2178 }
2179
2180 typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
2181 struct sk_buff *);
2182 static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
2183 struct sock *sk,
2184 struct sk_buff **head,
2185 struct sk_buff *skb)
2186 {
2187 if (unlikely(gro_recursion_inc_test(skb))) {
2188 NAPI_GRO_CB(skb)->flush |= 1;
2189 return NULL;
2190 }
2191
2192 return cb(sk, head, skb);
2193 }
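/* Illustrative sketch (not part of the traced source): how an encapsulation
 * protocol's gro_receive callback recurses into the inner protocol through
 * call_gro_receive(), keeping the recursion counter honest.
 * 'inner_gro_receive' is a hypothetical inner-protocol handler. */
static struct sk_buff **example_gro_receive(struct sk_buff **head,
					    struct sk_buff *skb)
{
	/* ... parse and pull the outer header here ... */
	return call_gro_receive(inner_gro_receive, head, skb);
}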
2194
2195 struct packet_type {
2196 __be16 type; /* This is really htons(ether_type). */
2197 struct net_device *dev; /* NULL is wildcarded here */
2198 int (*func) (struct sk_buff *,
2199 struct net_device *,
2200 struct packet_type *,
2201 struct net_device *);
2202 bool (*id_match)(struct packet_type *ptype,
2203 struct sock *sk);
2204 void *af_packet_priv;
2205 struct list_head list;
2206 };
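/* Illustrative sketch (not part of the traced source): registering a handler
 * for the IEEE local-experimental ethertype with dev_add_pack(), which is
 * declared at the end of this header. The 'example_*' names are hypothetical. */
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	kfree_skb(skb);		/* a real handler would process the frame */
	return NET_RX_SUCCESS;
}

static struct packet_type example_ptype __read_mostly = {
	.type = cpu_to_be16(0x88B5),
	.func = example_rcv,
};

/* at init time:  dev_add_pack(&example_ptype); */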
2207
2208 struct offload_callbacks {
2209 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
2210 netdev_features_t features);
2211 struct sk_buff **(*gro_receive)(struct sk_buff **head,
2212 struct sk_buff *skb);
2213 int (*gro_complete)(struct sk_buff *skb, int nhoff);
2214 };
2215
2216 struct packet_offload {
2217 __be16 type; /* This is really htons(ether_type). */
2218 u16 priority;
2219 struct offload_callbacks callbacks;
2220 struct list_head list;
2221 };
2222
2223 /* often modified stats are per-CPU, others are shared (netdev->stats) */
2224 struct pcpu_sw_netstats {
2225 u64 rx_packets;
2226 u64 rx_bytes;
2227 u64 tx_packets;
2228 u64 tx_bytes;
2229 struct u64_stats_sync syncp;
2230 };
2231
2232 #define __netdev_alloc_pcpu_stats(type, gfp) \
2233 ({ \
2234 typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
2235 if (pcpu_stats) { \
2236 int __cpu; \
2237 for_each_possible_cpu(__cpu) { \
2238 typeof(type) *stat; \
2239 stat = per_cpu_ptr(pcpu_stats, __cpu); \
2240 u64_stats_init(&stat->syncp); \
2241 } \
2242 } \
2243 pcpu_stats; \
2244 })
2245
2246 #define netdev_alloc_pcpu_stats(type) \
2247 __netdev_alloc_pcpu_stats(type, GFP_KERNEL)
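/* Illustrative sketch (not part of the traced source): updating stats that
 * were allocated with netdev_alloc_pcpu_stats(struct pcpu_sw_netstats), under
 * the per-CPU u64_stats seqcount. 'example_count_rx' is hypothetical. */
static void example_count_rx(struct pcpu_sw_netstats __percpu *stats,
			     unsigned int len)
{
	struct pcpu_sw_netstats *s = this_cpu_ptr(stats);

	u64_stats_update_begin(&s->syncp);
	s->rx_packets++;
	s->rx_bytes += len;
	u64_stats_update_end(&s->syncp);
}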
2248
2249 enum netdev_lag_tx_type {
2250 NETDEV_LAG_TX_TYPE_UNKNOWN,
2251 NETDEV_LAG_TX_TYPE_RANDOM,
2252 NETDEV_LAG_TX_TYPE_BROADCAST,
2253 NETDEV_LAG_TX_TYPE_ROUNDROBIN,
2254 NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
2255 NETDEV_LAG_TX_TYPE_HASH,
2256 };
2257
2258 struct netdev_lag_upper_info {
2259 enum netdev_lag_tx_type tx_type;
2260 };
2261
2262 struct netdev_lag_lower_state_info {
2263 u8 link_up : 1,
2264 tx_enabled : 1;
2265 };
2266
2267 #include <linux/notifier.h>
2268
2269 /* netdevice notifier chain. Please remember to update the rtnetlink
2270 * notification exclusion list in rtnetlink_event() when adding new
2271 * types.
2272 */
2273 #define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
2274 #define NETDEV_DOWN 0x0002
2275 #define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
2276 detected a hardware crash and restarted
2277 - we can use this eg to kick tcp sessions
2278 once done */
2279 #define NETDEV_CHANGE 0x0004 /* Notify device state change */
2280 #define NETDEV_REGISTER 0x0005
2281 #define NETDEV_UNREGISTER 0x0006
2282 #define NETDEV_CHANGEMTU 0x0007 /* notify after mtu change happened */
2283 #define NETDEV_CHANGEADDR 0x0008
2284 #define NETDEV_GOING_DOWN 0x0009
2285 #define NETDEV_CHANGENAME 0x000A
2286 #define NETDEV_FEAT_CHANGE 0x000B
2287 #define NETDEV_BONDING_FAILOVER 0x000C
2288 #define NETDEV_PRE_UP 0x000D
2289 #define NETDEV_PRE_TYPE_CHANGE 0x000E
2290 #define NETDEV_POST_TYPE_CHANGE 0x000F
2291 #define NETDEV_POST_INIT 0x0010
2292 #define NETDEV_UNREGISTER_FINAL 0x0011
2293 #define NETDEV_RELEASE 0x0012
2294 #define NETDEV_NOTIFY_PEERS 0x0013
2295 #define NETDEV_JOIN 0x0014
2296 #define NETDEV_CHANGEUPPER 0x0015
2297 #define NETDEV_RESEND_IGMP 0x0016
2298 #define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */
2299 #define NETDEV_CHANGEINFODATA 0x0018
2300 #define NETDEV_BONDING_INFO 0x0019
2301 #define NETDEV_PRECHANGEUPPER 0x001A
2302 #define NETDEV_CHANGELOWERSTATE 0x001B
2303 #define NETDEV_UDP_TUNNEL_PUSH_INFO 0x001C
2304 #define NETDEV_CHANGE_TX_QUEUE_LEN 0x001E
2305
2306 int register_netdevice_notifier(struct notifier_block *nb);
2307 int unregister_netdevice_notifier(struct notifier_block *nb);
2308
2309 struct netdev_notifier_info {
2310 struct net_device *dev;
2311 };
2312
2313 struct netdev_notifier_change_info {
2314 struct netdev_notifier_info info; /* must be first */
2315 unsigned int flags_changed;
2316 };
2317
2318 struct netdev_notifier_changeupper_info {
2319 struct netdev_notifier_info info; /* must be first */
2320 struct net_device *upper_dev; /* new upper dev */
2321 bool master; /* is upper dev master */
2322 bool linking; /* is the notification for link or unlink */
2323 void *upper_info; /* upper dev info */
2324 };
2325
2326 struct netdev_notifier_changelowerstate_info {
2327 struct netdev_notifier_info info; /* must be first */
2328 void *lower_state_info; /* is lower dev state */
2329 };
2330
2331 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
2332 struct net_device *dev)
2333 {
2334 info->dev = dev;
2335 }
2336
2337 static inline struct net_device *
2338 netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
2339 {
2340 return info->dev;
2341 }
2342
2343 int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
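/* Example (editorial sketch): a minimal netdevice notifier. The handler
 * and notifier_block names are hypothetical; the event constants and
 * netdev_notifier_info_to_dev() are the ones defined above.
 */

static int my_netdev_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		pr_info("%s is up\n", dev->name);
		break;
	case NETDEV_GOING_DOWN:
		/* last chance to quiesce any use of dev before it goes down */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block my_netdev_nb = {
	.notifier_call = my_netdev_event,
};

/* register_netdevice_notifier(&my_netdev_nb) at module init,
 * unregister_netdevice_notifier(&my_netdev_nb) at module exit.
 */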
2344
2345
2346 extern rwlock_t dev_base_lock; /* Device list lock */
2347
2348 #define for_each_netdev(net, d) \
2349 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
2350 #define for_each_netdev_reverse(net, d) \
2351 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
2352 #define for_each_netdev_rcu(net, d) \
2353 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
2354 #define for_each_netdev_safe(net, d, n) \
2355 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
2356 #define for_each_netdev_continue(net, d) \
2357 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
2358 #define for_each_netdev_continue_rcu(net, d) \
2359 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
2360 #define for_each_netdev_in_bond_rcu(bond, slave) \
2361 for_each_netdev_rcu(&init_net, slave) \
2362 if (netdev_master_upper_dev_get_rcu(slave) == (bond))
2363 #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
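/* Example (editorial sketch): walking the per-namespace device list with
 * the iterators above. The plain for_each_netdev() variants require the
 * RTNL or dev_base_lock; the _rcu variant only needs rcu_read_lock().
 * "my_dump_devices" is a hypothetical helper.
 */

static void my_dump_devices(struct net *net)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		pr_info("%s: ifindex %d\n", dev->name, dev->ifindex);
	rcu_read_unlock();
}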
2364
2365 static inline struct net_device *next_net_device(struct net_device *dev)
2366 {
2367 struct list_head *lh;
2368 struct net *net;
2369
2370 net = dev_net(dev);
2371 lh = dev->dev_list.next;
2372 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2373 }
2374
2375 static inline struct net_device *next_net_device_rcu(struct net_device *dev)
2376 {
2377 struct list_head *lh;
2378 struct net *net;
2379
2380 net = dev_net(dev);
2381 lh = rcu_dereference(list_next_rcu(&dev->dev_list));
2382 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2383 }
2384
2385 static inline struct net_device *first_net_device(struct net *net)
2386 {
2387 return list_empty(&net->dev_base_head) ? NULL :
2388 net_device_entry(net->dev_base_head.next);
2389 }
2390
2391 static inline struct net_device *first_net_device_rcu(struct net *net)
2392 {
2393 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
2394
2395 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2396 }
2397
2398 int netdev_boot_setup_check(struct net_device *dev);
2399 unsigned long netdev_boot_base(const char *prefix, int unit);
2400 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
2401 const char *hwaddr);
2402 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
2403 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
2404 void dev_add_pack(struct packet_type *pt);
2405 void dev_remove_pack(struct packet_type *pt);
2406 void __dev_remove_pack(struct packet_type *pt);
2407 void dev_add_offload(struct packet_offload *po);
2408 void dev_remove_offload(struct packet_offload *po);
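/* Example (editorial sketch): registering GRO callbacks for an ethertype
 * via the packet_offload structure above. The callbacks and the
 * experimental ethertype are placeholders, assumed to be defined
 * elsewhere in a hypothetical driver.
 */

#define ETH_P_MYPROTO 0x88b5	/* IEEE 802 local experimental ethertype */

static struct sk_buff **my_gro_receive(struct sk_buff **head,
				       struct sk_buff *skb);	/* elsewhere */
static int my_gro_complete(struct sk_buff *skb, int nhoff);	/* elsewhere */

static struct packet_offload my_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_MYPROTO),
	.priority = 10,		/* lower values are matched earlier */
	.callbacks = {
		.gro_receive = my_gro_receive,
		.gro_complete = my_gro_complete,
	},
};

/* dev_add_offload(&my_offload) at init,
 * dev_remove_offload(&my_offload) at exit.
 */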
2409
2410 int dev_get_iflink(const struct net_device *dev);
2411 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
2412 struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
2413 unsigned short mask);
2414 struct net_device *dev_get_by_name(struct net *net, const char *name);
2415 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
2416 struct net_device *__dev_get_by_name(struct net *net, const char *name);
2417 int dev_alloc_name(struct net_device *dev, const char *name);
2418 int dev_open(struct net_device *dev);
2419 int dev_close(struct net_device *dev);
2420 int dev_close_many(struct list_head *head, bool unlink);
2421 void dev_disable_lro(struct net_device *dev);
2422 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
2423 int dev_queue_xmit(struct sk_buff *skb);
2424 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
2425 int register_netdevice(struct net_device *dev);
2426 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
2427 void unregister_netdevice_many(struct list_head *head);
2428 static inline void unregister_netdevice(struct net_device *dev)
2429 {
2430 unregister_netdevice_queue(dev, NULL);
2431 }
2432
2433 int netdev_refcnt_read(const struct net_device *dev);
2434 void free_netdev(struct net_device *dev);
2435 void netdev_freemem(struct net_device *dev);
2436 void synchronize_net(void);
2437 int init_dummy_netdev(struct net_device *dev);
2438
2439 DECLARE_PER_CPU(int, xmit_recursion);
2440 #define XMIT_RECURSION_LIMIT 10
2441
2442 static inline int dev_recursion_level(void)
2443 {
2444 return this_cpu_read(xmit_recursion);
2445 }
2446
2447 struct net_device *dev_get_by_index(struct net *net, int ifindex);
2448 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
2449 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
2450 int netdev_get_name(struct net *net, char *name, int ifindex);
2451 int dev_restart(struct net_device *dev);
2452 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
2453
2454 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
2455 {
2456 return NAPI_GRO_CB(skb)->data_offset;
2457 }
2458
2459 static inline unsigned int skb_gro_len(const struct sk_buff *skb)
2460 {
2461 return skb->len - NAPI_GRO_CB(skb)->data_offset;
2462 }
2463
2464 static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
2465 {
2466 NAPI_GRO_CB(skb)->data_offset += len;
2467 }
2468
2469 static inline void *skb_gro_header_fast(struct sk_buff *skb,
2470 unsigned int offset)
2471 {
2472 return NAPI_GRO_CB(skb)->frag0 + offset;
2473 }
2474
2475 static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
2476 {
2477 return NAPI_GRO_CB(skb)->frag0_len < hlen;
2478 }
2479
2480 static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
2481 unsigned int offset)
2482 {
2483 if (!pskb_may_pull(skb, hlen))
2484 return NULL;
2485
2486 NAPI_GRO_CB(skb)->frag0 = NULL;
2487 NAPI_GRO_CB(skb)->frag0_len = 0;
2488 return skb->data + offset;
2489 }
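/* Example (editorial sketch): the canonical pattern for safely reading
 * "hlen" bytes of header at the current GRO offset, combining the three
 * helpers above the way protocol gro_receive handlers do.
 */

static void *my_gro_header(struct sk_buff *skb, unsigned int hlen)
{
	unsigned int off = skb_gro_offset(skb);
	void *hdr = skb_gro_header_fast(skb, off);

	if (skb_gro_header_hard(skb, off + hlen)) {
		/* frag0 fast path too short: pull into the linear area */
		hdr = skb_gro_header_slow(skb, off + hlen, off);
		if (!hdr)
			return NULL;	/* packet shorter than the header */
	}
	return hdr;
}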
2490
2491 static inline void *skb_gro_network_header(struct sk_buff *skb)
2492 {
2493 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
2494 skb_network_offset(skb);
2495 }
2496
2497 static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
2498 const void *start, unsigned int len)
2499 {
2500 if (NAPI_GRO_CB(skb)->csum_valid)
2501 NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
2502 csum_partial(start, len, 0));
2503 }
2504
2505 /* GRO checksum functions. These are logical equivalents of the normal
2506 * checksum functions (in skbuff.h) except that they operate on the GRO
2507 * offsets and fields in sk_buff.
2508 */
2509
2510 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
2511
2512 static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
2513 {
2514 return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
2515 }
2516
2517 static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
2518 bool zero_okay,
2519 __sum16 check)
2520 {
2521 return ((skb->ip_summed != CHECKSUM_PARTIAL ||
2522 skb_checksum_start_offset(skb) <
2523 skb_gro_offset(skb)) &&
2524 !skb_at_gro_remcsum_start(skb) &&
2525 NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2526 (!zero_okay || check));
2527 }
2528
2529 static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
2530 __wsum psum)
2531 {
2532 if (NAPI_GRO_CB(skb)->csum_valid &&
2533 !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
2534 return 0;
2535
2536 NAPI_GRO_CB(skb)->csum = psum;
2537
2538 return __skb_gro_checksum_complete(skb);
2539 }
2540
2541 static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
2542 {
2543 if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
2544 /* Consume a checksum from CHECKSUM_UNNECESSARY */
2545 NAPI_GRO_CB(skb)->csum_cnt--;
2546 } else {
2547 /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
2548 * verified a new top level checksum or an encapsulated one
2549 * during GRO. This saves work if we fall back to the normal path.
2550 */
2551 __skb_incr_checksum_unnecessary(skb);
2552 }
2553 }
2554
2555 #define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
2556 compute_pseudo) \
2557 ({ \
2558 __sum16 __ret = 0; \
2559 if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
2560 __ret = __skb_gro_checksum_validate_complete(skb, \
2561 compute_pseudo(skb, proto)); \
2562 if (__ret) \
2563 __skb_mark_checksum_bad(skb); \
2564 else \
2565 skb_gro_incr_csum_unnecessary(skb); \
2566 __ret; \
2567 })
2568
2569 #define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
2570 __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
2571
2572 #define skb_gro_checksum_validate_zero_check(skb, proto, check, \
2573 compute_pseudo) \
2574 __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
2575
2576 #define skb_gro_checksum_simple_validate(skb) \
2577 __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
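/* Example (editorial sketch): how an L4 gro_receive handler typically
 * uses the validation macro, loosely modeled on TCP over IPv4. Assumes
 * inet_gro_compute_pseudo() from <net/ip.h> and tcp_gro_receive() from
 * <net/tcp.h>.
 */

static struct sk_buff **my_tcp4_gro_receive(struct sk_buff **head,
					    struct sk_buff *skb)
{
	/* Don't bother verifying the checksum if we will flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;	/* bad checksum */
		return NULL;
	}
	return tcp_gro_receive(head, skb);
}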
2578
2579 static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
2580 {
2581 return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2582 !NAPI_GRO_CB(skb)->csum_valid);
2583 }
2584
2585 static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
2586 __sum16 check, __wsum pseudo)
2587 {
2588 NAPI_GRO_CB(skb)->csum = ~pseudo;
2589 NAPI_GRO_CB(skb)->csum_valid = 1;
2590 }
2591
2592 #define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \
2593 do { \
2594 if (__skb_gro_checksum_convert_check(skb)) \
2595 __skb_gro_checksum_convert(skb, check, \
2596 compute_pseudo(skb, proto)); \
2597 } while (0)
2598
2599 struct gro_remcsum {
2600 int offset;
2601 __wsum delta;
2602 };
2603
2604 static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
2605 {
2606 grc->offset = 0;
2607 grc->delta = 0;
2608 }
2609
2610 static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
2611 unsigned int off, size_t hdrlen,
2612 int start, int offset,
2613 struct gro_remcsum *grc,
2614 bool nopartial)
2615 {
2616 __wsum delta;
2617 size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
2618
2619 BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
2620
2621 if (!nopartial) {
2622 NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
2623 return ptr;
2624 }
2625
2626 ptr = skb_gro_header_fast(skb, off);
2627 if (skb_gro_header_hard(skb, off + plen)) {
2628 ptr = skb_gro_header_slow(skb, off + plen, off);
2629 if (!ptr)
2630 return NULL;
2631 }
2632
2633 delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
2634 start, offset);
2635
2636 /* Adjust skb->csum since we changed the packet */
2637 NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
2638
2639 grc->offset = off + hdrlen + offset;
2640 grc->delta = delta;
2641
2642 return ptr;
2643 }
2644
2645 static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
2646 struct gro_remcsum *grc)
2647 {
2648 void *ptr;
2649 size_t plen = grc->offset + sizeof(u16);
2650
2651 if (!grc->delta)
2652 return;
2653
2654 ptr = skb_gro_header_fast(skb, grc->offset);
2655 if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
2656 ptr = skb_gro_header_slow(skb, plen, grc->offset);
2657 if (!ptr)
2658 return;
2659 }
2660
2661 remcsum_unadjust((__sum16 *)ptr, grc->delta);
2662 }
2663
2664 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
2665 unsigned short type,
2666 const void *daddr, const void *saddr,
2667 unsigned int len)
2668 {
2669 if (!dev->header_ops || !dev->header_ops->create)
2670 return 0;
2671
2672 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
2673 }
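/* Example (editorial sketch): prepending a link-layer header before a
 * raw transmit. Assumes the skb was allocated with LL_RESERVED_SPACE()
 * headroom; "my_build_and_xmit" is hypothetical, and the type argument
 * is a host-order ethertype per the dev_hard_header() convention above.
 */

static int my_build_and_xmit(struct net_device *dev, struct sk_buff *skb,
			     const unsigned char *dest_hw)
{
	skb->dev = dev;
	if (dev_hard_header(skb, dev, ETH_P_IP, dest_hw,
			    dev->dev_addr, skb->len) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}
	return dev_queue_xmit(skb);
}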
2674
2675 static inline int dev_parse_header(const struct sk_buff *skb,
2676 unsigned char *haddr)
2677 {
2678 const struct net_device *dev = skb->dev;
2679
2680 if (!dev->header_ops || !dev->header_ops->parse)
2681 return 0;
2682 return dev->header_ops->parse(skb, haddr);
2683 }
2684
2685 /* ll_header must have at least hard_header_len allocated */
2686 static inline bool dev_validate_header(const struct net_device *dev,
2687 char *ll_header, int len)
2688 {
2689 if (likely(len >= dev->hard_header_len))
2690 return true;
2691
2692 if (capable(CAP_SYS_RAWIO)) {
2693 memset(ll_header + len, 0, dev->hard_header_len - len);
2694 return true;
2695 }
2696
2697 if (dev->header_ops && dev->header_ops->validate)
2698 return dev->header_ops->validate(ll_header, len);
2699
2700 return false;
2701 }
2702
2703 typedef int gifconf_func_t(struct net_device *dev, char __user *bufptr, int len);
2704 int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
2705 static inline int unregister_gifconf(unsigned int family)
2706 {
2707 return register_gifconf(family, NULL);
2708 }
2709
2710 #ifdef CONFIG_NET_FLOW_LIMIT
2711 #define FLOW_LIMIT_HISTORY (1 << 7) /* must be a power of 2 and small enough not to overflow the u8 bucket counters */
2712 struct sd_flow_limit {
2713 u64 count;
2714 unsigned int num_buckets;
2715 unsigned int history_head;
2716 u16 history[FLOW_LIMIT_HISTORY];
2717 u8 buckets[];
2718 };
2719
2720 extern int netdev_flow_limit_table_len;
2721 #endif /* CONFIG_NET_FLOW_LIMIT */
2722
2723 /*
2724 * Incoming packets are placed on per-CPU queues
2725 */
2726 struct softnet_data {
2727 struct list_head poll_list;
2728 struct sk_buff_head process_queue;
2729
2730 /* stats */
2731 unsigned int processed;
2732 unsigned int time_squeeze;
2733 unsigned int received_rps;
2734 #ifdef CONFIG_RPS
2735 struct softnet_data *rps_ipi_list;
2736 #endif
2737 #ifdef CONFIG_NET_FLOW_LIMIT
2738 struct sd_flow_limit __rcu *flow_limit;
2739 #endif
2740 struct Qdisc *output_queue;
2741 struct Qdisc **output_queue_tailp;
2742 struct sk_buff *completion_queue;
2743
2744 #ifdef CONFIG_RPS
2745 /* input_queue_head should be written by the CPU owning this struct,
2746 * and only read by other CPUs. Worth using a cache line.
2747 */
2748 unsigned int input_queue_head ____cacheline_aligned_in_smp;
2749
2750 /* Elements below can be accessed between CPUs for RPS/RFS */
2751 struct call_single_data csd ____cacheline_aligned_in_smp;
2752 struct softnet_data *rps_ipi_next;
2753 unsigned int cpu;
2754 unsigned int input_queue_tail;
2755 #endif
2756 unsigned int dropped;
2757 struct sk_buff_head input_pkt_queue;
2758 struct napi_struct backlog;
2759
2760 };
2761
2762 static inline void input_queue_head_incr(struct softnet_data *sd)
2763 {
2764 #ifdef CONFIG_RPS
2765 sd->input_queue_head++;
2766 #endif
2767 }
2768
2769 static inline void input_queue_tail_incr_save(struct softnet_data *sd,
2770 unsigned int *qtail)
2771 {
2772 #ifdef CONFIG_RPS
2773 *qtail = ++sd->input_queue_tail;
2774 #endif
2775 }
2776
2777 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
2778
2779 void __netif_schedule(struct Qdisc *q);
2780 void netif_schedule_queue(struct netdev_queue *txq);
2781
2782 static inline void netif_tx_schedule_all(struct net_device *dev)
2783 {
2784 unsigned int i;
2785
2786 for (i = 0; i < dev->num_tx_queues; i++)
2787 netif_schedule_queue(netdev_get_tx_queue(dev, i));
2788 }
2789
2790 static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
2791 {
2792 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2793 }
2794
2795 /**
2796 * netif_start_queue - allow transmit
2797 * @dev: network device
2798 *
2799 * Allow upper layers to call the device hard_start_xmit routine.
2800 */
2801 static inline void netif_start_queue(struct net_device *dev)
2802 {
2803 netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
2804 }
2805
2806 static inline void netif_tx_start_all_queues(struct net_device *dev)
2807 {
2808 unsigned int i;
2809
2810 for (i = 0; i < dev->num_tx_queues; i++) {
2811 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2812 netif_tx_start_queue(txq);
2813 }
2814 }
2815
2816 void netif_tx_wake_queue(struct netdev_queue *dev_queue);
2817
2818 /**
2819 * netif_wake_queue - restart transmit
2820 * @dev: network device
2821 *
2822 * Allow upper layers to call the device hard_start_xmit routine.
2823 * Used for flow control when transmit resources are available.
2824 */
2825 static inline void netif_wake_queue(struct net_device *dev)
2826 {
2827 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
2828 }
2829
2830 static inline void netif_tx_wake_all_queues(struct net_device *dev)
2831 {
2832 unsigned int i;
2833
2834 for (i = 0; i < dev->num_tx_queues; i++) {
2835 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2836 netif_tx_wake_queue(txq);
2837 }
2838 }
2839
2840 static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
2841 {
2842 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2843 }
2844
2845 /**
2846 * netif_stop_queue - stop the transmit queue
2847 * @dev: network device
2848 *
2849 * Stop upper layers calling the device hard_start_xmit routine.
2850 * Used for flow control when transmit resources are unavailable.
2851 */
2852 static inline void netif_stop_queue(struct net_device *dev)
2853 {
2854 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
2855 }
2856
2857 void netif_tx_stop_all_queues(struct net_device *dev);
2858
2859 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
2860 {
2861 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2862 }
2863
2864 /**
2865 * netif_queue_stopped - test if transmit queue is flow-blocked
2866 * @dev: network device
2867 *
2868 * Test if transmit queue on device is currently unable to send.
2869 */
2870 static inline bool netif_queue_stopped(const struct net_device *dev)
2871 {
2872 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
2873 }
2874
2875 static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
2876 {
2877 return dev_queue->state & QUEUE_STATE_ANY_XOFF;
2878 }
2879
2880 static inline bool
2881 netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
2882 {
2883 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
2884 }
2885
2886 static inline bool
2887 netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
2888 {
2889 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
2890 }
2891
2892 /**
2893 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
2894 * @dev_queue: pointer to transmit queue
2895 *
2896 * BQL-enabled drivers might use this helper in their ndo_start_xmit()
2897 * to give an appropriate hint to the CPU.
2898 */
2899 static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
2900 {
2901 #ifdef CONFIG_BQL
2902 prefetchw(&dev_queue->dql.num_queued);
2903 #endif
2904 }
2905
2906 /**
2907 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
2908 * @dev_queue: pointer to transmit queue
2909 *
2910 * BQL-enabled drivers might use this helper in their TX completion path
2911 * to give an appropriate hint to the CPU.
2912 */
2913 static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
2914 {
2915 #ifdef CONFIG_BQL
2916 prefetchw(&dev_queue->dql.limit);
2917 #endif
2918 }
2919
2920 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
2921 unsigned int bytes)
2922 {
2923 #ifdef CONFIG_BQL
2924 dql_queued(&dev_queue->dql, bytes);
2925
2926 if (likely(dql_avail(&dev_queue->dql) >= 0))
2927 return;
2928
2929 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
2930
2931 /*
2932 * The XOFF flag must be set before checking the dql_avail below,
2933 * because in netdev_tx_completed_queue we update the dql_completed
2934 * before checking the XOFF flag.
2935 */
2936 smp_mb();
2937
2938 /* check again in case another CPU has just made room available */
2939 if (unlikely(dql_avail(&dev_queue->dql) >= 0))
2940 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
2941 #endif
2942 }
2943
2944 /**
2945 * netdev_sent_queue - report the number of bytes queued to hardware
2946 * @dev: network device
2947 * @bytes: number of bytes queued to the hardware device queue
2948 *
2949 * Report the number of bytes queued for sending/completion to the network
2950 * device hardware queue. @bytes should be a good approximation and should
2951 * exactly match the @bytes later passed to netdev_completed_queue().
2952 */
2953 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
2954 {
2955 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
2956 }
2957
2958 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
2959 unsigned int pkts, unsigned int bytes)
2960 {
2961 #ifdef CONFIG_BQL
2962 if (unlikely(!bytes))
2963 return;
2964
2965 dql_completed(&dev_queue->dql, bytes);
2966
2967 /*
2968 * Without the memory barrier there is a small possibility that
2969 * netdev_tx_sent_queue will miss the update and cause the queue to
2970 * be stopped forever
2971 */
2972 smp_mb();
2973
2974 if (dql_avail(&dev_queue->dql) < 0)
2975 return;
2976
2977 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
2978 netif_schedule_queue(dev_queue);
2979 #endif
2980 }
2981
2982 /**
2983 * netdev_completed_queue - report bytes and packets completed by device
2984 * @dev: network device
2985 * @pkts: actual number of packets sent over the medium
2986 * @bytes: actual number of bytes sent over the medium
2987 *
2988 * Report the number of bytes and packets transmitted by the network device
2989 * hardware queue over the physical medium; @bytes must exactly match the
2990 * @bytes amount passed to netdev_sent_queue().
2991 */
2992 static inline void netdev_completed_queue(struct net_device *dev,
2993 unsigned int pkts, unsigned int bytes)
2994 {
2995 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
2996 }
2997
2998 static inline void netdev_tx_reset_queue(struct netdev_queue *q)
2999 {
3000 #ifdef CONFIG_BQL
3001 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
3002 dql_reset(&q->dql);
3003 #endif
3004 }
3005
3006 /**
3007 * netdev_reset_queue - reset the packets and bytes count of a network device
3008 * @dev_queue: network device
3009 *
3010 * Reset the bytes and packet count of a network device and clear the
3011 * software flow control OFF bit for this network device
3012 */
3013 static inline void netdev_reset_queue(struct net_device *dev_queue)
3014 {
3015 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
3016 }
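/* Example (editorial sketch): the two halves of BQL accounting in a
 * single-queue driver. Ring-buffer management is elided; a real
 * ndo_start_xmit would also stop the queue when the hardware ring fills.
 */

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	netdev_txq_bql_enqueue_prefetchw(txq);
	/* ... post skb to the hardware ring here ... */
	netdev_tx_sent_queue(txq, skb->len);	/* may set __QUEUE_STATE_STACK_XOFF */
	return NETDEV_TX_OK;
}

static void my_tx_complete(struct net_device *dev,
			   unsigned int pkts, unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	netdev_txq_bql_complete_prefetchw(txq);
	netdev_tx_completed_queue(txq, pkts, bytes);	/* may reschedule the queue */
}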
3017
3018 /**
3019 * netdev_cap_txqueue - check if selected tx queue exceeds device queues
3020 * @dev: network device
3021 * @queue_index: given tx queue index
3022 *
3023 * Returns 0 if given tx queue index >= number of device tx queues,
3024 * otherwise returns the originally passed tx queue index.
3025 */
3026 static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
3027 {
3028 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
3029 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
3030 dev->name, queue_index,
3031 dev->real_num_tx_queues);
3032 return 0;
3033 }
3034
3035 return queue_index;
3036 }
3037
3038 /**
3039 * netif_running - test if up
3040 * @dev: network device
3041 *
3042 * Test if the device has been brought up.
3043 */
3044 static inline bool netif_running(const struct net_device *dev)
3045 {
3046 return test_bit(__LINK_STATE_START, &dev->state);
3047 }
3048
3049 /*
3050 * Routines to manage the subqueues on a device. We only need start,
3051 * stop, and a check if it's stopped. All other device management is
3052 * done at the overall netdevice level.
3053 * There is also a helper to test whether the device is multiqueue.
3054 */
3055
3056 /**
3057 * netif_start_subqueue - allow sending packets on subqueue
3058 * @dev: network device
3059 * @queue_index: sub queue index
3060 *
3061 * Start individual transmit queue of a device with multiple transmit queues.
3062 */
3063 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
3064 {
3065 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3066
3067 netif_tx_start_queue(txq);
3068 }
3069
3070 /**
3071 * netif_stop_subqueue - stop sending packets on subqueue
3072 * @dev: network device
3073 * @queue_index: sub queue index
3074 *
3075 * Stop individual transmit queue of a device with multiple transmit queues.
3076 */
3077 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
3078 {
3079 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3080 netif_tx_stop_queue(txq);
3081 }
3082
3083 /**
3084 * netif_subqueue_stopped - test status of subqueue
3085 * @dev: network device
3086 * @queue_index: sub queue index
3087 *
3088 * Check individual transmit queue of a device with multiple transmit queues.
3089 */
3090 static inline bool __netif_subqueue_stopped(const struct net_device *dev,
3091 u16 queue_index)
3092 {
3093 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3094
3095 return netif_tx_queue_stopped(txq);
3096 }
3097
3098 static inline bool netif_subqueue_stopped(const struct net_device *dev,
3099 struct sk_buff *skb)
3100 {
3101 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
3102 }
3103
3104 void netif_wake_subqueue(struct net_device *dev, u16 queue_index);
3105
3106 #ifdef CONFIG_XPS
3107 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
3108 u16 index);
3109 #else
3110 static inline int netif_set_xps_queue(struct net_device *dev,
3111 const struct cpumask *mask,
3112 u16 index)
3113 {
3114 return 0;
3115 }
3116 #endif
3117
3118 u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
3119 unsigned int num_tx_queues);
3120
3121 /*
3122 * Returns a Tx hash for the given packet, using dev->real_num_tx_queues
3123 * as the distribution range limit for the returned value.
3124 */
3125 static inline u16 skb_tx_hash(const struct net_device *dev,
3126 struct sk_buff *skb)
3127 {
3128 return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
3129 }
3130
3131 /**
3132 * netif_is_multiqueue - test if device has multiple transmit queues
3133 * @dev: network device
3134 *
3135 * Check if device has multiple transmit queues
3136 */
3137 static inline bool netif_is_multiqueue(const struct net_device *dev)
3138 {
3139 return dev->num_tx_queues > 1;
3140 }
3141
3142 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
3143
3144 #ifdef CONFIG_SYSFS
3145 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
3146 #else
3147 static inline int netif_set_real_num_rx_queues(struct net_device *dev,
3148 unsigned int rxq)
3149 {
3150 return 0;
3151 }
3152 #endif
3153
3154 #ifdef CONFIG_SYSFS
3155 static inline unsigned int get_netdev_rx_queue_index(
3156 struct netdev_rx_queue *queue)
3157 {
3158 struct net_device *dev = queue->dev;
3159 int index = queue - dev->_rx;
3160
3161 BUG_ON(index >= dev->num_rx_queues);
3162 return index;
3163 }
3164 #endif
3165
3166 #define DEFAULT_MAX_NUM_RSS_QUEUES (8)
3167 int netif_get_num_default_rss_queues(void);
3168
3169 enum skb_free_reason {
3170 SKB_REASON_CONSUMED,
3171 SKB_REASON_DROPPED,
3172 };
3173
3174 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
3175 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
3176
3177 /*
3178 * It is not allowed to call kfree_skb() or consume_skb() from hardware
3179 * interrupt context or with hardware interrupts being disabled.
3180 * (in_irq() || irqs_disabled())
3181 *
3182 * We provide four helpers that can be used in the following contexts:
3183 *
3184 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
3185 * replacing kfree_skb(skb)
3186 *
3187 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
3188 * Typically used in place of consume_skb(skb) in TX completion path
3189 *
3190 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
3191 * replacing kfree_skb(skb)
3192 *
3193 * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
3194 * and consumed a packet. Used in place of consume_skb(skb)
3195 */
3196 static inline void dev_kfree_skb_irq(struct sk_buff *skb)
3197 {
3198 __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
3199 }
3200
3201 static inline void dev_consume_skb_irq(struct sk_buff *skb)
3202 {
3203 __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
3204 }
3205
3206 static inline void dev_kfree_skb_any(struct sk_buff *skb)
3207 {
3208 __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
3209 }
3210
3211 static inline void dev_consume_skb_any(struct sk_buff *skb)
3212 {
3213 __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
3214 }
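/* Example (editorial sketch): picking the right helper in a TX
 * completion handler that runs in hard-IRQ context. Successfully sent
 * skbs are "consumed"; failed ones are "dropped" and thus visible to
 * drop monitors. "my_tx_clean_one" is a hypothetical helper.
 */

static void my_tx_clean_one(struct sk_buff *skb, bool sent_ok)
{
	if (sent_ok)
		dev_consume_skb_irq(skb);
	else
		dev_kfree_skb_irq(skb);
}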
3215
3216 int netif_rx(struct sk_buff *skb);
3217 int netif_rx_ni(struct sk_buff *skb);
3218 int netif_receive_skb(struct sk_buff *skb);
3219 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
3220 void napi_gro_flush(struct napi_struct *napi, bool flush_old);
3221 struct sk_buff *napi_get_frags(struct napi_struct *napi);
3222 gro_result_t napi_gro_frags(struct napi_struct *napi);
3223 struct packet_offload *gro_find_receive_by_type(__be16 type);
3224 struct packet_offload *gro_find_complete_by_type(__be16 type);
3225
3226 static inline void napi_free_frags(struct napi_struct *napi)
3227 {
3228 kfree_skb(napi->skb);
3229 napi->skb = NULL;
3230 }
3231
3232 bool netdev_is_rx_handler_busy(struct net_device *dev);
3233 int netdev_rx_handler_register(struct net_device *dev,
3234 rx_handler_func_t *rx_handler,
3235 void *rx_handler_data);
3236 void netdev_rx_handler_unregister(struct net_device *dev);
3237
3238 bool dev_valid_name(const char *name);
3239 int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
3240 int dev_ethtool(struct net *net, struct ifreq *);
3241 unsigned int dev_get_flags(const struct net_device *);
3242 int __dev_change_flags(struct net_device *, unsigned int flags);
3243 int dev_change_flags(struct net_device *, unsigned int);
3244 void __dev_notify_flags(struct net_device *, unsigned int old_flags,
3245 unsigned int gchanges);
3246 int dev_change_name(struct net_device *, const char *);
3247 int dev_set_alias(struct net_device *, const char *, size_t);
3248 int dev_change_net_namespace(struct net_device *, struct net *, const char *);
3249 int dev_set_mtu(struct net_device *, int);
3250 void dev_set_group(struct net_device *, int);
3251 int dev_set_mac_address(struct net_device *, struct sockaddr *);
3252 int dev_change_carrier(struct net_device *, bool new_carrier);
3253 int dev_get_phys_port_id(struct net_device *dev,
3254 struct netdev_phys_item_id *ppid);
3255 int dev_get_phys_port_name(struct net_device *dev,
3256 char *name, size_t len);
3257 int dev_change_proto_down(struct net_device *dev, bool proto_down);
3258 int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags);
3259 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
3260 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
3261 struct netdev_queue *txq, int *ret);
3262 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3263 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3264 bool is_skb_forwardable(const struct net_device *dev,
3265 const struct sk_buff *skb);
3266
3267 static __always_inline int ____dev_forward_skb(struct net_device *dev,
3268 struct sk_buff *skb)
3269 {
3270 if (skb_orphan_frags(skb, GFP_ATOMIC) ||
3271 unlikely(!is_skb_forwardable(dev, skb))) {
3272 atomic_long_inc(&dev->rx_dropped);
3273 kfree_skb(skb);
3274 return NET_RX_DROP;
3275 }
3276
3277 skb_scrub_packet(skb, true);
3278 skb->priority = 0;
3279 return 0;
3280 }
3281
3282 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
3283
3284 extern int netdev_budget;
3285
3286 /* Called by rtnetlink.c:rtnl_unlock() */
3287 void netdev_run_todo(void);
3288
3289 /**
3290 * dev_put - release reference to device
3291 * @dev: network device
3292 *
3293 * Release reference to device to allow it to be freed.
3294 */
3295 static inline void dev_put(struct net_device *dev)
3296 {
3297 this_cpu_dec(*dev->pcpu_refcnt);
3298 }
3299
3300 /**
3301 * dev_hold - get reference to device
3302 * @dev: network device
3303 *
3304 * Hold reference to device to keep it from being freed.
3305 */
3306 static inline void dev_hold(struct net_device *dev)
3307 {
3308 this_cpu_inc(*dev->pcpu_refcnt);
3309 }
3310
3311 /* Carrier loss detection, dial on demand. The functions netif_carrier_on
3312 * and _off may be called from IRQ context, but it is the caller
3313 * who is responsible for serializing these calls.
3314 *
3315 * The name carrier is inappropriate; these functions should really be
3316 * called netif_lowerlayer_*() because they represent the state of any
3317 * kind of lower layer, not just hardware media.
3318 */
3319
3320 void linkwatch_init_dev(struct net_device *dev);
3321 void linkwatch_fire_event(struct net_device *dev);
3322 void linkwatch_forget_dev(struct net_device *dev);
3323
3324 /**
3325 * netif_carrier_ok - test if carrier present
3326 * @dev: network device
3327 *
3328 * Check if carrier is present on device
3329 */
3330 static inline bool netif_carrier_ok(const struct net_device *dev)
3331 {
3332 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
3333 }
3334
3335 unsigned long dev_trans_start(struct net_device *dev);
3336
3337 void __netdev_watchdog_up(struct net_device *dev);
3338
3339 void netif_carrier_on(struct net_device *dev);
3340
3341 void netif_carrier_off(struct net_device *dev);
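/* Example (editorial sketch): a hypothetical link-change handler driving
 * the carrier state; as noted above, serializing these calls is the
 * caller's responsibility.
 */

static void my_link_change(struct net_device *dev, bool link_up)
{
	if (link_up)
		netif_carrier_on(dev);	/* linkwatch propagates the event */
	else
		netif_carrier_off(dev);
}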
3342
3343 /**
3344 * netif_dormant_on - mark device as dormant.
3345 * @dev: network device
3346 *
3347 * Mark device as dormant (as per RFC2863).
3348 *
3349 * The dormant state indicates that the relevant interface is not
3350 * actually in a condition to pass packets (i.e., it is not 'up') but is
3351 * in a "pending" state, waiting for some external event. For "on-
3352 * demand" interfaces, this new state identifies the situation where the
3353 * interface is waiting for events to place it in the up state.
3354 */
3355 static inline void netif_dormant_on(struct net_device *dev)
3356 {
3357 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
3358 linkwatch_fire_event(dev);
3359 }
3360
3361 /**
3362 * netif_dormant_off - set device as not dormant.
3363 * @dev: network device
3364 *
3365 * Device is not in dormant state.
3366 */
3367 static inline void netif_dormant_off(struct net_device *dev)
3368 {
3369 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
3370 linkwatch_fire_event(dev);
3371 }
3372
3373 /**
3374 * netif_dormant - test if device is dormant
3375 * @dev: network device
3376 *
3377 * Check if the device is dormant.
3378 */
3379 static inline bool netif_dormant(const struct net_device *dev)
3380 {
3381 return test_bit(__LINK_STATE_DORMANT, &dev->state);
3382 }
3383
3384
3385 /**
3386 * netif_oper_up - test if device is operational
3387 * @dev: network device
3388 *
3389 * Check if the device's operational state is up
3390 */
3391 static inline bool netif_oper_up(const struct net_device *dev)
3392 {
3393 return (dev->operstate == IF_OPER_UP ||
3394 dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
3395 }
3396
3397 /**
3398 * netif_device_present - is device available or removed
3399 * @dev: network device
3400 *
3401 * Check if device has not been removed from system.
3402 */
3403 static inline bool netif_device_present(struct net_device *dev)
3404 {
3405 return test_bit(__LINK_STATE_PRESENT, &dev->state);
3406 }
3407
3408 void netif_device_detach(struct net_device *dev);
3409
3410 void netif_device_attach(struct net_device *dev);
3411
3412 /*
3413 * Network interface message level settings
3414 */
3415
3416 enum {
3417 NETIF_MSG_DRV = 0x0001,
3418 NETIF_MSG_PROBE = 0x0002,
3419 NETIF_MSG_LINK = 0x0004,
3420 NETIF_MSG_TIMER = 0x0008,
3421 NETIF_MSG_IFDOWN = 0x0010,
3422 NETIF_MSG_IFUP = 0x0020,
3423 NETIF_MSG_RX_ERR = 0x0040,
3424 NETIF_MSG_TX_ERR = 0x0080,
3425 NETIF_MSG_TX_QUEUED = 0x0100,
3426 NETIF_MSG_INTR = 0x0200,
3427 NETIF_MSG_TX_DONE = 0x0400,
3428 NETIF_MSG_RX_STATUS = 0x0800,
3429 NETIF_MSG_PKTDATA = 0x1000,
3430 NETIF_MSG_HW = 0x2000,
3431 NETIF_MSG_WOL = 0x4000,
3432 };
3433
3434 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
3435 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
3436 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
3437 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
3438 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
3439 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
3440 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
3441 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
3442 #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
3443 #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
3444 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
3445 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
3446 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
3447 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
3448 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
3449
3450 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
3451 {
3452 /* use default */
3453 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
3454 return default_msg_enable_bits;
3455 if (debug_value == 0) /* no output */
3456 return 0;
3457 /* set low N bits */
3458 return (1 << debug_value) - 1;
3459 }
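/* Example (editorial sketch): the conventional msg_enable setup using
 * netif_msg_init(). The "debug" module parameter and the helper are
 * hypothetical; -1 selects the driver's default bits.
 */

static int debug = -1;
module_param(debug, int, 0);

static void my_init_msg_enable(u32 *msg_enable)
{
	*msg_enable = netif_msg_init(debug, NETIF_MSG_DRV |
					    NETIF_MSG_PROBE |
					    NETIF_MSG_LINK);
}

/* later, e.g.: if (netif_msg_link(p)) netdev_info(dev, "link up\n"); */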
3460
3461 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
3462 {
3463 spin_lock(&txq->_xmit_lock);
3464 txq->xmit_lock_owner = cpu;
3465 }
3466
3467 static inline bool __netif_tx_acquire(struct netdev_queue *txq)
3468 {
3469 __acquire(&txq->_xmit_lock);
3470 return true;
3471 }
3472
3473 static inline void __netif_tx_release(struct netdev_queue *txq)
3474 {
3475 __release(&txq->_xmit_lock);
3476 }
3477
3478 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
3479 {
3480 spin_lock_bh(&txq->_xmit_lock);
3481 txq->xmit_lock_owner = smp_processor_id();
3482 }
3483
3484 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
3485 {
3486 bool ok = spin_trylock(&txq->_xmit_lock);
3487 if (likely(ok))
3488 txq->xmit_lock_owner = smp_processor_id();
3489 return ok;
3490 }
3491
3492 static inline void __netif_tx_unlock(struct netdev_queue *txq)
3493 {
3494 txq->xmit_lock_owner = -1;
3495 spin_unlock(&txq->_xmit_lock);
3496 }
3497
3498 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
3499 {
3500 txq->xmit_lock_owner = -1;
3501 spin_unlock_bh(&txq->_xmit_lock);
3502 }
3503
3504 static inline void txq_trans_update(struct netdev_queue *txq)
3505 {
3506 if (txq->xmit_lock_owner != -1)
3507 txq->trans_start = jiffies;
3508 }
3509
3510 /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
3511 static inline void netif_trans_update(struct net_device *dev)
3512 {
3513 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
3514
3515 if (txq->trans_start != jiffies)
3516 txq->trans_start = jiffies;
3517 }
3518
3519 /**
3520 * netif_tx_lock - grab network device transmit lock
3521 * @dev: network device
3522 *
3523 * Get network device transmit lock
3524 */
3525 static inline void netif_tx_lock(struct net_device *dev)
3526 {
3527 unsigned int i;
3528 int cpu;
3529
3530 spin_lock(&dev->tx_global_lock);
3531 cpu = smp_processor_id();
3532 for (i = 0; i < dev->num_tx_queues; i++) {
3533 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3534
3535 /* We are the only thread of execution doing a
3536 * freeze, but we have to grab the _xmit_lock in
3537 * order to synchronize with threads which are in
3538 * the ->hard_start_xmit() handler and already
3539 * checked the frozen bit.
3540 */
3541 __netif_tx_lock(txq, cpu);
3542 set_bit(__QUEUE_STATE_FROZEN, &txq->state);
3543 __netif_tx_unlock(txq);
3544 }
3545 }
3546
3547 static inline void netif_tx_lock_bh(struct net_device *dev)
3548 {
3549 local_bh_disable();
3550 netif_tx_lock(dev);
3551 }
3552
3553 static inline void netif_tx_unlock(struct net_device *dev)
3554 {
3555 unsigned int i;
3556
3557 for (i = 0; i < dev->num_tx_queues; i++) {
3558 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3559
3560 /* No need to grab the _xmit_lock here. If the
3561 * queue is not stopped for another reason, we
3562 * force a schedule.
3563 */
3564 clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
3565 netif_schedule_queue(txq);
3566 }
3567 spin_unlock(&dev->tx_global_lock);
3568 }
3569
3570 static inline void netif_tx_unlock_bh(struct net_device *dev)
3571 {
3572 netif_tx_unlock(dev);
3573 local_bh_enable();
3574 }
3575
3576 #define HARD_TX_LOCK(dev, txq, cpu) { \
3577 if ((dev->features & NETIF_F_LLTX) == 0) { \
3578 __netif_tx_lock(txq, cpu); \
3579 } else { \
3580 __netif_tx_acquire(txq); \
3581 } \
3582 }
3583
3584 #define HARD_TX_TRYLOCK(dev, txq) \
3585 (((dev->features & NETIF_F_LLTX) == 0) ? \
3586 __netif_tx_trylock(txq) : \
3587 __netif_tx_acquire(txq))
3588
3589 #define HARD_TX_UNLOCK(dev, txq) { \
3590 if ((dev->features & NETIF_F_LLTX) == 0) { \
3591 __netif_tx_unlock(txq); \
3592 } else { \
3593 __netif_tx_release(txq); \
3594 } \
3595 }
3596
3597 static inline void netif_tx_disable(struct net_device *dev)
3598 {
3599 unsigned int i;
3600 int cpu;
3601
3602 local_bh_disable();
3603 cpu = smp_processor_id();
3604 for (i = 0; i < dev->num_tx_queues; i++) {
3605 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3606
3607 __netif_tx_lock(txq, cpu);
3608 netif_tx_stop_queue(txq);
3609 __netif_tx_unlock(txq);
3610 }
3611 local_bh_enable();
3612 }
3613
3614 static inline void netif_addr_lock(struct net_device *dev)
3615 {
3616 spin_lock(&dev->addr_list_lock);
3617 }
3618
3619 static inline void netif_addr_lock_nested(struct net_device *dev)
3620 {
3621 int subclass = SINGLE_DEPTH_NESTING;
3622
3623 if (dev->netdev_ops->ndo_get_lock_subclass)
3624 subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
3625
3626 spin_lock_nested(&dev->addr_list_lock, subclass);
3627 }
3628
3629 static inline void netif_addr_lock_bh(struct net_device *dev)
3630 {
3631 spin_lock_bh(&dev->addr_list_lock);
3632 }
3633
3634 static inline void netif_addr_unlock(struct net_device *dev)
3635 {
3636 spin_unlock(&dev->addr_list_lock);
3637 }
3638
3639 static inline void netif_addr_unlock_bh(struct net_device *dev)
3640 {
3641 spin_unlock_bh(&dev->addr_list_lock);
3642 }
3643
3644 /*
3645 * dev_addrs walker. Should be used only for read access. Call with
3646 * rcu_read_lock held.
3647 */
3648 #define for_each_dev_addr(dev, ha) \
3649 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
3650
3651 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
3652
3653 void ether_setup(struct net_device *dev);
3654
3655 /* Support for loadable net-drivers */
3656 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
3657 unsigned char name_assign_type,
3658 void (*setup)(struct net_device *),
3659 unsigned int txqs, unsigned int rxqs);
3660 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
3661 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
3662
3663 #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
3664 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
3665 count)
3666
3667 int register_netdev(struct net_device *dev);
3668 void unregister_netdev(struct net_device *dev);
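/* Example (editorial sketch): the classic loadable-driver bring-up and
 * teardown around the allocators above. Error handling is trimmed to
 * the essentials; "mydev%d" is a placeholder interface name.
 */

static struct net_device *my_dev;

static int __init my_module_init(void)
{
	int err;

	my_dev = alloc_netdev(0, "mydev%d", NET_NAME_UNKNOWN, ether_setup);
	if (!my_dev)
		return -ENOMEM;

	err = register_netdev(my_dev);	/* takes the RTNL internally */
	if (err)
		free_netdev(my_dev);
	return err;
}

static void __exit my_module_exit(void)
{
	unregister_netdev(my_dev);	/* also takes the RTNL */
	free_netdev(my_dev);
}

module_init(my_module_init);
module_exit(my_module_exit);
MODULE_LICENSE("GPL");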
3669
3670 /* General hardware address lists handling functions */
3671 int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3672 struct netdev_hw_addr_list *from_list, int addr_len);
3673 void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3674 struct netdev_hw_addr_list *from_list, int addr_len);
3675 int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
3676 struct net_device *dev,
3677 int (*sync)(struct net_device *, const unsigned char *),
3678 int (*unsync)(struct net_device *,
3679 const unsigned char *));
3680 void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
3681 struct net_device *dev,
3682 int (*unsync)(struct net_device *,
3683 const unsigned char *));
3684 void __hw_addr_init(struct netdev_hw_addr_list *list);
3685
3686 /* Functions used for device addresses handling */
3687 int dev_addr_add(struct net_device *dev, const unsigned char *addr,
3688 unsigned char addr_type);
3689 int dev_addr_del(struct net_device *dev, const unsigned char *addr,
3690 unsigned char addr_type);
3691 void dev_addr_flush(struct net_device *dev);
3692 int dev_addr_init(struct net_device *dev);
3693
3694 /* Functions used for unicast addresses handling */
3695 int dev_uc_add(struct net_device *dev, const unsigned char *addr);
3696 int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
3697 int dev_uc_del(struct net_device *dev, const unsigned char *addr);
3698 int dev_uc_sync(struct net_device *to, struct net_device *from);
3699 int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
3700 void dev_uc_unsync(struct net_device *to, struct net_device *from);
3701 void dev_uc_flush(struct net_device *dev);
3702 void dev_uc_init(struct net_device *dev);
3703
3704 /**
3705 * __dev_uc_sync - Synchronize device's unicast list
3706 * @dev: device to sync
3707 * @sync: function to call if address should be added
3708 * @unsync: function to call if address should be removed
3709 *
3710 * Add newly added addresses to the interface, and release
3711 * addresses that have been deleted.
3712 */
3713 static inline int __dev_uc_sync(struct net_device *dev,
3714 int (*sync)(struct net_device *,
3715 const unsigned char *),
3716 int (*unsync)(struct net_device *,
3717 const unsigned char *))
3718 {
3719 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
3720 }
3721
3722 /**
3723 * __dev_uc_unsync - Remove synchronized addresses from device
3724 * @dev: device to sync
3725 * @unsync: function to call if address should be removed
3726 *
3727 * Remove all addresses that were added to the device by dev_uc_sync().
3728 */
3729 static inline void __dev_uc_unsync(struct net_device *dev,
3730 int (*unsync)(struct net_device *,
3731 const unsigned char *))
3732 {
3733 __hw_addr_unsync_dev(&dev->uc, dev, unsync);
3734 }
3735
3736 /* Functions used for multicast addresses handling */
3737 int dev_mc_add(struct net_device *dev, const unsigned char *addr);
3738 int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
3739 int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
3740 int dev_mc_del(struct net_device *dev, const unsigned char *addr);
3741 int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
3742 int dev_mc_sync(struct net_device *to, struct net_device *from);
3743 int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
3744 void dev_mc_unsync(struct net_device *to, struct net_device *from);
3745 void dev_mc_flush(struct net_device *dev);
3746 void dev_mc_init(struct net_device *dev);
3747
3748 /**
3749 * __dev_mc_sync - Synchronize device's multicast list
3750 * @dev: device to sync
3751 * @sync: function to call if address should be added
3752 * @unsync: function to call if address should be removed
3753 *
3754 * Add newly added addresses to the interface, and release
3755 * addresses that have been deleted.
3756 */
3757 static inline int __dev_mc_sync(struct net_device *dev,
3758 int (*sync)(struct net_device *,
3759 const unsigned char *),
3760 int (*unsync)(struct net_device *,
3761 const unsigned char *))
3762 {
3763 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
3764 }
3765
3766 /**
3767 * __dev_mc_unsync - Remove synchronized addresses from device
3768 * @dev: device to sync
3769 * @unsync: function to call if address should be removed
3770 *
3771 * Remove all addresses that were added to the device by dev_mc_sync().
3772 */
3773 static inline void __dev_mc_unsync(struct net_device *dev,
3774 int (*unsync)(struct net_device *,
3775 const unsigned char *))
3776 {
3777 __hw_addr_unsync_dev(&dev->mc, dev, unsync);
3778 }
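/* Example (editorial sketch): an ndo_set_rx_mode implementation that
 * pushes the stack's address lists into hypothetical one-entry hardware
 * filter callbacks via the sync helpers above.
 */

static int my_add_filter(struct net_device *dev, const unsigned char *addr)
{
	/* program one hardware filter entry (placeholder) */
	return 0;
}

static int my_del_filter(struct net_device *dev, const unsigned char *addr)
{
	/* remove one hardware filter entry (placeholder) */
	return 0;
}

static void my_set_rx_mode(struct net_device *dev)
{
	__dev_uc_sync(dev, my_add_filter, my_del_filter);
	__dev_mc_sync(dev, my_add_filter, my_del_filter);
}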
3779
3780 /* Functions used for secondary unicast and multicast support */
3781 void dev_set_rx_mode(struct net_device *dev);
3782 void __dev_set_rx_mode(struct net_device *dev);
3783 int dev_set_promiscuity(struct net_device *dev, int inc);
3784 int dev_set_allmulti(struct net_device *dev, int inc);
3785 void netdev_state_change(struct net_device *dev);
3786 void netdev_notify_peers(struct net_device *dev);
3787 void netdev_features_change(struct net_device *dev);
3788 /* Load a device via the kmod */
3789 void dev_load(struct net *net, const char *name);
3790 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
3791 struct rtnl_link_stats64 *storage);
3792 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
3793 const struct net_device_stats *netdev_stats);
3794
3795 extern int netdev_max_backlog;
3796 extern int netdev_tstamp_prequeue;
3797 extern int weight_p;
3798
3799 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
3800 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
3801 struct list_head **iter);
3802 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
3803 struct list_head **iter);
3804
3805 /* iterate through upper list, must be called under RCU read lock */
3806 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
3807 for (iter = &(dev)->adj_list.upper, \
3808 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
3809 updev; \
3810 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
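/* Example (editorial sketch): listing a device's direct upper devices
 * (e.g. a bond or bridge it is enslaved to) under the RCU read lock,
 * using the iterator defined above. "my_list_uppers" is hypothetical.
 */

static void my_list_uppers(struct net_device *dev)
{
	struct net_device *upper;
	struct list_head *iter;

	rcu_read_lock();
	netdev_for_each_upper_dev_rcu(dev, upper, iter)
		pr_info("%s is an upper of %s\n", upper->name, dev->name);
	rcu_read_unlock();
}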
3811
3812 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
3813 int (*fn)(struct net_device *upper_dev,
3814 void *data),
3815 void *data);
3816
3817 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
3818 struct net_device *upper_dev);
3819
3820 void *netdev_lower_get_next_private(struct net_device *dev,
3821 struct list_head **iter);
3822 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
3823 struct list_head **iter);
3824
3825 #define netdev_for_each_lower_private(dev, priv, iter) \
3826 for (iter = (dev)->adj_list.lower.next, \
3827 priv = netdev_lower_get_next_private(dev, &(iter)); \
3828 priv; \
3829 priv = netdev_lower_get_next_private(dev, &(iter)))
3830
3831 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \
3832 for (iter = &(dev)->adj_list.lower, \
3833 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
3834 priv; \
3835 priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
3836
3837 void *netdev_lower_get_next(struct net_device *dev,
3838 struct list_head **iter);
3839
3840 #define netdev_for_each_lower_dev(dev, ldev, iter) \
3841 for (iter = (dev)->adj_list.lower.next, \
3842 ldev = netdev_lower_get_next(dev, &(iter)); \
3843 ldev; \
3844 ldev = netdev_lower_get_next(dev, &(iter)))
3845
3846 struct net_device *netdev_all_lower_get_next(struct net_device *dev,
3847 struct list_head **iter);
3848 struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
3849 struct list_head **iter);
3850
3851 int netdev_walk_all_lower_dev(struct net_device *dev,
3852 int (*fn)(struct net_device *lower_dev,
3853 void *data),
3854 void *data);
3855 int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
3856 int (*fn)(struct net_device *lower_dev,
3857 void *data),
3858 void *data);
3859
3860 void *netdev_adjacent_get_private(struct list_head *adj_list);
3861 void *netdev_lower_get_first_private_rcu(struct net_device *dev);
3862 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
3863 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
3864 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
3865 int netdev_master_upper_dev_link(struct net_device *dev,
3866 struct net_device *upper_dev,
3867 void *upper_priv, void *upper_info);
3868 void netdev_upper_dev_unlink(struct net_device *dev,
3869 struct net_device *upper_dev);
3870 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
3871 void *netdev_lower_dev_get_private(struct net_device *dev,
3872 struct net_device *lower_dev);
3873 void netdev_lower_state_changed(struct net_device *lower_dev,
3874 void *lower_state_info);
3875 int netdev_default_l2upper_neigh_construct(struct net_device *dev,
3876 struct neighbour *n);
3877 void netdev_default_l2upper_neigh_destroy(struct net_device *dev,
3878 struct neighbour *n);
3879
3880 /* RSS keys are 40 or 52 bytes long */
3881 #define NETDEV_RSS_KEY_LEN 52
3882 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
3883 void netdev_rss_key_fill(void *buffer, size_t len);
3884
3885 int dev_get_nest_level(struct net_device *dev);
3886 int skb_checksum_help(struct sk_buff *skb);
3887 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3888 netdev_features_t features, bool tx_path);
3889 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
3890 netdev_features_t features);
3891
3892 struct netdev_bonding_info {
3893 ifslave slave;
3894 ifbond master;
3895 };
3896
3897 struct netdev_notifier_bonding_info {
3898 struct netdev_notifier_info info; /* must be first */
3899 struct netdev_bonding_info bonding_info;
3900 };
3901
3902 void netdev_bonding_info_change(struct net_device *dev,
3903 struct netdev_bonding_info *bonding_info);
3904
3905 static inline
3906 struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
3907 {
3908 return __skb_gso_segment(skb, features, true);
3909 }
3910 __be16 skb_network_protocol(struct sk_buff *skb, int *depth);
3911
3912 static inline bool can_checksum_protocol(netdev_features_t features,
3913 __be16 protocol)
3914 {
3915 if (protocol == htons(ETH_P_FCOE))
3916 return !!(features & NETIF_F_FCOE_CRC);
3917
3918 /* Assume this is an IP checksum (not SCTP CRC) */
3919
3920 if (features & NETIF_F_HW_CSUM) {
3921 /* Can checksum everything */
3922 return true;
3923 }
3924
3925 switch (protocol) {
3926 case htons(ETH_P_IP):
3927 return !!(features & NETIF_F_IP_CSUM);
3928 case htons(ETH_P_IPV6):
3929 return !!(features & NETIF_F_IPV6_CSUM);
3930 default:
3931 return false;
3932 }
3933 }
3934
3935 #ifdef CONFIG_BUG
3936 void netdev_rx_csum_fault(struct net_device *dev);
3937 #else
3938 static inline void netdev_rx_csum_fault(struct net_device *dev)
3939 {
3940 }
3941 #endif
3942 /* rx skb timestamps */
3943 void net_enable_timestamp(void);
3944 void net_disable_timestamp(void);
3945
3946 #ifdef CONFIG_PROC_FS
3947 int __init dev_proc_init(void);
3948 #else
3949 #define dev_proc_init() 0
3950 #endif
3951
3952 static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
3953 struct sk_buff *skb, struct net_device *dev,
3954 bool more)
3955 {
3956 skb->xmit_more = more ? 1 : 0;
3957 return ops->ndo_start_xmit(skb, dev);
3958 }
3959
3960 static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
3961 struct netdev_queue *txq, bool more)
3962 {
3963 const struct net_device_ops *ops = dev->netdev_ops;
3964 int rc;
3965
3966 rc = __netdev_start_xmit(ops, skb, dev, more);
3967 if (rc == NETDEV_TX_OK)
3968 txq_trans_update(txq);
3969
3970 return rc;
3971 }
3972
3973 int netdev_class_create_file_ns(struct class_attribute *class_attr,
3974 const void *ns);
3975 void netdev_class_remove_file_ns(struct class_attribute *class_attr,
3976 const void *ns);
3977
3978 static inline int netdev_class_create_file(struct class_attribute *class_attr)
3979 {
3980 return netdev_class_create_file_ns(class_attr, NULL);
3981 }
3982
3983 static inline void netdev_class_remove_file(struct class_attribute *class_attr)
3984 {
3985 netdev_class_remove_file_ns(class_attr, NULL);
3986 }
3987
3988 extern struct kobj_ns_type_operations net_ns_type_operations;
3989
3990 const char *netdev_drivername(const struct net_device *dev);
3991
3992 void linkwatch_run_queue(void);
3993
3994 static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
3995 netdev_features_t f2)
3996 {
3997 if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
3998 if (f1 & NETIF_F_HW_CSUM)
3999 f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4000 else
4001 f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4002 }
4003
4004 return f1 & f2;
4005 }
4006
4007 static inline netdev_features_t netdev_get_wanted_features(
4008 struct net_device *dev)
4009 {
4010 return (dev->features & ~dev->hw_features) | dev->wanted_features;
4011 }
4012 netdev_features_t netdev_increment_features(netdev_features_t all,
4013 netdev_features_t one, netdev_features_t mask);
4014
4015 /* Allow TSO to be used on stacked devices:
4016 * performing the GSO segmentation before the last device
4017 * is a performance improvement.
4018 */
4019 static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
4020 netdev_features_t mask)
4021 {
4022 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
4023 }
4024
4025 int __netdev_update_features(struct net_device *dev);
4026 void netdev_update_features(struct net_device *dev);
4027 void netdev_change_features(struct net_device *dev);
4028
4029 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
4030 struct net_device *dev);
4031
4032 netdev_features_t passthru_features_check(struct sk_buff *skb,
4033 struct net_device *dev,
4034 netdev_features_t features);
4035 netdev_features_t netif_skb_features(struct sk_buff *skb);
4036
4037 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
4038 {
4039 netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
4040
4041 /* check flags correspondence */
4042 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
4043 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
4044 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
4045 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
4046 BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
4047 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
4048 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
4049 BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
4050 BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
4051 BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
4052 BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
4053 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
4054 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
4055 BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
4056 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
4057 BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
4058
4059 return (features & feature) == feature;
4060 }
4061
4062 static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
4063 {
4064 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
4065 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
4066 }
4067
4068 static inline bool netif_needs_gso(struct sk_buff *skb,
4069 netdev_features_t features)
4070 {
4071 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
4072 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
4073 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
4074 }
4075
4076 static inline void netif_set_gso_max_size(struct net_device *dev,
4077 unsigned int size)
4078 {
4079 dev->gso_max_size = size;
4080 }
4081
4082 static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
4083 int pulled_hlen, u16 mac_offset,
4084 int mac_len)
4085 {
4086 skb->protocol = protocol;
4087 skb->encapsulation = 1;
4088 skb_push(skb, pulled_hlen);
4089 skb_reset_transport_header(skb);
4090 skb->mac_header = mac_offset;
4091 skb->network_header = skb->mac_header + mac_len;
4092 skb->mac_len = mac_len;
4093 }
4094
4095 static inline bool netif_is_macsec(const struct net_device *dev)
4096 {
4097 return dev->priv_flags & IFF_MACSEC;
4098 }
4099
4100 static inline bool netif_is_macvlan(const struct net_device *dev)
4101 {
4102 return dev->priv_flags & IFF_MACVLAN;
4103 }
4104
4105 static inline bool netif_is_macvlan_port(const struct net_device *dev)
4106 {
4107 return dev->priv_flags & IFF_MACVLAN_PORT;
4108 }
4109
4110 static inline bool netif_is_ipvlan(const struct net_device *dev)
4111 {
4112 return dev->priv_flags & IFF_IPVLAN_SLAVE;
4113 }
4114
4115 static inline bool netif_is_ipvlan_port(const struct net_device *dev)
4116 {
4117 return dev->priv_flags & IFF_IPVLAN_MASTER;
4118 }
4119
4120 static inline bool netif_is_bond_master(const struct net_device *dev)
4121 {
4122 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
4123 }
4124
4125 static inline bool netif_is_bond_slave(const struct net_device *dev)
4126 {
4127 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
4128 }
4129
4130 static inline bool netif_supports_nofcs(struct net_device *dev)
4131 {
4132 return dev->priv_flags & IFF_SUPP_NOFCS;
4133 }
4134
4135 static inline bool netif_is_l3_master(const struct net_device *dev)
4136 {
4137 return dev->priv_flags & IFF_L3MDEV_MASTER;
4138 }
4139
4140 static inline bool netif_is_l3_slave(const struct net_device *dev)
4141 {
4142 return dev->priv_flags & IFF_L3MDEV_SLAVE;
4143 }
4144
4145 static inline bool netif_is_bridge_master(const struct net_device *dev)
4146 {
4147 return dev->priv_flags & IFF_EBRIDGE;
4148 }
4149
4150 static inline bool netif_is_bridge_port(const struct net_device *dev)
4151 {
4152 return dev->priv_flags & IFF_BRIDGE_PORT;
4153 }
4154
4155 static inline bool netif_is_ovs_master(const struct net_device *dev)
4156 {
4157 return dev->priv_flags & IFF_OPENVSWITCH;
4158 }
4159
4160 static inline bool netif_is_team_master(const struct net_device *dev)
4161 {
4162 return dev->priv_flags & IFF_TEAM;
4163 }
4164
4165 static inline bool netif_is_team_port(const struct net_device *dev)
4166 {
4167 return dev->priv_flags & IFF_TEAM_PORT;
4168 }
4169
4170 static inline bool netif_is_lag_master(const struct net_device *dev)
4171 {
4172 return netif_is_bond_master(dev) || netif_is_team_master(dev);
4173 }
4174
4175 static inline bool netif_is_lag_port(const struct net_device *dev)
4176 {
4177 return netif_is_bond_slave(dev) || netif_is_team_port(dev);
4178 }
4179
4180 static inline bool netif_is_rxfh_configured(const struct net_device *dev)
4181 {
4182 return dev->priv_flags & IFF_RXFH_CONFIGURED;
4183 }
4184
4185 /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
4186 static inline void netif_keep_dst(struct net_device *dev)
4187 {
4188 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
4189 }
4190
4191 /* return true if dev can't cope with MTU-sized frames that need VLAN tag insertion */
4192 static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
4193 {
4194 /* TODO: reserve and use an additional IFF bit, if we get more users */
4195 return dev->priv_flags & IFF_MACSEC;
4196 }
4197
4198 extern struct pernet_operations __net_initdata loopback_net_ops;
4199
4200 /* Logging, debugging and troubleshooting/diagnostic helpers. */
4201
4202 /* netdev_printk helpers, similar to dev_printk */
4203
4204 static inline const char *netdev_name(const struct net_device *dev)
4205 {
4206 if (!dev->name[0] || strchr(dev->name, '%'))
4207 return "(unnamed net_device)";
4208 return dev->name;
4209 }
4210
4211 static inline const char *netdev_reg_state(const struct net_device *dev)
4212 {
4213 switch (dev->reg_state) {
4214 case NETREG_UNINITIALIZED: return " (uninitialized)";
4215 case NETREG_REGISTERED: return "";
4216 case NETREG_UNREGISTERING: return " (unregistering)";
4217 case NETREG_UNREGISTERED: return " (unregistered)";
4218 case NETREG_RELEASED: return " (released)";
4219 case NETREG_DUMMY: return " (dummy)";
4220 }
4221
4222 WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
4223 return " (unknown)";
4224 }
4225
4226 __printf(3, 4)
4227 void netdev_printk(const char *level, const struct net_device *dev,
4228 const char *format, ...);
4229 __printf(2, 3)
4230 void netdev_emerg(const struct net_device *dev, const char *format, ...);
4231 __printf(2, 3)
4232 void netdev_alert(const struct net_device *dev, const char *format, ...);
4233 __printf(2, 3)
4234 void netdev_crit(const struct net_device *dev, const char *format, ...);
4235 __printf(2, 3)
4236 void netdev_err(const struct net_device *dev, const char *format, ...);
4237 __printf(2, 3)
4238 void netdev_warn(const struct net_device *dev, const char *format, ...);
4239 __printf(2, 3)
4240 void netdev_notice(const struct net_device *dev, const char *format, ...);
4241 __printf(2, 3)
4242 void netdev_info(const struct net_device *dev, const char *format, ...);
4243
4244 #define MODULE_ALIAS_NETDEV(device) \
4245 MODULE_ALIAS("netdev-" device)
4246
4247 #if defined(CONFIG_DYNAMIC_DEBUG)
4248 #define netdev_dbg(__dev, format, args...) \
4249 do { \
4250 dynamic_netdev_dbg(__dev, format, ##args); \
4251 } while (0)
4252 #elif defined(DEBUG)
4253 #define netdev_dbg(__dev, format, args...) \
4254 netdev_printk(KERN_DEBUG, __dev, format, ##args)
4255 #else
4256 #define netdev_dbg(__dev, format, args...) \
4257 ({ \
4258 if (0) \
4259 netdev_printk(KERN_DEBUG, __dev, format, ##args); \
4260 })
4261 #endif
4262
4263 #if defined(VERBOSE_DEBUG)
4264 #define netdev_vdbg netdev_dbg
4265 #else
4266
4267 #define netdev_vdbg(dev, format, args...) \
4268 ({ \
4269 if (0) \
4270 netdev_printk(KERN_DEBUG, dev, format, ##args); \
4271 0; \
4272 })
4273 #endif
4274
4275 /*
4276 * netdev_WARN() acts like dev_printk(), but with the key difference
4277 * of using a WARN/WARN_ON to get the message out, including the
4278 * file/line information and a backtrace.
4279 */
4280 #define netdev_WARN(dev, format, args...) \
4281 WARN(1, "netdevice: %s%s\n" format, netdev_name(dev), \
4282 netdev_reg_state(dev), ##args)
4283
4284 /* netif printk helpers, similar to netdev_printk */
4285
4286 #define netif_printk(priv, type, level, dev, fmt, args...) \
4287 do { \
4288 if (netif_msg_##type(priv)) \
4289 netdev_printk(level, (dev), fmt, ##args); \
4290 } while (0)
4291
4292 #define netif_level(level, priv, type, dev, fmt, args...) \
4293 do { \
4294 if (netif_msg_##type(priv)) \
4295 netdev_##level(dev, fmt, ##args); \
4296 } while (0)
4297
4298 #define netif_emerg(priv, type, dev, fmt, args...) \
4299 netif_level(emerg, priv, type, dev, fmt, ##args)
4300 #define netif_alert(priv, type, dev, fmt, args...) \
4301 netif_level(alert, priv, type, dev, fmt, ##args)
4302 #define netif_crit(priv, type, dev, fmt, args...) \
4303 netif_level(crit, priv, type, dev, fmt, ##args)
4304 #define netif_err(priv, type, dev, fmt, args...) \
4305 netif_level(err, priv, type, dev, fmt, ##args)
4306 #define netif_warn(priv, type, dev, fmt, args...) \
4307 netif_level(warn, priv, type, dev, fmt, ##args)
4308 #define netif_notice(priv, type, dev, fmt, args...) \
4309 netif_level(notice, priv, type, dev, fmt, ##args)
4310 #define netif_info(priv, type, dev, fmt, args...) \
4311 netif_level(info, priv, type, dev, fmt, ##args)
4312
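/*
 * Editor's sketch (not part of the original header): typical call sites for
 * the helpers above. 'priv', 'ring' and 'speed' are hypothetical; the
 * ->msg_enable field of the driver-private struct feeds the
 * netif_msg_##type() predicates:
 *
 *	netif_err(priv, tx_err, netdev, "TX ring %d stalled\n", ring);
 *	netif_info(priv, link, netdev, "link up, %u Mbps\n", speed);
 */
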
4313 #if defined(CONFIG_DYNAMIC_DEBUG)
4314 #define netif_dbg(priv, type, netdev, format, args...) \
4315 do { \
4316 if (netif_msg_##type(priv)) \
4317 dynamic_netdev_dbg(netdev, format, ##args); \
4318 } while (0)
4319 #elif defined(DEBUG)
4320 #define netif_dbg(priv, type, dev, format, args...) \
4321 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
4322 #else
4323 #define netif_dbg(priv, type, dev, format, args...) \
4324 ({ \
4325 if (0) \
4326 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
4327 0; \
4328 })
4329 #endif
4330
4331 #if defined(VERBOSE_DEBUG)
4332 #define netif_vdbg netif_dbg
4333 #else
4334 #define netif_vdbg(priv, type, dev, format, args...) \
4335 ({ \
4336 if (0) \
4337 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
4338 0; \
4339 })
4340 #endif
4341
4342 /*
4343 * The list of packet types we will receive (as opposed to discard)
4344 * and the routines to invoke.
4345 *
4346 * Why 16? Because with 16 the only overlap we get on a hash of the
4347 * low nibble of the protocol value is RARP/SNAP/X.25.
4348 *
4349 * NOTE: That is no longer true with the addition of VLAN tags. Not
4350 * sure which should go first, but I bet it won't make much
4351 * difference if we are running VLANs. The good news is that
4352 * this protocol won't be in the list unless compiled in, so
4353 * the average user (w/out VLANs) will not be adversely affected.
4354 * --BLG
4355 *
4356 * 0800 IP
4357 * 8100 802.1Q VLAN
4358 * 0001 802.3
4359 * 0002 AX.25
4360 * 0004 802.2
4361 * 8035 RARP
4362 * 0005 SNAP
4363 * 0805 X.25
4364 * 0806 ARP
4365 * 8137 IPX
4366 * 0009 Localtalk
4367 * 86DD IPv6
4368 */
4369 #define PTYPE_HASH_SIZE (16)
4370 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
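
/*
 * Editor's sketch (not part of the original header): net/core/dev.c keys
 * ptype_base[] on the low nibble of the protocol value, which is the hash
 * the comment above describes:
 *
 *	unsigned int bucket = ntohs(protocol) & PTYPE_HASH_MASK;
 *
 * e.g. ETH_P_IP (0x0800) lands in bucket 0, while RARP (0x8035),
 * SNAP (0x0005) and X.25 (0x0805) all collide in bucket 5, the one
 * overlap noted above.
 */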
4371
4372 #endif /* _LINUX_NETDEVICE_H */
1 /* include this file if the platform implements the dma_ DMA Mapping API
2 * and wants to provide the pci_ DMA Mapping API in terms of it */
3
4 #ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H
5 #define _ASM_GENERIC_PCI_DMA_COMPAT_H
6
7 #include <linux/dma-mapping.h>
8
9 /* This defines the direction arg to the DMA mapping routines. */
10 #define PCI_DMA_BIDIRECTIONAL 0
11 #define PCI_DMA_TODEVICE 1
12 #define PCI_DMA_FROMDEVICE 2
13 #define PCI_DMA_NONE 3
14
15 static inline void *
16 pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
17 dma_addr_t *dma_handle)
18 {
19 return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC);
20 }
21
22 static inline void *
23 pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
24 dma_addr_t *dma_handle)
25 {
26 return dma_zalloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
27 size, dma_handle, GFP_ATOMIC);
28 }
29
30 static inline void
31 pci_free_consistent(struct pci_dev *hwdev, size_t size,
32 void *vaddr, dma_addr_t dma_handle)
33 {
34 dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle);
35 }
36
37 static inline dma_addr_t
38 pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
39 {
40 return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction);
41 }
42
43 static inline void
44 pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
45 size_t size, int direction)
46 {
47 dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
48 }
49
50 static inline dma_addr_t
51 pci_map_page(struct pci_dev *hwdev, struct page *page,
52 unsigned long offset, size_t size, int direction)
53 {
54 return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
55 }
56
57 static inline void
58 pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
59 size_t size, int direction)
60 {
61 dma_unmap_page(hwdev == NULL ? NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
62 }
63
64 static inline int
65 pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
66 int nents, int direction)
67 {
68 return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
69 }
70
71 static inline void
72 pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
73 int nents, int direction)
74 {
75 dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
76 }
77
78 static inline void
79 pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
80 size_t size, int direction)
81 {
82 dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
83 }
84
85 static inline void
86 pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
87 size_t size, int direction)
88 {
89 dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
90 }
91
92 static inline void
93 pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
94 int nelems, int direction)
95 {
96 dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
97 }
98
99 static inline void
100 pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
101 int nelems, int direction)
102 {
103 dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
104 }
105
106 static inline int
107 pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
108 {
109 return dma_mapping_error(&pdev->dev, dma_addr);
110 }
111
112 #ifdef CONFIG_PCI
113 static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
114 {
115 return dma_set_mask(&dev->dev, mask);
116 }
117
118 static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
119 {
120 return dma_set_coherent_mask(&dev->dev, mask);
121 }
122
123 static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
124 unsigned int size)
125 {
126 return dma_set_max_seg_size(&dev->dev, size);
127 }
128
129 static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
130 unsigned long mask)
131 {
132 return dma_set_seg_boundary(&dev->dev, mask);
133 }
134 #else
135 static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
136 { return -EIO; }
137 static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
138 { return -EIO; }
139 static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
140 unsigned int size)
141 { return -EIO; }
142 static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
143 unsigned long mask)
144 { return -EIO; }
145 #endif
146
147 #endif
1 /*
2 * pci.h
3 *
4 * PCI defines and function prototypes
5 * Copyright 1994, Drew Eckhardt
6 * Copyright 1997--1999 Martin Mares <mj@ucw.cz>
7 *
8 * For more information, please consult the following manuals (look at
9 * http://www.pcisig.com/ for how to get them):
10 *
11 * PCI BIOS Specification
12 * PCI Local Bus Specification
13 * PCI to PCI Bridge Specification
14 * PCI System Design Guide
15 */
16 #ifndef LINUX_PCI_H
17 #define LINUX_PCI_H
18
19
20 #include <linux/mod_devicetable.h>
21
22 #include <linux/types.h>
23 #include <linux/init.h>
24 #include <linux/ioport.h>
25 #include <linux/list.h>
26 #include <linux/compiler.h>
27 #include <linux/errno.h>
28 #include <linux/kobject.h>
29 #include <linux/atomic.h>
30 #include <linux/device.h>
31 #include <linux/io.h>
32 #include <linux/resource_ext.h>
33 #include <uapi/linux/pci.h>
34
35 #include <linux/pci_ids.h>
36
37 /*
38 * The PCI interface treats multi-function devices as independent
39 * devices. The slot/function address of each device is encoded
40 * in a single byte as follows:
41 *
42 * 7:3 = slot
43 * 2:0 = function
44 *
45 * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
46 * In the interest of not exposing interfaces to user-space unnecessarily,
47 * the following kernel-only defines are being added here.
48 */
49 #define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
50 /* return bus from PCI devid = (((u16)bus_number) << 8) | devfn */
51 #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
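
/*
 * Editor's sketch (not part of the original header, illustrative numbers):
 * round-tripping the encodings above. PCI_DEVFN(), PCI_SLOT() and
 * PCI_FUNC() come from uapi/linux/pci.h:
 *
 *	unsigned int devfn = PCI_DEVFN(3, 1);   that is (3 << 3) | 1 = 0x19
 *	u16 devid = PCI_DEVID(2, devfn);        that is (2 << 8) | 0x19 = 0x0219
 *	PCI_BUS_NUM(devid) recovers bus 2; PCI_SLOT(devfn) and PCI_FUNC(devfn)
 *	recover slot 3 and function 1.
 */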
52
53 /* pci_slot represents a physical slot */
54 struct pci_slot {
55 struct pci_bus *bus; /* The bus this slot is on */
56 struct list_head list; /* node in list of slots on this bus */
57 struct hotplug_slot *hotplug; /* Hotplug info (migrate over time) */
58 unsigned char number; /* PCI_SLOT(pci_dev->devfn) */
59 struct kobject kobj;
60 };
61
62 static inline const char *pci_slot_name(const struct pci_slot *slot)
63 {
64 return kobject_name(&slot->kobj);
65 }
66
67 /* File state for mmap()s on /proc/bus/pci/X/Y */
68 enum pci_mmap_state {
69 pci_mmap_io,
70 pci_mmap_mem
71 };
72
73 /*
74 * For PCI devices, the region numbers are assigned this way:
75 */
76 enum {
77 /* #0-5: standard PCI resources */
78 PCI_STD_RESOURCES,
79 PCI_STD_RESOURCE_END = 5,
80
81 /* #6: expansion ROM resource */
82 PCI_ROM_RESOURCE,
83
84 /* device specific resources */
85 #ifdef CONFIG_PCI_IOV
86 PCI_IOV_RESOURCES,
87 PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
88 #endif
89
90 /* resources assigned to buses behind the bridge */
91 #define PCI_BRIDGE_RESOURCE_NUM 4
92
93 PCI_BRIDGE_RESOURCES,
94 PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
95 PCI_BRIDGE_RESOURCE_NUM - 1,
96
97 /* total resources associated with a PCI device */
98 PCI_NUM_RESOURCES,
99
100 /* preserve this for compatibility */
101 DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
102 };
103
104 /*
105 * pci_power_t values must match the bits in the Capabilities PME_Support
106 * and Control/Status PowerState fields in the Power Management capability.
107 */
108 typedef int __bitwise pci_power_t;
109
110 #define PCI_D0 ((pci_power_t __force) 0)
111 #define PCI_D1 ((pci_power_t __force) 1)
112 #define PCI_D2 ((pci_power_t __force) 2)
113 #define PCI_D3hot ((pci_power_t __force) 3)
114 #define PCI_D3cold ((pci_power_t __force) 4)
115 #define PCI_UNKNOWN ((pci_power_t __force) 5)
116 #define PCI_POWER_ERROR ((pci_power_t __force) -1)
117
118 /* Remember to update this when the list above changes! */
119 extern const char *pci_power_names[];
120
121 static inline const char *pci_power_name(pci_power_t state)
122 {
123 return pci_power_names[1 + (__force int) state];
124 }
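
/*
 * Editor's note (not part of the original header): the "1 +" above offsets
 * past the PCI_POWER_ERROR slot, so pci_power_name(PCI_POWER_ERROR), i.e.
 * state -1, indexes pci_power_names[0], and PCI_D0..PCI_UNKNOWN map to
 * pci_power_names[1..6] without going out of bounds.
 */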
125
126 #define PCI_PM_D2_DELAY 200
127 #define PCI_PM_D3_WAIT 10
128 #define PCI_PM_D3COLD_WAIT 100
129 #define PCI_PM_BUS_WAIT 50
130
131 /* The pci_channel state describes connectivity between the CPU and
132 * the PCI device. If some PCI bus between here and the PCI device
133 * has crashed or locked up, this info is reflected here.
134 */
135 typedef unsigned int __bitwise pci_channel_state_t;
136
137 enum pci_channel_state {
138 /* I/O channel is in normal state */
139 pci_channel_io_normal = (__force pci_channel_state_t) 1,
140
141 /* I/O to channel is blocked */
142 pci_channel_io_frozen = (__force pci_channel_state_t) 2,
143
144 /* PCI card is dead */
145 pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
146 };
147
148 typedef unsigned int __bitwise pcie_reset_state_t;
149
150 enum pcie_reset_state {
151 /* Reset is NOT asserted (Use to deassert reset) */
152 pcie_deassert_reset = (__force pcie_reset_state_t) 1,
153
154 /* Use #PERST to reset PCIe device */
155 pcie_warm_reset = (__force pcie_reset_state_t) 2,
156
157 /* Use PCIe Hot Reset to reset device */
158 pcie_hot_reset = (__force pcie_reset_state_t) 3
159 };
160
161 typedef unsigned short __bitwise pci_dev_flags_t;
162 enum pci_dev_flags {
163 /* INTX_DISABLE in PCI_COMMAND register disables MSI
164 * generation too.
165 */
166 PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
167 /* Device configuration is irrevocably lost if disabled into D3 */
168 PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
169 /* Provide indication device is assigned by a Virtual Machine Manager */
170 PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
171 /* Flag for quirk use to store if quirk-specific ACS is enabled */
172 PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
173 /* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
174 PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
175 /* Do not use bus resets for device */
176 PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
177 /* Do not use PM reset even if device advertises NoSoftRst- */
178 PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
179 /* Get VPD from function 0 VPD */
180 PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
181 };
182
183 enum pci_irq_reroute_variant {
184 INTEL_IRQ_REROUTE_VARIANT = 1,
185 MAX_IRQ_REROUTE_VARIANTS = 3
186 };
187
188 typedef unsigned short __bitwise pci_bus_flags_t;
189 enum pci_bus_flags {
190 PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
191 PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
192 PCI_BUS_FLAGS_NO_AERSID = (__force pci_bus_flags_t) 4,
193 };
194
195 /* These values come from the PCI Express Spec */
196 enum pcie_link_width {
197 PCIE_LNK_WIDTH_RESRV = 0x00,
198 PCIE_LNK_X1 = 0x01,
199 PCIE_LNK_X2 = 0x02,
200 PCIE_LNK_X4 = 0x04,
201 PCIE_LNK_X8 = 0x08,
202 PCIE_LNK_X12 = 0x0C,
203 PCIE_LNK_X16 = 0x10,
204 PCIE_LNK_X32 = 0x20,
205 PCIE_LNK_WIDTH_UNKNOWN = 0xFF,
206 };
207
208 /* Based on the PCI Hotplug Spec, but some values are made up by us */
209 enum pci_bus_speed {
210 PCI_SPEED_33MHz = 0x00,
211 PCI_SPEED_66MHz = 0x01,
212 PCI_SPEED_66MHz_PCIX = 0x02,
213 PCI_SPEED_100MHz_PCIX = 0x03,
214 PCI_SPEED_133MHz_PCIX = 0x04,
215 PCI_SPEED_66MHz_PCIX_ECC = 0x05,
216 PCI_SPEED_100MHz_PCIX_ECC = 0x06,
217 PCI_SPEED_133MHz_PCIX_ECC = 0x07,
218 PCI_SPEED_66MHz_PCIX_266 = 0x09,
219 PCI_SPEED_100MHz_PCIX_266 = 0x0a,
220 PCI_SPEED_133MHz_PCIX_266 = 0x0b,
221 AGP_UNKNOWN = 0x0c,
222 AGP_1X = 0x0d,
223 AGP_2X = 0x0e,
224 AGP_4X = 0x0f,
225 AGP_8X = 0x10,
226 PCI_SPEED_66MHz_PCIX_533 = 0x11,
227 PCI_SPEED_100MHz_PCIX_533 = 0x12,
228 PCI_SPEED_133MHz_PCIX_533 = 0x13,
229 PCIE_SPEED_2_5GT = 0x14,
230 PCIE_SPEED_5_0GT = 0x15,
231 PCIE_SPEED_8_0GT = 0x16,
232 PCI_SPEED_UNKNOWN = 0xff,
233 };
234
235 struct pci_cap_saved_data {
236 u16 cap_nr;
237 bool cap_extended;
238 unsigned int size;
239 u32 data[0];
240 };
241
242 struct pci_cap_saved_state {
243 struct hlist_node next;
244 struct pci_cap_saved_data cap;
245 };
246
247 struct irq_affinity;
248 struct pcie_link_state;
249 struct pci_vpd;
250 struct pci_sriov;
251 struct pci_ats;
252
253 /*
254 * The pci_dev structure is used to describe PCI devices.
255 */
256 struct pci_dev {
257 struct list_head bus_list; /* node in per-bus list */
258 struct pci_bus *bus; /* bus this device is on */
259 struct pci_bus *subordinate; /* bus this device bridges to */
260
261 void *sysdata; /* hook for sys-specific extension */
262 struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */
263 struct pci_slot *slot; /* Physical slot this device is in */
264
265 unsigned int devfn; /* encoded device & function index */
266 unsigned short vendor;
267 unsigned short device;
268 unsigned short subsystem_vendor;
269 unsigned short subsystem_device;
270 unsigned int class; /* 3 bytes: (base,sub,prog-if) */
271 u8 revision; /* PCI revision, low byte of class word */
272 u8 hdr_type; /* PCI header type (`multi' flag masked out) */
273 #ifdef CONFIG_PCIEAER
274 u16 aer_cap; /* AER capability offset */
275 #endif
276 u8 pcie_cap; /* PCIe capability offset */
277 u8 msi_cap; /* MSI capability offset */
278 u8 msix_cap; /* MSI-X capability offset */
279 u8 pcie_mpss:3; /* PCIe Max Payload Size Supported */
280 u8 rom_base_reg; /* which config register controls the ROM */
281 u8 pin; /* which interrupt pin this device uses */
282 u16 pcie_flags_reg; /* cached PCIe Capabilities Register */
283 unsigned long *dma_alias_mask;/* mask of enabled devfn aliases */
284
285 struct pci_driver *driver; /* which driver has allocated this device */
286 u64 dma_mask; /* Mask of the bits of bus address this
287 device implements. Normally this is
288 0xffffffff. You only need to change
289 this if your device has broken DMA
290 or supports 64-bit transfers. */
291
292 struct device_dma_parameters dma_parms;
293
294 pci_power_t current_state; /* Current operating state. In ACPI-speak,
295 this is D0-D3, D0 being fully functional,
296 and D3 being off. */
297 u8 pm_cap; /* PM capability offset */
298 unsigned int pme_support:5; /* Bitmask of states from which PME#
299 can be generated */
300 unsigned int pme_interrupt:1;
301 unsigned int pme_poll:1; /* Poll device's PME status bit */
302 unsigned int d1_support:1; /* Low power state D1 is supported */
303 unsigned int d2_support:1; /* Low power state D2 is supported */
304 unsigned int no_d1d2:1; /* D1 and D2 are forbidden */
305 unsigned int no_d3cold:1; /* D3cold is forbidden */
306 unsigned int bridge_d3:1; /* Allow D3 for bridge */
307 unsigned int d3cold_allowed:1; /* D3cold is allowed by user */
308 unsigned int mmio_always_on:1; /* disallow turning off io/mem
309 decoding during bar sizing */
310 unsigned int wakeup_prepared:1;
311 unsigned int runtime_d3cold:1; /* whether to go through runtime
312 D3cold, not set for devices
313 powered on/off by the
314 corresponding bridge */
315 unsigned int ignore_hotplug:1; /* Ignore hotplug events */
316 unsigned int hotplug_user_indicators:1; /* SlotCtl indicators
317 controlled exclusively by
318 user sysfs */
319 unsigned int d3_delay; /* D3->D0 transition time in ms */
320 unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
321
322 #ifdef CONFIG_PCIEASPM
323 struct pcie_link_state *link_state; /* ASPM link state */
324 #endif
325
326 pci_channel_state_t error_state; /* current connectivity state */
327 struct device dev; /* Generic device interface */
328
329 int cfg_size; /* Size of configuration space */
330
331 /*
332 * Instead of touching interrupt line and base address registers
333 * directly, use the values stored here. They might be different!
334 */
335 unsigned int irq;
336 struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
337
338 bool match_driver; /* Skip attaching driver */
339 /* These fields are used by common fixups */
340 unsigned int transparent:1; /* Subtractive decode PCI bridge */
341 unsigned int multifunction:1;/* Part of multi-function device */
342 /* keep track of device state */
343 unsigned int is_added:1;
344 unsigned int is_busmaster:1; /* device is busmaster */
345 unsigned int no_msi:1; /* device may not use msi */
346 unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */
347 unsigned int block_cfg_access:1; /* config space access is blocked */
348 unsigned int broken_parity_status:1; /* Device generates false positive parity */
349 unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
350 unsigned int msi_enabled:1;
351 unsigned int msix_enabled:1;
352 unsigned int ari_enabled:1; /* ARI forwarding */
353 unsigned int ats_enabled:1; /* Address Translation Service */
354 unsigned int is_managed:1;
355 unsigned int needs_freset:1; /* Dev requires fundamental reset */
356 unsigned int state_saved:1;
357 unsigned int is_physfn:1;
358 unsigned int is_virtfn:1;
359 unsigned int reset_fn:1;
360 unsigned int is_hotplug_bridge:1;
361 unsigned int __aer_firmware_first_valid:1;
362 unsigned int __aer_firmware_first:1;
363 unsigned int broken_intx_masking:1;
364 unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
365 unsigned int irq_managed:1;
366 unsigned int has_secondary_link:1;
367 unsigned int non_compliant_bars:1; /* broken BARs; ignore them */
368 pci_dev_flags_t dev_flags;
369 atomic_t enable_cnt; /* pci_enable_device has been called */
370
371 u32 saved_config_space[16]; /* config space saved at suspend time */
372 struct hlist_head saved_cap_space;
373 struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */
374 int rom_attr_enabled; /* has display of the rom attribute been enabled? */
375 struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
376 struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
377
378 #ifdef CONFIG_PCIE_PTM
379 unsigned int ptm_root:1;
380 unsigned int ptm_enabled:1;
381 u8 ptm_granularity;
382 #endif
383 #ifdef CONFIG_PCI_MSI
384 const struct attribute_group **msi_irq_groups;
385 #endif
386 struct pci_vpd *vpd;
387 #ifdef CONFIG_PCI_ATS
388 union {
389 struct pci_sriov *sriov; /* SR-IOV capability related */
390 struct pci_dev *physfn; /* the PF this VF is associated with */
391 };
392 u16 ats_cap; /* ATS Capability offset */
393 u8 ats_stu; /* ATS Smallest Translation Unit */
394 atomic_t ats_ref_cnt; /* number of VFs with ATS enabled */
395 #endif
396 phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */
397 size_t romlen; /* Length of ROM if it's not from the BAR */
398 char *driver_override; /* Driver name to force a match */
399 };
400
401 static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
402 {
403 #ifdef CONFIG_PCI_IOV
404 if (dev->is_virtfn)
405 dev = dev->physfn;
406 #endif
407 return dev;
408 }
409
410 struct pci_dev *pci_alloc_dev(struct pci_bus *bus);
411
412 #define to_pci_dev(n) container_of(n, struct pci_dev, dev)
413 #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
414
415 static inline int pci_channel_offline(struct pci_dev *pdev)
416 {
417 return (pdev->error_state != pci_channel_io_normal);
418 }
419
420 struct pci_host_bridge {
421 struct device dev;
422 struct pci_bus *bus; /* root bus */
423 struct pci_ops *ops;
424 void *sysdata;
425 int busnr;
426 struct list_head windows; /* resource_entry */
427 void (*release_fn)(struct pci_host_bridge *);
428 void *release_data;
429 struct msi_controller *msi;
430 unsigned int ignore_reset_delay:1; /* for entire hierarchy */
431 /* Resource alignment requirements */
432 resource_size_t (*align_resource)(struct pci_dev *dev,
433 const struct resource *res,
434 resource_size_t start,
435 resource_size_t size,
436 resource_size_t align);
437 unsigned long private[0] ____cacheline_aligned;
438 };
439
440 #define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
441
442 static inline void *pci_host_bridge_priv(struct pci_host_bridge *bridge)
443 {
444 return (void *)bridge->private;
445 }
446
447 static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv)
448 {
449 return container_of(priv, struct pci_host_bridge, private);
450 }
451
452 struct pci_host_bridge *pci_alloc_host_bridge(size_t priv);
453 int pci_register_host_bridge(struct pci_host_bridge *bridge);
454 struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
455
456 void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
457 void (*release_fn)(struct pci_host_bridge *),
458 void *release_data);
459
460 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
461
462 /*
463 * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond
464 * to P2P or CardBus bridge windows) go in a table. Additional ones (for
465 * buses below host bridges or subtractive decode bridges) go in the list.
466 * Use pci_bus_for_each_resource() to iterate through all the resources.
467 */
468
469 /*
470 * PCI_SUBTRACTIVE_DECODE means the bridge forwards the window implicitly
471 * and there's no way to program the bridge with the details of the window.
472 * This does not apply to ACPI _CRS windows, even with the _DEC subtractive-
473 * decode bit set, because they are explicit and can be programmed with _SRS.
474 */
475 #define PCI_SUBTRACTIVE_DECODE 0x1
476
477 struct pci_bus_resource {
478 struct list_head list;
479 struct resource *res;
480 unsigned int flags;
481 };
482
483 #define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */
484
485 struct pci_bus {
486 struct list_head node; /* node in list of buses */
487 struct pci_bus *parent; /* parent bus this bridge is on */
488 struct list_head children; /* list of child buses */
489 struct list_head devices; /* list of devices on this bus */
490 struct pci_dev *self; /* bridge device as seen by parent */
491 struct list_head slots; /* list of slots on this bus;
492 protected by pci_slot_mutex */
493 struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
494 struct list_head resources; /* address space routed to this bus */
495 struct resource busn_res; /* bus numbers routed to this bus */
496
497 struct pci_ops *ops; /* configuration access functions */
498 struct msi_controller *msi; /* MSI controller */
499 void *sysdata; /* hook for sys-specific extension */
500 struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */
501
502 unsigned char number; /* bus number */
503 unsigned char primary; /* number of primary bridge */
504 unsigned char max_bus_speed; /* enum pci_bus_speed */
505 unsigned char cur_bus_speed; /* enum pci_bus_speed */
506 #ifdef CONFIG_PCI_DOMAINS_GENERIC
507 int domain_nr;
508 #endif
509
510 char name[48];
511
512 unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */
513 pci_bus_flags_t bus_flags; /* inherited by child buses */
514 struct device *bridge;
515 struct device dev;
516 struct bin_attribute *legacy_io; /* legacy I/O for this bus */
517 struct bin_attribute *legacy_mem; /* legacy mem */
518 unsigned int is_added:1;
519 };
520
521 #define to_pci_bus(n) container_of(n, struct pci_bus, dev)
522
523 /*
524 * Returns true if the PCI bus is root (behind host-PCI bridge),
525 * false otherwise
526 *
527 * Some code assumes that "bus->self == NULL" means that bus is a root bus.
528 * This is incorrect because "virtual" buses added for SR-IOV (via
529 * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
530 */
531 static inline bool pci_is_root_bus(struct pci_bus *pbus)
532 {
533 return !(pbus->parent);
534 }
535
536 /**
537 * pci_is_bridge - check if the PCI device is a bridge
538 * @dev: PCI device
539 *
540 * Return true if the PCI device is a bridge, whether or not it has a
541 * subordinate bus.
542 */
543 static inline bool pci_is_bridge(struct pci_dev *dev)
544 {
545 return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
546 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
547 }
548
549 static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
550 {
551 dev = pci_physfn(dev);
552 if (pci_is_root_bus(dev->bus))
553 return NULL;
554
555 return dev->bus->self;
556 }
557
558 struct device *pci_get_host_bridge_device(struct pci_dev *dev);
559 void pci_put_host_bridge_device(struct device *dev);
560
561 #ifdef CONFIG_PCI_MSI
562 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
563 {
564 return pci_dev->msi_enabled || pci_dev->msix_enabled;
565 }
566 #else
567 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
568 #endif
569
570 /*
571 * Error values that may be returned by PCI functions.
572 */
573 #define PCIBIOS_SUCCESSFUL 0x00
574 #define PCIBIOS_FUNC_NOT_SUPPORTED 0x81
575 #define PCIBIOS_BAD_VENDOR_ID 0x83
576 #define PCIBIOS_DEVICE_NOT_FOUND 0x86
577 #define PCIBIOS_BAD_REGISTER_NUMBER 0x87
578 #define PCIBIOS_SET_FAILED 0x88
579 #define PCIBIOS_BUFFER_TOO_SMALL 0x89
580
581 /*
582 * Translate above to generic errno for passing back through non-PCI code.
583 */
584 static inline int pcibios_err_to_errno(int err)
585 {
586 if (err <= PCIBIOS_SUCCESSFUL)
587 return err; /* Assume already errno */
588
589 switch (err) {
590 case PCIBIOS_FUNC_NOT_SUPPORTED:
591 return -ENOENT;
592 case PCIBIOS_BAD_VENDOR_ID:
593 return -ENOTTY;
594 case PCIBIOS_DEVICE_NOT_FOUND:
595 return -ENODEV;
596 case PCIBIOS_BAD_REGISTER_NUMBER:
597 return -EFAULT;
598 case PCIBIOS_SET_FAILED:
599 return -EIO;
600 case PCIBIOS_BUFFER_TOO_SMALL:
601 return -ENOSPC;
602 }
603
604 return -ERANGE;
605 }
606
607 /* Low-level architecture-dependent routines */
608
609 struct pci_ops {
610 int (*add_bus)(struct pci_bus *bus);
611 void (*remove_bus)(struct pci_bus *bus);
612 void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
613 int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
614 int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
615 };
616
617 /*
618 * ACPI needs to be able to access PCI config space before we've done a
619 * PCI bus scan and created pci_bus structures.
620 */
621 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
622 int reg, int len, u32 *val);
623 int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
624 int reg, int len, u32 val);
625
626 #ifdef CONFIG_PCI_BUS_ADDR_T_64BIT
627 typedef u64 pci_bus_addr_t;
628 #else
629 typedef u32 pci_bus_addr_t;
630 #endif
631
632 struct pci_bus_region {
633 pci_bus_addr_t start;
634 pci_bus_addr_t end;
635 };
636
637 struct pci_dynids {
638 spinlock_t lock; /* protects list, index */
639 struct list_head list; /* for IDs added at runtime */
640 };
641
642
643 /*
644 * PCI Error Recovery System (PCI-ERS). If a PCI device driver provides
645 * a set of callbacks in struct pci_error_handlers, that device driver
646 * will be notified of PCI bus errors, and will be driven to recovery
647 * when an error occurs.
648 */
649
650 typedef unsigned int __bitwise pci_ers_result_t;
651
652 enum pci_ers_result {
653 /* no result/none/not supported in device driver */
654 PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
655
656 /* Device driver can recover without slot reset */
657 PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
658
659 /* Device driver wants slot to be reset. */
660 PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
661
662 /* Device has completely failed, is unrecoverable */
663 PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,
664
665 /* Device driver is fully recovered and operational */
666 PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,
667
668 /* No AER capabilities registered for the driver */
669 PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6,
670 };
671
672 /* PCI bus error event callbacks */
673 struct pci_error_handlers {
674 /* PCI bus error detected on this device */
675 pci_ers_result_t (*error_detected)(struct pci_dev *dev,
676 enum pci_channel_state error);
677
678 /* MMIO has been re-enabled, but not DMA */
679 pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
680
681 /* PCI Express link has been reset */
682 pci_ers_result_t (*link_reset)(struct pci_dev *dev);
683
684 /* PCI slot has been reset */
685 pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
686
687 /* PCI function reset prepare or completed */
688 void (*reset_notify)(struct pci_dev *dev, bool prepare);
689
690 /* Device driver may resume normal operations */
691 void (*resume)(struct pci_dev *dev);
692 };
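
/*
 * Editor's sketch (hypothetical driver, not part of the original header):
 * a driver opts into PCI-ERS by pointing its struct pci_driver .err_handler
 * at a table like this; only the callbacks it implements need to be set.
 */
static pci_ers_result_t example_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;	/* device is gone */
	return PCI_ERS_RESULT_NEED_RESET;		/* request a slot reset */
}

static const struct pci_error_handlers example_err_handler = {
	.error_detected	= example_error_detected,
};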
693
694
695 struct module;
696 struct pci_driver {
697 struct list_head node;
698 const char *name;
699 const struct pci_device_id *id_table; /* must be non-NULL for probe to be called */
700 int (*probe) (struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
701 void (*remove) (struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
702 int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */
703 int (*suspend_late) (struct pci_dev *dev, pm_message_t state);
704 int (*resume_early) (struct pci_dev *dev);
705 int (*resume) (struct pci_dev *dev); /* Device woken up */
706 void (*shutdown) (struct pci_dev *dev);
707 int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* PF pdev */
708 const struct pci_error_handlers *err_handler;
709 struct device_driver driver;
710 struct pci_dynids dynids;
711 };
712
713 #define to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
714
715 /**
716 * PCI_DEVICE - macro used to describe a specific pci device
717 * @vend: the 16 bit PCI Vendor ID
718 * @dev: the 16 bit PCI Device ID
719 *
720 * This macro is used to create a struct pci_device_id that matches a
721 * specific device. The subvendor and subdevice fields will be set to
722 * PCI_ANY_ID.
723 */
724 #define PCI_DEVICE(vend,dev) \
725 .vendor = (vend), .device = (dev), \
726 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
727
728 /**
729 * PCI_DEVICE_SUB - macro used to describe a specific pci device with subsystem
730 * @vend: the 16 bit PCI Vendor ID
731 * @dev: the 16 bit PCI Device ID
732 * @subvend: the 16 bit PCI Subvendor ID
733 * @subdev: the 16 bit PCI Subdevice ID
734 *
735 * This macro is used to create a struct pci_device_id that matches a
736 * specific device with subsystem information.
737 */
738 #define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
739 .vendor = (vend), .device = (dev), \
740 .subvendor = (subvend), .subdevice = (subdev)
741
742 /**
743 * PCI_DEVICE_CLASS - macro used to describe a specific pci device class
744 * @dev_class: the class, subclass, prog-if triple for this device
745 * @dev_class_mask: the class mask for this device
746 *
747 * This macro is used to create a struct pci_device_id that matches a
748 * specific PCI class. The vendor, device, subvendor, and subdevice
749 * fields will be set to PCI_ANY_ID.
750 */
751 #define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
752 .class = (dev_class), .class_mask = (dev_class_mask), \
753 .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
754 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
755
756 /**
757 * PCI_VDEVICE - macro used to describe a specific pci device in short form
758 * @vend: the vendor name
759 * @dev: the 16 bit PCI Device ID
760 *
761 * This macro is used to create a struct pci_device_id that matches a
762 * specific PCI device. The subvendor, and subdevice fields will be set
763 * to PCI_ANY_ID. The macro allows the next field to follow as the device
764 * private data.
765 */
766
767 #define PCI_VDEVICE(vend, dev) \
768 .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
769 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
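
/*
 * Editor's sketch (hypothetical driver, illustrative IDs, not part of the
 * original header): combining the macros above into a device ID table.
 * MODULE_DEVICE_TABLE() comes from linux/module.h, which a driver includes
 * anyway.
 */
static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(0x8086, 0x1234) },		/* exact vendor/device match */
	{ PCI_VDEVICE(INTEL, 0x5678) },		/* same thing in short form */
	{ PCI_DEVICE_CLASS(PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_ids);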
770
771 enum {
772 PCI_REASSIGN_ALL_RSRC = 0x00000001, /* ignore firmware setup */
773 PCI_REASSIGN_ALL_BUS = 0x00000002, /* reassign all bus numbers */
774 PCI_PROBE_ONLY = 0x00000004, /* use existing setup */
775 PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* don't do ISA alignment */
776 PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* enable domains in /proc */
777 PCI_COMPAT_DOMAIN_0 = 0x00000020, /* ... except domain 0 */
778 PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* scan all, not just dev 0 */
779 };
780
781 /* these external functions are only available when PCI support is enabled */
782 #ifdef CONFIG_PCI
783
784 extern unsigned int pci_flags;
785
786 static inline void pci_set_flags(int flags) { pci_flags = flags; }
787 static inline void pci_add_flags(int flags) { pci_flags |= flags; }
788 static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
789 static inline int pci_has_flag(int flag) { return pci_flags & flag; }
790
791 void pcie_bus_configure_settings(struct pci_bus *bus);
792
793 enum pcie_bus_config_types {
794 PCIE_BUS_TUNE_OFF, /* don't touch MPS at all */
795 PCIE_BUS_DEFAULT, /* ensure MPS matches upstream bridge */
796 PCIE_BUS_SAFE, /* use largest MPS boot-time devices support */
797 PCIE_BUS_PERFORMANCE, /* use MPS and MRRS for best performance */
798 PCIE_BUS_PEER2PEER, /* set MPS = 128 for all devices */
799 };
800
801 extern enum pcie_bus_config_types pcie_bus_config;
802
803 extern struct bus_type pci_bus_type;
804
805 /* Do NOT directly access these two variables, unless you are arch-specific PCI
806 * code, or PCI core code. */
807 extern struct list_head pci_root_buses; /* list of all known PCI buses */
808 /* Some device drivers need to know if PCI is initialized */
809 int no_pci_devices(void);
810
811 void pcibios_resource_survey_bus(struct pci_bus *bus);
812 void pcibios_bus_add_device(struct pci_dev *pdev);
813 void pcibios_add_bus(struct pci_bus *bus);
814 void pcibios_remove_bus(struct pci_bus *bus);
815 void pcibios_fixup_bus(struct pci_bus *);
816 int __must_check pcibios_enable_device(struct pci_dev *, int mask);
817 /* Architecture-specific versions may override this (weak) */
818 char *pcibios_setup(char *str);
819
820 /* Used only when drivers/pci/setup.c is used */
821 resource_size_t pcibios_align_resource(void *, const struct resource *,
822 resource_size_t,
823 resource_size_t);
824 void pcibios_update_irq(struct pci_dev *, int irq);
825
826 /* Weak but can be overridden by arch */
827 void pci_fixup_cardbus(struct pci_bus *);
828
829 /* Generic PCI functions used internally */
830
831 void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
832 struct resource *res);
833 void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
834 struct pci_bus_region *region);
835 void pcibios_scan_specific_bus(int busn);
836 struct pci_bus *pci_find_bus(int domain, int busnr);
837 void pci_bus_add_devices(const struct pci_bus *bus);
838 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
839 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
840 struct pci_ops *ops, void *sysdata,
841 struct list_head *resources);
842 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
843 int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
844 void pci_bus_release_busn_res(struct pci_bus *b);
845 struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
846 struct pci_ops *ops, void *sysdata,
847 struct list_head *resources,
848 struct msi_controller *msi);
849 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
850 struct pci_ops *ops, void *sysdata,
851 struct list_head *resources);
852 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
853 int busnr);
854 void pcie_update_link_speed(struct pci_bus *bus, u16 link_status);
855 struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
856 const char *name,
857 struct hotplug_slot *hotplug);
858 void pci_destroy_slot(struct pci_slot *slot);
859 #ifdef CONFIG_SYSFS
860 void pci_dev_assign_slot(struct pci_dev *dev);
861 #else
862 static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
863 #endif
864 int pci_scan_slot(struct pci_bus *bus, int devfn);
865 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
866 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
867 unsigned int pci_scan_child_bus(struct pci_bus *bus);
868 void pci_bus_add_device(struct pci_dev *dev);
869 void pci_read_bridge_bases(struct pci_bus *child);
870 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
871 struct resource *res);
872 struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev);
873 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
874 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
875 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
876 struct pci_dev *pci_dev_get(struct pci_dev *dev);
877 void pci_dev_put(struct pci_dev *dev);
878 void pci_remove_bus(struct pci_bus *b);
879 void pci_stop_and_remove_bus_device(struct pci_dev *dev);
880 void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
881 void pci_stop_root_bus(struct pci_bus *bus);
882 void pci_remove_root_bus(struct pci_bus *bus);
883 void pci_setup_cardbus(struct pci_bus *bus);
884 void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
885 void pci_sort_breadthfirst(void);
886 #define dev_is_pci(d) ((d)->bus == &pci_bus_type)
887 #define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
888 #define dev_num_vf(d) ((dev_is_pci(d) ? pci_num_vf(to_pci_dev(d)) : 0))
889
890 /* Generic PCI functions exported to card drivers */
891
892 enum pci_lost_interrupt_reason {
893 PCI_LOST_IRQ_NO_INFORMATION = 0,
894 PCI_LOST_IRQ_DISABLE_MSI,
895 PCI_LOST_IRQ_DISABLE_MSIX,
896 PCI_LOST_IRQ_DISABLE_ACPI,
897 };
898 enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *dev);
899 int pci_find_capability(struct pci_dev *dev, int cap);
900 int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
901 int pci_find_ext_capability(struct pci_dev *dev, int cap);
902 int pci_find_next_ext_capability(struct pci_dev *dev, int pos, int cap);
903 int pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
904 int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap);
905 struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
906
907 struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
908 struct pci_dev *from);
909 struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
910 unsigned int ss_vendor, unsigned int ss_device,
911 struct pci_dev *from);
912 struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
913 struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
914 unsigned int devfn);
915 static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
916 unsigned int devfn)
917 {
918 return pci_get_domain_bus_and_slot(0, bus, devfn);
919 }
920 struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
921 int pci_dev_present(const struct pci_device_id *ids);
922
923 int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
924 int where, u8 *val);
925 int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn,
926 int where, u16 *val);
927 int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn,
928 int where, u32 *val);
929 int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn,
930 int where, u8 val);
931 int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
932 int where, u16 val);
933 int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
934 int where, u32 val);
935
936 int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
937 int where, int size, u32 *val);
938 int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
939 int where, int size, u32 val);
940 int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
941 int where, int size, u32 *val);
942 int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
943 int where, int size, u32 val);
944
945 struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
946
947 static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
948 {
949 return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
950 }
951 static inline int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
952 {
953 return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
954 }
955 static inline int pci_read_config_dword(const struct pci_dev *dev, int where,
956 u32 *val)
957 {
958 return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
959 }
960 static inline int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
961 {
962 return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
963 }
964 static inline int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
965 {
966 return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
967 }
968 static inline int pci_write_config_dword(const struct pci_dev *dev, int where,
969 u32 val)
970 {
971 return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
972 }
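
/*
 * Editor's sketch (hypothetical helper, not part of the original header):
 * typical use of the accessors above. They return PCIBIOS_* codes, which
 * pcibios_err_to_errno() (defined earlier in this header) turns into a
 * normal errno.
 */
static inline int example_read_vendor(struct pci_dev *pdev, u16 *vendor)
{
	int err = pci_read_config_word(pdev, PCI_VENDOR_ID, vendor);

	return err ? pcibios_err_to_errno(err) : 0;
}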
973
974 int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
975 int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
976 int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
977 int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
978 int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
979 u16 clear, u16 set);
980 int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
981 u32 clear, u32 set);
982
983 static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
984 u16 set)
985 {
986 return pcie_capability_clear_and_set_word(dev, pos, 0, set);
987 }
988
989 static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos,
990 u32 set)
991 {
992 return pcie_capability_clear_and_set_dword(dev, pos, 0, set);
993 }
994
995 static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
996 u16 clear)
997 {
998 return pcie_capability_clear_and_set_word(dev, pos, clear, 0);
999 }
1000
1001 static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
1002 u32 clear)
1003 {
1004 return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
1005 }
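
/*
 * Editor's sketch (not part of the original header): the wrappers above are
 * read-modify-write helpers for registers in the PCIe capability, e.g.
 * clearing the ASPM control bits of Link Control (constants from
 * uapi/linux/pci_regs.h):
 *
 *	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_ASPMC);
 */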
1006
1007 /* user-space driven config access */
1008 int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
1009 int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
1010 int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
1011 int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
1012 int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
1013 int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
1014
1015 int __must_check pci_enable_device(struct pci_dev *dev);
1016 int __must_check pci_enable_device_io(struct pci_dev *dev);
1017 int __must_check pci_enable_device_mem(struct pci_dev *dev);
1018 int __must_check pci_reenable_device(struct pci_dev *);
1019 int __must_check pcim_enable_device(struct pci_dev *pdev);
1020 void pcim_pin_device(struct pci_dev *pdev);
1021
1022 static inline int pci_is_enabled(struct pci_dev *pdev)
1023 {
1024 return (atomic_read(&pdev->enable_cnt) > 0);
1025 }
1026
1027 static inline int pci_is_managed(struct pci_dev *pdev)
1028 {
1029 return pdev->is_managed;
1030 }
1031
1032 void pci_disable_device(struct pci_dev *dev);
1033
1034 extern unsigned int pcibios_max_latency;
1035 void pci_set_master(struct pci_dev *dev);
1036 void pci_clear_master(struct pci_dev *dev);
1037
1038 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
1039 int pci_set_cacheline_size(struct pci_dev *dev);
1040 #define HAVE_PCI_SET_MWI
1041 int __must_check pci_set_mwi(struct pci_dev *dev);
1042 int pci_try_set_mwi(struct pci_dev *dev);
1043 void pci_clear_mwi(struct pci_dev *dev);
1044 void pci_intx(struct pci_dev *dev, int enable);
1045 bool pci_intx_mask_supported(struct pci_dev *dev);
1046 bool pci_check_and_mask_intx(struct pci_dev *dev);
1047 bool pci_check_and_unmask_intx(struct pci_dev *dev);
1048 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
1049 int pci_wait_for_pending_transaction(struct pci_dev *dev);
1050 int pcix_get_max_mmrbc(struct pci_dev *dev);
1051 int pcix_get_mmrbc(struct pci_dev *dev);
1052 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
1053 int pcie_get_readrq(struct pci_dev *dev);
1054 int pcie_set_readrq(struct pci_dev *dev, int rq);
1055 int pcie_get_mps(struct pci_dev *dev);
1056 int pcie_set_mps(struct pci_dev *dev, int mps);
1057 int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
1058 enum pcie_link_width *width);
1059 int __pci_reset_function(struct pci_dev *dev);
1060 int __pci_reset_function_locked(struct pci_dev *dev);
1061 int pci_reset_function(struct pci_dev *dev);
1062 int pci_try_reset_function(struct pci_dev *dev);
1063 int pci_probe_reset_slot(struct pci_slot *slot);
1064 int pci_reset_slot(struct pci_slot *slot);
1065 int pci_try_reset_slot(struct pci_slot *slot);
1066 int pci_probe_reset_bus(struct pci_bus *bus);
1067 int pci_reset_bus(struct pci_bus *bus);
1068 int pci_try_reset_bus(struct pci_bus *bus);
1069 void pci_reset_secondary_bus(struct pci_dev *dev);
1070 void pcibios_reset_secondary_bus(struct pci_dev *dev);
1071 void pci_reset_bridge_secondary_bus(struct pci_dev *dev);
1072 void pci_update_resource(struct pci_dev *dev, int resno);
1073 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
1074 int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
1075 int pci_select_bars(struct pci_dev *dev, unsigned long flags);
1076 bool pci_device_is_present(struct pci_dev *pdev);
1077 void pci_ignore_hotplug(struct pci_dev *dev);
1078
1079 /* ROM control related routines */
1080 int pci_enable_rom(struct pci_dev *pdev);
1081 void pci_disable_rom(struct pci_dev *pdev);
1082 void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
1083 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
1084 size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size);
1085 void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);
1086
1087 /* Power management related routines */
1088 int pci_save_state(struct pci_dev *dev);
1089 void pci_restore_state(struct pci_dev *dev);
1090 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
1091 int pci_load_saved_state(struct pci_dev *dev,
1092 struct pci_saved_state *state);
1093 int pci_load_and_free_saved_state(struct pci_dev *dev,
1094 struct pci_saved_state **state);
1095 struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap);
1096 struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
1097 u16 cap);
1098 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size);
1099 int pci_add_ext_cap_save_buffer(struct pci_dev *dev,
1100 u16 cap, unsigned int size);
1101 int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state);
1102 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
1103 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
1104 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
1105 void pci_pme_active(struct pci_dev *dev, bool enable);
1106 int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1107 bool runtime, bool enable);
1108 int pci_wake_from_d3(struct pci_dev *dev, bool enable);
1109 int pci_prepare_to_sleep(struct pci_dev *dev);
1110 int pci_back_from_sleep(struct pci_dev *dev);
1111 bool pci_dev_run_wake(struct pci_dev *dev);
1112 bool pci_check_pme_status(struct pci_dev *dev);
1113 void pci_pme_wakeup_bus(struct pci_bus *bus);
1114 void pci_d3cold_enable(struct pci_dev *dev);
1115 void pci_d3cold_disable(struct pci_dev *dev);
1116
1117 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1118 bool enable)
1119 {
1120 return __pci_enable_wake(dev, state, false, enable);
1121 }
1122
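A sketch of how a legacy .suspend hook might combine the power-management routines above; all names except the PCI helpers are hypothetical:

static int demo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);			/* snapshot config space */
	pci_enable_wake(pdev, PCI_D3hot, true);	/* arm PME from D3hot */
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}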
1123 /* PCI Virtual Channel */
1124 int pci_save_vc_state(struct pci_dev *dev);
1125 void pci_restore_vc_state(struct pci_dev *dev);
1126 void pci_allocate_vc_save_buffers(struct pci_dev *dev);
1127
1128 /* For use by arch with custom probe code */
1129 void set_pcie_port_type(struct pci_dev *pdev);
1130 void set_pcie_hotplug_bridge(struct pci_dev *pdev);
1131
1132 /* Functions for PCI Hotplug drivers to use */
1133 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
1134 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
1135 unsigned int pci_rescan_bus(struct pci_bus *bus);
1136 void pci_lock_rescan_remove(void);
1137 void pci_unlock_rescan_remove(void);
1138
1139 /* Vital product data routines */
1140 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1141 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1142 int pci_set_vpd_size(struct pci_dev *dev, size_t len);
1143
1144 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
1145 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
1146 void pci_bus_assign_resources(const struct pci_bus *bus);
1147 void pci_bus_claim_resources(struct pci_bus *bus);
1148 void pci_bus_size_bridges(struct pci_bus *bus);
1149 int pci_claim_resource(struct pci_dev *, int);
1150 int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
1151 void pci_assign_unassigned_resources(void);
1152 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
1153 void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
1154 void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
1155 void pdev_enable_device(struct pci_dev *);
1156 int pci_enable_resources(struct pci_dev *, int mask);
1157 void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *),
1158 int (*)(const struct pci_dev *, u8, u8));
1159 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
1160 #define HAVE_PCI_REQ_REGIONS 2
1161 int __must_check pci_request_regions(struct pci_dev *, const char *);
1162 int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
1163 void pci_release_regions(struct pci_dev *);
1164 int __must_check pci_request_region(struct pci_dev *, int, const char *);
1165 int __must_check pci_request_region_exclusive(struct pci_dev *, int, const char *);
1166 void pci_release_region(struct pci_dev *, int);
1167 int pci_request_selected_regions(struct pci_dev *, int, const char *);
1168 int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
1169 void pci_release_selected_regions(struct pci_dev *, int);
1170
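Putting the enable and region helpers together, a sketch of a conventional probe prologue (the "demo" name is a placeholder):

static int demo_probe_prologue(struct pci_dev *pdev)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "demo");
	if (err) {
		pci_disable_device(pdev);
		return err;
	}

	pci_set_master(pdev);	/* enable bus mastering for DMA */
	return 0;
}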
1171 /* drivers/pci/bus.c */
1172 struct pci_bus *pci_bus_get(struct pci_bus *bus);
1173 void pci_bus_put(struct pci_bus *bus);
1174 void pci_add_resource(struct list_head *resources, struct resource *res);
1175 void pci_add_resource_offset(struct list_head *resources, struct resource *res,
1176 resource_size_t offset);
1177 void pci_free_resource_list(struct list_head *resources);
1178 void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
1179 unsigned int flags);
1180 struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
1181 void pci_bus_remove_resources(struct pci_bus *bus);
1182 int devm_request_pci_bus_resources(struct device *dev,
1183 struct list_head *resources);
1184
1185 #define pci_bus_for_each_resource(bus, res, i) \
1186 for (i = 0; \
1187 (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \
1188 i++)
1189
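A sketch of the iterator above in use; %pR is the standard resource format specifier:

static void demo_dump_bus_windows(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	/* res may be NULL for unused window slots. */
	pci_bus_for_each_resource(bus, res, i) {
		if (res)
			dev_info(&bus->dev, "window %d: %pR\n", i, res);
	}
}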
1190 int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
1191 struct resource *res, resource_size_t size,
1192 resource_size_t align, resource_size_t min,
1193 unsigned long type_mask,
1194 resource_size_t (*alignf)(void *,
1195 const struct resource *,
1196 resource_size_t,
1197 resource_size_t),
1198 void *alignf_data);
1199
1200
1201 int pci_register_io_range(phys_addr_t addr, resource_size_t size);
1202 unsigned long pci_address_to_pio(phys_addr_t addr);
1203 phys_addr_t pci_pio_to_address(unsigned long pio);
1204 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
1205 void pci_unmap_iospace(struct resource *res);
1206
1207 static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
1208 {
1209 struct pci_bus_region region;
1210
1211 pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]);
1212 return region.start;
1213 }
1214
1215 /* Proper probing supporting hot-pluggable devices */
1216 int __must_check __pci_register_driver(struct pci_driver *, struct module *,
1217 const char *mod_name);
1218
1219 /*
1220 * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded
1221 */
1222 #define pci_register_driver(driver) \
1223 __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
1224
1225 void pci_unregister_driver(struct pci_driver *dev);
1226
1227 /**
1228 * module_pci_driver() - Helper macro for registering a PCI driver
1229 * @__pci_driver: pci_driver struct
1230 *
1231 * Helper macro for PCI drivers which do not do anything special in module
1232 * init/exit. This eliminates a lot of boilerplate. Each module may only
1233 * use this macro once, and calling it replaces module_init() and module_exit().
1234 */
1235 #define module_pci_driver(__pci_driver) \
1236 module_driver(__pci_driver, pci_register_driver, \
1237 pci_unregister_driver)
1238
1239 /**
1240 * builtin_pci_driver() - Helper macro for registering a PCI driver
1241 * @__pci_driver: pci_driver struct
1242 *
1243 * Helper macro for PCI drivers which do not do anything special in their
1244 * init code. This eliminates a lot of boilerplate. Each driver may only
1245 * use this macro once, and calling it replaces device_initcall(...).
1246 */
1247 #define builtin_pci_driver(__pci_driver) \
1248 builtin_driver(__pci_driver, pci_register_driver)
1249
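For comparison, a sketch of the skeleton that module_pci_driver() above collapses; every "demo" identifier and the device ID are placeholders:

static const struct pci_device_id demo_ids[] = {
	{ PCI_DEVICE(0x1af4, 0x1000) },	/* placeholder vendor/device */
	{ }
};
MODULE_DEVICE_TABLE(pci, demo_ids);

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pci_enable_device(pdev);
}

static void demo_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver demo_driver = {
	.name		= "demo",
	.id_table	= demo_ids,
	.probe		= demo_probe,
	.remove		= demo_remove,
};
module_pci_driver(demo_driver);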
1250 struct pci_driver *pci_dev_driver(const struct pci_dev *dev);
1251 int pci_add_dynid(struct pci_driver *drv,
1252 unsigned int vendor, unsigned int device,
1253 unsigned int subvendor, unsigned int subdevice,
1254 unsigned int class, unsigned int class_mask,
1255 unsigned long driver_data);
1256 const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
1257 struct pci_dev *dev);
1258 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
1259 int pass);
1260
1261 void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
1262 void *userdata);
1263 int pci_cfg_space_size(struct pci_dev *dev);
1264 unsigned char pci_bus_max_busnr(struct pci_bus *bus);
1265 void pci_setup_bridge(struct pci_bus *bus);
1266 resource_size_t pcibios_window_alignment(struct pci_bus *bus,
1267 unsigned long type);
1268 resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
1269
1270 #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
1271 #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
1272
1273 int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1274 unsigned int command_bits, u32 flags);
1275
1276 #define PCI_IRQ_LEGACY (1 << 0) /* allow legacy interrupts */
1277 #define PCI_IRQ_MSI (1 << 1) /* allow MSI interrupts */
1278 #define PCI_IRQ_MSIX (1 << 2) /* allow MSI-X interrupts */
1279 #define PCI_IRQ_AFFINITY (1 << 3) /* auto-assign affinity */
1280 #define PCI_IRQ_ALL_TYPES \
1281 (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX)
1282
1283 /* kmem_cache style wrapper around pci_alloc_consistent() */
1284
1285 #include <linux/pci-dma.h>
1286 #include <linux/dmapool.h>
1287
1288 #define pci_pool dma_pool
1289 #define pci_pool_create(name, pdev, size, align, allocation) \
1290 dma_pool_create(name, &pdev->dev, size, align, allocation)
1291 #define pci_pool_destroy(pool) dma_pool_destroy(pool)
1292 #define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
1293 #define pci_pool_zalloc(pool, flags, handle) \
1294 dma_pool_zalloc(pool, flags, handle)
1295 #define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
1296
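A sketch of the legacy wrappers in use; they expand directly to the dma_pool calls above, and the "demo" strings are placeholders:

static int demo_pool_roundtrip(struct pci_dev *pdev)
{
	struct pci_pool *pool;
	dma_addr_t dma;
	void *vaddr;

	pool = pci_pool_create("demo", pdev, 64, 8, 0);
	if (!pool)
		return -ENOMEM;

	vaddr = pci_pool_alloc(pool, GFP_KERNEL, &dma);
	if (vaddr)
		pci_pool_free(pool, vaddr, dma);

	pci_pool_destroy(pool);
	return vaddr ? 0 : -ENOMEM;
}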
1297 struct msix_entry {
1298 u32 vector; /* written by the kernel with the allocated vector */
1299 u16 entry; /* entry requested by the driver; OS writes the vector */
1300 };
1301
1302 #ifdef CONFIG_PCI_MSI
1303 int pci_msi_vec_count(struct pci_dev *dev);
1304 void pci_msi_shutdown(struct pci_dev *dev);
1305 void pci_disable_msi(struct pci_dev *dev);
1306 int pci_msix_vec_count(struct pci_dev *dev);
1307 int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec);
1308 void pci_msix_shutdown(struct pci_dev *dev);
1309 void pci_disable_msix(struct pci_dev *dev);
1310 void pci_restore_msi_state(struct pci_dev *dev);
1311 int pci_msi_enabled(void);
1312 int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec);
1313 static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
1314 {
1315 int rc = pci_enable_msi_range(dev, nvec, nvec);
1316 if (rc < 0)
1317 return rc;
1318 return 0;
1319 }
1320 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
1321 int minvec, int maxvec);
1322 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1323 struct msix_entry *entries, int nvec)
1324 {
1325 int rc = pci_enable_msix_range(dev, entries, nvec, nvec);
1326 if (rc < 0)
1327 return rc;
1328 return 0;
1329 }
1330 int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1331 unsigned int max_vecs, unsigned int flags,
1332 const struct irq_affinity *affd);
1333
1334 void pci_free_irq_vectors(struct pci_dev *dev);
1335 int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
1336 const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
1337
1338 #else
1339 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
1340 static inline void pci_msi_shutdown(struct pci_dev *dev) { }
1341 static inline void pci_disable_msi(struct pci_dev *dev) { }
1342 static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
1343 static inline int pci_enable_msix(struct pci_dev *dev,
1344 struct msix_entry *entries, int nvec)
1345 { return -ENOSYS; }
1346 static inline void pci_msix_shutdown(struct pci_dev *dev) { }
1347 static inline void pci_disable_msix(struct pci_dev *dev) { }
1348 static inline void pci_restore_msi_state(struct pci_dev *dev) { }
1349 static inline int pci_msi_enabled(void) { return 0; }
1350 static inline int pci_enable_msi_range(struct pci_dev *dev, int minvec,
1351 int maxvec)
1352 { return -ENOSYS; }
1353 static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
1354 { return -ENOSYS; }
1355 static inline int pci_enable_msix_range(struct pci_dev *dev,
1356 struct msix_entry *entries, int minvec, int maxvec)
1357 { return -ENOSYS; }
1358 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1359 struct msix_entry *entries, int nvec)
1360 { return -ENOSYS; }
1361
1362 static inline int
1363 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1364 unsigned int max_vecs, unsigned int flags,
1365 const struct irq_affinity *aff_desc)
1366 {
1367 if (min_vecs > 1)
1368 return -EINVAL;
1369 return 1;
1370 }
1371
1372 static inline void pci_free_irq_vectors(struct pci_dev *dev)
1373 {
1374 }
1375
1376 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
1377 {
1378 if (WARN_ON_ONCE(nr > 0))
1379 return -EINVAL;
1380 return dev->irq;
1381 }
1382 static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
1383 int vec)
1384 {
1385 return cpu_possible_mask;
1386 }
1387 #endif
1388
1389 static inline int
1390 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1391 unsigned int max_vecs, unsigned int flags)
1392 {
1393 return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags,
1394 NULL);
1395 }
1396
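A hedged sketch of the vector-allocation flow through the wrapper above; the 1..8 range is illustrative:

static int demo_setup_irq(struct pci_dev *pdev)
{
	int nvec;

	/* Accept MSI-X, MSI, or legacy INTx, whichever is available. */
	nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
	if (nvec < 0)
		return nvec;

	/* Linux IRQ number for vector 0, suitable for request_irq(). */
	return pci_irq_vector(pdev, 0);
}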
1397 #ifdef CONFIG_PCIEPORTBUS
1398 extern bool pcie_ports_disabled;
1399 extern bool pcie_ports_auto;
1400 #else
1401 #define pcie_ports_disabled true
1402 #define pcie_ports_auto false
1403 #endif
1404
1405 #ifdef CONFIG_PCIEASPM
1406 bool pcie_aspm_support_enabled(void);
1407 #else
1408 static inline bool pcie_aspm_support_enabled(void) { return false; }
1409 #endif
1410
1411 #ifdef CONFIG_PCIEAER
1412 void pci_no_aer(void);
1413 bool pci_aer_available(void);
1414 int pci_aer_init(struct pci_dev *dev);
1415 #else
1416 static inline void pci_no_aer(void) { }
1417 static inline bool pci_aer_available(void) { return false; }
1418 static inline int pci_aer_init(struct pci_dev *d) { return -ENODEV; }
1419 #endif
1420
1421 #ifdef CONFIG_PCIE_ECRC
1422 void pcie_set_ecrc_checking(struct pci_dev *dev);
1423 void pcie_ecrc_get_policy(char *str);
1424 #else
1425 static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
1426 static inline void pcie_ecrc_get_policy(char *str) { }
1427 #endif
1428
1429 #define pci_enable_msi(pdev) pci_enable_msi_exact(pdev, 1)
1430
1431 #ifdef CONFIG_HT_IRQ
1432 /* The functions a driver should call */
1433 int ht_create_irq(struct pci_dev *dev, int idx);
1434 void ht_destroy_irq(unsigned int irq);
1435 #endif /* CONFIG_HT_IRQ */
1436
1437 #ifdef CONFIG_PCI_ATS
1438 /* Address Translation Service */
1439 void pci_ats_init(struct pci_dev *dev);
1440 int pci_enable_ats(struct pci_dev *dev, int ps);
1441 void pci_disable_ats(struct pci_dev *dev);
1442 int pci_ats_queue_depth(struct pci_dev *dev);
1443 #else
1444 static inline void pci_ats_init(struct pci_dev *d) { }
1445 static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; }
1446 static inline void pci_disable_ats(struct pci_dev *d) { }
1447 static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
1448 #endif
1449
1450 #ifdef CONFIG_PCIE_PTM
1451 int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
1452 #else
1453 static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
1454 { return -EINVAL; }
1455 #endif
1456
1457 void pci_cfg_access_lock(struct pci_dev *dev);
1458 bool pci_cfg_access_trylock(struct pci_dev *dev);
1459 void pci_cfg_access_unlock(struct pci_dev *dev);
1460
1461 /*
1462 * PCI domain support. Sometimes called PCI segment (e.g. by ACPI),
1463 * a PCI domain is defined to be a set of PCI buses which share
1464 * configuration space.
1465 */
1466 #ifdef CONFIG_PCI_DOMAINS
1467 extern int pci_domains_supported;
1468 int pci_get_new_domain_nr(void);
1469 #else
1470 enum { pci_domains_supported = 0 };
1471 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1472 static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
1473 static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
1474 #endif /* CONFIG_PCI_DOMAINS */
1475
1476 /*
1477 * Generic implementation for PCI domain support. If your
1478 * architecture does not need custom management of PCI
1479 * domains, then this implementation will be used.
1480 */
1481 #ifdef CONFIG_PCI_DOMAINS_GENERIC
1482 static inline int pci_domain_nr(struct pci_bus *bus)
1483 {
1484 return bus->domain_nr;
1485 }
1486 #ifdef CONFIG_ACPI
1487 int acpi_pci_bus_find_domain_nr(struct pci_bus *bus);
1488 #else
1489 static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
1490 { return 0; }
1491 #endif
1492 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
1493 #endif
1494
1495 /* some architectures require additional setup to direct VGA traffic */
1496 typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
1497 unsigned int command_bits, u32 flags);
1498 void pci_register_set_vga_state(arch_set_vga_state_t func);
1499
1500 static inline int
1501 pci_request_io_regions(struct pci_dev *pdev, const char *name)
1502 {
1503 return pci_request_selected_regions(pdev,
1504 pci_select_bars(pdev, IORESOURCE_IO), name);
1505 }
1506
1507 static inline void
1508 pci_release_io_regions(struct pci_dev *pdev)
1509 {
1510 return pci_release_selected_regions(pdev,
1511 pci_select_bars(pdev, IORESOURCE_IO));
1512 }
1513
1514 static inline int
1515 pci_request_mem_regions(struct pci_dev *pdev, const char *name)
1516 {
1517 return pci_request_selected_regions(pdev,
1518 pci_select_bars(pdev, IORESOURCE_MEM), name);
1519 }
1520
1521 static inline void
1522 pci_release_mem_regions(struct pci_dev *pdev)
1523 {
1524 return pci_release_selected_regions(pdev,
1525 pci_select_bars(pdev, IORESOURCE_MEM));
1526 }
1527
1528 #else /* CONFIG_PCI is not enabled */
1529
1530 static inline void pci_set_flags(int flags) { }
1531 static inline void pci_add_flags(int flags) { }
1532 static inline void pci_clear_flags(int flags) { }
1533 static inline int pci_has_flag(int flag) { return 0; }
1534
1535 /*
1536 * If the system does not have PCI, clearly these return errors. Define
1537 * these as simple inline functions to avoid hair in drivers.
1538 */
1539
1540 #define _PCI_NOP(o, s, t) \
1541 static inline int pci_##o##_config_##s(struct pci_dev *dev, \
1542 int where, t val) \
1543 { return PCIBIOS_FUNC_NOT_SUPPORTED; }
1544
1545 #define _PCI_NOP_ALL(o, x) _PCI_NOP(o, byte, u8 x) \
1546 _PCI_NOP(o, word, u16 x) \
1547 _PCI_NOP(o, dword, u32 x)
1548 _PCI_NOP_ALL(read, *)
1549 _PCI_NOP_ALL(write,)
1550
1551 static inline struct pci_dev *pci_get_device(unsigned int vendor,
1552 unsigned int device,
1553 struct pci_dev *from)
1554 { return NULL; }
1555
1556 static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
1557 unsigned int device,
1558 unsigned int ss_vendor,
1559 unsigned int ss_device,
1560 struct pci_dev *from)
1561 { return NULL; }
1562
1563 static inline struct pci_dev *pci_get_class(unsigned int class,
1564 struct pci_dev *from)
1565 { return NULL; }
1566
1567 #define pci_dev_present(ids) (0)
1568 #define no_pci_devices() (1)
1569 #define pci_dev_put(dev) do { } while (0)
1570
1571 static inline void pci_set_master(struct pci_dev *dev) { }
1572 static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
1573 static inline void pci_disable_device(struct pci_dev *dev) { }
1574 static inline int pci_assign_resource(struct pci_dev *dev, int i)
1575 { return -EBUSY; }
1576 static inline int __pci_register_driver(struct pci_driver *drv,
1577 struct module *owner)
1578 { return 0; }
1579 static inline int pci_register_driver(struct pci_driver *drv)
1580 { return 0; }
1581 static inline void pci_unregister_driver(struct pci_driver *drv) { }
1582 static inline int pci_find_capability(struct pci_dev *dev, int cap)
1583 { return 0; }
1584 static inline int pci_find_next_capability(struct pci_dev *dev, u8 post,
1585 int cap)
1586 { return 0; }
1587 static inline int pci_find_ext_capability(struct pci_dev *dev, int cap)
1588 { return 0; }
1589
1590 /* Power management related routines */
1591 static inline int pci_save_state(struct pci_dev *dev) { return 0; }
1592 static inline void pci_restore_state(struct pci_dev *dev) { }
1593 static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
1594 { return 0; }
1595 static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1596 { return 0; }
1597 static inline pci_power_t pci_choose_state(struct pci_dev *dev,
1598 pm_message_t state)
1599 { return PCI_D0; }
1600 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1601 int enable)
1602 { return 0; }
1603
1604 static inline struct resource *pci_find_resource(struct pci_dev *dev,
1605 struct resource *res)
1606 { return NULL; }
1607 static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
1608 { return -EIO; }
1609 static inline void pci_release_regions(struct pci_dev *dev) { }
1610
1611 static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
1612
1613 static inline void pci_block_cfg_access(struct pci_dev *dev) { }
1614 static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev)
1615 { return 0; }
1616 static inline void pci_unblock_cfg_access(struct pci_dev *dev) { }
1617
1618 static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
1619 { return NULL; }
1620 static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
1621 unsigned int devfn)
1622 { return NULL; }
1623 static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
1624 unsigned int devfn)
1625 { return NULL; }
1626
1627 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1628 static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
1629 static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
1630
1631 #define dev_is_pci(d) (false)
1632 #define dev_is_pf(d) (false)
1633 #define dev_num_vf(d) (0)
1634 #endif /* CONFIG_PCI */
1635
1636 /* Include architecture-dependent settings and functions */
1637
1638 #include <asm/pci.h>
1639
1640 #ifndef pci_root_bus_fwnode
1641 #define pci_root_bus_fwnode(bus) NULL
1642 #endif
1643
1644 /* these helpers provide future and backwards compatibility
1645 * for accessing popular PCI BAR info */
1646 #define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
1647 #define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end)
1648 #define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags)
1649 #define pci_resource_len(dev,bar) \
1650 ((pci_resource_start((dev), (bar)) == 0 && \
1651 pci_resource_end((dev), (bar)) == \
1652 pci_resource_start((dev), (bar))) ? 0 : \
1653 \
1654 (pci_resource_end((dev), (bar)) - \
1655 pci_resource_start((dev), (bar)) + 1))
1656
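A sketch tying the BAR accessors to pci_ioremap_bar(), declared earlier in this header; the function name is hypothetical:

static void __iomem *demo_map_bar0(struct pci_dev *pdev)
{
	/* Only memory BARs can be ioremapped. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM))
		return NULL;

	/* Maps the full pci_resource_len(pdev, 0) bytes of BAR 0. */
	return pci_ioremap_bar(pdev, 0);
}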
1657 /* Similar to the helpers above, these manipulate per-pci_dev
1658 * driver-specific data. They are really just wrappers around
1659 * the corresponding generic device structure functions.
1660 */
1661 static inline void *pci_get_drvdata(struct pci_dev *pdev)
1662 {
1663 return dev_get_drvdata(&pdev->dev);
1664 }
1665
1666 static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
1667 {
1668 dev_set_drvdata(&pdev->dev, data);
1669 }
1670
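A sketch of the drvdata helpers storing a hypothetical per-device context; kzalloc() comes from <linux/slab.h>:

struct demo_ctx {
	int id;		/* hypothetical driver state */
};

static int demo_attach_ctx(struct pci_dev *pdev)
{
	struct demo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	pci_set_drvdata(pdev, ctx);	/* retrieve later via pci_get_drvdata() */
	return 0;
}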
1671 /* If you want to know what to call your pci_dev, ask this function.
1672 * Again, it's a wrapper around the generic device.
1673 */
1674 static inline const char *pci_name(const struct pci_dev *pdev)
1675 {
1676 return dev_name(&pdev->dev);
1677 }
1678
1679
1680 /* Some archs don't want to expose struct resource to userland as-is
1681 * in sysfs and /proc
1682 */
1683 #ifdef HAVE_ARCH_PCI_RESOURCE_TO_USER
1684 void pci_resource_to_user(const struct pci_dev *dev, int bar,
1685 const struct resource *rsrc,
1686 resource_size_t *start, resource_size_t *end);
1687 #else
1688 static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
1689 const struct resource *rsrc, resource_size_t *start,
1690 resource_size_t *end)
1691 {
1692 *start = rsrc->start;
1693 *end = rsrc->end;
1694 }
1695 #endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */
1696
1697
1698 /*
1699 * The world is not perfect and supplies us with broken PCI devices.
1700 * For at least a part of these bugs we need a work-around, so both
1701 * generic (drivers/pci/quirks.c) and per-architecture code can define
1702 * fixup hooks to be called for particular buggy devices.
1703 */
1704
1705 struct pci_fixup {
1706 u16 vendor; /* You can use PCI_ANY_ID here of course */
1707 u16 device; /* You can use PCI_ANY_ID here of course */
1708 u32 class; /* You can use PCI_ANY_ID here too */
1709 unsigned int class_shift; /* should be 0, 8, 16 */
1710 void (*hook)(struct pci_dev *dev);
1711 };
1712
1713 enum pci_fixup_pass {
1714 pci_fixup_early, /* Before probing BARs */
1715 pci_fixup_header, /* After reading configuration header */
1716 pci_fixup_final, /* Final phase of device fixups */
1717 pci_fixup_enable, /* pci_enable_device() time */
1718 pci_fixup_resume, /* pci_device_resume() */
1719 pci_fixup_suspend, /* pci_device_suspend() */
1720 pci_fixup_resume_early, /* pci_device_resume_early() */
1721 pci_fixup_suspend_late, /* pci_device_suspend_late() */
1722 };
1723
1724 /* Anonymous variables would be nice... */
1725 #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \
1726 class_shift, hook) \
1727 static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \
1728 __attribute__((__section__(#section), aligned((sizeof(void *))))) \
1729 = { vendor, device, class, class_shift, hook };
1730
1731 #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \
1732 class_shift, hook) \
1733 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
1734 hook, vendor, device, class, class_shift, hook)
1735 #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \
1736 class_shift, hook) \
1737 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
1738 hook, vendor, device, class, class_shift, hook)
1739 #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \
1740 class_shift, hook) \
1741 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
1742 hook, vendor, device, class, class_shift, hook)
1743 #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \
1744 class_shift, hook) \
1745 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
1746 hook, vendor, device, class, class_shift, hook)
1747 #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \
1748 class_shift, hook) \
1749 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
1750 resume##hook, vendor, device, class, \
1751 class_shift, hook)
1752 #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \
1753 class_shift, hook) \
1754 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
1755 resume_early##hook, vendor, device, \
1756 class, class_shift, hook)
1757 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \
1758 class_shift, hook) \
1759 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
1760 suspend##hook, vendor, device, class, \
1761 class_shift, hook)
1762 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \
1763 class_shift, hook) \
1764 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
1765 suspend_late##hook, vendor, device, \
1766 class, class_shift, hook)
1767
1768 #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
1769 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
1770 hook, vendor, device, PCI_ANY_ID, 0, hook)
1771 #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \
1772 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
1773 hook, vendor, device, PCI_ANY_ID, 0, hook)
1774 #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \
1775 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
1776 hook, vendor, device, PCI_ANY_ID, 0, hook)
1777 #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \
1778 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
1779 hook, vendor, device, PCI_ANY_ID, 0, hook)
1780 #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
1781 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
1782 resume##hook, vendor, device, \
1783 PCI_ANY_ID, 0, hook)
1784 #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
1785 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
1786 resume_early##hook, vendor, device, \
1787 PCI_ANY_ID, 0, hook)
1788 #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
1789 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
1790 suspend##hook, vendor, device, \
1791 PCI_ANY_ID, 0, hook)
1792 #define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \
1793 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
1794 suspend_late##hook, vendor, device, \
1795 PCI_ANY_ID, 0, hook)
1796
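A sketch of a quirk registered through these macros; the vendor/device pair is a placeholder:

static void demo_quirk(struct pci_dev *dev)
{
	dev_info(&dev->dev, "applying demo fixup\n");
}
/* Runs in the pci_fixup_early pass, before BARs are probed. */
DECLARE_PCI_FIXUP_EARLY(0x8086, 0x1234, demo_quirk);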
1797 #ifdef CONFIG_PCI_QUIRKS
1798 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
1799 int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
1800 int pci_dev_specific_enable_acs(struct pci_dev *dev);
1801 #else
1802 static inline void pci_fixup_device(enum pci_fixup_pass pass,
1803 struct pci_dev *dev) { }
1804 static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
1805 u16 acs_flags)
1806 {
1807 return -ENOTTY;
1808 }
1809 static inline int pci_dev_specific_enable_acs(struct pci_dev *dev)
1810 {
1811 return -ENOTTY;
1812 }
1813 #endif
1814
1815 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
1816 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
1817 void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
1818 int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
1819 int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
1820 const char *name);
1821 void pcim_iounmap_regions(struct pci_dev *pdev, int mask);
1822
1823 extern int pci_pci_problems;
1824 #define PCIPCI_FAIL 1 /* No PCI PCI DMA */
1825 #define PCIPCI_TRITON 2
1826 #define PCIPCI_NATOMA 4
1827 #define PCIPCI_VIAETBF 8
1828 #define PCIPCI_VSFX 16
1829 #define PCIPCI_ALIMAGIK 32 /* Need low latency setting */
1830 #define PCIAGP_FAIL 64 /* No PCI to AGP DMA */
1831
1832 extern unsigned long pci_cardbus_io_size;
1833 extern unsigned long pci_cardbus_mem_size;
1834 extern u8 pci_dfl_cache_line_size;
1835 extern u8 pci_cache_line_size;
1836
1837 extern unsigned long pci_hotplug_io_size;
1838 extern unsigned long pci_hotplug_mem_size;
1839 extern unsigned long pci_hotplug_bus_size;
1840
1841 /* Architecture-specific versions may override these (weak) */
1842 void pcibios_disable_device(struct pci_dev *dev);
1843 void pcibios_set_master(struct pci_dev *dev);
1844 int pcibios_set_pcie_reset_state(struct pci_dev *dev,
1845 enum pcie_reset_state state);
1846 int pcibios_add_device(struct pci_dev *dev);
1847 void pcibios_release_device(struct pci_dev *dev);
1848 void pcibios_penalize_isa_irq(int irq, int active);
1849 int pcibios_alloc_irq(struct pci_dev *dev);
1850 void pcibios_free_irq(struct pci_dev *dev);
1851
1852 #ifdef CONFIG_HIBERNATE_CALLBACKS
1853 extern struct dev_pm_ops pcibios_pm_ops;
1854 #endif
1855
1856 #if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
1857 void __init pci_mmcfg_early_init(void);
1858 void __init pci_mmcfg_late_init(void);
1859 #else
1860 static inline void pci_mmcfg_early_init(void) { }
1861 static inline void pci_mmcfg_late_init(void) { }
1862 #endif
1863
1864 int pci_ext_cfg_avail(void);
1865
1866 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
1867 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
1868
1869 #ifdef CONFIG_PCI_IOV
1870 int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
1871 int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
1872
1873 int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
1874 void pci_disable_sriov(struct pci_dev *dev);
1875 int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset);
1876 void pci_iov_remove_virtfn(struct pci_dev *dev, int id, int reset);
1877 int pci_num_vf(struct pci_dev *dev);
1878 int pci_vfs_assigned(struct pci_dev *dev);
1879 int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
1880 int pci_sriov_get_totalvfs(struct pci_dev *dev);
1881 resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
1882 #else
1883 static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
1884 {
1885 return -ENOSYS;
1886 }
1887 static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
1888 {
1889 return -ENOSYS;
1890 }
1891 static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
1892 { return -ENODEV; }
1893 static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
1894 {
1895 return -ENOSYS;
1896 }
1897 static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
1898 int id, int reset) { }
1899 static inline void pci_disable_sriov(struct pci_dev *dev) { }
1900 static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
1901 static inline int pci_vfs_assigned(struct pci_dev *dev)
1902 { return 0; }
1903 static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
1904 { return 0; }
1905 static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
1906 { return 0; }
1907 static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
1908 { return 0; }
1909 #endif
1910
1911 #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
1912 void pci_hp_create_module_link(struct pci_slot *pci_slot);
1913 void pci_hp_remove_module_link(struct pci_slot *pci_slot);
1914 #endif
1915
1916 /**
1917 * pci_pcie_cap - get the saved PCIe capability offset
1918 * @dev: PCI device
1919 *
1920 * The PCIe capability offset is calculated at PCI device initialization
1921 * time and saved in the data structure. This function returns the saved
1922 * offset. Using it instead of pci_find_capability() avoids an
1923 * unnecessary search of the PCI configuration space. If you need to
1924 * calculate the PCIe capability offset from the raw device for some
1925 * reason, use pci_find_capability() instead.
1926 */
1927 static inline int pci_pcie_cap(struct pci_dev *dev)
1928 {
1929 return dev->pcie_cap;
1930 }
1931
1932 /**
1933 * pci_is_pcie - check if the PCI device is PCI Express capable
1934 * @dev: PCI device
1935 *
1936 * Returns: true if the PCI device is PCI Express capable, false otherwise.
1937 */
1938 static inline bool pci_is_pcie(struct pci_dev *dev)
1939 {
1940 return pci_pcie_cap(dev);
1941 }
1942
1943 /**
1944 * pcie_caps_reg - get the PCIe Capabilities Register
1945 * @dev: PCI device
1946 */
1947 static inline u16 pcie_caps_reg(const struct pci_dev *dev)
1948 {
1949 return dev->pcie_flags_reg;
1950 }
1951
1952 /**
1953 * pci_pcie_type - get the PCIe device/port type
1954 * @dev: PCI device
1955 */
1956 static inline int pci_pcie_type(const struct pci_dev *dev)
1957 {
1958 return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
1959 }
1960
1961 static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
1962 {
1963 while (1) {
1964 if (!pci_is_pcie(dev))
1965 break;
1966 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
1967 return dev;
1968 if (!dev->bus->self)
1969 break;
1970 dev = dev->bus->self;
1971 }
1972 return NULL;
1973 }
1974
1975 void pci_request_acs(void);
1976 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
1977 bool pci_acs_path_enabled(struct pci_dev *start,
1978 struct pci_dev *end, u16 acs_flags);
1979
1980 #define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */
1981 #define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT)
1982
1983 /* Large Resource Data Type Tag Item Names */
1984 #define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */
1985 #define PCI_VPD_LTIN_RO_DATA 0x10 /* Read-Only Data */
1986 #define PCI_VPD_LTIN_RW_DATA 0x11 /* Read-Write Data */
1987
1988 #define PCI_VPD_LRDT_ID_STRING PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING)
1989 #define PCI_VPD_LRDT_RO_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
1990 #define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
1991
1992 /* Small Resource Data Type Tag Item Names */
1993 #define PCI_VPD_STIN_END 0x0f /* End */
1994
1995 #define PCI_VPD_SRDT_END (PCI_VPD_STIN_END << 3)
1996
1997 #define PCI_VPD_SRDT_TIN_MASK 0x78
1998 #define PCI_VPD_SRDT_LEN_MASK 0x07
1999 #define PCI_VPD_LRDT_TIN_MASK 0x7f
2000
2001 #define PCI_VPD_LRDT_TAG_SIZE 3
2002 #define PCI_VPD_SRDT_TAG_SIZE 1
2003
2004 #define PCI_VPD_INFO_FLD_HDR_SIZE 3
2005
2006 #define PCI_VPD_RO_KEYWORD_PARTNO "PN"
2007 #define PCI_VPD_RO_KEYWORD_MFR_ID "MN"
2008 #define PCI_VPD_RO_KEYWORD_VENDOR0 "V0"
2009 #define PCI_VPD_RO_KEYWORD_CHKSUM "RV"
2010
2011 /**
2012 * pci_vpd_lrdt_size - Extracts the Large Resource Data Type length
2013 * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
2014 *
2015 * Returns the extracted Large Resource Data Type length.
2016 */
2017 static inline u16 pci_vpd_lrdt_size(const u8 *lrdt)
2018 {
2019 return (u16)lrdt[1] + ((u16)lrdt[2] << 8);
2020 }
2021
2022 /**
2023 * pci_vpd_lrdt_tag - Extracts the Large Resource Data Type Tag Item
2024 * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
2025 *
2026 * Returns the extracted Large Resource Data Type Tag item.
2027 */
2028 static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt)
2029 {
2030 return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK);
2031 }
2032
2033 /**
2034 * pci_vpd_srdt_size - Extracts the Small Resource Data Type length
2035 * @srdt: Pointer to the beginning of the Small Resource Data Type tag
2036 *
2037 * Returns the extracted Small Resource Data Type length.
2038 */
2039 static inline u8 pci_vpd_srdt_size(const u8 *srdt)
2040 {
2041 return (*srdt) & PCI_VPD_SRDT_LEN_MASK;
2042 }
2043
2044 /**
2045 * pci_vpd_srdt_tag - Extracts the Small Resource Data Type Tag Item
2046 * @srdt: Pointer to the beginning of the Small Resource Data Type tag
2047 *
2048 * Returns the extracted Small Resource Data Type Tag Item.
2049 */
2050 static inline u8 pci_vpd_srdt_tag(const u8 *srdt)
2051 {
2052 return ((*srdt) & PCI_VPD_SRDT_TIN_MASK) >> 3;
2053 }
2054
2055 /**
2056 * pci_vpd_info_field_size - Extracts the information field length
2057 * @info_field: Pointer to the beginning of an information field header
2058 *
2059 * Returns the extracted information field length.
2060 */
2061 static inline u8 pci_vpd_info_field_size(const u8 *info_field)
2062 {
2063 return info_field[2];
2064 }
2065
2066 /**
2067 * pci_vpd_find_tag - Locates the Resource Data Type tag provided
2068 * @buf: Pointer to buffered vpd data
2069 * @off: The offset into the buffer at which to begin the search
2070 * @len: The length of the vpd buffer
2071 * @rdt: The Resource Data Type to search for
2072 *
2073 * Returns the index where the Resource Data Type was found or
2074 * -ENOENT otherwise.
2075 */
2076 int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt);
2077
2078 /**
2079 * pci_vpd_find_info_keyword - Locates an information field keyword in the VPD
2080 * @buf: Pointer to buffered vpd data
2081 * @off: The offset into the buffer at which to begin the search
2082 * @len: The length of the buffer area, relative to off, in which to search
2083 * @kw: The keyword to search for
2084 *
2085 * Returns the index where the information field keyword was found or
2086 * -ENOENT otherwise.
2087 */
2088 int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
2089 unsigned int len, const char *kw);
2090
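A hedged sketch chaining the two lookups to locate the read-only Part Number field in a buffer previously filled by pci_read_vpd():

static int demo_find_partno(const u8 *buf, unsigned int len)
{
	int ro, kw;

	ro = pci_vpd_find_tag(buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (ro < 0)
		return ro;	/* no read-only section found */

	kw = pci_vpd_find_info_keyword(buf, ro + PCI_VPD_LRDT_TAG_SIZE,
				       pci_vpd_lrdt_size(&buf[ro]),
				       PCI_VPD_RO_KEYWORD_PARTNO);
	return kw;	/* index of the "PN" field header, or -ENOENT */
}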
2091 /* PCI <-> OF binding helpers */
2092 #ifdef CONFIG_OF
2093 struct device_node;
2094 struct irq_domain;
2095 void pci_set_of_node(struct pci_dev *dev);
2096 void pci_release_of_node(struct pci_dev *dev);
2097 void pci_set_bus_of_node(struct pci_bus *bus);
2098 void pci_release_bus_of_node(struct pci_bus *bus);
2099 struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
2100
2101 /* Arch may override this (weak) */
2102 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
2103
2104 static inline struct device_node *
2105 pci_device_to_OF_node(const struct pci_dev *pdev)
2106 {
2107 return pdev ? pdev->dev.of_node : NULL;
2108 }
2109
2110 static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
2111 {
2112 return bus ? bus->dev.of_node : NULL;
2113 }
2114
2115 #else /* CONFIG_OF */
2116 static inline void pci_set_of_node(struct pci_dev *dev) { }
2117 static inline void pci_release_of_node(struct pci_dev *dev) { }
2118 static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
2119 static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
2120 static inline struct device_node *
2121 pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; }
2122 static inline struct irq_domain *
2123 pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
2124 #endif /* CONFIG_OF */
2125
2126 #ifdef CONFIG_ACPI
2127 struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);
2128
2129 void
2130 pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *));
2131 #else
2132 static inline struct irq_domain *
2133 pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
2134 #endif
2135
2136 #ifdef CONFIG_EEH
2137 static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
2138 {
2139 return pdev->dev.archdata.edev;
2140 }
2141 #endif
2142
2143 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn);
2144 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
2145 int pci_for_each_dma_alias(struct pci_dev *pdev,
2146 int (*fn)(struct pci_dev *pdev,
2147 u16 alias, void *data), void *data);
2148
2149 /* helper functions for operation of device flag */
2150 static inline void pci_set_dev_assigned(struct pci_dev *pdev)
2151 {
2152 pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
2153 }
2154 static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
2155 {
2156 pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
2157 }
2158 static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
2159 {
2160 return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
2161 }
2162
2163 /**
2164 * pci_ari_enabled - query ARI forwarding status
2165 * @bus: the PCI bus
2166 *
2167 * Returns true if ARI forwarding is enabled.
2168 */
2169 static inline bool pci_ari_enabled(struct pci_bus *bus)
2170 {
2171 return bus->self && bus->self->ari_enabled;
2172 }
2173
2174 /* provide the legacy pci_dma_* API */
2175 #include <linux/pci-dma-compat.h>
2176
2177 #endif /* LINUX_PCI_H */
1 /*
2 * Definitions for the 'struct sk_buff' memory handlers.
3 *
4 * Authors:
5 * Alan Cox, <gw4pts@gw4pts.ampr.org>
6 * Florian La Roche, <rzsfl@rz.uni-sb.de>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14 #ifndef _LINUX_SKBUFF_H
15 #define _LINUX_SKBUFF_H
16
17 #include <linux/kernel.h>
18 #include <linux/kmemcheck.h>
19 #include <linux/compiler.h>
20 #include <linux/time.h>
21 #include <linux/bug.h>
22 #include <linux/cache.h>
23 #include <linux/rbtree.h>
24 #include <linux/socket.h>
25
26 #include <linux/atomic.h>
27 #include <asm/types.h>
28 #include <linux/spinlock.h>
29 #include <linux/net.h>
30 #include <linux/textsearch.h>
31 #include <net/checksum.h>
32 #include <linux/rcupdate.h>
33 #include <linux/hrtimer.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/netdev_features.h>
36 #include <linux/sched.h>
37 #include <net/flow_dissector.h>
38 #include <linux/splice.h>
39 #include <linux/in6.h>
40 #include <linux/if_packet.h>
41 #include <net/flow.h>
42
43 /* The interface for checksum offload between the stack and networking drivers
44 * is as follows...
45 *
46 * A. IP checksum related features
47 *
48 * Drivers advertise checksum offload capabilities in the features of a device.
49 * From the stack's point of view these are capabilities offered by the driver;
50 * a driver typically only advertises features that it is capable of offloading
51 * to its device.
52 *
53 * The checksum related features are:
54 *
55 * NETIF_F_HW_CSUM - The driver (or its device) is able to compute one
56 * IP (one's complement) checksum for any combination
57 * of protocols or protocol layering. The checksum is
58 * computed and set in a packet per the CHECKSUM_PARTIAL
59 * interface (see below).
60 *
61 * NETIF_F_IP_CSUM - Driver (device) is only able to checksum plain
62 * TCP or UDP packets over IPv4. These are specifically
63 * unencapsulated packets of the form IPv4|TCP or
64 * IPv4|UDP where the Protocol field in the IPv4 header
65 * is TCP or UDP. The IPv4 header may contain IP options.
66 * This feature cannot be set in features for a device
67 * with NETIF_F_HW_CSUM also set. This feature is being
68 * DEPRECATED (see below).
69 *
70 * NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain
71 * TCP or UDP packets over IPv6. These are specifically
72 * unencapsulated packets of the form IPv6|TCP or
73 * IPv6|UDP where the Next Header field in the IPv6
74 * header is either TCP or UDP. IPv6 extension headers
75 * are not supported with this feature. This feature
76 * cannot be set in features for a device with
77 * NETIF_F_HW_CSUM also set. This feature is being
78 * DEPRECATED (see below).
79 *
80 * NETIF_F_RXCSUM - Driver (device) performs receive checksum offload.
81 * This flag is used only to disable the RX checksum
82 * feature for a device. The stack will accept receive
83 * checksum indication in packets received on a device
84 * regardless of whether NETIF_F_RXCSUM is set.
85 *
86 * B. Checksumming of received packets by device. Indication of checksum
87 * verification is set in skb->ip_summed. Possible values are:
88 *
89 * CHECKSUM_NONE:
90 *
91 * Device did not checksum this packet, e.g. due to lack of capabilities.
92 * The packet carries a full (though not verified) checksum in the packet
93 * data but not in skb->csum. Thus, skb->csum is undefined in this case.
94 *
95 * CHECKSUM_UNNECESSARY:
96 *
97 * The hardware you're dealing with doesn't calculate the full checksum
98 * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
99 * for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY
100 * if their checksums are okay. skb->csum is still undefined in this case
101 * though. A driver or device must never modify the checksum field in the
102 * packet even if checksum is verified.
103 *
104 * CHECKSUM_UNNECESSARY is applicable to following protocols:
105 * TCP: IPv6 and IPv4.
106 * UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
107 * zero UDP checksum for either IPv4 or IPv6; the networking stack
108 * may perform further validation in this case.
109 * GRE: only if the checksum is present in the header.
110 * SCTP: indicates the CRC in SCTP header has been validated.
111 *
112 * skb->csum_level indicates the number of consecutive checksums found in
113 * the packet, minus one, that have been verified as CHECKSUM_UNNECESSARY.
114 * For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
115 * and a device is able to verify the checksums for UDP (possibly zero),
116 * GRE (checksum flag is set), and TCP-- skb->csum_level would be set to
117 * two. If the device were only able to verify the UDP checksum and not
118 * GRE, either because it doesn't support GRE checksum or because GRE
119 * checksum is bad, skb->csum_level would be set to zero (TCP checksum is
120 * not considered in this case).
121 *
122 * CHECKSUM_COMPLETE:
123 *
124 * This is the most generic way. The device supplied checksum of the _whole_
125 * packet as seen by netif_rx() and fills it in skb->csum. Meaning, the
126 * hardware doesn't need to parse L3/L4 headers to implement this.
127 *
128 * Note: Even if a device supports only some protocols but is able to produce
129 * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
130 *
131 * CHECKSUM_PARTIAL:
132 *
133 * A checksum is set up to be offloaded to a device as described in the
134 * output description for CHECKSUM_PARTIAL. This may occur on a packet
135 * received directly from another Linux OS, e.g., a virtualized Linux kernel
136 * on the same host, or it may be set in the input path in GRO or remote
137 * checksum offload. For the purposes of checksum verification, the checksum
138 * referred to by skb->csum_start + skb->csum_offset and any preceding
139 * checksums in the packet are considered verified. Any checksums in the
140 * packet that are after the checksum being offloaded are not considered to
141 * be verified.
142 *
143 * C. Checksumming on transmit for non-GSO. The stack requests checksum offload
144 * in the skb->ip_summed for a packet. Values are:
145 *
146 * CHECKSUM_PARTIAL:
147 *
148 * The driver is required to checksum the packet as seen by hard_start_xmit()
149 * from skb->csum_start up to the end, and to record/write the checksum at
150 * offset skb->csum_start + skb->csum_offset. A driver may verify that the
151 * csum_start and csum_offset values are valid given the length and
152 * offset of the packet; however, it should not attempt to validate that the
153 * checksum refers to a legitimate transport layer checksum-- it is the
154 * purview of the stack to validate that csum_start and csum_offset are set
155 * correctly.
156 *
157 * When the stack requests checksum offload for a packet, the driver MUST
158 * ensure that the checksum is set correctly. A driver can either offload the
159 * checksum calculation to the device, or call skb_checksum_help (in the case
160 * that the device does not support offload for a particular checksum).
161 *
162 * NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of
163 * NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate
164 * checksum offload capability. If a device has limited checksum capabilities
165 * (for instance can only perform NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM as
166 * described above) a helper function can be called to resolve
167 * CHECKSUM_PARTIAL. The helper functions are skb_csum_off_chk*. The helper
168 * function takes a spec argument that describes the protocol layer that is
169 * supported for checksum offload and can be called for each packet. If a
170 * packet does not match the specification for offload, skb_checksum_help
171 * is called to resolve the checksum.
172 *
173 * CHECKSUM_NONE:
174 *
175 * The skb was already checksummed by the protocol, or a checksum is not
176 * required.
177 *
178 * CHECKSUM_UNNECESSARY:
179 *
180 * This has the same meaning as CHECKSUM_NONE for checksum offload on
181 * output.
182 *
183 * CHECKSUM_COMPLETE:
184 * Not used in checksum output. If a driver observes a packet with this value
185 * set in skbuff, it should treat it as CHECKSUM_NONE being set.
186 *
187 * D. Non-IP checksum (CRC) offloads
188 *
189 * NETIF_F_SCTP_CRC - This feature indicates that a device is capable of
190 * offloading the SCTP CRC in a packet. To perform this offload the stack
191 * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
192 * accordingly. Note that there is no indication in the skbuff that the
193 * CHECKSUM_PARTIAL refers to an SCTP checksum; a driver that supports
194 * both IP checksum offload and SCTP CRC offload must verify which offload
195 * is configured for a packet presumably by inspecting packet headers.
196 *
197 * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of
198 * offloading the FCOE CRC in a packet. To perform this offload the stack
199 * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
200 * accordingly. Note that there is no indication in the skbuff that the
201 * CHECKSUM_PARTIAL refers to an FCOE checksum; a driver that supports
202 * both IP checksum offload and FCOE CRC offload must verify which offload
203 * is configured for a packet presumably by inspecting packet headers.
204 *
205 * E. Checksumming on output with GSO.
206 *
207 * In the case of a GSO packet (skb_is_gso(skb) is true), checksum offload
208 * is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the
209 * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as
210 * part of the GSO operation is implied. If a checksum is being offloaded
211 * with GSO then ip_summed is CHECKSUM_PARTIAL, csum_start and csum_offset
212 * are set to refer to the outermost checksum being offloaded (two offloaded
213 * checksums are possible with UDP encapsulation).
214 */
215
216 /* Don't change this without changing skb_csum_unnecessary! */
217 #define CHECKSUM_NONE 0
218 #define CHECKSUM_UNNECESSARY 1
219 #define CHECKSUM_COMPLETE 2
220 #define CHECKSUM_PARTIAL 3
221
222 /* Maximum value in skb->csum_level */
223 #define SKB_MAX_CSUM_LEVEL 3
224
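As a sketch of the transmit rules in section C above: a driver that cannot offload a given CHECKSUM_PARTIAL packet is expected to fall back to software via skb_checksum_help(), assumed here to be the declaration from <linux/netdevice.h>:

static int demo_tx_csum(struct sk_buff *skb, bool hw_can_offload)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL || hw_can_offload)
		return 0;

	/* Resolve the checksum in software when the device cannot. */
	return skb_checksum_help(skb);
}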
225 #define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES)
226 #define SKB_WITH_OVERHEAD(X) \
227 ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
228 #define SKB_MAX_ORDER(X, ORDER) \
229 SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
230 #define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
231 #define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2))
232
233 /* return minimum truesize of one skb containing X bytes of data */
234 #define SKB_TRUESIZE(X) ((X) + \
235 SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \
236 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
237
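For instance, the truesize floor of a 1500-byte frame under these macros:

/* Data bytes plus the sk_buff and skb_shared_info overheads, each
 * aligned to SMP_CACHE_BYTES by SKB_DATA_ALIGN(). */
static unsigned int demo_mtu_truesize(void)
{
	return SKB_TRUESIZE(1500);
}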
238 struct net_device;
239 struct scatterlist;
240 struct pipe_inode_info;
241 struct iov_iter;
242 struct napi_struct;
243
244 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
245 struct nf_conntrack {
246 atomic_t use;
247 };
248 #endif
249
250 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
251 struct nf_bridge_info {
252 atomic_t use;
253 enum {
254 BRNF_PROTO_UNCHANGED,
255 BRNF_PROTO_8021Q,
256 BRNF_PROTO_PPPOE
257 } orig_proto:8;
258 u8 pkt_otherhost:1;
259 u8 in_prerouting:1;
260 u8 bridged_dnat:1;
261 __u16 frag_max_size;
262 struct net_device *physindev;
263
264 /* always valid & non-NULL from FORWARD on, for physdev match */
265 struct net_device *physoutdev;
266 union {
267 /* prerouting: detect dnat in orig/reply direction */
268 __be32 ipv4_daddr;
269 struct in6_addr ipv6_daddr;
270
271 /* after prerouting + nat detected: store original source
272 * mac since neigh resolution overwrites it, only used while
273 * skb is out in neigh layer.
274 */
275 char neigh_header[8];
276 };
277 };
278 #endif
279
280 struct sk_buff_head {
281 /* These two members must be first. */
282 struct sk_buff *next;
283 struct sk_buff *prev;
284
285 __u32 qlen;
286 spinlock_t lock;
287 };
288
289 struct sk_buff;
290
291 /* To allow a 64K frame to be packed as a single skb without frag_list we
292 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
293 * buffers which do not start on a page boundary.
294 *
295 * Since GRO uses frags we allocate at least 16 regardless of page
296 * size.
297 */
298 #if (65536/PAGE_SIZE + 1) < 16
299 #define MAX_SKB_FRAGS 16UL
300 #else
301 #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
302 #endif
303 extern int sysctl_max_skb_frags;
304
305 /* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
306 * segment using its current segmentation instead.
307 */
308 #define GSO_BY_FRAGS 0xFFFF
309
310 typedef struct skb_frag_struct skb_frag_t;
311
312 struct skb_frag_struct {
313 struct {
314 struct page *p;
315 } page;
316 #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
317 __u32 page_offset;
318 __u32 size;
319 #else
320 __u16 page_offset;
321 __u16 size;
322 #endif
323 };
324
325 static inline unsigned int skb_frag_size(const skb_frag_t *frag)
326 {
327 return frag->size;
328 }
329
330 static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
331 {
332 frag->size = size;
333 }
334
335 static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
336 {
337 frag->size += delta;
338 }
339
340 static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
341 {
342 frag->size -= delta;
343 }
344
345 #define HAVE_HW_TIME_STAMP
346
347 /**
348 * struct skb_shared_hwtstamps - hardware time stamps
349 * @hwtstamp: hardware time stamp transformed into duration
350 * since arbitrary point in time
351 *
352 * Software time stamps generated by ktime_get_real() are stored in
353 * skb->tstamp.
354 *
355 * hwtstamps can only be compared against other hwtstamps from
356 * the same device.
357 *
358 * This structure is attached to packets as part of the
359 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
360 */
361 struct skb_shared_hwtstamps {
362 ktime_t hwtstamp;
363 };
364
365 /* Definitions for tx_flags in struct skb_shared_info */
366 enum {
367 /* generate hardware time stamp */
368 SKBTX_HW_TSTAMP = 1 << 0,
369
370 /* generate software time stamp when queueing packet to NIC */
371 SKBTX_SW_TSTAMP = 1 << 1,
372
373 /* device driver is going to provide hardware time stamp */
374 SKBTX_IN_PROGRESS = 1 << 2,
375
376 /* device driver supports TX zero-copy buffers */
377 SKBTX_DEV_ZEROCOPY = 1 << 3,
378
379 /* generate wifi status information (where possible) */
380 SKBTX_WIFI_STATUS = 1 << 4,
381
382 /* This indicates at least one fragment might be overwritten
383 * (as in vmsplice(), sendfile() ...).
384 * If we need to compute a TX checksum, we'll need to copy
385 * all frags to avoid possible bad checksum
386 */
387 SKBTX_SHARED_FRAG = 1 << 5,
388
389 /* generate software time stamp when entering packet scheduling */
390 SKBTX_SCHED_TSTAMP = 1 << 6,
391 };
392
393 #define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \
394 SKBTX_SCHED_TSTAMP)
395 #define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)
396
397 /*
398 * The callback notifies userspace to release buffers when skb DMA is done in
399 * the lower device; the skb's last reference should be 0 when calling this.
400 * The zerocopy_success argument is true if zero copy transmit occurred,
401 * false on data copy or out of memory error caused by data copy attempt.
402 * The ctx field is used to track device context.
403 * The desc field is used to track userspace buffer index.
404 */
405 struct ubuf_info {
406 void (*callback)(struct ubuf_info *, bool zerocopy_success);
407 void *ctx;
408 unsigned long desc;
409 };
410
411 /* This data is invariant across clones and lives at
412 * the end of the header data, ie. at skb->end.
413 */
414 struct skb_shared_info {
415 unsigned char nr_frags;
416 __u8 tx_flags;
417 unsigned short gso_size;
418 /* Warning: this field is not always filled in (UFO)! */
419 unsigned short gso_segs;
420 unsigned short gso_type;
421 struct sk_buff *frag_list;
422 struct skb_shared_hwtstamps hwtstamps;
423 u32 tskey;
424 __be32 ip6_frag_id;
425
426 /*
427 * Warning: all fields before dataref are cleared in __alloc_skb()
428 */
429 atomic_t dataref;
430
431 /* Intermediate layers must ensure that destructor_arg
432 * remains valid until the skb destructor runs */
433 void * destructor_arg;
434
435 /* must be last field, see pskb_expand_head() */
436 skb_frag_t frags[MAX_SKB_FRAGS];
437 };
438
439 /* We divide dataref into two halves. The higher 16 bits hold references
440 * to the payload part of skb->data. The lower 16 bits hold references to
441 * the entire skb->data. A clone of a headerless skb holds the length of
442 * the header in skb->hdr_len.
443 *
444 * All users must obey the rule that the skb->data reference count must be
445 * greater than or equal to the payload reference count.
446 *
447 * Holding a reference to the payload part means that the user does not
448 * care about modifications to the header part of skb->data.
449 */
450 #define SKB_DATAREF_SHIFT 16
451 #define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
452
453
454 enum {
455 SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */
456 SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */
457 SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */
458 };
459
460 enum {
461 SKB_GSO_TCPV4 = 1 << 0,
462 SKB_GSO_UDP = 1 << 1,
463
464 /* This indicates the skb is from an untrusted source. */
465 SKB_GSO_DODGY = 1 << 2,
466
467 /* This indicates the tcp segment has CWR set. */
468 SKB_GSO_TCP_ECN = 1 << 3,
469
470 SKB_GSO_TCP_FIXEDID = 1 << 4,
471
472 SKB_GSO_TCPV6 = 1 << 5,
473
474 SKB_GSO_FCOE = 1 << 6,
475
476 SKB_GSO_GRE = 1 << 7,
477
478 SKB_GSO_GRE_CSUM = 1 << 8,
479
480 SKB_GSO_IPXIP4 = 1 << 9,
481
482 SKB_GSO_IPXIP6 = 1 << 10,
483
484 SKB_GSO_UDP_TUNNEL = 1 << 11,
485
486 SKB_GSO_UDP_TUNNEL_CSUM = 1 << 12,
487
488 SKB_GSO_PARTIAL = 1 << 13,
489
490 SKB_GSO_TUNNEL_REMCSUM = 1 << 14,
491
492 SKB_GSO_SCTP = 1 << 15,
493 };
494
495 #if BITS_PER_LONG > 32
496 #define NET_SKBUFF_DATA_USES_OFFSET 1
497 #endif
498
499 #ifdef NET_SKBUFF_DATA_USES_OFFSET
500 typedef unsigned int sk_buff_data_t;
501 #else
502 typedef unsigned char *sk_buff_data_t;
503 #endif
504
505 /**
506 * struct skb_mstamp - multi resolution time stamps
507 * @stamp_us: timestamp in us resolution
508 * @stamp_jiffies: timestamp in jiffies
509 */
510 struct skb_mstamp {
511 union {
512 u64 v64;
513 struct {
514 u32 stamp_us;
515 u32 stamp_jiffies;
516 };
517 };
518 };
519
520 /**
521 * skb_mstamp_get - get current timestamp
522 * @cl: place to store timestamps
523 */
524 static inline void skb_mstamp_get(struct skb_mstamp *cl)
525 {
526 u64 val = local_clock();
527
528 do_div(val, NSEC_PER_USEC);
529 cl->stamp_us = (u32)val;
530 cl->stamp_jiffies = (u32)jiffies;
531 }
532
533 /**
534 * skb_mstamp_us_delta - compute the difference in usec between two skb_mstamp
535 * @t1: pointer to newest sample
536 * @t0: pointer to oldest sample
537 */
538 static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
539 const struct skb_mstamp *t0)
540 {
541 s32 delta_us = t1->stamp_us - t0->stamp_us;
542 u32 delta_jiffies = t1->stamp_jiffies - t0->stamp_jiffies;
543
544 * If delta_us is negative, this might be because the interval is too big,
545 * or local_clock() drift is too big: fall back to using jiffies.
546 */
547 if (delta_us <= 0 ||
548 delta_jiffies >= (INT_MAX / (USEC_PER_SEC / HZ)))
549
550 delta_us = jiffies_to_usecs(delta_jiffies);
551
552 return delta_us;
553 }
554
555 static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
556 const struct skb_mstamp *t0)
557 {
558 s32 diff = t1->stamp_jiffies - t0->stamp_jiffies;
559
560 if (!diff)
561 diff = t1->stamp_us - t0->stamp_us;
562 return diff > 0;
563 }
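/* Illustrative sketch of the intended usage (example_elapsed_us() is
 * hypothetical): take a sample before an interval, then read back the
 * elapsed time in microseconds afterwards.
 */
static u32 example_elapsed_us(const struct skb_mstamp *start)
{
	struct skb_mstamp now;

	skb_mstamp_get(&now);
	return skb_mstamp_us_delta(&now, start);
}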
564
565 /**
566 * struct sk_buff - socket buffer
567 * @next: Next buffer in list
568 * @prev: Previous buffer in list
569 * @tstamp: Time we arrived/left
570 * @rbnode: RB tree node, alternative to next/prev for netem/tcp
571 * @sk: Socket we are owned by
572 * @dev: Device we arrived on/are leaving by
573 * @cb: Control buffer. Free for use by every layer. Put private vars here
574 * @_skb_refdst: destination entry (with norefcount bit)
575 * @sp: the security path, used for xfrm
576 * @len: Length of actual data
577 * @data_len: Data length
578 * @mac_len: Length of link layer header
579 * @hdr_len: writable header length of cloned skb
580 * @csum: Checksum (must include start/offset pair)
581 * @csum_start: Offset from skb->head where checksumming should start
582 * @csum_offset: Offset from csum_start where checksum should be stored
583 * @priority: Packet queueing priority
584 * @ignore_df: allow local fragmentation
585 * @cloned: Head may be cloned (check refcnt to be sure)
586 * @ip_summed: Driver fed us an IP checksum
587 * @nohdr: Payload reference only, must not modify header
588 * @nfctinfo: Relationship of this skb to the connection
589 * @pkt_type: Packet class
590 * @fclone: skbuff clone status
591 * @ipvs_property: skbuff is owned by ipvs
592 * @peeked: this packet has been seen already, so stats have been
593 * done for it, don't do them again
594 * @nf_trace: netfilter packet trace flag
595 * @protocol: Packet protocol from driver
596 * @destructor: Destruct function
597 * @nfct: Associated connection, if any
598 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
599 * @skb_iif: ifindex of device we arrived on
600 * @tc_index: Traffic control index
601 * @tc_verd: traffic control verdict
602 * @hash: the packet hash
603 * @queue_mapping: Queue mapping for multiqueue devices
604 * @xmit_more: More SKBs are pending for this queue
605 * @ndisc_nodetype: router type (from link layer)
606 * @ooo_okay: allow the mapping of a socket to a queue to be changed
607 * @l4_hash: indicate hash is a canonical 4-tuple hash over transport
608 * ports.
609 * @sw_hash: indicates hash was computed in software stack
610 * @wifi_acked_valid: wifi_acked was set
611 * @wifi_acked: whether frame was acked on wifi or not
612 * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
613 * @napi_id: id of the NAPI struct this skb came from
614 * @secmark: security marking
615 * @mark: Generic packet mark
616 * @vlan_proto: vlan encapsulation protocol
617 * @vlan_tci: vlan tag control information
618 * @inner_protocol: Protocol (encapsulation)
619 * @inner_transport_header: Inner transport layer header (encapsulation)
620 * @inner_network_header: Network layer header (encapsulation)
621 * @inner_mac_header: Link layer header (encapsulation)
622 * @transport_header: Transport layer header
623 * @network_header: Network layer header
624 * @mac_header: Link layer header
625 * @tail: Tail pointer
626 * @end: End pointer
627 * @head: Head of buffer
628 * @data: Data head pointer
629 * @truesize: Buffer size
630 * @users: User count - see {datagram,tcp}.c
631 */
632
633 struct sk_buff {
634 union {
635 struct {
636 /* These two members must be first. */
637 struct sk_buff *next;
638 struct sk_buff *prev;
639
640 union {
641 ktime_t tstamp;
642 struct skb_mstamp skb_mstamp;
643 };
644 };
645 struct rb_node rbnode; /* used in netem & tcp stack */
646 };
647 struct sock *sk;
648
649 union {
650 struct net_device *dev;
651 /* Some protocols might use this space to store information,
652 * while the device pointer would be NULL.
653 * UDP receive path is one user.
654 */
655 unsigned long dev_scratch;
656 };
657 /*
658 * This is the control buffer. It is free to use for every
659 * layer. Please put your private variables there. If you
660 * want to keep them across layers you have to do a skb_clone()
661 * first. This is owned by whoever has the skb queued ATM.
662 */
663 char cb[48] __aligned(8);
664
665 unsigned long _skb_refdst;
666 void (*destructor)(struct sk_buff *skb);
667 #ifdef CONFIG_XFRM
668 struct sec_path *sp;
669 #endif
670 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
671 struct nf_conntrack *nfct;
672 #endif
673 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
674 struct nf_bridge_info *nf_bridge;
675 #endif
676 unsigned int len,
677 data_len;
678 __u16 mac_len,
679 hdr_len;
680
681 /* Following fields are _not_ copied in __copy_skb_header()
682 * Note that queue_mapping is here mostly to fill a hole.
683 */
684 kmemcheck_bitfield_begin(flags1);
685 __u16 queue_mapping;
686
687 /* if you move cloned around you also must adapt those constants */
688 #ifdef __BIG_ENDIAN_BITFIELD
689 #define CLONED_MASK (1 << 7)
690 #else
691 #define CLONED_MASK 1
692 #endif
693 #define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset)
694
695 __u8 __cloned_offset[0];
696 __u8 cloned:1,
697 nohdr:1,
698 fclone:2,
699 peeked:1,
700 head_frag:1,
701 xmit_more:1,
702 __unused:1; /* one bit hole */
703 kmemcheck_bitfield_end(flags1);
704
705 /* fields enclosed in headers_start/headers_end are copied
706 * using a single memcpy() in __copy_skb_header()
707 */
708 /* private: */
709 __u32 headers_start[0];
710 /* public: */
711
712 /* if you move pkt_type around you also must adapt those constants */
713 #ifdef __BIG_ENDIAN_BITFIELD
714 #define PKT_TYPE_MAX (7 << 5)
715 #else
716 #define PKT_TYPE_MAX 7
717 #endif
718 #define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset)
719
720 __u8 __pkt_type_offset[0];
721 __u8 pkt_type:3;
722 __u8 pfmemalloc:1;
723 __u8 ignore_df:1;
724 __u8 nfctinfo:3;
725
726 __u8 nf_trace:1;
727 __u8 ip_summed:2;
728 __u8 ooo_okay:1;
729 __u8 l4_hash:1;
730 __u8 sw_hash:1;
731 __u8 wifi_acked_valid:1;
732 __u8 wifi_acked:1;
733
734 __u8 no_fcs:1;
735 /* Indicates the inner headers are valid in the skbuff. */
736 __u8 encapsulation:1;
737 __u8 encap_hdr_csum:1;
738 __u8 csum_valid:1;
739 __u8 csum_complete_sw:1;
740 __u8 csum_level:2;
741 __u8 csum_bad:1;
742
743 #ifdef CONFIG_IPV6_NDISC_NODETYPE
744 __u8 ndisc_nodetype:2;
745 #endif
746 __u8 ipvs_property:1;
747 __u8 inner_protocol_type:1;
748 __u8 remcsum_offload:1;
749 #ifdef CONFIG_NET_SWITCHDEV
750 __u8 offload_fwd_mark:1;
751 #endif
752 /* 2, 4 or 5 bit hole */
753
754 #ifdef CONFIG_NET_SCHED
755 __u16 tc_index; /* traffic control index */
756 #ifdef CONFIG_NET_CLS_ACT
757 __u16 tc_verd; /* traffic control verdict */
758 #endif
759 #endif
760
761 union {
762 __wsum csum;
763 struct {
764 __u16 csum_start;
765 __u16 csum_offset;
766 };
767 };
768 __u32 priority;
769 int skb_iif;
770 __u32 hash;
771 __be16 vlan_proto;
772 __u16 vlan_tci;
773 #if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
774 union {
775 unsigned int napi_id;
776 unsigned int sender_cpu;
777 };
778 #endif
779 #ifdef CONFIG_NETWORK_SECMARK
780 __u32 secmark;
781 #endif
782
783 union {
784 __u32 mark;
785 __u32 reserved_tailroom;
786 };
787
788 union {
789 __be16 inner_protocol;
790 __u8 inner_ipproto;
791 };
792
793 __u16 inner_transport_header;
794 __u16 inner_network_header;
795 __u16 inner_mac_header;
796
797 __be16 protocol;
798 __u16 transport_header;
799 __u16 network_header;
800 __u16 mac_header;
801
802 /* private: */
803 __u32 headers_end[0];
804 /* public: */
805
806 /* These elements must be at the end, see alloc_skb() for details. */
807 sk_buff_data_t tail;
808 sk_buff_data_t end;
809 unsigned char *head,
810 *data;
811 unsigned int truesize;
812 atomic_t users;
813 };
814
815 #ifdef __KERNEL__
816 /*
817 * Handling routines are only of interest to the kernel
818 */
819 #include <linux/slab.h>
820
821
822 #define SKB_ALLOC_FCLONE 0x01
823 #define SKB_ALLOC_RX 0x02
824 #define SKB_ALLOC_NAPI 0x04
825
826 /* Returns true if the skb was allocated from PFMEMALLOC reserves */
827 static inline bool skb_pfmemalloc(const struct sk_buff *skb)
828 {
829 return unlikely(skb->pfmemalloc);
830 }
831
832 /*
833 * skb might have a dst pointer attached, refcounted or not.
834 * _skb_refdst low order bit is set if refcount was _not_ taken
835 */
836 #define SKB_DST_NOREF 1UL
837 #define SKB_DST_PTRMASK ~(SKB_DST_NOREF)
838
839 /**
840 * skb_dst - returns skb dst_entry
841 * @skb: buffer
842 *
843 * Returns skb dst_entry, regardless of reference taken or not.
844 */
845 static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
846 {
847 /* If refdst was not refcounted, check we still are in a
848 * rcu_read_lock section
849 */
850 WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
851 !rcu_read_lock_held() &&
852 !rcu_read_lock_bh_held());
853 return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
854 }
855
856 /**
857 * skb_dst_set - sets skb dst
858 * @skb: buffer
859 * @dst: dst entry
860 *
861 * Sets skb dst, assuming a reference was taken on dst and should
862 * be released by skb_dst_drop()
863 */
864 static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
865 {
866 skb->_skb_refdst = (unsigned long)dst;
867 }
868
869 /**
870 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
871 * @skb: buffer
872 * @dst: dst entry
873 *
874 * Sets skb dst, assuming a reference was not taken on dst.
875 * If dst entry is cached, we do not take reference and dst_release
876 * will be avoided by refdst_drop. If dst entry is not cached, we take
877 * reference, so that last dst_release can destroy the dst immediately.
878 */
879 static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
880 {
881 WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
882 skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
883 }
884
885 /**
886 * skb_dst_is_noref - Test if skb dst isn't refcounted
887 * @skb: buffer
888 */
889 static inline bool skb_dst_is_noref(const struct sk_buff *skb)
890 {
891 return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
892 }
893
894 static inline struct rtable *skb_rtable(const struct sk_buff *skb)
895 {
896 return (struct rtable *)skb_dst(skb);
897 }
898
899 /* For mangling skb->pkt_type from user space side from applications
900 * such as nft, tc, etc, we only allow a conservative subset of
901 * possible pkt_types to be set.
902 */
903 static inline bool skb_pkt_type_ok(u32 ptype)
904 {
905 return ptype <= PACKET_OTHERHOST;
906 }
907
908 void kfree_skb(struct sk_buff *skb);
909 void kfree_skb_list(struct sk_buff *segs);
910 void skb_tx_error(struct sk_buff *skb);
911 void consume_skb(struct sk_buff *skb);
912 void __kfree_skb(struct sk_buff *skb);
913 extern struct kmem_cache *skbuff_head_cache;
914
915 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
916 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
917 bool *fragstolen, int *delta_truesize);
918
919 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
920 int node);
921 struct sk_buff *__build_skb(void *data, unsigned int frag_size);
922 struct sk_buff *build_skb(void *data, unsigned int frag_size);
923 static inline struct sk_buff *alloc_skb(unsigned int size,
924 gfp_t priority)
925 {
926 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
927 }
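/* A minimal allocation sketch (example_build_skb() is hypothetical; the
 * 128-byte headroom is an arbitrary example value): allocate, reserve
 * headroom for lower-layer headers, then append payload with skb_put()
 * (declared further below in this header).
 */
static struct sk_buff *example_build_skb(const void *payload, unsigned int len)
{
	struct sk_buff *skb = alloc_skb(len + 128, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, 128);				/* headroom for headers */
	memcpy(skb_put(skb, len), payload, len);	/* copy in the payload */
	return skb;
}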
928
929 struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
930 unsigned long data_len,
931 int max_page_order,
932 int *errcode,
933 gfp_t gfp_mask);
934
935 /* Layout of fast clones: [skb1][skb2][fclone_ref] */
936 struct sk_buff_fclones {
937 struct sk_buff skb1;
938
939 struct sk_buff skb2;
940
941 atomic_t fclone_ref;
942 };
943
944 /**
945 * skb_fclone_busy - check if fclone is busy
946 * @sk: socket
947 * @skb: buffer
948 *
949 * Returns true if skb is a fast clone, and its clone is not freed.
950 * Some drivers call skb_orphan() in their ndo_start_xmit(),
951 * so we also check that this didn't happen.
952 */
953 static inline bool skb_fclone_busy(const struct sock *sk,
954 const struct sk_buff *skb)
955 {
956 const struct sk_buff_fclones *fclones;
957
958 fclones = container_of(skb, struct sk_buff_fclones, skb1);
959
960 return skb->fclone == SKB_FCLONE_ORIG &&
961 atomic_read(&fclones->fclone_ref) > 1 &&
962 fclones->skb2.sk == sk;
963 }
964
965 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
966 gfp_t priority)
967 {
968 return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
969 }
970
971 struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
972 static inline struct sk_buff *alloc_skb_head(gfp_t priority)
973 {
974 return __alloc_skb_head(priority, -1);
975 }
976
977 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
978 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
979 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
980 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
981 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
982 gfp_t gfp_mask, bool fclone);
983 static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
984 gfp_t gfp_mask)
985 {
986 return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
987 }
988
989 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
990 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
991 unsigned int headroom);
992 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
993 int newtailroom, gfp_t priority);
994 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
995 int offset, int len);
996 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
997 int len);
998 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
999 int skb_pad(struct sk_buff *skb, int pad);
1000 #define dev_kfree_skb(a) consume_skb(a)
1001
1002 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
1003 int getfrag(void *from, char *to, int offset,
1004 int len, int odd, struct sk_buff *skb),
1005 void *from, int length);
1006
1007 int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
1008 int offset, size_t size);
1009
1010 struct skb_seq_state {
1011 __u32 lower_offset;
1012 __u32 upper_offset;
1013 __u32 frag_idx;
1014 __u32 stepped_offset;
1015 struct sk_buff *root_skb;
1016 struct sk_buff *cur_skb;
1017 __u8 *frag_data;
1018 };
1019
1020 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
1021 unsigned int to, struct skb_seq_state *st);
1022 unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
1023 struct skb_seq_state *st);
1024 void skb_abort_seq_read(struct skb_seq_state *st);
1025
1026 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
1027 unsigned int to, struct ts_config *config);
1028
1029 /*
1030 * Packet hash types specify the type of hash in skb_set_hash.
1031 *
1032 * Hash types refer to the protocol layer addresses which are used to
1033 * construct a packet's hash. The hashes are used to differentiate or identify
1034 * flows of the protocol layer for the hash type. Hash types are either
1035 * layer-2 (L2), layer-3 (L3), or layer-4 (L4).
1036 *
1037 * Properties of hashes:
1038 *
1039 * 1) Two packets in different flows have different hash values
1040 * 2) Two packets in the same flow should have the same hash value
1041 *
1042 * A hash at a higher layer is considered to be more specific. A driver should
1043 * set the most specific hash possible.
1044 *
1045 * A driver cannot indicate a more specific hash than the layer at which a hash
1046 * was computed. For instance an L3 hash cannot be set as an L4 hash.
1047 *
1048 * A driver may indicate a hash level which is less specific than the
1049 * actual layer the hash was computed on. For instance, a hash computed
1050 * at L4 may be considered an L3 hash. This should only be done if the
1051 * driver can't unambiguously determine that the HW computed the hash at
1052 * the higher layer. Note that the "should" in the second property above
1053 * permits this.
1054 */
1055 enum pkt_hash_types {
1056 PKT_HASH_TYPE_NONE, /* Undefined type */
1057 PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */
1058 PKT_HASH_TYPE_L3, /* Input: src_IP, dst_IP */
1059 PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */
1060 };
1061
1062 static inline void skb_clear_hash(struct sk_buff *skb)
1063 {
1064 skb->hash = 0;
1065 skb->sw_hash = 0;
1066 skb->l4_hash = 0;
1067 }
1068
1069 static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
1070 {
1071 if (!skb->l4_hash)
1072 skb_clear_hash(skb);
1073 }
1074
1075 static inline void
1076 __skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
1077 {
1078 skb->l4_hash = is_l4;
1079 skb->sw_hash = is_sw;
1080 skb->hash = hash;
1081 }
1082
1083 static inline void
1084 skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
1085 {
1086 /* Used by drivers to set hash from HW */
1087 __skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
1088 }
1089
1090 static inline void
1091 __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
1092 {
1093 __skb_set_hash(skb, hash, true, is_l4);
1094 }
1095
1096 void __skb_get_hash(struct sk_buff *skb);
1097 u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
1098 u32 skb_get_poff(const struct sk_buff *skb);
1099 u32 __skb_get_poff(const struct sk_buff *skb, void *data,
1100 const struct flow_keys *keys, int hlen);
1101 __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
1102 void *data, int hlen_proto);
1103
1104 static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
1105 int thoff, u8 ip_proto)
1106 {
1107 return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
1108 }
1109
1110 void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
1111 const struct flow_dissector_key *key,
1112 unsigned int key_count);
1113
1114 bool __skb_flow_dissect(const struct sk_buff *skb,
1115 struct flow_dissector *flow_dissector,
1116 void *target_container,
1117 void *data, __be16 proto, int nhoff, int hlen,
1118 unsigned int flags);
1119
1120 static inline bool skb_flow_dissect(const struct sk_buff *skb,
1121 struct flow_dissector *flow_dissector,
1122 void *target_container, unsigned int flags)
1123 {
1124 return __skb_flow_dissect(skb, flow_dissector, target_container,
1125 NULL, 0, 0, 0, flags);
1126 }
1127
1128 static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
1129 struct flow_keys *flow,
1130 unsigned int flags)
1131 {
1132 memset(flow, 0, sizeof(*flow));
1133 return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
1134 NULL, 0, 0, 0, flags);
1135 }
1136
1137 static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
1138 void *data, __be16 proto,
1139 int nhoff, int hlen,
1140 unsigned int flags)
1141 {
1142 memset(flow, 0, sizeof(*flow));
1143 return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
1144 data, proto, nhoff, hlen, flags);
1145 }
1146
1147 static inline __u32 skb_get_hash(struct sk_buff *skb)
1148 {
1149 if (!skb->l4_hash && !skb->sw_hash)
1150 __skb_get_hash(skb);
1151
1152 return skb->hash;
1153 }
1154
1155 __u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6);
1156
1157 static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
1158 {
1159 if (!skb->l4_hash && !skb->sw_hash) {
1160 struct flow_keys keys;
1161 __u32 hash = __get_hash_from_flowi6(fl6, &keys);
1162
1163 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1164 }
1165
1166 return skb->hash;
1167 }
1168
1169 __u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl);
1170
1171 static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
1172 {
1173 if (!skb->l4_hash && !skb->sw_hash) {
1174 struct flow_keys keys;
1175 __u32 hash = __get_hash_from_flowi4(fl4, &keys);
1176
1177 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1178 }
1179
1180 return skb->hash;
1181 }
1182
1183 __u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
1184
1185 static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
1186 {
1187 return skb->hash;
1188 }
1189
1190 static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
1191 {
1192 to->hash = from->hash;
1193 to->sw_hash = from->sw_hash;
1194 to->l4_hash = from->l4_hash;
1195 }
1196
1197 #ifdef NET_SKBUFF_DATA_USES_OFFSET
1198 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1199 {
1200 return skb->head + skb->end;
1201 }
1202
1203 static inline unsigned int skb_end_offset(const struct sk_buff *skb)
1204 {
1205 return skb->end;
1206 }
1207 #else
1208 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1209 {
1210 return skb->end;
1211 }
1212
1213 static inline unsigned int skb_end_offset(const struct sk_buff *skb)
1214 {
1215 return skb->end - skb->head;
1216 }
1217 #endif
1218
1219 /* Internal */
1220 #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
1221
1222 static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
1223 {
1224 return &skb_shinfo(skb)->hwtstamps;
1225 }
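/* Sketch of how a driver might attach a hardware timestamp on completion
 * (example_store_hwtstamp() is hypothetical; assumes @ns is the NIC time
 * already converted to nanoseconds, ns_to_ktime() from <linux/ktime.h>).
 */
static void example_store_hwtstamp(struct sk_buff *skb, u64 ns)
{
	struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);

	memset(hwts, 0, sizeof(*hwts));
	hwts->hwtstamp = ns_to_ktime(ns);
}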
1226
1227 /**
1228 * skb_queue_empty - check if a queue is empty
1229 * @list: queue head
1230 *
1231 * Returns true if the queue is empty, false otherwise.
1232 */
1233 static inline int skb_queue_empty(const struct sk_buff_head *list)
1234 {
1235 return list->next == (const struct sk_buff *) list;
1236 }
1237
1238 /**
1239 * skb_queue_is_last - check if skb is the last entry in the queue
1240 * @list: queue head
1241 * @skb: buffer
1242 *
1243 * Returns true if @skb is the last buffer on the list.
1244 */
1245 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
1246 const struct sk_buff *skb)
1247 {
1248 return skb->next == (const struct sk_buff *) list;
1249 }
1250
1251 /**
1252 * skb_queue_is_first - check if skb is the first entry in the queue
1253 * @list: queue head
1254 * @skb: buffer
1255 *
1256 * Returns true if @skb is the first buffer on the list.
1257 */
1258 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
1259 const struct sk_buff *skb)
1260 {
1261 return skb->prev == (const struct sk_buff *) list;
1262 }
1263
1264 /**
1265 * skb_queue_next - return the next packet in the queue
1266 * @list: queue head
1267 * @skb: current buffer
1268 *
1269 * Return the next packet in @list after @skb. It is only valid to
1270 * call this if skb_queue_is_last() evaluates to false.
1271 */
1272 static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
1273 const struct sk_buff *skb)
1274 {
1275 /* This BUG_ON may seem severe, but if we just return then we
1276 * are going to dereference garbage.
1277 */
1278 BUG_ON(skb_queue_is_last(list, skb));
1279 return skb->next;
1280 }
1281
1282 /**
1283 * skb_queue_prev - return the prev packet in the queue
1284 * @list: queue head
1285 * @skb: current buffer
1286 *
1287 * Return the prev packet in @list before @skb. It is only valid to
1288 * call this if skb_queue_is_first() evaluates to false.
1289 */
1290 static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
1291 const struct sk_buff *skb)
1292 {
1293 /* This BUG_ON may seem severe, but if we just return then we
1294 * are going to dereference garbage.
1295 */
1296 BUG_ON(skb_queue_is_first(list, skb));
1297 return skb->prev;
1298 }
1299
1300 /**
1301 * skb_get - reference buffer
1302 * @skb: buffer to reference
1303 *
1304 * Makes another reference to a socket buffer and returns a pointer
1305 * to the buffer.
1306 */
1307 static inline struct sk_buff *skb_get(struct sk_buff *skb)
1308 {
1309 atomic_inc(&skb->users);
1310 return skb;
1311 }
1312
1313 /*
1314 * If users == 1, we are the only owner and can avoid redundant
1315 * atomic changes.
1316 */
1317
1318 /**
1319 * skb_cloned - is the buffer a clone
1320 * @skb: buffer to check
1321 *
1322 * Returns true if the buffer was generated with skb_clone() and is
1323 * one of multiple shared copies of the buffer. Cloned buffers are
1324 * shared data so must not be written to under normal circumstances.
1325 */
1326 static inline int skb_cloned(const struct sk_buff *skb)
1327 {
1328 return skb->cloned &&
1329 (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
1330 }
1331
1332 static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
1333 {
1334 might_sleep_if(gfpflags_allow_blocking(pri));
1335
1336 if (skb_cloned(skb))
1337 return pskb_expand_head(skb, 0, 0, pri);
1338
1339 return 0;
1340 }
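/* Copy-on-write sketch (example_decrease_ttl() is hypothetical): un-clone
 * before writing to possibly shared data, here decrementing the IPv4 TTL.
 * Assumes the IPv4 header sits in the linear area; ip_hdr() and
 * ip_decrease_ttl() come from <linux/ip.h> and <net/ip.h>.
 */
static int example_decrease_ttl(struct sk_buff *skb)
{
	if (skb_unclone(skb, GFP_ATOMIC))
		return -ENOMEM;		/* private copy could not be made */
	ip_decrease_ttl(ip_hdr(skb));	/* also fixes up the header checksum */
	return 0;
}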
1341
1342 /**
1343 * skb_header_cloned - is the header a clone
1344 * @skb: buffer to check
1345 *
1346 * Returns true if modifying the header part of the buffer requires
1347 * the data to be copied.
1348 */
1349 static inline int skb_header_cloned(const struct sk_buff *skb)
1350 {
1351 int dataref;
1352
1353 if (!skb->cloned)
1354 return 0;
1355
1356 dataref = atomic_read(&skb_shinfo(skb)->dataref);
1357 dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
1358 return dataref != 1;
1359 }
1360
1361 static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
1362 {
1363 might_sleep_if(gfpflags_allow_blocking(pri));
1364
1365 if (skb_header_cloned(skb))
1366 return pskb_expand_head(skb, 0, 0, pri);
1367
1368 return 0;
1369 }
1370
1371 /**
1372 * skb_header_release - release reference to header
1373 * @skb: buffer to operate on
1374 *
1375 * Drop a reference to the header part of the buffer. This is done
1376 * by acquiring a payload reference. You must not read from the header
1377 * part of skb->data after this.
1378 * Note : Check if you can use __skb_header_release() instead.
1379 */
1380 static inline void skb_header_release(struct sk_buff *skb)
1381 {
1382 BUG_ON(skb->nohdr);
1383 skb->nohdr = 1;
1384 atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
1385 }
1386
1387 /**
1388 * __skb_header_release - release reference to header
1389 * @skb: buffer to operate on
1390 *
1391 * Variant of skb_header_release() assuming skb is private to caller.
1392 * We can avoid one atomic operation.
1393 */
1394 static inline void __skb_header_release(struct sk_buff *skb)
1395 {
1396 skb->nohdr = 1;
1397 atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
1398 }
1399
1400
1401 /**
1402 * skb_shared - is the buffer shared
1403 * @skb: buffer to check
1404 *
1405 * Returns true if more than one person has a reference to this
1406 * buffer.
1407 */
1408 static inline int skb_shared(const struct sk_buff *skb)
1409 {
1410 return atomic_read(&skb->users) != 1;
1411 }
1412
1413 /**
1414 * skb_share_check - check if buffer is shared and if so clone it
1415 * @skb: buffer to check
1416 * @pri: priority for memory allocation
1417 *
1418 * If the buffer is shared the buffer is cloned and the old copy
1419 * drops a reference. A new clone with a single reference is returned.
1420 * If the buffer is not shared the original buffer is returned. When
1421 * called from interrupt context or with spinlocks held, @pri must
1422 * be %GFP_ATOMIC.
1423 *
1424 * NULL is returned on a memory allocation failure.
1425 */
1426 static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
1427 {
1428 might_sleep_if(gfpflags_allow_blocking(pri));
1429 if (skb_shared(skb)) {
1430 struct sk_buff *nskb = skb_clone(skb, pri);
1431
1432 if (likely(nskb))
1433 consume_skb(skb);
1434 else
1435 kfree_skb(skb);
1436 skb = nskb;
1437 }
1438 return skb;
1439 }
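/* Sketch of the usual receive-side pattern (example_rx_prepare() is
 * hypothetical): take a private reference before touching the skb; on
 * allocation failure the original has already been freed by
 * skb_share_check(), so there is nothing left to release.
 */
static struct sk_buff *example_rx_prepare(struct sk_buff *skb)
{
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return NULL;
	/* safe to modify skb metadata from here on */
	return skb;
}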
1440
1441 /*
1442 * Copy shared buffers into a new sk_buff. We effectively do COW on
1443 * packets to handle cases where we have a local reader and forward
1444 * and a couple of other messy ones. The normal one is tcpdumping
1445 * a packet that's being forwarded.
1446 */
1447
1448 /**
1449 * skb_unshare - make a copy of a shared buffer
1450 * @skb: buffer to check
1451 * @pri: priority for memory allocation
1452 *
1453 * If the socket buffer is a clone then this function creates a new
1454 * copy of the data, drops a reference count on the old copy and returns
1455 * the new copy with the reference count at 1. If the buffer is not a clone
1456 * the original buffer is returned. When called with a spinlock held or
1457 * from interrupt state, @pri must be %GFP_ATOMIC.
1458 *
1459 * %NULL is returned on a memory allocation failure.
1460 */
1461 static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
1462 gfp_t pri)
1463 {
1464 might_sleep_if(gfpflags_allow_blocking(pri));
1465 if (skb_cloned(skb)) {
1466 struct sk_buff *nskb = skb_copy(skb, pri);
1467
1468 /* Free our shared copy */
1469 if (likely(nskb))
1470 consume_skb(skb);
1471 else
1472 kfree_skb(skb);
1473 skb = nskb;
1474 }
1475 return skb;
1476 }
1477
1478 /**
1479 * skb_peek - peek at the head of an &sk_buff_head
1480 * @list_: list to peek at
1481 *
1482 * Peek an &sk_buff. Unlike most other operations you _MUST_
1483 * be careful with this one. A peek leaves the buffer on the
1484 * list and someone else may run off with it. You must hold
1485 * the appropriate locks or have a private queue to do this.
1486 *
1487 * Returns %NULL for an empty list or a pointer to the head element.
1488 * The reference count is not incremented and the reference is therefore
1489 * volatile. Use with caution.
1490 */
1491 static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
1492 {
1493 struct sk_buff *skb = list_->next;
1494
1495 if (skb == (struct sk_buff *)list_)
1496 skb = NULL;
1497 return skb;
1498 }
1499
1500 /**
1501 * skb_peek_next - peek skb following the given one from a queue
1502 * @skb: skb to start from
1503 * @list_: list to peek at
1504 *
1505 * Returns %NULL when the end of the list is met or a pointer to the
1506 * next element. The reference count is not incremented and the
1507 * reference is therefore volatile. Use with caution.
1508 */
1509 static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
1510 const struct sk_buff_head *list_)
1511 {
1512 struct sk_buff *next = skb->next;
1513
1514 if (next == (struct sk_buff *)list_)
1515 next = NULL;
1516 return next;
1517 }
1518
1519 /**
1520 * skb_peek_tail - peek at the tail of an &sk_buff_head
1521 * @list_: list to peek at
1522 *
1523 * Peek an &sk_buff. Unlike most other operations you _MUST_
1524 * be careful with this one. A peek leaves the buffer on the
1525 * list and someone else may run off with it. You must hold
1526 * the appropriate locks or have a private queue to do this.
1527 *
1528 * Returns %NULL for an empty list or a pointer to the tail element.
1529 * The reference count is not incremented and the reference is therefore
1530 * volatile. Use with caution.
1531 */
1532 static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
1533 {
1534 struct sk_buff *skb = list_->prev;
1535
1536 if (skb == (struct sk_buff *)list_)
1537 skb = NULL;
1538 return skb;
1540 }
1541
1542 /**
1543 * skb_queue_len - get queue length
1544 * @list_: list to measure
1545 *
1546 * Return the length of an &sk_buff queue.
1547 */
1548 static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
1549 {
1550 return list_->qlen;
1551 }
1552
1553 /**
1554 * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
1555 * @list: queue to initialize
1556 *
1557 * This initializes only the list and queue length aspects of
1558 * an sk_buff_head object. This allows initializing the list
1559 * aspects of an sk_buff_head without reinitializing things like
1560 * the spinlock. It can also be used for on-stack sk_buff_head
1561 * objects where the spinlock is known to not be used.
1562 */
1563 static inline void __skb_queue_head_init(struct sk_buff_head *list)
1564 {
1565 list->prev = list->next = (struct sk_buff *)list;
1566 list->qlen = 0;
1567 }
1568
1569 /*
1570 * This function creates a split out lock class for each invocation;
1571 * this is needed for now since a whole lot of users of the skb-queue
1572 * infrastructure in drivers have different locking usage (in hardirq)
1573 * than the networking core (in softirq only). In the long run either the
1574 * network layer or drivers should be annotated to consolidate the
1575 * main types of usage into 3 classes.
1576 */
1577 static inline void skb_queue_head_init(struct sk_buff_head *list)
1578 {
1579 spin_lock_init(&list->lock);
1580 __skb_queue_head_init(list);
1581 }
1582
1583 static inline void skb_queue_head_init_class(struct sk_buff_head *list,
1584 struct lock_class_key *class)
1585 {
1586 skb_queue_head_init(list);
1587 lockdep_set_class(&list->lock, class);
1588 }
1589
1590 /*
1591 * Insert an sk_buff on a list.
1592 *
1593 * The "__skb_xxxx()" functions are the non-atomic ones that
1594 * can only be called with interrupts disabled.
1595 */
1596 void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
1597 struct sk_buff_head *list);
1598 static inline void __skb_insert(struct sk_buff *newsk,
1599 struct sk_buff *prev, struct sk_buff *next,
1600 struct sk_buff_head *list)
1601 {
1602 newsk->next = next;
1603 newsk->prev = prev;
1604 next->prev = prev->next = newsk;
1605 list->qlen++;
1606 }
1607
1608 static inline void __skb_queue_splice(const struct sk_buff_head *list,
1609 struct sk_buff *prev,
1610 struct sk_buff *next)
1611 {
1612 struct sk_buff *first = list->next;
1613 struct sk_buff *last = list->prev;
1614
1615 first->prev = prev;
1616 prev->next = first;
1617
1618 last->next = next;
1619 next->prev = last;
1620 }
1621
1622 /**
1623 * skb_queue_splice - join two skb lists, this is designed for stacks
1624 * @list: the new list to add
1625 * @head: the place to add it in the first list
1626 */
1627 static inline void skb_queue_splice(const struct sk_buff_head *list,
1628 struct sk_buff_head *head)
1629 {
1630 if (!skb_queue_empty(list)) {
1631 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
1632 head->qlen += list->qlen;
1633 }
1634 }
1635
1636 /**
1637 * skb_queue_splice_init - join two skb lists and reinitialise the emptied list
1638 * @list: the new list to add
1639 * @head: the place to add it in the first list
1640 *
1641 * The list at @list is reinitialised
1642 */
1643 static inline void skb_queue_splice_init(struct sk_buff_head *list,
1644 struct sk_buff_head *head)
1645 {
1646 if (!skb_queue_empty(list)) {
1647 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
1648 head->qlen += list->qlen;
1649 __skb_queue_head_init(list);
1650 }
1651 }
1652
1653 /**
1654 * skb_queue_splice_tail - join two skb lists, each list being a queue
1655 * @list: the new list to add
1656 * @head: the place to add it in the first list
1657 */
1658 static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
1659 struct sk_buff_head *head)
1660 {
1661 if (!skb_queue_empty(list)) {
1662 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1663 head->qlen += list->qlen;
1664 }
1665 }
1666
1667 /**
1668 * skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
1669 * @list: the new list to add
1670 * @head: the place to add it in the first list
1671 *
1672 * Each of the lists is a queue.
1673 * The list at @list is reinitialised
1674 */
1675 static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
1676 struct sk_buff_head *head)
1677 {
1678 if (!skb_queue_empty(list)) {
1679 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1680 head->qlen += list->qlen;
1681 __skb_queue_head_init(list);
1682 }
1683 }
1684
1685 /**
1686 * __skb_queue_after - queue a buffer after another buffer in the list
1687 * @list: list to use
1688 * @prev: place after this buffer
1689 * @newsk: buffer to queue
1690 *
1691 * Queue a buffer in the middle of a list. This function takes no locks
1692 * and you must therefore hold required locks before calling it.
1693 *
1694 * A buffer cannot be placed on two lists at the same time.
1695 */
1696 static inline void __skb_queue_after(struct sk_buff_head *list,
1697 struct sk_buff *prev,
1698 struct sk_buff *newsk)
1699 {
1700 __skb_insert(newsk, prev, prev->next, list);
1701 }
1702
1703 void skb_append(struct sk_buff *old, struct sk_buff *newsk,
1704 struct sk_buff_head *list);
1705
1706 static inline void __skb_queue_before(struct sk_buff_head *list,
1707 struct sk_buff *next,
1708 struct sk_buff *newsk)
1709 {
1710 __skb_insert(newsk, next->prev, next, list);
1711 }
1712
1713 /**
1714 * __skb_queue_head - queue a buffer at the list head
1715 * @list: list to use
1716 * @newsk: buffer to queue
1717 *
1718 * Queue a buffer at the start of a list. This function takes no locks
1719 * and you must therefore hold required locks before calling it.
1720 *
1721 * A buffer cannot be placed on two lists at the same time.
1722 */
1723 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
1724 static inline void __skb_queue_head(struct sk_buff_head *list,
1725 struct sk_buff *newsk)
1726 {
1727 __skb_queue_after(list, (struct sk_buff *)list, newsk);
1728 }
1729
1730 /**
1731 * __skb_queue_tail - queue a buffer at the list tail
1732 * @list: list to use
1733 * @newsk: buffer to queue
1734 *
1735 * Queue a buffer at the end of a list. This function takes no locks
1736 * and you must therefore hold required locks before calling it.
1737 *
1738 * A buffer cannot be placed on two lists at the same time.
1739 */
1740 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
1741 static inline void __skb_queue_tail(struct sk_buff_head *list,
1742 struct sk_buff *newsk)
1743 {
1744 __skb_queue_before(list, (struct sk_buff *)list, newsk);
1745 }
1746
1747 /*
1748 * remove sk_buff from list. _Must_ be called atomically, and with
1749 * the list known.
1750 */
1751 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
1752 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
1753 {
1754 struct sk_buff *next, *prev;
1755
1756 list->qlen--;
1757 next = skb->next;
1758 prev = skb->prev;
1759 skb->next = skb->prev = NULL;
1760 next->prev = prev;
1761 prev->next = next;
1762 }
1763
1764 /**
1765 * __skb_dequeue - remove from the head of the queue
1766 * @list: list to dequeue from
1767 *
1768 * Remove the head of the list. This function does not take any locks
1769 * so must be used with appropriate locks held only. The head item is
1770 * returned or %NULL if the list is empty.
1771 */
1772 struct sk_buff *skb_dequeue(struct sk_buff_head *list);
1773 static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
1774 {
1775 struct sk_buff *skb = skb_peek(list);
1776 if (skb)
1777 __skb_unlink(skb, list);
1778 return skb;
1779 }
1780
1781 /**
1782 * __skb_dequeue_tail - remove from the tail of the queue
1783 * @list: list to dequeue from
1784 *
1785 * Remove the tail of the list. This function does not take any locks
1786 * so must be used with appropriate locks held only. The tail item is
1787 * returned or %NULL if the list is empty.
1788 */
1789 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
1790 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
1791 {
1792 struct sk_buff *skb = skb_peek_tail(list);
1793 if (skb)
1794 __skb_unlink(skb, list);
1795 return skb;
1796 }
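/* End-to-end queue sketch (example_drain_queue() is hypothetical):
 * initialize a queue, add with the locked variants, then drain it. The
 * __skb_*() forms above would instead require the caller to hold
 * list->lock or otherwise serialize access.
 */
static void example_drain_queue(void)
{
	struct sk_buff_head q;
	struct sk_buff *skb;

	skb_queue_head_init(&q);

	skb = alloc_skb(64, GFP_ATOMIC);
	if (skb)
		skb_queue_tail(&q, skb);	/* takes q.lock internally */

	while ((skb = skb_dequeue(&q)) != NULL)
		kfree_skb(skb);
}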
1797
1798
1799 static inline bool skb_is_nonlinear(const struct sk_buff *skb)
1800 {
1801 return skb->data_len;
1802 }
1803
1804 static inline unsigned int skb_headlen(const struct sk_buff *skb)
1805 {
1806 return skb->len - skb->data_len;
1807 }
1808
1809 static inline unsigned int skb_pagelen(const struct sk_buff *skb)
1810 {
1811 unsigned int i, len = 0;
1812
1813 for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
1814 len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
1815 return len + skb_headlen(skb);
1816 }
1817
1818 /**
1819 * __skb_fill_page_desc - initialise a paged fragment in an skb
1820 * @skb: buffer containing fragment to be initialised
1821 * @i: paged fragment index to initialise
1822 * @page: the page to use for this fragment
1823 * @off: the offset to the data within @page
1824 * @size: the length of the data
1825 *
1826 * Initialises the @i'th fragment of @skb to point to @size bytes at
1827 * offset @off within @page.
1828 *
1829 * Does not take any additional reference on the fragment.
1830 */
1831 static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
1832 struct page *page, int off, int size)
1833 {
1834 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1835
1836 /*
1837 * Propagate page pfmemalloc to the skb if we can. The problem is
1838 * that not all callers have unique ownership of the page but rely
1839 * on page_is_pfmemalloc doing the right thing(tm).
1840 */
1841 frag->page.p = page;
1842 frag->page_offset = off;
1843 skb_frag_size_set(frag, size);
1844
1845 page = compound_head(page);
1846 if (page_is_pfmemalloc(page))
1847 skb->pfmemalloc = true;
1848 }
1849
1850 /**
1851 * skb_fill_page_desc - initialise a paged fragment in an skb
1852 * @skb: buffer containing fragment to be initialised
1853 * @i: paged fragment index to initialise
1854 * @page: the page to use for this fragment
1855 * @off: the offset to the data within @page
1856 * @size: the length of the data
1857 *
1858 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
1859 * @skb to point to @size bytes at offset @off within @page. In
1860 * addition updates @skb such that @i is the last fragment.
1861 *
1862 * Does not take any additional reference on the fragment.
1863 */
1864 static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
1865 struct page *page, int off, int size)
1866 {
1867 __skb_fill_page_desc(skb, i, page, off, size);
1868 skb_shinfo(skb)->nr_frags = i + 1;
1869 }
1870
1871 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
1872 int size, unsigned int truesize);
1873
1874 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
1875 unsigned int truesize);
1876
1877 #define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
1878 #define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb))
1879 #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
1880
1881 #ifdef NET_SKBUFF_DATA_USES_OFFSET
1882 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
1883 {
1884 return skb->head + skb->tail;
1885 }
1886
1887 static inline void skb_reset_tail_pointer(struct sk_buff *skb)
1888 {
1889 skb->tail = skb->data - skb->head;
1890 }
1891
1892 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1893 {
1894 skb_reset_tail_pointer(skb);
1895 skb->tail += offset;
1896 }
1897
1898 #else /* NET_SKBUFF_DATA_USES_OFFSET */
1899 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
1900 {
1901 return skb->tail;
1902 }
1903
1904 static inline void skb_reset_tail_pointer(struct sk_buff *skb)
1905 {
1906 skb->tail = skb->data;
1907 }
1908
1909 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1910 {
1911 skb->tail = skb->data + offset;
1912 }
1913
1914 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
1915
1916 /*
1917 * Add data to an sk_buff
1918 */
1919 unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
1920 unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
1921 static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
1922 {
1923 unsigned char *tmp = skb_tail_pointer(skb);
1924 SKB_LINEAR_ASSERT(skb);
1925 skb->tail += len;
1926 skb->len += len;
1927 return tmp;
1928 }
1929
1930 unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
1931 static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
1932 {
1933 skb->data -= len;
1934 skb->len += len;
1935 return skb->data;
1936 }
1937
1938 unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
1939 static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
1940 {
1941 skb->len -= len;
1942 BUG_ON(skb->len < skb->data_len);
1943 return skb->data += len;
1944 }
1945
1946 static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
1947 {
1948 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
1949 }
1950
1951 unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
1952
1953 static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
1954 {
1955 if (len > skb_headlen(skb) &&
1956 !__pskb_pull_tail(skb, len - skb_headlen(skb)))
1957 return NULL;
1958 skb->len -= len;
1959 return skb->data += len;
1960 }
1961
1962 static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
1963 {
1964 return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
1965 }
1966
1967 static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
1968 {
1969 if (likely(len <= skb_headlen(skb)))
1970 return 1;
1971 if (unlikely(len > skb->len))
1972 return 0;
1973 return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
1974 }
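/* Classic header-parsing sketch (example_parse_ipv4() is hypothetical;
 * struct iphdr comes from <linux/ip.h>): make sure the bytes are in the
 * linear area before dereferencing them, and re-read skb->data after every
 * pskb_may_pull(), since it may reallocate the head.
 */
static bool example_parse_ipv4(struct sk_buff *skb)
{
	const struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		return false;
	iph = (const struct iphdr *)skb->data;
	if (!pskb_may_pull(skb, iph->ihl * 4))	/* full header incl. options */
		return false;
	iph = (const struct iphdr *)skb->data;	/* head may have moved */
	return iph->version == 4;
}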
1975
1976 void skb_condense(struct sk_buff *skb);
1977
1978 /**
1979 * skb_headroom - bytes at buffer head
1980 * @skb: buffer to check
1981 *
1982 * Return the number of bytes of free space at the head of an &sk_buff.
1983 */
1984 static inline unsigned int skb_headroom(const struct sk_buff *skb)
1985 {
1986 return skb->data - skb->head;
1987 }
1988
1989 /**
1990 * skb_tailroom - bytes at buffer end
1991 * @skb: buffer to check
1992 *
1993 * Return the number of bytes of free space at the tail of an sk_buff
1994 */
1995 static inline int skb_tailroom(const struct sk_buff *skb)
1996 {
1997 return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
1998 }
1999
2000 /**
2001 * skb_availroom - bytes at buffer end
2002 * @skb: buffer to check
2003 *
2004 * Return the number of bytes of free space at the tail of an sk_buff
2005 * allocated by sk_stream_alloc()
2006 */
2007 static inline int skb_availroom(const struct sk_buff *skb)
2008 {
2009 if (skb_is_nonlinear(skb))
2010 return 0;
2011
2012 return skb->end - skb->tail - skb->reserved_tailroom;
2013 }
2014
2015 /**
2016 * skb_reserve - adjust headroom
2017 * @skb: buffer to alter
2018 * @len: bytes to move
2019 *
2020 * Increase the headroom of an empty &sk_buff by reducing the tail
2021 * room. This is only allowed for an empty buffer.
2022 */
2023 static inline void skb_reserve(struct sk_buff *skb, int len)
2024 {
2025 skb->data += len;
2026 skb->tail += len;
2027 }
2028
2029 /**
2030 * skb_tailroom_reserve - adjust reserved_tailroom
2031 * @skb: buffer to alter
2032 * @mtu: maximum amount of headlen permitted
2033 * @needed_tailroom: minimum amount of reserved_tailroom
2034 *
2035 * Set reserved_tailroom so that headlen can be as large as possible but
2036 * not larger than mtu and tailroom cannot be smaller than
2037 * needed_tailroom.
2038 * The required headroom should already have been reserved before using
2039 * this function.
2040 */
2041 static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
2042 unsigned int needed_tailroom)
2043 {
2044 SKB_LINEAR_ASSERT(skb);
2045 if (mtu < skb_tailroom(skb) - needed_tailroom)
2046 /* use at most mtu */
2047 skb->reserved_tailroom = skb_tailroom(skb) - mtu;
2048 else
2049 /* use up to all available space */
2050 skb->reserved_tailroom = needed_tailroom;
2051 }
2052
2053 #define ENCAP_TYPE_ETHER 0
2054 #define ENCAP_TYPE_IPPROTO 1
2055
2056 static inline void skb_set_inner_protocol(struct sk_buff *skb,
2057 __be16 protocol)
2058 {
2059 skb->inner_protocol = protocol;
2060 skb->inner_protocol_type = ENCAP_TYPE_ETHER;
2061 }
2062
2063 static inline void skb_set_inner_ipproto(struct sk_buff *skb,
2064 __u8 ipproto)
2065 {
2066 skb->inner_ipproto = ipproto;
2067 skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
2068 }
2069
2070 static inline void skb_reset_inner_headers(struct sk_buff *skb)
2071 {
2072 skb->inner_mac_header = skb->mac_header;
2073 skb->inner_network_header = skb->network_header;
2074 skb->inner_transport_header = skb->transport_header;
2075 }
2076
2077 static inline void skb_reset_mac_len(struct sk_buff *skb)
2078 {
2079 skb->mac_len = skb->network_header - skb->mac_header;
2080 }
2081
2082 static inline unsigned char *skb_inner_transport_header(const struct sk_buff
2083 *skb)
2084 {
2085 return skb->head + skb->inner_transport_header;
2086 }
2087
2088 static inline int skb_inner_transport_offset(const struct sk_buff *skb)
2089 {
2090 return skb_inner_transport_header(skb) - skb->data;
2091 }
2092
2093 static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
2094 {
2095 skb->inner_transport_header = skb->data - skb->head;
2096 }
2097
2098 static inline void skb_set_inner_transport_header(struct sk_buff *skb,
2099 const int offset)
2100 {
2101 skb_reset_inner_transport_header(skb);
2102 skb->inner_transport_header += offset;
2103 }
2104
2105 static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
2106 {
2107 return skb->head + skb->inner_network_header;
2108 }
2109
2110 static inline void skb_reset_inner_network_header(struct sk_buff *skb)
2111 {
2112 skb->inner_network_header = skb->data - skb->head;
2113 }
2114
2115 static inline void skb_set_inner_network_header(struct sk_buff *skb,
2116 const int offset)
2117 {
2118 skb_reset_inner_network_header(skb);
2119 skb->inner_network_header += offset;
2120 }
2121
2122 static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
2123 {
2124 return skb->head + skb->inner_mac_header;
2125 }
2126
2127 static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
2128 {
2129 skb->inner_mac_header = skb->data - skb->head;
2130 }
2131
2132 static inline void skb_set_inner_mac_header(struct sk_buff *skb,
2133 const int offset)
2134 {
2135 skb_reset_inner_mac_header(skb);
2136 skb->inner_mac_header += offset;
2137 }
2138 static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
2139 {
2140 return skb->transport_header != (typeof(skb->transport_header))~0U;
2141 }
2142
2143 static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
2144 {
2145 return skb->head + skb->transport_header;
2146 }
2147
2148 static inline void skb_reset_transport_header(struct sk_buff *skb)
2149 {
2150 skb->transport_header = skb->data - skb->head;
2151 }
2152
2153 static inline void skb_set_transport_header(struct sk_buff *skb,
2154 const int offset)
2155 {
2156 skb_reset_transport_header(skb);
2157 skb->transport_header += offset;
2158 }
2159
2160 static inline unsigned char *skb_network_header(const struct sk_buff *skb)
2161 {
2162 return skb->head + skb->network_header;
2163 }
2164
2165 static inline void skb_reset_network_header(struct sk_buff *skb)
2166 {
2167 skb->network_header = skb->data - skb->head;
2168 }
2169
2170 static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
2171 {
2172 skb_reset_network_header(skb);
2173 skb->network_header += offset;
2174 }
2175
2176 static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
2177 {
2178 return skb->head + skb->mac_header;
2179 }
2180
2181 static inline int skb_mac_header_was_set(const struct sk_buff *skb)
2182 {
2183 return skb->mac_header != (typeof(skb->mac_header))~0U;
2184 }
2185
2186 static inline void skb_reset_mac_header(struct sk_buff *skb)
2187 {
2188 skb->mac_header = skb->data - skb->head;
2189 }
2190
2191 static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
2192 {
2193 skb_reset_mac_header(skb);
2194 skb->mac_header += offset;
2195 }
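/* Illustrative sketch, not part of the header: recording header offsets
 * while building an outgoing frame with the helpers above. The two
 * length parameters stand in for real header sizes (e.g. ETH_HLEN and
 * the computed IP header length).
 */
static inline void example_record_offsets(struct sk_buff *skb,
					  unsigned int mac_len,
					  unsigned int net_len)
{
	skb_reset_mac_header(skb);		/* MAC header starts at skb->data */
	skb_set_network_header(skb, mac_len);	/* network header follows it */
	skb_set_transport_header(skb, mac_len + net_len);
}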
2196
2197 static inline void skb_pop_mac_header(struct sk_buff *skb)
2198 {
2199 skb->mac_header = skb->network_header;
2200 }
2201
2202 static inline void skb_probe_transport_header(struct sk_buff *skb,
2203 const int offset_hint)
2204 {
2205 struct flow_keys keys;
2206
2207 if (skb_transport_header_was_set(skb))
2208 return;
2209 else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
2210 skb_set_transport_header(skb, keys.control.thoff);
2211 else
2212 skb_set_transport_header(skb, offset_hint);
2213 }
2214
2215 static inline void skb_mac_header_rebuild(struct sk_buff *skb)
2216 {
2217 if (skb_mac_header_was_set(skb)) {
2218 const unsigned char *old_mac = skb_mac_header(skb);
2219
2220 skb_set_mac_header(skb, -skb->mac_len);
2221 memmove(skb_mac_header(skb), old_mac, skb->mac_len);
2222 }
2223 }
2224
2225 static inline int skb_checksum_start_offset(const struct sk_buff *skb)
2226 {
2227 return skb->csum_start - skb_headroom(skb);
2228 }
2229
2230 static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
2231 {
2232 return skb->head + skb->csum_start;
2233 }
2234
2235 static inline int skb_transport_offset(const struct sk_buff *skb)
2236 {
2237 return skb_transport_header(skb) - skb->data;
2238 }
2239
2240 static inline u32 skb_network_header_len(const struct sk_buff *skb)
2241 {
2242 return skb->transport_header - skb->network_header;
2243 }
2244
2245 static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
2246 {
2247 return skb->inner_transport_header - skb->inner_network_header;
2248 }
2249
2250 static inline int skb_network_offset(const struct sk_buff *skb)
2251 {
2252 return skb_network_header(skb) - skb->data;
2253 }
2254
2255 static inline int skb_inner_network_offset(const struct sk_buff *skb)
2256 {
2257 return skb_inner_network_header(skb) - skb->data;
2258 }
2259
2260 static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
2261 {
2262 return pskb_may_pull(skb, skb_network_offset(skb) + len);
2263 }
2264
2265 /*
2266 * CPUs often take a performance hit when accessing unaligned memory
2267 * locations. The actual performance hit varies, it can be small if the
2268 * hardware handles it or large if we have to take an exception and fix it
2269 * in software.
2270 *
2271 * Since an ethernet header is 14 bytes, network drivers often end up with
2272 * the IP header at an unaligned offset. The IP header can be aligned by
2273 * shifting the start of the packet by 2 bytes. Drivers should do this
2274 * with:
2275 *
2276 * skb_reserve(skb, NET_IP_ALIGN);
2277 *
2278 * The downside to this alignment of the IP header is that the DMA is now
2279 * unaligned. On some architectures the cost of an unaligned DMA is high
2280 * and this cost outweighs the gains made by aligning the IP header.
2281 *
2282 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
2283 * to be overridden.
2284 */
2285 #ifndef NET_IP_ALIGN
2286 #define NET_IP_ALIGN 2
2287 #endif
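/* Illustrative sketch, not part of the header: the recipe from the
 * comment above. The caller is assumed to have allocated @skb with
 * NET_IP_ALIGN extra bytes (see __netdev_alloc_skb_ip_align() below).
 */
static inline void example_align_rx_skb(struct sk_buff *skb)
{
	/* 14-byte MAC header + 2-byte shift => 4-byte-aligned IP header */
	skb_reserve(skb, NET_IP_ALIGN);
}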
2288
2289 /*
2290 * The networking layer reserves some headroom in skb data (via
2291 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
2292 * the header has to grow. In the default case, if the header has to grow
2293 * 32 bytes or less we avoid the reallocation.
2294 *
2295 * Unfortunately this headroom changes the DMA alignment of the resulting
2296 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
2297 * on some architectures. An architecture can override this value,
2298 * perhaps setting it to a cacheline in size (since that will maintain
2299 * cacheline alignment of the DMA). It must be a power of 2.
2300 *
2301 * Various parts of the networking layer expect at least 32 bytes of
2302 * headroom, you should not reduce this.
2303 *
2304 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
2305 * to reduce average number of cache lines per packet.
2306 * get_rps_cpus() for example only accesses one 64-byte-aligned block:
2307 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
2308 */
2309 #ifndef NET_SKB_PAD
2310 #define NET_SKB_PAD max(32, L1_CACHE_BYTES)
2311 #endif
2312
2313 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
2314
2315 static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
2316 {
2317 if (unlikely(skb_is_nonlinear(skb))) {
2318 WARN_ON(1);
2319 return;
2320 }
2321 skb->len = len;
2322 skb_set_tail_pointer(skb, len);
2323 }
2324
2325 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
2326 {
2327 __skb_set_length(skb, len);
2328 }
2329
2330 void skb_trim(struct sk_buff *skb, unsigned int len);
2331
2332 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
2333 {
2334 if (skb->data_len)
2335 return ___pskb_trim(skb, len);
2336 __skb_trim(skb, len);
2337 return 0;
2338 }
2339
2340 static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
2341 {
2342 return (len < skb->len) ? __pskb_trim(skb, len) : 0;
2343 }
2344
2345 /**
2346 * pskb_trim_unique - remove end from a paged unique (not cloned) buffer
2347 * @skb: buffer to alter
2348 * @len: new length
2349 *
2350 * This is identical to pskb_trim except that the caller knows that
2351 * the skb is not cloned so we should never get an error due to out-
2352 * of-memory.
2353 */
2354 static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
2355 {
2356 int err = pskb_trim(skb, len);
2357 BUG_ON(err);
2358 }
2359
2360 static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
2361 {
2362 unsigned int diff = len - skb->len;
2363
2364 if (skb_tailroom(skb) < diff) {
2365 int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
2366 GFP_ATOMIC);
2367 if (ret)
2368 return ret;
2369 }
2370 __skb_set_length(skb, len);
2371 return 0;
2372 }
2373
2374 /**
2375 * skb_orphan - orphan a buffer
2376 * @skb: buffer to orphan
2377 *
2378 * If a buffer currently has an owner then we call the owner's
2379 * destructor function and make the @skb unowned. The buffer continues
2380 * to exist but is no longer charged to its former owner.
2381 */
2382 static inline void skb_orphan(struct sk_buff *skb)
2383 {
2384 if (skb->destructor) {
2385 skb->destructor(skb);
2386 skb->destructor = NULL;
2387 skb->sk = NULL;
2388 } else {
2389 BUG_ON(skb->sk);
2390 }
2391 }
2392
2393 /**
2394 * skb_orphan_frags - orphan the frags contained in a buffer
2395 * @skb: buffer to orphan frags from
2396 * @gfp_mask: allocation mask for replacement pages
2397 *
2398 * For each frag in the SKB which needs a destructor (i.e. has an
2399 * owner) create a copy of that frag and release the original
2400 * page by calling the destructor.
2401 */
2402 static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
2403 {
2404 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
2405 return 0;
2406 return skb_copy_ubufs(skb, gfp_mask);
2407 }
2408
2409 /**
2410 * __skb_queue_purge - empty a list
2411 * @list: list to empty
2412 *
2413 * Delete all buffers on an &sk_buff list. Each buffer is removed from
2414 * the list and one reference dropped. This function does not take the
2415 * list lock and the caller must hold the relevant locks to use it.
2416 */
2417 void skb_queue_purge(struct sk_buff_head *list);
2418 static inline void __skb_queue_purge(struct sk_buff_head *list)
2419 {
2420 struct sk_buff *skb;
2421 while ((skb = __skb_dequeue(list)) != NULL)
2422 kfree_skb(skb);
2423 }
2424
2425 void skb_rbtree_purge(struct rb_root *root);
2426
2427 void *netdev_alloc_frag(unsigned int fragsz);
2428
2429 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
2430 gfp_t gfp_mask);
2431
2432 /**
2433 * netdev_alloc_skb - allocate an skbuff for rx on a specific device
2434 * @dev: network device to receive on
2435 * @length: length to allocate
2436 *
2437 * Allocate a new &sk_buff and assign it a usage count of one. The
2438 * buffer has unspecified headroom built in. Users should allocate
2439 * the headroom they think they need without accounting for the
2440 * built in space. The built in space is used for optimisations.
2441 *
2442 * %NULL is returned if there is no free memory. Although this function
2443 * allocates memory it can be called from an interrupt.
2444 */
2445 static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
2446 unsigned int length)
2447 {
2448 return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
2449 }
2450
2451 /* legacy helper around __netdev_alloc_skb() */
2452 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
2453 gfp_t gfp_mask)
2454 {
2455 return __netdev_alloc_skb(NULL, length, gfp_mask);
2456 }
2457
2458 /* legacy helper around netdev_alloc_skb() */
2459 static inline struct sk_buff *dev_alloc_skb(unsigned int length)
2460 {
2461 return netdev_alloc_skb(NULL, length);
2462 }
2463
2464
2465 static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
2466 unsigned int length, gfp_t gfp)
2467 {
2468 struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
2469
2470 if (NET_IP_ALIGN && skb)
2471 skb_reserve(skb, NET_IP_ALIGN);
2472 return skb;
2473 }
2474
2475 static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
2476 unsigned int length)
2477 {
2478 return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
2479 }
2480
2481 static inline void skb_free_frag(void *addr)
2482 {
2483 __free_page_frag(addr);
2484 }
2485
2486 void *napi_alloc_frag(unsigned int fragsz);
2487 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
2488 unsigned int length, gfp_t gfp_mask);
2489 static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
2490 unsigned int length)
2491 {
2492 return __napi_alloc_skb(napi, length, GFP_ATOMIC);
2493 }
2494 void napi_consume_skb(struct sk_buff *skb, int budget);
2495
2496 void __kfree_skb_flush(void);
2497 void __kfree_skb_defer(struct sk_buff *skb);
2498
2499 /**
2500 * __dev_alloc_pages - allocate page for network Rx
2501 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
2502 * @order: size of the allocation
2503 *
2504 * Allocate a new page.
2505 *
2506 * %NULL is returned if there is no free memory.
2507 */
2508 static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
2509 unsigned int order)
2510 {
2511 /* This piece of code contains several assumptions.
2512 * 1. This is for device Rx, therefore a cold page is preferred.
2513 * 2. The expectation is the user wants a compound page.
2514 * 3. If requesting an order-0 page it will not be compound
2515 * due to the check to see if order has a value in prep_new_page
2516 * 4. __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
2517 * code in gfp_to_alloc_flags that should be enforcing this.
2518 */
2519 gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC;
2520
2521 return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
2522 }
2523
2524 static inline struct page *dev_alloc_pages(unsigned int order)
2525 {
2526 return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
2527 }
2528
2529 /**
2530 * __dev_alloc_page - allocate a page for network Rx
2531 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
2532 *
2533 * Allocate a new page.
2534 *
2535 * %NULL is returned if there is no free memory.
2536 */
2537 static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
2538 {
2539 return __dev_alloc_pages(gfp_mask, 0);
2540 }
2541
2542 static inline struct page *dev_alloc_page(void)
2543 {
2544 return dev_alloc_pages(0);
2545 }
2546
2547 /**
2548 * skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
2549 * @page: The page that was allocated from skb_alloc_page
2550 * @skb: The skb that may need pfmemalloc set
2551 */
2552 static inline void skb_propagate_pfmemalloc(struct page *page,
2553 struct sk_buff *skb)
2554 {
2555 if (page_is_pfmemalloc(page))
2556 skb->pfmemalloc = true;
2557 }
2558
2559 /**
2560 * skb_frag_page - retrieve the page referred to by a paged fragment
2561 * @frag: the paged fragment
2562 *
2563 * Returns the &struct page associated with @frag.
2564 */
2565 static inline struct page *skb_frag_page(const skb_frag_t *frag)
2566 {
2567 return frag->page.p;
2568 }
2569
2570 /**
2571 * __skb_frag_ref - take an additional reference on a paged fragment.
2572 * @frag: the paged fragment
2573 *
2574 * Takes an additional reference on the paged fragment @frag.
2575 */
2576 static inline void __skb_frag_ref(skb_frag_t *frag)
2577 {
2578 get_page(skb_frag_page(frag));
2579 }
2580
2581 /**
2582 * skb_frag_ref - take an additional reference on a paged fragment of an skb.
2583 * @skb: the buffer
2584 * @f: the fragment offset.
2585 *
2586 * Takes an additional reference on the @f'th paged fragment of @skb.
2587 */
2588 static inline void skb_frag_ref(struct sk_buff *skb, int f)
2589 {
2590 __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
2591 }
2592
2593 /**
2594 * __skb_frag_unref - release a reference on a paged fragment.
2595 * @frag: the paged fragment
2596 *
2597 * Releases a reference on the paged fragment @frag.
2598 */
2599 static inline void __skb_frag_unref(skb_frag_t *frag)
2600 {
2601 put_page(skb_frag_page(frag));
2602 }
2603
2604 /**
2605 * skb_frag_unref - release a reference on a paged fragment of an skb.
2606 * @skb: the buffer
2607 * @f: the fragment offset
2608 *
2609 * Releases a reference on the @f'th paged fragment of @skb.
2610 */
2611 static inline void skb_frag_unref(struct sk_buff *skb, int f)
2612 {
2613 __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
2614 }
2615
2616 /**
2617 * skb_frag_address - gets the address of the data contained in a paged fragment
2618 * @frag: the paged fragment buffer
2619 *
2620 * Returns the address of the data within @frag. The page must already
2621 * be mapped.
2622 */
2623 static inline void *skb_frag_address(const skb_frag_t *frag)
2624 {
2625 return page_address(skb_frag_page(frag)) + frag->page_offset;
2626 }
2627
2628 /**
2629 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
2630 * @frag: the paged fragment buffer
2631 *
2632 * Returns the address of the data within @frag. Checks that the page
2633 * is mapped and returns %NULL otherwise.
2634 */
2635 static inline void *skb_frag_address_safe(const skb_frag_t *frag)
2636 {
2637 void *ptr = page_address(skb_frag_page(frag));
2638 if (unlikely(!ptr))
2639 return NULL;
2640
2641 return ptr + frag->page_offset;
2642 }
2643
2644 /**
2645 * __skb_frag_set_page - sets the page contained in a paged fragment
2646 * @frag: the paged fragment
2647 * @page: the page to set
2648 *
2649 * Sets the fragment @frag to contain @page.
2650 */
2651 static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
2652 {
2653 frag->page.p = page;
2654 }
2655
2656 /**
2657 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
2658 * @skb: the buffer
2659 * @f: the fragment offset
2660 * @page: the page to set
2661 *
2662 * Sets the @f'th fragment of @skb to contain @page.
2663 */
2664 static inline void skb_frag_set_page(struct sk_buff *skb, int f,
2665 struct page *page)
2666 {
2667 __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
2668 }
2669
2670 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
2671
2672 /**
2673 * skb_frag_dma_map - maps a paged fragment via the DMA API
2674 * @dev: the device to map the fragment to
2675 * @frag: the paged fragment to map
2676 * @offset: the offset within the fragment (starting at the
2677 * fragment's own offset)
2678 * @size: the number of bytes to map
2679 * @dir: the direction of the mapping (%PCI_DMA_*)
2680 *
2681 * Maps the page associated with @frag to @device.
2682 */
2683 static inline dma_addr_t skb_frag_dma_map(struct device *dev,
2684 const skb_frag_t *frag,
2685 size_t offset, size_t size,
2686 enum dma_data_direction dir)
2687 {
2688 return dma_map_page(dev, skb_frag_page(frag),
2689 frag->page_offset + offset, size, dir);
2690 }
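/* Illustrative sketch, not part of the header: mapping every paged
 * fragment of an skb for transmit. Unmapping the already-mapped
 * fragments on failure is elided; addrs[] must hold nr_frags entries.
 */
static inline int example_map_tx_frags(struct device *dev,
				       struct sk_buff *skb,
				       dma_addr_t *addrs)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		addrs[i] = skb_frag_dma_map(dev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, addrs[i]))
			return -ENOMEM;
	}
	return 0;
}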
2691
2692 static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
2693 gfp_t gfp_mask)
2694 {
2695 return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
2696 }
2697
2698
2699 static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
2700 gfp_t gfp_mask)
2701 {
2702 return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
2703 }
2704
2705
2706 /**
2707 * skb_clone_writable - is the header of a clone writable
2708 * @skb: buffer to check
2709 * @len: length up to which to write
2710 *
2711 * Returns true if modifying the header part of the cloned buffer
2712 * does not require the data to be copied.
2713 */
2714 static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
2715 {
2716 return !skb_header_cloned(skb) &&
2717 skb_headroom(skb) + len <= skb->hdr_len;
2718 }
2719
2720 static inline int skb_try_make_writable(struct sk_buff *skb,
2721 unsigned int write_len)
2722 {
2723 return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
2724 pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2725 }
2726
2727 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
2728 int cloned)
2729 {
2730 int delta = 0;
2731
2732 if (headroom > skb_headroom(skb))
2733 delta = headroom - skb_headroom(skb);
2734
2735 if (delta || cloned)
2736 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
2737 GFP_ATOMIC);
2738 return 0;
2739 }
2740
2741 /**
2742 * skb_cow - copy header of skb when it is required
2743 * @skb: buffer to cow
2744 * @headroom: needed headroom
2745 *
2746 * If the skb passed lacks sufficient headroom or its data part
2747 * is shared, data is reallocated. If reallocation fails, an error
2748 * is returned and original skb is not changed.
2749 *
2750 * The result is skb with writable area skb->head...skb->tail
2751 * and at least @headroom of space at head.
2752 */
2753 static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
2754 {
2755 return __skb_cow(skb, headroom, skb_cloned(skb));
2756 }
2757
2758 /**
2759 * skb_cow_head - skb_cow but only making the head writable
2760 * @skb: buffer to cow
2761 * @headroom: needed headroom
2762 *
2763 * This function is identical to skb_cow except that we replace the
2764 * skb_cloned check by skb_header_cloned. It should be used when
2765 * you only need to push on some header and do not need to modify
2766 * the data.
2767 */
2768 static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
2769 {
2770 return __skb_cow(skb, headroom, skb_header_cloned(skb));
2771 }
2772
2773 /**
2774 * skb_padto - pad an skbuff up to a minimal size
2775 * @skb: buffer to pad
2776 * @len: minimal length
2777 *
2778 * Pads up a buffer to ensure the trailing bytes exist and are
2779 * blanked. If the buffer already contains sufficient data it
2780 * is untouched. Otherwise it is extended. Returns zero on
2781 * success. The skb is freed on error.
2782 */
2783 static inline int skb_padto(struct sk_buff *skb, unsigned int len)
2784 {
2785 unsigned int size = skb->len;
2786 if (likely(size >= len))
2787 return 0;
2788 return skb_pad(skb, len - size);
2789 }
2790
2791 /**
2792 * skb_put_padto - increase size and pad an skbuff up to a minimal size
2793 * @skb: buffer to pad
2794 * @len: minimal length
2795 *
2796 * Pads up a buffer to ensure the trailing bytes exist and are
2797 * blanked. If the buffer already contains sufficient data it
2798 * is untouched. Otherwise it is extended. Returns zero on
2799 * success. The skb is freed on error.
2800 */
2801 static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
2802 {
2803 unsigned int size = skb->len;
2804
2805 if (unlikely(size < len)) {
2806 len -= size;
2807 if (skb_pad(skb, len))
2808 return -ENOMEM;
2809 __skb_put(skb, len);
2810 }
2811 return 0;
2812 }
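/* Illustrative sketch, not part of the header: padding a frame to the
 * minimum Ethernet length before handing it to hardware that does not
 * pad by itself. ETH_ZLEN comes from <linux/if_ether.h>.
 */
static inline int example_pad_for_tx(struct sk_buff *skb)
{
	if (skb_put_padto(skb, ETH_ZLEN))
		return -ENOMEM;	/* skb has already been freed on error */
	/* skb->len >= ETH_ZLEN and the added bytes are zeroed */
	return 0;
}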
2813
2814 static inline int skb_add_data(struct sk_buff *skb,
2815 struct iov_iter *from, int copy)
2816 {
2817 const int off = skb->len;
2818
2819 if (skb->ip_summed == CHECKSUM_NONE) {
2820 __wsum csum = 0;
2821 if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
2822 &csum, from)) {
2823 skb->csum = csum_block_add(skb->csum, csum, off);
2824 return 0;
2825 }
2826 } else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
2827 return 0;
2828
2829 __skb_trim(skb, off);
2830 return -EFAULT;
2831 }
2832
2833 static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
2834 const struct page *page, int off)
2835 {
2836 if (i) {
2837 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
2838
2839 return page == skb_frag_page(frag) &&
2840 off == frag->page_offset + skb_frag_size(frag);
2841 }
2842 return false;
2843 }
2844
2845 static inline int __skb_linearize(struct sk_buff *skb)
2846 {
2847 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
2848 }
2849
2850 /**
2851 * skb_linearize - convert paged skb to linear one
2852 * @skb: buffer to linearize
2853 *
2854 * If there is no free memory -ENOMEM is returned, otherwise zero
2855 * is returned and the old skb data released.
2856 */
2857 static inline int skb_linearize(struct sk_buff *skb)
2858 {
2859 return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
2860 }
2861
2862 /**
2863 * skb_has_shared_frag - can any frag be overwritten
2864 * @skb: buffer to test
2865 *
2866 * Return true if the skb has at least one frag that might be modified
2867 * by an external entity (as in vmsplice()/sendfile())
2868 */
2869 static inline bool skb_has_shared_frag(const struct sk_buff *skb)
2870 {
2871 return skb_is_nonlinear(skb) &&
2872 skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
2873 }
2874
2875 /**
2876 * skb_linearize_cow - make sure skb is linear and writable
2877 * @skb: buffer to process
2878 *
2879 * If there is no free memory -ENOMEM is returned, otherwise zero
2880 * is returned and the old skb data released.
2881 */
2882 static inline int skb_linearize_cow(struct sk_buff *skb)
2883 {
2884 return skb_is_nonlinear(skb) || skb_cloned(skb) ?
2885 __skb_linearize(skb) : 0;
2886 }
2887
2888 static __always_inline void
2889 __skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
2890 unsigned int off)
2891 {
2892 if (skb->ip_summed == CHECKSUM_COMPLETE)
2893 skb->csum = csum_block_sub(skb->csum,
2894 csum_partial(start, len, 0), off);
2895 else if (skb->ip_summed == CHECKSUM_PARTIAL &&
2896 skb_checksum_start_offset(skb) < 0)
2897 skb->ip_summed = CHECKSUM_NONE;
2898 }
2899
2900 /**
2901 * skb_postpull_rcsum - update checksum for received skb after pull
2902 * @skb: buffer to update
2903 * @start: start of data before pull
2904 * @len: length of data pulled
2905 *
2906 * After doing a pull on a received packet, you need to call this to
2907 * update the CHECKSUM_COMPLETE checksum, or set ip_summed to
2908 * CHECKSUM_NONE so that it can be recomputed from scratch.
2909 */
2910 static inline void skb_postpull_rcsum(struct sk_buff *skb,
2911 const void *start, unsigned int len)
2912 {
2913 __skb_postpull_rcsum(skb, start, len, 0);
2914 }
2915
2916 static __always_inline void
2917 __skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
2918 unsigned int off)
2919 {
2920 if (skb->ip_summed == CHECKSUM_COMPLETE)
2921 skb->csum = csum_block_add(skb->csum,
2922 csum_partial(start, len, 0), off);
2923 }
2924
2925 /**
2926 * skb_postpush_rcsum - update checksum for received skb after push
2927 * @skb: buffer to update
2928 * @start: start of data after push
2929 * @len: length of data pushed
2930 *
2931 * After doing a push on a received packet, you need to call this to
2932 * update the CHECKSUM_COMPLETE checksum.
2933 */
2934 static inline void skb_postpush_rcsum(struct sk_buff *skb,
2935 const void *start, unsigned int len)
2936 {
2937 __skb_postpush_rcsum(skb, start, len, 0);
2938 }
2939
2940 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
2941
2942 /**
2943 * skb_push_rcsum - push skb and update receive checksum
2944 * @skb: buffer to update
2945 * @len: length of data pulled
2946 *
2947 * This function performs an skb_push on the packet and updates
2948 * the CHECKSUM_COMPLETE checksum. It should be used on
2949 * receive path processing instead of skb_push unless you know
2950 * that the checksum difference is zero (e.g., a valid IP header)
2951 * or you are setting ip_summed to CHECKSUM_NONE.
2952 */
2953 static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
2954 unsigned int len)
2955 {
2956 skb_push(skb, len);
2957 skb_postpush_rcsum(skb, skb->data, len);
2958 return skb->data;
2959 }
2960
2961 /**
2962 * pskb_trim_rcsum - trim received skb and update checksum
2963 * @skb: buffer to trim
2964 * @len: new length
2965 *
2966 * This is exactly the same as pskb_trim except that it ensures the
2967 * checksum of received packets is still valid after the operation.
2968 */
2969
2970 static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
2971 {
2972 if (likely(len >= skb->len))
2973 return 0;
2974 if (skb->ip_summed == CHECKSUM_COMPLETE)
2975 skb->ip_summed = CHECKSUM_NONE;
2976 return __pskb_trim(skb, len);
2977 }
2978
2979 static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
2980 {
2981 if (skb->ip_summed == CHECKSUM_COMPLETE)
2982 skb->ip_summed = CHECKSUM_NONE;
2983 __skb_trim(skb, len);
2984 return 0;
2985 }
2986
2987 static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
2988 {
2989 if (skb->ip_summed == CHECKSUM_COMPLETE)
2990 skb->ip_summed = CHECKSUM_NONE;
2991 return __skb_grow(skb, len);
2992 }
2993
2994 #define skb_queue_walk(queue, skb) \
2995 for (skb = (queue)->next; \
2996 skb != (struct sk_buff *)(queue); \
2997 skb = skb->next)
2998
2999 #define skb_queue_walk_safe(queue, skb, tmp) \
3000 for (skb = (queue)->next, tmp = skb->next; \
3001 skb != (struct sk_buff *)(queue); \
3002 skb = tmp, tmp = skb->next)
3003
3004 #define skb_queue_walk_from(queue, skb) \
3005 for (; skb != (struct sk_buff *)(queue); \
3006 skb = skb->next)
3007
3008 #define skb_queue_walk_from_safe(queue, skb, tmp) \
3009 for (tmp = skb->next; \
3010 skb != (struct sk_buff *)(queue); \
3011 skb = tmp, tmp = skb->next)
3012
3013 #define skb_queue_reverse_walk(queue, skb) \
3014 for (skb = (queue)->prev; \
3015 skb != (struct sk_buff *)(queue); \
3016 skb = skb->prev)
3017
3018 #define skb_queue_reverse_walk_safe(queue, skb, tmp) \
3019 for (skb = (queue)->prev, tmp = skb->prev; \
3020 skb != (struct sk_buff *)(queue); \
3021 skb = tmp, tmp = skb->prev)
3022
3023 #define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \
3024 for (tmp = skb->prev; \
3025 skb != (struct sk_buff *)(queue); \
3026 skb = tmp, tmp = skb->prev)
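/* Illustrative sketch, not part of the header: the _safe walker allows
 * unlinking entries while iterating. match() is a made-up predicate;
 * the caller is assumed to hold the lock protecting the queue.
 */
static inline void example_drop_matching(struct sk_buff_head *queue,
					 bool (*match)(const struct sk_buff *))
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(queue, skb, tmp) {
		if (match(skb)) {
			__skb_unlink(skb, queue);
			kfree_skb(skb);
		}
	}
}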
3027
3028 static inline bool skb_has_frag_list(const struct sk_buff *skb)
3029 {
3030 return skb_shinfo(skb)->frag_list != NULL;
3031 }
3032
3033 static inline void skb_frag_list_init(struct sk_buff *skb)
3034 {
3035 skb_shinfo(skb)->frag_list = NULL;
3036 }
3037
3038 #define skb_walk_frags(skb, iter) \
3039 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
3040
3041
3042 int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
3043 const struct sk_buff *skb);
3044 struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
3045 void (*destructor)(struct sock *sk,
3046 struct sk_buff *skb),
3047 int *peeked, int *off, int *err,
3048 struct sk_buff **last);
3049 struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
3050 void (*destructor)(struct sock *sk,
3051 struct sk_buff *skb),
3052 int *peeked, int *off, int *err);
3053 struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
3054 int *err);
3055 unsigned int datagram_poll(struct file *file, struct socket *sock,
3056 struct poll_table_struct *wait);
3057 int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
3058 struct iov_iter *to, int size);
3059 static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
3060 struct msghdr *msg, int size)
3061 {
3062 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
3063 }
3064 int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
3065 struct msghdr *msg);
3066 int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
3067 struct iov_iter *from, int len);
3068 int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
3069 void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
3070 void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
3071 static inline void skb_free_datagram_locked(struct sock *sk,
3072 struct sk_buff *skb)
3073 {
3074 __skb_free_datagram_locked(sk, skb, 0);
3075 }
3076 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
3077 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
3078 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
3079 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
3080 int len, __wsum csum);
3081 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
3082 struct pipe_inode_info *pipe, unsigned int len,
3083 unsigned int flags);
3084 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
3085 unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
3086 int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
3087 int len, int hlen);
3088 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
3089 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
3090 void skb_scrub_packet(struct sk_buff *skb, bool xnet);
3091 unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
3092 bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
3093 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
3094 struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
3095 int skb_ensure_writable(struct sk_buff *skb, int write_len);
3096 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
3097 int skb_vlan_pop(struct sk_buff *skb);
3098 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
3099 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
3100 gfp_t gfp);
3101
3102 static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
3103 {
3104 return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
3105 }
3106
3107 static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
3108 {
3109 return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
3110 }
3111
3112 struct skb_checksum_ops {
3113 __wsum (*update)(const void *mem, int len, __wsum wsum);
3114 __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
3115 };
3116
3117 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
3118 __wsum csum, const struct skb_checksum_ops *ops);
3119 __wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
3120 __wsum csum);
3121
3122 static inline void * __must_check
3123 __skb_header_pointer(const struct sk_buff *skb, int offset,
3124 int len, void *data, int hlen, void *buffer)
3125 {
3126 if (hlen - offset >= len)
3127 return data + offset;
3128
3129 if (!skb ||
3130 skb_copy_bits(skb, offset, buffer, len) < 0)
3131 return NULL;
3132
3133 return buffer;
3134 }
3135
3136 static inline void * __must_check
3137 skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
3138 {
3139 return __skb_header_pointer(skb, offset, len, skb->data,
3140 skb_headlen(skb), buffer);
3141 }
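/* Illustrative sketch, not part of the header: reading a UDP header that
 * may extend into paged data. The bytes are copied into the stack buffer
 * only when they are not already linear. Assumes <linux/udp.h>.
 */
static inline int example_read_udp_dport(const struct sk_buff *skb,
					 int thoff, __be16 *dport)
{
	struct udphdr _uh;
	const struct udphdr *uh;

	uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
	if (!uh)
		return -EINVAL;	/* packet too short for a UDP header */

	*dport = uh->dest;
	return 0;
}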
3142
3143 /**
3144 * skb_needs_linearize - check if we need to linearize a given skb
3145 * depending on the given device features.
3146 * @skb: socket buffer to check
3147 * @features: net device features
3148 *
3149 * Returns true if either:
3150 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
3151 * 2. skb is fragmented and the device does not support SG.
3152 */
3153 static inline bool skb_needs_linearize(struct sk_buff *skb,
3154 netdev_features_t features)
3155 {
3156 return skb_is_nonlinear(skb) &&
3157 ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
3158 (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
3159 }
3160
3161 static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
3162 void *to,
3163 const unsigned int len)
3164 {
3165 memcpy(to, skb->data, len);
3166 }
3167
3168 static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
3169 const int offset, void *to,
3170 const unsigned int len)
3171 {
3172 memcpy(to, skb->data + offset, len);
3173 }
3174
3175 static inline void skb_copy_to_linear_data(struct sk_buff *skb,
3176 const void *from,
3177 const unsigned int len)
3178 {
3179 memcpy(skb->data, from, len);
3180 }
3181
3182 static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
3183 const int offset,
3184 const void *from,
3185 const unsigned int len)
3186 {
3187 memcpy(skb->data + offset, from, len);
3188 }
3189
3190 void skb_init(void);
3191
3192 static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
3193 {
3194 return skb->tstamp;
3195 }
3196
3197 /**
3198 * skb_get_timestamp - get timestamp from a skb
3199 * @skb: skb to get stamp from
3200 * @stamp: pointer to struct timeval to store stamp in
3201 *
3202 * Timestamps are stored in the skb as offsets to a base timestamp.
3203 * This function converts the offset back to a struct timeval and stores
3204 * it in stamp.
3205 */
3206 static inline void skb_get_timestamp(const struct sk_buff *skb,
3207 struct timeval *stamp)
3208 {
3209 *stamp = ktime_to_timeval(skb->tstamp);
3210 }
3211
3212 static inline void skb_get_timestampns(const struct sk_buff *skb,
3213 struct timespec *stamp)
3214 {
3215 *stamp = ktime_to_timespec(skb->tstamp);
3216 }
3217
3218 static inline void __net_timestamp(struct sk_buff *skb)
3219 {
3220 skb->tstamp = ktime_get_real();
3221 }
3222
3223 static inline ktime_t net_timedelta(ktime_t t)
3224 {
3225 return ktime_sub(ktime_get_real(), t);
3226 }
3227
3228 static inline ktime_t net_invalid_timestamp(void)
3229 {
3230 return 0;
3231 }
3232
3233 struct sk_buff *skb_clone_sk(struct sk_buff *skb);
3234
3235 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
3236
3237 void skb_clone_tx_timestamp(struct sk_buff *skb);
3238 bool skb_defer_rx_timestamp(struct sk_buff *skb);
3239
3240 #else /* CONFIG_NETWORK_PHY_TIMESTAMPING */
3241
3242 static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
3243 {
3244 }
3245
3246 static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
3247 {
3248 return false;
3249 }
3250
3251 #endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */
3252
3253 /**
3254 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
3255 *
3256 * PHY drivers may accept clones of transmitted packets for
3257 * timestamping via their phy_driver.txtstamp method. These drivers
3258 * must call this function to return the skb back to the stack with a
3259 * timestamp.
3260 *
3261 * @skb: clone of the original outgoing packet
3262 * @hwtstamps: hardware time stamps
3263 *
3264 */
3265 void skb_complete_tx_timestamp(struct sk_buff *skb,
3266 struct skb_shared_hwtstamps *hwtstamps);
3267
3268 void __skb_tstamp_tx(struct sk_buff *orig_skb,
3269 struct skb_shared_hwtstamps *hwtstamps,
3270 struct sock *sk, int tstype);
3271
3272 /**
3273 * skb_tstamp_tx - queue clone of skb with send time stamps
3274 * @orig_skb: the original outgoing packet
3275 * @hwtstamps: hardware time stamps, may be NULL if not available
3276 *
3277 * If the skb has a socket associated, then this function clones the
3278 * skb (thus sharing the actual data and optional structures), stores
3279 * the optional hardware time stamping information (if non NULL) or
3280 * generates a software time stamp (otherwise), then queues the clone
3281 * to the error queue of the socket. Errors are silently ignored.
3282 */
3283 void skb_tstamp_tx(struct sk_buff *orig_skb,
3284 struct skb_shared_hwtstamps *hwtstamps);
3285
3286 static inline void sw_tx_timestamp(struct sk_buff *skb)
3287 {
3288 if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
3289 !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
3290 skb_tstamp_tx(skb, NULL);
3291 }
3292
3293 /**
3294 * skb_tx_timestamp() - Driver hook for transmit timestamping
3295 *
3296 * Ethernet MAC Drivers should call this function in their hard_xmit()
3297 * function immediately before giving the sk_buff to the MAC hardware.
3298 *
3299 * Specifically, one should make absolutely sure that this function is
3300 * called before TX completion of this packet can trigger. Otherwise
3301 * the packet could potentially already be freed.
3302 *
3303 * @skb: A socket buffer.
3304 */
3305 static inline void skb_tx_timestamp(struct sk_buff *skb)
3306 {
3307 skb_clone_tx_timestamp(skb);
3308 sw_tx_timestamp(skb);
3309 }
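/* Illustrative sketch, not part of the header: where the hook sits in a
 * driver's ndo_start_xmit(). netdev_tx_t/NETDEV_TX_OK come from
 * <linux/netdevice.h>; example_ring_post() is hypothetical.
 */
void example_ring_post(struct net_device *dev, struct sk_buff *skb);

static inline netdev_tx_t example_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	/* Last point at which the skb is guaranteed not to be freed yet. */
	skb_tx_timestamp(skb);
	example_ring_post(dev, skb);	/* hypothetical: give skb to the MAC */
	return NETDEV_TX_OK;
}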
3310
3311 /**
3312 * skb_complete_wifi_ack - deliver skb with wifi status
3313 *
3314 * @skb: the original outgoing packet
3315 * @acked: ack status
3316 *
3317 */
3318 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
3319
3320 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
3321 __sum16 __skb_checksum_complete(struct sk_buff *skb);
3322
3323 static inline int skb_csum_unnecessary(const struct sk_buff *skb)
3324 {
3325 return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
3326 skb->csum_valid ||
3327 (skb->ip_summed == CHECKSUM_PARTIAL &&
3328 skb_checksum_start_offset(skb) >= 0));
3329 }
3330
3331 /**
3332 * skb_checksum_complete - Calculate checksum of an entire packet
3333 * @skb: packet to process
3334 *
3335 * This function calculates the checksum over the entire packet plus
3336 * the value of skb->csum. The latter can be used to supply the
3337 * checksum of a pseudo header as used by TCP/UDP. It returns the
3338 * checksum.
3339 *
3340 * For protocols that contain complete checksums such as ICMP/TCP/UDP,
3341 * this function can be used to verify the checksum on received
3342 * packets. In that case the function should return zero if the
3343 * checksum is correct. In particular, this function will return zero
3344 * if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
3345 * hardware has already verified the correctness of the checksum.
3346 */
3347 static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
3348 {
3349 return skb_csum_unnecessary(skb) ?
3350 0 : __skb_checksum_complete(skb);
3351 }
3352
3353 static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
3354 {
3355 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3356 if (skb->csum_level == 0)
3357 skb->ip_summed = CHECKSUM_NONE;
3358 else
3359 skb->csum_level--;
3360 }
3361 }
3362
3363 static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
3364 {
3365 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3366 if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
3367 skb->csum_level++;
3368 } else if (skb->ip_summed == CHECKSUM_NONE) {
3369 skb->ip_summed = CHECKSUM_UNNECESSARY;
3370 skb->csum_level = 0;
3371 }
3372 }
3373
3374 static inline void __skb_mark_checksum_bad(struct sk_buff *skb)
3375 {
3376 /* Mark current checksum as bad (typically called from GRO
3377 * path). In the case that ip_summed is CHECKSUM_NONE
3378 * this must be the first checksum encountered in the packet.
3379 * When ip_summed is CHECKSUM_UNNECESSARY, this is the first
3380 * checksum after the last one validated. For UDP, a zero
3381 * checksum can not be marked as bad.
3382 */
3383
3384 if (skb->ip_summed == CHECKSUM_NONE ||
3385 skb->ip_summed == CHECKSUM_UNNECESSARY)
3386 skb->csum_bad = 1;
3387 }
3388
3389 /* Check if we need to perform checksum complete validation.
3390 *
3391 * Returns true if checksum complete is needed, false otherwise
3392 * (either checksum is unnecessary or zero checksum is allowed).
3393 */
3394 static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
3395 bool zero_okay,
3396 __sum16 check)
3397 {
3398 if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
3399 skb->csum_valid = 1;
3400 __skb_decr_checksum_unnecessary(skb);
3401 return false;
3402 }
3403
3404 return true;
3405 }
3406
3407 /* For small packets <= CHECKSUM_BREAK perform checksum complete directly
3408 * in checksum_init.
3409 */
3410 #define CHECKSUM_BREAK 76
3411
3412 /* Unset checksum-complete
3413 *
3414 * Unsetting checksum-complete can be done when the packet is being modified
3415 * (uncompressed, for instance) and the checksum-complete value is
3416 * invalidated.
3417 */
3418 static inline void skb_checksum_complete_unset(struct sk_buff *skb)
3419 {
3420 if (skb->ip_summed == CHECKSUM_COMPLETE)
3421 skb->ip_summed = CHECKSUM_NONE;
3422 }
3423
3424 /* Validate (init) checksum based on checksum complete.
3425 *
3426 * Return values:
3427 * 0: checksum is validated, or validation is deferred to skb_checksum_complete(). In the latter
3428 * case the ip_summed will not be CHECKSUM_UNNECESSARY and the pseudo
3429 * checksum is stored in skb->csum for use in __skb_checksum_complete
3430 * non-zero: value of invalid checksum
3431 *
3432 */
3433 static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
3434 bool complete,
3435 __wsum psum)
3436 {
3437 if (skb->ip_summed == CHECKSUM_COMPLETE) {
3438 if (!csum_fold(csum_add(psum, skb->csum))) {
3439 skb->csum_valid = 1;
3440 return 0;
3441 }
3442 } else if (skb->csum_bad) {
3443 /* ip_summed == CHECKSUM_NONE in this case */
3444 return (__force __sum16)1;
3445 }
3446
3447 skb->csum = psum;
3448
3449 if (complete || skb->len <= CHECKSUM_BREAK) {
3450 __sum16 csum;
3451
3452 csum = __skb_checksum_complete(skb);
3453 skb->csum_valid = !csum;
3454 return csum;
3455 }
3456
3457 return 0;
3458 }
3459
3460 static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
3461 {
3462 return 0;
3463 }
3464
3465 /* Perform checksum validate (init). Note that this is a macro so that the
3466 * pseudo header, supplied via the compute_pseudo input function, is only calculated if necessary.
3467 * First we try to validate without any computation (checksum unnecessary) and
3468 * then calculate based on checksum complete calling the function to compute
3469 * pseudo header.
3470 *
3471 * Return values:
3472 * 0: checksum is validated, or validation is deferred to skb_checksum_complete()
3473 * non-zero: value of invalid checksum
3474 */
3475 #define __skb_checksum_validate(skb, proto, complete, \
3476 zero_okay, check, compute_pseudo) \
3477 ({ \
3478 __sum16 __ret = 0; \
3479 skb->csum_valid = 0; \
3480 if (__skb_checksum_validate_needed(skb, zero_okay, check)) \
3481 __ret = __skb_checksum_validate_complete(skb, \
3482 complete, compute_pseudo(skb, proto)); \
3483 __ret; \
3484 })
3485
3486 #define skb_checksum_init(skb, proto, compute_pseudo) \
3487 __skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)
3488
3489 #define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
3490 __skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)
3491
3492 #define skb_checksum_validate(skb, proto, compute_pseudo) \
3493 __skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)
3494
3495 #define skb_checksum_validate_zero_check(skb, proto, check, \
3496 compute_pseudo) \
3497 __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
3498
3499 #define skb_checksum_simple_validate(skb) \
3500 __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
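/* Illustrative sketch, not part of the header: how a receive path
 * instantiates the macros above. inet_compute_pseudo() is the IPv4
 * pseudo-header helper from <net/ip.h>; IPPROTO_TCP is from <linux/in.h>.
 */
static inline int example_tcp4_csum_init(struct sk_buff *skb)
{
	/* "complete" is false here, so full validation may be deferred
	 * to a later skb_checksum_complete() call.
	 */
	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		return -EINVAL;	/* checksum already known to be invalid */
	return 0;
}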
3501
3502 static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
3503 {
3504 return (skb->ip_summed == CHECKSUM_NONE &&
3505 skb->csum_valid && !skb->csum_bad);
3506 }
3507
3508 static inline void __skb_checksum_convert(struct sk_buff *skb,
3509 __sum16 check, __wsum pseudo)
3510 {
3511 skb->csum = ~pseudo;
3512 skb->ip_summed = CHECKSUM_COMPLETE;
3513 }
3514
3515 #define skb_checksum_try_convert(skb, proto, check, compute_pseudo) \
3516 do { \
3517 if (__skb_checksum_convert_check(skb)) \
3518 __skb_checksum_convert(skb, check, \
3519 compute_pseudo(skb, proto)); \
3520 } while (0)
3521
3522 static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
3523 u16 start, u16 offset)
3524 {
3525 skb->ip_summed = CHECKSUM_PARTIAL;
3526 skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
3527 skb->csum_offset = offset - start;
3528 }
3529
3530 /* Update the skb and packet to reflect the remote checksum offload operation.
3531 * When called, ptr indicates the starting point for skb->csum when
3532 * ip_summed is CHECKSUM_COMPLETE. If we need to create a checksum-complete
3533 * value here, skb_postpull_rcsum is done so that skb->csum starts at ptr.
3534 */
3535 static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
3536 int start, int offset, bool nopartial)
3537 {
3538 __wsum delta;
3539
3540 if (!nopartial) {
3541 skb_remcsum_adjust_partial(skb, ptr, start, offset);
3542 return;
3543 }
3544
3545 if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
3546 __skb_checksum_complete(skb);
3547 skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
3548 }
3549
3550 delta = remcsum_adjust(ptr, skb->csum, start, offset);
3551
3552 /* Adjust skb->csum since we changed the packet */
3553 skb->csum = csum_add(skb->csum, delta);
3554 }
3555
3556 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3557 void nf_conntrack_destroy(struct nf_conntrack *nfct);
3558 static inline void nf_conntrack_put(struct nf_conntrack *nfct)
3559 {
3560 if (nfct && atomic_dec_and_test(&nfct->use))
3561 nf_conntrack_destroy(nfct);
3562 }
3563 static inline void nf_conntrack_get(struct nf_conntrack *nfct)
3564 {
3565 if (nfct)
3566 atomic_inc(&nfct->use);
3567 }
3568 #endif
3569 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3570 static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
3571 {
3572 if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
3573 kfree(nf_bridge);
3574 }
3575 static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
3576 {
3577 if (nf_bridge)
3578 atomic_inc(&nf_bridge->use);
3579 }
3580 #endif /* CONFIG_BRIDGE_NETFILTER */
3581 static inline void nf_reset(struct sk_buff *skb)
3582 {
3583 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3584 nf_conntrack_put(skb->nfct);
3585 skb->nfct = NULL;
3586 #endif
3587 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3588 nf_bridge_put(skb->nf_bridge);
3589 skb->nf_bridge = NULL;
3590 #endif
3591 }
3592
3593 static inline void nf_reset_trace(struct sk_buff *skb)
3594 {
3595 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
3596 skb->nf_trace = 0;
3597 #endif
3598 }
3599
3600 /* Note: This doesn't put any conntrack and bridge info in dst. */
3601 static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
3602 bool copy)
3603 {
3604 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3605 dst->nfct = src->nfct;
3606 nf_conntrack_get(src->nfct);
3607 if (copy)
3608 dst->nfctinfo = src->nfctinfo;
3609 #endif
3610 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3611 dst->nf_bridge = src->nf_bridge;
3612 nf_bridge_get(src->nf_bridge);
3613 #endif
3614 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
3615 if (copy)
3616 dst->nf_trace = src->nf_trace;
3617 #endif
3618 }
3619
3620 static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
3621 {
3622 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3623 nf_conntrack_put(dst->nfct);
3624 #endif
3625 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3626 nf_bridge_put(dst->nf_bridge);
3627 #endif
3628 __nf_copy(dst, src, true);
3629 }
3630
3631 #ifdef CONFIG_NETWORK_SECMARK
3632 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
3633 {
3634 to->secmark = from->secmark;
3635 }
3636
3637 static inline void skb_init_secmark(struct sk_buff *skb)
3638 {
3639 skb->secmark = 0;
3640 }
3641 #else
3642 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
3643 { }
3644
3645 static inline void skb_init_secmark(struct sk_buff *skb)
3646 { }
3647 #endif
3648
3649 static inline bool skb_irq_freeable(const struct sk_buff *skb)
3650 {
3651 return !skb->destructor &&
3652 #if IS_ENABLED(CONFIG_XFRM)
3653 !skb->sp &&
3654 #endif
3655 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
3656 !skb->nfct &&
3657 #endif
3658 !skb->_skb_refdst &&
3659 !skb_has_frag_list(skb);
3660 }
3661
3662 static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
3663 {
3664 skb->queue_mapping = queue_mapping;
3665 }
3666
3667 static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
3668 {
3669 return skb->queue_mapping;
3670 }
3671
3672 static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
3673 {
3674 to->queue_mapping = from->queue_mapping;
3675 }
3676
3677 static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
3678 {
3679 skb->queue_mapping = rx_queue + 1;
3680 }
3681
3682 static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
3683 {
3684 return skb->queue_mapping - 1;
3685 }
3686
3687 static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
3688 {
3689 return skb->queue_mapping != 0;
3690 }
3691
3692 static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
3693 {
3694 #ifdef CONFIG_XFRM
3695 return skb->sp;
3696 #else
3697 return NULL;
3698 #endif
3699 }
3700
3701 /* Keeps track of mac header offset relative to skb->head.
3702 * It is useful for TSO of tunneling protocols, e.g. GRE.
3703 * For non-tunnel skb it points to skb_mac_header() and for
3704 * tunnel skb it points to outer mac header.
3705 * Keeps track of level of encapsulation of network headers.
3706 */
3707 struct skb_gso_cb {
3708 union {
3709 int mac_offset;
3710 int data_offset;
3711 };
3712 int encap_level;
3713 __wsum csum;
3714 __u16 csum_start;
3715 };
3716 #define SKB_SGO_CB_OFFSET 32
3717 #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))
3718
3719 static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
3720 {
3721 return (skb_mac_header(inner_skb) - inner_skb->head) -
3722 SKB_GSO_CB(inner_skb)->mac_offset;
3723 }
3724
3725 static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
3726 {
3727 int new_headroom, headroom;
3728 int ret;
3729
3730 headroom = skb_headroom(skb);
3731 ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
3732 if (ret)
3733 return ret;
3734
3735 new_headroom = skb_headroom(skb);
3736 SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
3737 return 0;
3738 }
3739
3740 static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
3741 {
3742 /* Do not update partial checksums if remote checksum is enabled. */
3743 if (skb->remcsum_offload)
3744 return;
3745
3746 SKB_GSO_CB(skb)->csum = res;
3747 SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
3748 }
3749
3750 /* Compute the checksum for a gso segment. First compute the checksum value
3751 * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
3752 * then add in skb->csum (checksum from csum_start to end of packet).
3753 * skb->csum and csum_start are then updated to reflect the checksum of the
3754 * resultant packet starting from the transport header; the resultant checksum
3755 * is in the res argument (i.e. normally zero or the complement of the
3756 * checksum of a pseudo header).
3757 */
3758 static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
3759 {
3760 unsigned char *csum_start = skb_transport_header(skb);
3761 int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
3762 __wsum partial = SKB_GSO_CB(skb)->csum;
3763
3764 SKB_GSO_CB(skb)->csum = res;
3765 SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
3766
3767 return csum_fold(csum_partial(csum_start, plen, partial));
3768 }
3769
3770 static inline bool skb_is_gso(const struct sk_buff *skb)
3771 {
3772 return skb_shinfo(skb)->gso_size;
3773 }
3774
3775 /* Note: Should be called only if skb_is_gso(skb) is true */
3776 static inline bool skb_is_gso_v6(const struct sk_buff *skb)
3777 {
3778 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
3779 }
3780
3781 static inline void skb_gso_reset(struct sk_buff *skb)
3782 {
3783 skb_shinfo(skb)->gso_size = 0;
3784 skb_shinfo(skb)->gso_segs = 0;
3785 skb_shinfo(skb)->gso_type = 0;
3786 }
3787
3788 void __skb_warn_lro_forwarding(const struct sk_buff *skb);
3789
3790 static inline bool skb_warn_if_lro(const struct sk_buff *skb)
3791 {
3792 /* LRO sets gso_size but not gso_type, whereas if GSO is really
3793 * wanted then gso_type will be set. */
3794 const struct skb_shared_info *shinfo = skb_shinfo(skb);
3795
3796 if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
3797 unlikely(shinfo->gso_type == 0)) {
3798 __skb_warn_lro_forwarding(skb);
3799 return true;
3800 }
3801 return false;
3802 }
3803
3804 static inline void skb_forward_csum(struct sk_buff *skb)
3805 {
3806 /* Unfortunately we don't support this one. Any brave souls? */
3807 if (skb->ip_summed == CHECKSUM_COMPLETE)
3808 skb->ip_summed = CHECKSUM_NONE;
3809 }
3810
3811 /**
3812 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
3813 * @skb: skb to check
3814 *
3815 * fresh skbs have their ip_summed set to CHECKSUM_NONE.
3816 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
3817 * use this helper, to document places where we make this assertion.
3818 */
3819 static inline void skb_checksum_none_assert(const struct sk_buff *skb)
3820 {
3821 #ifdef DEBUG
3822 BUG_ON(skb->ip_summed != CHECKSUM_NONE);
3823 #endif
3824 }
3825
3826 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
3827
3828 int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
3829 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
3830 unsigned int transport_len,
3831 __sum16 (*skb_chkf)(struct sk_buff *skb));
3832
3833 /**
3834 * skb_head_is_locked - Determine if the skb->head is locked down
3835 * @skb: skb to check
3836 *
3837 * The head on skbs built around a head frag can be removed if they are
3838 * not cloned. This function returns true if the skb head is locked down
3839 * due to either being allocated via kmalloc, or by being a clone with
3840 * multiple references to the head.
3841 */
3842 static inline bool skb_head_is_locked(const struct sk_buff *skb)
3843 {
3844 return !skb->head_frag || skb_cloned(skb);
3845 }
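
/* Illustrative sketch, not part of skbuff.h: coalescing code may only
 * reuse the head area of a donor skb when it is not locked down,
 * similar in spirit to the checks in skb_try_coalesce().
 * 'example_can_reuse_head' is a hypothetical helper.
 */
static inline bool example_can_reuse_head(const struct sk_buff *skb)
{
	return !skb_head_is_locked(skb);
}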
3846
3847 /**
3848 * skb_gso_network_seglen - Return length of individual segments of a gso packet
3849 *
3850 * @skb: GSO skb
3851 *
3852 * skb_gso_network_seglen is used to determine the real size of the
3853 * individual segments, including the Layer 3 (IP, IPv6) and Layer 4 (TCP/UDP) headers.
3854 *
3855 * The MAC/L2 header is not accounted for.
3856 */
3857 static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
3858 {
3859 unsigned int hdr_len = skb_transport_header(skb) -
3860 skb_network_header(skb);
3861 return hdr_len + skb_gso_transport_seglen(skb);
3862 }
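
/* Illustrative sketch, not part of skbuff.h: deciding whether the
 * individual segments of a GSO packet would exceed a network-layer
 * MTU, analogous to the forwarding-path MTU checks in ipv4/ipv6.
 * 'example_gso_exceeds_mtu' is a hypothetical helper.
 */
static inline bool example_gso_exceeds_mtu(const struct sk_buff *skb,
					   unsigned int mtu)
{
	return skb_is_gso(skb) && skb_gso_network_seglen(skb) > mtu;
}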
3863
3864 /* Local Checksum Offload.
3865 * Compute outer checksum based on the assumption that the
3866 * inner checksum will be offloaded later.
3867 * See Documentation/networking/checksum-offloads.txt for
3868 * explanation of how this works.
3869 * Fill in outer checksum adjustment (e.g. with sum of outer
3870 * pseudo-header) before calling.
3871 * Also ensure that inner checksum is in linear data area.
3872 */
3873 static inline __wsum lco_csum(struct sk_buff *skb)
3874 {
3875 unsigned char *csum_start = skb_checksum_start(skb);
3876 unsigned char *l4_hdr = skb_transport_header(skb);
3877 __wsum partial;
3878
3879 /* Start with complement of inner checksum adjustment */
3880 partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
3881 skb->csum_offset));
3882
3883 /* Add in checksum of our headers (incl. outer checksum
3884 * adjustment filled in by caller) and return result.
3885 */
3886 return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
3887 }
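
/* Illustrative sketch, not part of skbuff.h: computing an outer UDP
 * checksum via LCO for a packet whose inner checksum is offloaded
 * (CHECKSUM_PARTIAL), in the style of udp_set_csum(). The outer check
 * field is zeroed first so it does not pollute the header sum; the
 * outer pseudo-header is folded in by udp_v4_check(). Assumes
 * <net/udp.h> for udp_hdr() and udp_v4_check().
 */
static inline void example_udp_lco_csum(struct sk_buff *skb,
					__be32 saddr, __be32 daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);

	uh->check = 0;
	uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;
}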
3888
3889 #endif /* __KERNEL__ */
3890 #endif /* _LINUX_SKBUFF_H */
Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to determine whether there is an error in your driver.
The Error trace column contains the path on which the given rule is violated. You can expand/collapse entity classes by clicking the corresponding checkboxes in the main menu or in the advanced Others menu. You can also expand/collapse each particular entity by clicking +/-. Hovering over some entities shows tooltips. The error trace is also bound to the related source code: line numbers may be shown as links on the left, and you can click them to open the corresponding lines in the source code.
The Source code column contains the content of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers, and the rule model. Tabs show the currently opened file and the other available files; hovering over a tab shows the full file name, and clicking it shows that file's content.
Kernel | Module | Rule | Verifier | Verdict | Status | Timestamp | Bug report |
linux-4.10-rc1.tar.xz | drivers/net/ethernet/adaptec/starfire.ko | 331_1a | CPAchecker | Bug | Fixed | 2017-01-21 01:54:17 | L0260 |
Comment
Reported: 21 Jan 2017
[Home]