Error Trace
Bug #154
{ 19 typedef signed char __s8; 20 typedef unsigned char __u8; 22 typedef short __s16; 23 typedef unsigned short __u16; 25 typedef int __s32; 26 typedef unsigned int __u32; 29 typedef long long __s64; 30 typedef unsigned long long __u64; 15 typedef signed char s8; 16 typedef unsigned char u8; 19 typedef unsigned short u16; 21 typedef int s32; 22 typedef unsigned int u32; 24 typedef long long s64; 25 typedef unsigned long long u64; 14 typedef long __kernel_long_t; 15 typedef unsigned long __kernel_ulong_t; 27 typedef int __kernel_pid_t; 48 typedef unsigned int __kernel_uid32_t; 49 typedef unsigned int __kernel_gid32_t; 71 typedef __kernel_ulong_t __kernel_size_t; 72 typedef __kernel_long_t __kernel_ssize_t; 87 typedef long long __kernel_loff_t; 88 typedef __kernel_long_t __kernel_time_t; 89 typedef __kernel_long_t __kernel_clock_t; 90 typedef int __kernel_timer_t; 91 typedef int __kernel_clockid_t; 32 typedef __u16 __le16; 33 typedef __u16 __be16; 34 typedef __u32 __le32; 35 typedef __u32 __be32; 36 typedef __u64 __le64; 37 typedef __u64 __be64; 39 typedef __u16 __sum16; 40 typedef __u32 __wsum; 257 struct kernel_symbol { unsigned long value; const char *name; } ; 33 struct module ; 12 typedef __u32 __kernel_dev_t; 15 typedef __kernel_dev_t dev_t; 18 typedef unsigned short umode_t; 21 typedef __kernel_pid_t pid_t; 26 typedef __kernel_clockid_t clockid_t; 29 typedef _Bool bool; 31 typedef __kernel_uid32_t uid_t; 32 typedef __kernel_gid32_t gid_t; 45 typedef __kernel_loff_t loff_t; 54 typedef __kernel_size_t size_t; 59 typedef __kernel_ssize_t ssize_t; 69 typedef __kernel_time_t time_t; 102 typedef __s32 int32_t; 106 typedef __u8 uint8_t; 108 typedef __u32 uint32_t; 111 typedef __u64 uint64_t; 133 typedef unsigned long sector_t; 134 typedef unsigned long blkcnt_t; 152 typedef u64 dma_addr_t; 157 typedef unsigned int gfp_t; 158 typedef unsigned int fmode_t; 161 typedef u64 phys_addr_t; 166 typedef phys_addr_t resource_size_t; 176 struct __anonstruct_atomic_t_6 { int counter; } ; 176 typedef struct __anonstruct_atomic_t_6 atomic_t; 181 struct __anonstruct_atomic64_t_7 { long counter; } ; 181 typedef struct __anonstruct_atomic64_t_7 atomic64_t; 182 struct list_head { struct list_head *next; struct list_head *prev; } ; 187 struct hlist_node ; 187 struct hlist_head { struct hlist_node *first; } ; 191 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ; 202 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ; 125 typedef void (*ctor_fn_t)(); 67 struct ctl_table ; 279 struct _ddebug { const char *modname; const char *function; const char *filename; const char *format; unsigned int lineno; unsigned char flags; } ; 58 struct device ; 64 struct net_device ; 467 struct file_operations ; 479 struct completion ; 480 struct pt_regs ; 27 union __anonunion___u_9 { struct list_head *__val; char __c[1U]; } ; 556 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ; 111 struct timespec ; 112 struct compat_timespec ; 113 struct __anonstruct_futex_25 { u32 *uaddr; u32 val; u32 flags; u32 bitset; u64 time; u32 *uaddr2; } ; 113 struct __anonstruct_nanosleep_26 { clockid_t clockid; struct timespec *rmtp; struct compat_timespec *compat_rmtp; u64 expires; } ; 113 struct pollfd ; 113 struct __anonstruct_poll_27 { struct pollfd *ufds; int nfds; int has_timeout; unsigned long tv_sec; unsigned long tv_nsec; } ; 113 union __anonunion____missing_field_name_24 { struct __anonstruct_futex_25 futex; struct 
__anonstruct_nanosleep_26 nanosleep; struct __anonstruct_poll_27 poll; } ; 113 struct restart_block { long int (*fn)(struct restart_block *); union __anonunion____missing_field_name_24 __annonCompField4; } ; 39 struct page ; 26 struct task_struct ; 27 struct mm_struct ; 288 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ; 66 struct __anonstruct____missing_field_name_30 { unsigned int a; unsigned int b; } ; 66 struct __anonstruct____missing_field_name_31 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ; 66 union __anonunion____missing_field_name_29 { struct __anonstruct____missing_field_name_30 __annonCompField5; struct __anonstruct____missing_field_name_31 __annonCompField6; } ; 66 struct desc_struct { union __anonunion____missing_field_name_29 __annonCompField7; } ; 13 typedef unsigned long pteval_t; 14 typedef unsigned long pmdval_t; 16 typedef unsigned long pgdval_t; 17 typedef unsigned long pgprotval_t; 19 struct __anonstruct_pte_t_32 { pteval_t pte; } ; 19 typedef struct __anonstruct_pte_t_32 pte_t; 21 struct pgprot { pgprotval_t pgprot; } ; 256 typedef struct pgprot pgprot_t; 258 struct __anonstruct_pgd_t_33 { pgdval_t pgd; } ; 258 typedef struct __anonstruct_pgd_t_33 pgd_t; 297 struct __anonstruct_pmd_t_35 { pmdval_t pmd; } ; 297 typedef struct __anonstruct_pmd_t_35 pmd_t; 423 typedef struct page *pgtable_t; 434 struct file ; 447 struct seq_file ; 483 struct thread_struct ; 485 struct cpumask ; 20 struct qspinlock { atomic_t val; } ; 33 typedef struct qspinlock arch_spinlock_t; 34 struct qrwlock { atomic_t cnts; arch_spinlock_t wait_lock; } ; 14 typedef struct qrwlock arch_rwlock_t; 247 struct math_emu_info { long ___orig_eip; struct pt_regs *regs; } ; 341 struct cpumask { unsigned long bits[128U]; } ; 15 typedef struct cpumask cpumask_t; 654 typedef struct cpumask *cpumask_var_t; 26 union __anonunion___u_42 { int __val; char __c[1U]; } ; 23 typedef atomic64_t atomic_long_t; 81 struct static_key { atomic_t enabled; } ; 22 struct tracepoint_func { void *func; void *data; int prio; } ; 28 struct tracepoint { const char *name; struct static_key key; void (*regfunc)(); void (*unregfunc)(); struct tracepoint_func *funcs; } ; 254 struct fregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ; 26 struct __anonstruct____missing_field_name_59 { u64 rip; u64 rdp; } ; 26 struct __anonstruct____missing_field_name_60 { u32 fip; u32 fcs; u32 foo; u32 fos; } ; 26 union __anonunion____missing_field_name_58 { struct __anonstruct____missing_field_name_59 __annonCompField13; struct __anonstruct____missing_field_name_60 __annonCompField14; } ; 26 union __anonunion____missing_field_name_61 { u32 padding1[12U]; u32 sw_reserved[12U]; } ; 26 struct fxregs_state { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion____missing_field_name_58 __annonCompField15; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion____missing_field_name_61 __annonCompField16; } ; 66 
struct swregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ; 227 struct xstate_header { u64 xfeatures; u64 xcomp_bv; u64 reserved[6U]; } ; 233 struct xregs_state { struct fxregs_state i387; struct xstate_header header; u8 extended_state_area[0U]; } ; 254 union fpregs_state { struct fregs_state fsave; struct fxregs_state fxsave; struct swregs_state soft; struct xregs_state xsave; u8 __padding[4096U]; } ; 271 struct fpu { unsigned int last_cpu; unsigned char fpstate_active; unsigned char fpregs_active; unsigned char counter; union fpregs_state state; } ; 169 struct seq_operations ; 372 struct perf_event ; 377 struct __anonstruct_mm_segment_t_73 { unsigned long seg; } ; 377 typedef struct __anonstruct_mm_segment_t_73 mm_segment_t; 378 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; unsigned long fsbase; unsigned long gsbase; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; mm_segment_t addr_limit; unsigned char sig_on_uaccess_err; unsigned char uaccess_err; struct fpu fpu; } ; 69 typedef int pao_T__; 74 typedef int pao_T_____0; 33 struct lockdep_map ; 55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ; 28 struct lockdep_subclass_key { char __one_byte; } ; 53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ; 59 struct lock_class { struct hlist_node hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ; 144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ; 207 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; unsigned int pin_count; } ; 572 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 32 typedef struct raw_spinlock raw_spinlock_t; 33 struct __anonstruct____missing_field_name_75 { u8 __padding[24U]; struct lockdep_map dep_map; } ; 33 union __anonunion____missing_field_name_74 { struct raw_spinlock rlock; struct __anonstruct____missing_field_name_75 __annonCompField19; } ; 33 struct spinlock { union __anonunion____missing_field_name_74 __annonCompField20; } ; 76 typedef struct spinlock spinlock_t; 23 struct __anonstruct_rwlock_t_76 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 23 typedef struct __anonstruct_rwlock_t_76 rwlock_t; 416 struct seqcount { unsigned int sequence; struct lockdep_map 
dep_map; } ; 52 typedef struct seqcount seqcount_t; 407 struct __anonstruct_seqlock_t_91 { struct seqcount seqcount; spinlock_t lock; } ; 407 typedef struct __anonstruct_seqlock_t_91 seqlock_t; 601 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ; 7 typedef __s64 time64_t; 83 struct user_namespace ; 22 struct __anonstruct_kuid_t_92 { uid_t val; } ; 22 typedef struct __anonstruct_kuid_t_92 kuid_t; 27 struct __anonstruct_kgid_t_93 { gid_t val; } ; 27 typedef struct __anonstruct_kgid_t_93 kgid_t; 139 struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; } ; 36 struct vm_area_struct ; 38 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ; 43 typedef struct __wait_queue_head wait_queue_head_t; 97 struct __anonstruct_nodemask_t_94 { unsigned long bits[16U]; } ; 97 typedef struct __anonstruct_nodemask_t_94 nodemask_t; 80 struct free_area { struct list_head free_list[6U]; unsigned long nr_free; } ; 92 struct pglist_data ; 93 struct zone_padding { char x[0U]; } ; 208 struct zone_reclaim_stat { unsigned long recent_rotated[2U]; unsigned long recent_scanned[2U]; } ; 221 struct lruvec { struct list_head lists[5U]; struct zone_reclaim_stat reclaim_stat; atomic_long_t inactive_age; struct pglist_data *pgdat; } ; 247 typedef unsigned int isolate_mode_t; 255 struct per_cpu_pages { int count; int high; int batch; struct list_head lists[3U]; } ; 268 struct per_cpu_pageset { struct per_cpu_pages pcp; s8 expire; s8 stat_threshold; s8 vm_stat_diff[21U]; } ; 278 struct per_cpu_nodestat { s8 stat_threshold; s8 vm_node_stat_diff[26U]; } ; 284 enum zone_type { ZONE_DMA = 0, ZONE_DMA32 = 1, ZONE_NORMAL = 2, ZONE_MOVABLE = 3, __MAX_NR_ZONES = 4 } ; 292 struct zone { unsigned long watermark[3U]; unsigned long nr_reserved_highatomic; long lowmem_reserve[4U]; int node; struct pglist_data *zone_pgdat; struct per_cpu_pageset *pageset; unsigned long zone_start_pfn; unsigned long managed_pages; unsigned long spanned_pages; unsigned long present_pages; const char *name; unsigned long nr_isolate_pageblock; wait_queue_head_t *wait_table; unsigned long wait_table_hash_nr_entries; unsigned long wait_table_bits; struct zone_padding _pad1_; struct free_area free_area[11U]; unsigned long flags; spinlock_t lock; struct zone_padding _pad2_; unsigned long percpu_drift_mark; unsigned long compact_cached_free_pfn; unsigned long compact_cached_migrate_pfn[2U]; unsigned int compact_considered; unsigned int compact_defer_shift; int compact_order_failed; bool compact_blockskip_flush; bool contiguous; struct zone_padding _pad3_; atomic_long_t vm_stat[21U]; } ; 560 struct zoneref { struct zone *zone; int zone_idx; } ; 585 struct zonelist { struct zoneref _zonerefs[4097U]; } ; 608 struct pglist_data { struct zone node_zones[4U]; struct zonelist node_zonelists[2U]; int nr_zones; unsigned long node_start_pfn; unsigned long node_present_pages; unsigned long node_spanned_pages; int node_id; wait_queue_head_t kswapd_wait; wait_queue_head_t pfmemalloc_wait; struct task_struct *kswapd; int kswapd_order; enum zone_type kswapd_classzone_idx; int kcompactd_max_order; enum zone_type kcompactd_classzone_idx; wait_queue_head_t kcompactd_wait; struct task_struct *kcompactd; spinlock_t numabalancing_migrate_lock; unsigned long numabalancing_migrate_next_window; unsigned long numabalancing_migrate_nr_pages; unsigned long totalreserve_pages; 
unsigned long min_unmapped_pages; unsigned long min_slab_pages; struct zone_padding _pad1_; spinlock_t lru_lock; spinlock_t split_queue_lock; struct list_head split_queue; unsigned long split_queue_len; struct lruvec lruvec; unsigned int inactive_ratio; unsigned long flags; struct zone_padding _pad2_; struct per_cpu_nodestat *per_cpu_nodestats; atomic_long_t vm_stat[26U]; } ; 13 struct optimistic_spin_queue { atomic_t tail; } ; 39 struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; void *magic; struct lockdep_map dep_map; } ; 67 struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; } ; 177 struct rw_semaphore ; 178 struct rw_semaphore { atomic_long_t count; struct list_head wait_list; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct task_struct *owner; struct lockdep_map dep_map; } ; 178 struct completion { unsigned int done; wait_queue_head_t wait; } ; 446 union ktime { s64 tv64; } ; 41 typedef union ktime ktime_t; 1144 struct timer_list { struct hlist_node entry; unsigned long expires; void (*function)(unsigned long); unsigned long data; u32 flags; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ; 254 struct hrtimer ; 255 enum hrtimer_restart ; 256 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ; 41 struct rb_root { struct rb_node *rb_node; } ; 835 struct nsproxy ; 836 struct ctl_table_root ; 837 struct ctl_table_header ; 838 struct ctl_dir ; 38 typedef int proc_handler(struct ctl_table *, int, void *, size_t *, loff_t *); 58 struct ctl_table_poll { atomic_t event; wait_queue_head_t wait; } ; 97 struct ctl_table { const char *procname; void *data; int maxlen; umode_t mode; struct ctl_table *child; proc_handler *proc_handler; struct ctl_table_poll *poll; void *extra1; void *extra2; } ; 118 struct ctl_node { struct rb_node node; struct ctl_table_header *header; } ; 123 struct __anonstruct____missing_field_name_100 { struct ctl_table *ctl_table; int used; int count; int nreg; } ; 123 union __anonunion____missing_field_name_99 { struct __anonstruct____missing_field_name_100 __annonCompField21; struct callback_head rcu; } ; 123 struct ctl_table_set ; 123 struct ctl_table_header { union __anonunion____missing_field_name_99 __annonCompField22; struct completion *unregistering; struct ctl_table *ctl_table_arg; struct ctl_table_root *root; struct ctl_table_set *set; struct ctl_dir *parent; struct ctl_node *node; } ; 144 struct ctl_dir { struct ctl_table_header header; struct rb_root root; } ; 150 struct ctl_table_set { int (*is_seen)(struct ctl_table_set *); struct ctl_dir dir; } ; 155 struct ctl_table_root { struct ctl_table_set default_set; struct ctl_table_set * (*lookup)(struct ctl_table_root *, struct nsproxy *); int (*permissions)(struct ctl_table_header *, struct ctl_table *); } ; 278 struct workqueue_struct ; 279 struct work_struct ; 54 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ; 107 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ; 268 struct notifier_block ; 53 struct notifier_block { int (*notifier_call)(struct notifier_block *, unsigned long, void *); struct notifier_block *next; int priority; } ; 217 struct resource ; 64 struct resource { resource_size_t start; resource_size_t end; const char *name; unsigned long flags; unsigned 
long desc; struct resource *parent; struct resource *sibling; struct resource *child; } ; 220 struct pci_dev ; 58 struct pm_message { int event; } ; 64 typedef struct pm_message pm_message_t; 65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ; 320 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ; 327 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ; 335 struct wakeup_source ; 336 struct wake_irq ; 337 struct pm_domain_data ; 338 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; struct pm_domain_data *domain_data; } ; 556 struct dev_pm_qos ; 556 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; bool no_pm_callbacks; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; bool ignore_children; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ; 616 struct dev_pm_domain { struct dev_pm_ops ops; void (*detach)(struct device *, bool ); int (*activate)(struct device *); void (*sync)(struct device *); void (*dismiss)(struct device *); } ; 144 struct pci_bus ; 26 struct ldt_struct ; 26 struct vdso_image ; 26 struct __anonstruct_mm_context_t_165 { struct ldt_struct *ldt; unsigned short ia32_compat; struct mutex lock; void *vdso; const struct vdso_image *vdso_image; atomic_t perf_rdpmc_allowed; } ; 26 typedef struct __anonstruct_mm_context_t_165 mm_context_t; 22 struct bio_vec ; 1276 struct llist_node ; 64 struct llist_node { struct llist_node *next; } ; 37 struct cred ; 19 struct inode ; 58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ; 66 
enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ; 73 struct __anonstruct____missing_field_name_211 { struct arch_uprobe_task autask; unsigned long vaddr; } ; 73 struct __anonstruct____missing_field_name_212 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ; 73 union __anonunion____missing_field_name_210 { struct __anonstruct____missing_field_name_211 __annonCompField35; struct __anonstruct____missing_field_name_212 __annonCompField36; } ; 73 struct uprobe ; 73 struct return_instance ; 73 struct uprobe_task { enum uprobe_task_state state; union __anonunion____missing_field_name_210 __annonCompField37; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ; 94 struct return_instance { struct uprobe *uprobe; unsigned long func; unsigned long stack; unsigned long orig_ret_vaddr; bool chained; struct return_instance *next; } ; 110 struct xol_area ; 111 struct uprobes_state { struct xol_area *xol_area; } ; 150 struct address_space ; 151 struct mem_cgroup ; 152 union __anonunion____missing_field_name_213 { struct address_space *mapping; void *s_mem; atomic_t compound_mapcount; } ; 152 union __anonunion____missing_field_name_214 { unsigned long index; void *freelist; } ; 152 struct __anonstruct____missing_field_name_218 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ; 152 union __anonunion____missing_field_name_217 { atomic_t _mapcount; unsigned int active; struct __anonstruct____missing_field_name_218 __annonCompField40; int units; } ; 152 struct __anonstruct____missing_field_name_216 { union __anonunion____missing_field_name_217 __annonCompField41; atomic_t _refcount; } ; 152 union __anonunion____missing_field_name_215 { unsigned long counters; struct __anonstruct____missing_field_name_216 __annonCompField42; } ; 152 struct dev_pagemap ; 152 struct __anonstruct____missing_field_name_220 { struct page *next; int pages; int pobjects; } ; 152 struct __anonstruct____missing_field_name_221 { unsigned long compound_head; unsigned int compound_dtor; unsigned int compound_order; } ; 152 struct __anonstruct____missing_field_name_222 { unsigned long __pad; pgtable_t pmd_huge_pte; } ; 152 union __anonunion____missing_field_name_219 { struct list_head lru; struct dev_pagemap *pgmap; struct __anonstruct____missing_field_name_220 __annonCompField44; struct callback_head callback_head; struct __anonstruct____missing_field_name_221 __annonCompField45; struct __anonstruct____missing_field_name_222 __annonCompField46; } ; 152 struct kmem_cache ; 152 union __anonunion____missing_field_name_223 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; } ; 152 struct page { unsigned long flags; union __anonunion____missing_field_name_213 __annonCompField38; union __anonunion____missing_field_name_214 __annonCompField39; union __anonunion____missing_field_name_215 __annonCompField43; union __anonunion____missing_field_name_219 __annonCompField47; union __anonunion____missing_field_name_223 __annonCompField48; struct mem_cgroup *mem_cgroup; } ; 197 struct page_frag { struct page *page; __u32 offset; __u32 size; } ; 282 struct userfaultfd_ctx ; 282 struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; } ; 289 struct __anonstruct_shared_224 { struct rb_node rb; unsigned long rb_subtree_last; } ; 289 struct anon_vma ; 289 struct vm_operations_struct ; 289 struct mempolicy ; 289 struct vm_area_struct { unsigned long vm_start; unsigned long 
vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; struct __anonstruct_shared_224 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ; 362 struct core_thread { struct task_struct *task; struct core_thread *next; } ; 367 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ; 381 struct task_rss_stat { int events; int count[4U]; } ; 389 struct mm_rss_stat { atomic_long_t count[4U]; } ; 394 struct kioctx_table ; 395 struct linux_binfmt ; 395 struct mmu_notifier_mm ; 395 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; atomic_long_t nr_pmds; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long data_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; void *bd_addr; atomic_long_t hugetlb_usage; struct work_struct async_put_work; } ; 565 struct vm_fault ; 619 struct vdso_image { void *data; unsigned long size; unsigned long alt; unsigned long alt_len; long sym_vvar_start; long sym_vvar_page; long sym_hpet_page; long sym_pvclock_page; long sym_VDSO32_NOTE_MASK; long sym___kernel_sigreturn; long sym___kernel_rt_sigreturn; long sym___kernel_vsyscall; long sym_int80_landing_pad; } ; 15 typedef __u64 Elf64_Addr; 16 typedef __u16 Elf64_Half; 18 typedef __u64 Elf64_Off; 20 typedef __u32 Elf64_Word; 21 typedef __u64 Elf64_Xword; 190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ; 198 typedef struct elf64_sym Elf64_Sym; 219 struct elf64_hdr { unsigned char e_ident[16U]; Elf64_Half e_type; Elf64_Half e_machine; Elf64_Word e_version; Elf64_Addr e_entry; Elf64_Off e_phoff; Elf64_Off e_shoff; Elf64_Word e_flags; Elf64_Half e_ehsize; Elf64_Half e_phentsize; Elf64_Half e_phnum; Elf64_Half e_shentsize; Elf64_Half e_shnum; Elf64_Half e_shstrndx; } ; 235 typedef struct elf64_hdr Elf64_Ehdr; 314 struct elf64_shdr 
{ Elf64_Word sh_name; Elf64_Word sh_type; Elf64_Xword sh_flags; Elf64_Addr sh_addr; Elf64_Off sh_offset; Elf64_Xword sh_size; Elf64_Word sh_link; Elf64_Word sh_info; Elf64_Xword sh_addralign; Elf64_Xword sh_entsize; } ; 326 typedef struct elf64_shdr Elf64_Shdr; 53 union __anonunion____missing_field_name_229 { unsigned long bitmap[4U]; struct callback_head callback_head; } ; 53 struct idr_layer { int prefix; int layer; struct idr_layer *ary[256U]; int count; union __anonunion____missing_field_name_229 __annonCompField49; } ; 41 struct idr { struct idr_layer *hint; struct idr_layer *top; int layers; int cur; spinlock_t lock; int id_free_cnt; struct idr_layer *id_free; } ; 124 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ; 167 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ; 199 struct dentry ; 200 struct iattr ; 201 struct super_block ; 202 struct file_system_type ; 203 struct kernfs_open_node ; 204 struct kernfs_iattrs ; 227 struct kernfs_root ; 227 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ; 85 struct kernfs_node ; 85 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ; 89 struct kernfs_ops ; 89 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; struct kernfs_node *notify_next; } ; 96 union __anonunion____missing_field_name_234 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ; 96 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion____missing_field_name_234 __annonCompField50; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ; 138 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); } ; 157 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ; 173 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; void *priv; struct mutex mutex; struct mutex prealloc_mutex; int event; struct list_head list; char *prealloc_buf; size_t atomic_write_len; bool mmapped; const struct vm_operations_struct *vm_ops; } ; 191 struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; bool prealloc; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ; 499 struct sock ; 500 struct kobject ; 501 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ; 507 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void 
(*drop_ns)(void *); } ; 59 struct bin_attribute ; 60 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ; 37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ; 92 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ; 165 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ; 530 struct kref { atomic_t refcount; } ; 52 struct kset ; 52 struct kobj_type ; 52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ; 115 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ; 123 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ; 131 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ; 148 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ; 223 struct kernel_param ; 228 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ; 62 struct kparam_string ; 62 struct kparam_array ; 62 union __anonunion____missing_field_name_237 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ; 62 struct kernel_param { const char *name; struct module *mod; const struct kernel_param_ops *ops; const u16 perm; s8 level; u8 flags; union __anonunion____missing_field_name_237 __annonCompField51; } ; 83 struct kparam_string { unsigned int maxlen; char *string; } ; 89 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ; 470 struct exception_table_entry ; 24 struct latch_tree_node { struct rb_node node[2U]; } ; 211 struct mod_arch_specific { } ; 39 struct module_param_attrs ; 39 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ; 50 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int 
(*test)(struct module *); void (*free)(struct module *); } ; 277 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ; 284 struct mod_tree_node { struct module *mod; struct latch_tree_node node; } ; 291 struct module_layout { void *base; unsigned int size; unsigned int text_size; unsigned int ro_size; unsigned int ro_after_init_size; struct mod_tree_node mtn; } ; 307 struct mod_kallsyms { Elf64_Sym *symtab; unsigned int num_symtab; char *strtab; } ; 321 struct klp_modinfo { Elf64_Ehdr hdr; Elf64_Shdr *sechdrs; char *secstrings; unsigned int symndx; } ; 329 struct module_sect_attrs ; 329 struct module_notes_attrs ; 329 struct trace_event_call ; 329 struct trace_enum_map ; 329 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct mutex param_lock; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; bool async_probe_requested; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); struct module_layout core_layout; struct module_layout init_layout; struct mod_arch_specific arch; unsigned int taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; struct mod_kallsyms *kallsyms; struct mod_kallsyms core_kallsyms; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct trace_event_call **trace_events; unsigned int num_trace_events; struct trace_enum_map **trace_enums; unsigned int num_trace_enums; bool klp; bool klp_alive; struct klp_modinfo *klp_info; struct list_head source_list; struct list_head target_list; void (*exit)(); atomic_t refcnt; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ; 158 struct iovec { void *iov_base; __kernel_size_t iov_len; } ; 21 struct kvec { void *iov_base; size_t iov_len; } ; 27 union __anonunion____missing_field_name_244 { const struct iovec *iov; const struct kvec *kvec; const struct bio_vec *bvec; } ; 27 struct iov_iter { int type; size_t iov_offset; size_t count; union __anonunion____missing_field_name_244 __annonCompField52; unsigned long nr_segs; } ; 11 typedef unsigned short __kernel_sa_family_t; 18 struct pid ; 23 typedef __kernel_sa_family_t sa_family_t; 24 struct sockaddr { sa_family_t sa_family; char sa_data[14U]; } ; 38 struct kiocb ; 38 struct msghdr { void *msg_name; int msg_namelen; struct iov_iter msg_iter; void *msg_control; __kernel_size_t msg_controllen; unsigned int msg_flags; struct kiocb *msg_iocb; } ; 217 enum ldv_15580 { SS_FREE = 0, SS_UNCONNECTED = 1, SS_CONNECTING = 2, SS_CONNECTED = 3, SS_DISCONNECTING = 4 } ; 53 typedef enum ldv_15580 socket_state; 54 struct poll_table_struct ; 55 struct 
pipe_inode_info ; 56 struct net ; 73 struct fasync_struct ; 73 struct socket_wq { wait_queue_head_t wait; struct fasync_struct *fasync_list; unsigned long flags; struct callback_head rcu; } ; 99 struct proto_ops ; 99 struct socket { socket_state state; short type; unsigned long flags; struct socket_wq *wq; struct file *file; struct sock *sk; const struct proto_ops *ops; } ; 125 struct proto_ops { int family; struct module *owner; int (*release)(struct socket *); int (*bind)(struct socket *, struct sockaddr *, int); int (*connect)(struct socket *, struct sockaddr *, int, int); int (*socketpair)(struct socket *, struct socket *); int (*accept)(struct socket *, struct socket *, int); int (*getname)(struct socket *, struct sockaddr *, int *, int); unsigned int (*poll)(struct file *, struct socket *, struct poll_table_struct *); int (*ioctl)(struct socket *, unsigned int, unsigned long); int (*compat_ioctl)(struct socket *, unsigned int, unsigned long); int (*listen)(struct socket *, int); int (*shutdown)(struct socket *, int); int (*setsockopt)(struct socket *, int, int, char *, unsigned int); int (*getsockopt)(struct socket *, int, int, char *, int *); int (*compat_setsockopt)(struct socket *, int, int, char *, unsigned int); int (*compat_getsockopt)(struct socket *, int, int, char *, int *); int (*sendmsg)(struct socket *, struct msghdr *, size_t ); int (*recvmsg)(struct socket *, struct msghdr *, size_t , int); int (*mmap)(struct file *, struct socket *, struct vm_area_struct *); ssize_t (*sendpage)(struct socket *, struct page *, int, size_t , int); ssize_t (*splice_read)(struct socket *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*set_peek_off)(struct sock *, int); int (*peek_len)(struct socket *); } ; 22 struct kernel_cap_struct { __u32 cap[2U]; } ; 25 typedef struct kernel_cap_struct kernel_cap_t; 84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ; 4 typedef unsigned long cputime_t; 25 struct sem_undo_list ; 25 struct sysv_sem { struct sem_undo_list *undo_list; } ; 78 struct user_struct ; 26 struct sysv_shm { struct list_head shm_clist; } ; 24 struct __anonstruct_sigset_t_254 { unsigned long sig[1U]; } ; 24 typedef struct __anonstruct_sigset_t_254 sigset_t; 25 struct siginfo ; 17 typedef void __signalfn_t(int); 18 typedef __signalfn_t *__sighandler_t; 20 typedef void __restorefn_t(); 21 typedef __restorefn_t *__sigrestore_t; 34 union sigval { int sival_int; void *sival_ptr; } ; 10 typedef union sigval sigval_t; 11 struct __anonstruct__kill_256 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ; 11 struct __anonstruct__timer_257 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ; 11 struct __anonstruct__rt_258 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ; 11 struct __anonstruct__sigchld_259 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ; 11 struct __anonstruct__addr_bnd_262 { void *_lower; void *_upper; } ; 11 union __anonunion____missing_field_name_261 { struct __anonstruct__addr_bnd_262 _addr_bnd; __u32 _pkey; } ; 11 struct __anonstruct__sigfault_260 { void *_addr; short _addr_lsb; union __anonunion____missing_field_name_261 __annonCompField53; } ; 11 struct __anonstruct__sigpoll_263 { long _band; int _fd; } ; 11 struct __anonstruct__sigsys_264 { void *_call_addr; int _syscall; unsigned int _arch; } ; 11 union __anonunion__sifields_255 { int _pad[28U]; struct __anonstruct__kill_256 _kill; struct 
__anonstruct__timer_257 _timer; struct __anonstruct__rt_258 _rt; struct __anonstruct__sigchld_259 _sigchld; struct __anonstruct__sigfault_260 _sigfault; struct __anonstruct__sigpoll_263 _sigpoll; struct __anonstruct__sigsys_264 _sigsys; } ; 11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_255 _sifields; } ; 118 typedef struct siginfo siginfo_t; 22 struct sigpending { struct list_head list; sigset_t signal; } ; 257 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; } ; 271 struct k_sigaction { struct sigaction sa; } ; 457 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ; 464 struct pid_namespace ; 464 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ; 56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ; 68 struct pid_link { struct hlist_node node; struct pid *pid; } ; 43 struct seccomp_filter ; 44 struct seccomp { int mode; struct seccomp_filter *filter; } ; 40 struct rt_mutex_waiter ; 41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ; 11 struct timerqueue_node { struct rb_node node; ktime_t expires; } ; 12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ; 50 struct hrtimer_clock_base ; 51 struct hrtimer_cpu_base ; 60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; 65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; u8 state; u8 is_rel; int start_pid; void *start_site; char start_comm[16U]; } ; 125 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t (*get_time)(); ktime_t offset; } ; 158 struct hrtimer_cpu_base { raw_spinlock_t lock; seqcount_t seq; struct hrtimer *running; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; bool migration_enabled; bool nohz_active; unsigned char in_hrtirq; unsigned char hres_active; unsigned char hang_detected; ktime_t expires_next; struct hrtimer *next_timer; unsigned int nr_events; unsigned int nr_retries; unsigned int nr_hangs; unsigned int max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ; 12 enum kcov_mode { KCOV_MODE_DISABLED = 0, KCOV_MODE_TRACE = 1 } ; 17 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ; 45 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ; 41 struct assoc_array_ptr ; 41 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ; 31 typedef int32_t key_serial_t; 34 typedef uint32_t key_perm_t; 35 struct key ; 36 struct signal_struct ; 37 struct key_type ; 41 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ; 91 union key_payload { void *rcu_data0; void *data[4U]; } ; 128 union __anonunion____missing_field_name_299 { struct list_head graveyard_link; struct rb_node serial_node; } ; 128 struct key_user ; 128 union __anonunion____missing_field_name_300 { time_t expiry; time_t revoked_at; } ; 128 struct __anonstruct____missing_field_name_302 { struct key_type *type; char *description; } ; 128 union __anonunion____missing_field_name_301 { struct keyring_index_key index_key; 
struct __anonstruct____missing_field_name_302 __annonCompField56; } ; 128 struct __anonstruct____missing_field_name_304 { struct list_head name_link; struct assoc_array keys; } ; 128 union __anonunion____missing_field_name_303 { union key_payload payload; struct __anonstruct____missing_field_name_304 __annonCompField58; int reject_error; } ; 128 struct key { atomic_t usage; key_serial_t serial; union __anonunion____missing_field_name_299 __annonCompField54; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion____missing_field_name_300 __annonCompField55; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion____missing_field_name_301 __annonCompField57; union __anonunion____missing_field_name_303 __annonCompField59; int (*restrict_link)(struct key *, const struct key_type *, const union key_payload *); } ; 377 struct audit_context ; 27 struct group_info { atomic_t usage; int ngroups; int nblocks; kgid_t small_block[32U]; kgid_t *blocks[0U]; } ; 90 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; kernel_cap_t cap_ambient; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ; 377 struct percpu_ref ; 55 typedef void percpu_ref_func_t(struct percpu_ref *); 68 struct percpu_ref { atomic_long_t count; unsigned long percpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic; struct callback_head rcu; } ; 325 enum rcu_sync_type { RCU_SYNC = 0, RCU_SCHED_SYNC = 1, RCU_BH_SYNC = 2 } ; 331 struct rcu_sync { int gp_state; int gp_count; wait_queue_head_t gp_wait; int cb_state; struct callback_head cb_head; enum rcu_sync_type gp_type; } ; 65 struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int *fast_read_ctr; struct rw_semaphore rw_sem; atomic_t slow_read_ctr; wait_queue_head_t write_waitq; } ; 54 struct cgroup ; 55 struct cgroup_root ; 56 struct cgroup_subsys ; 57 struct cgroup_taskset ; 101 struct cgroup_file { struct kernfs_node *kn; } ; 90 struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; struct percpu_ref refcnt; struct cgroup_subsys_state *parent; struct list_head sibling; struct list_head children; int id; unsigned int flags; u64 serial_nr; atomic_t online_cnt; struct callback_head callback_head; struct work_struct destroy_work; } ; 141 struct css_set { atomic_t refcount; struct hlist_node hlist; struct list_head tasks; struct list_head mg_tasks; struct list_head cgrp_links; struct cgroup *dfl_cgrp; struct cgroup_subsys_state *subsys[13U]; struct list_head mg_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct cgroup *mg_dst_cgrp; struct css_set *mg_dst_cset; struct list_head e_cset_node[13U]; struct list_head task_iters; bool dead; struct callback_head callback_head; } ; 221 struct cgroup { struct cgroup_subsys_state self; unsigned long flags; int id; int level; int populated_cnt; struct kernfs_node *kn; struct cgroup_file procs_file; struct cgroup_file events_file; u16 
subtree_control; u16 subtree_ss_mask; u16 old_subtree_control; u16 old_subtree_ss_mask; struct cgroup_subsys_state *subsys[13U]; struct cgroup_root *root; struct list_head cset_links; struct list_head e_csets[13U]; struct list_head pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; struct work_struct release_agent_work; int ancestor_ids[]; } ; 306 struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int hierarchy_id; struct cgroup cgrp; int cgrp_ancestor_id_storage; atomic_t nr_cgrps; struct list_head root_list; unsigned int flags; struct idr cgroup_idr; char release_agent_path[4096U]; char name[64U]; } ; 345 struct cftype { char name[64U]; unsigned long private; size_t max_write_len; unsigned int flags; unsigned int file_offset; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 ); int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 ); ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); struct lock_class_key lockdep_key; } ; 430 struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int (*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_released)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); void (*css_reset)(struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_taskset *); void (*attach)(struct cgroup_taskset *); void (*post_attach)(); int (*can_fork)(struct task_struct *); void (*cancel_fork)(struct task_struct *); void (*fork)(struct task_struct *); void (*exit)(struct task_struct *); void (*free)(struct task_struct *); void (*bind)(struct cgroup_subsys_state *); bool early_init; bool implicit_on_dfl; bool broken_hierarchy; bool warned_broken_hierarchy; int id; const char *name; const char *legacy_name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *dfl_cftypes; struct cftype *legacy_cftypes; unsigned int depends_on; } ; 546 struct __anonstruct____missing_field_name_308 { u8 is_data; u8 padding; u16 prioidx; u32 classid; } ; 546 union __anonunion____missing_field_name_307 { struct __anonstruct____missing_field_name_308 __annonCompField60; u64 val; } ; 546 struct sock_cgroup_data { union __anonunion____missing_field_name_307 __annonCompField61; } ; 128 struct futex_pi_state ; 129 struct robust_list_head ; 130 struct bio_list ; 131 struct fs_struct ; 132 struct perf_event_context ; 133 struct blk_plug ; 135 struct nameidata ; 188 struct cfs_rq ; 189 struct task_group ; 493 struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ; 536 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ; 544 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ; 551 struct prev_cputime { cputime_t utime; cputime_t stime; raw_spinlock_t lock; } ; 576 struct task_cputime { cputime_t utime; 
cputime_t stime; unsigned long long sum_exec_runtime; } ; 592 struct task_cputime_atomic { atomic64_t utime; atomic64_t stime; atomic64_t sum_exec_runtime; } ; 614 struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; bool running; bool checking_timer; } ; 659 struct autogroup ; 660 struct tty_struct ; 660 struct taskstats ; 660 struct tty_audit_buf ; 660 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; atomic_t oom_victims; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; seqlock_t stats_lock; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; struct tty_audit_buf *tty_audit_buf; bool oom_flag_origin; short oom_score_adj; short oom_score_adj_min; struct mutex cred_guard_mutex; } ; 835 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; unsigned long unix_inflight; atomic_long_t pipe_bufs; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ; 880 struct backing_dev_info ; 881 struct reclaim_state ; 882 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ; 896 struct task_delay_info { spinlock_t lock; unsigned int flags; u64 blkio_start; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; u64 freepages_start; u64 freepages_delay; u32 freepages_count; } ; 953 struct wake_q_node { struct wake_q_node *next; } ; 1185 struct io_context ; 1219 struct uts_namespace ; 1220 struct load_weight { unsigned long weight; u32 inv_weight; } ; 1228 struct sched_avg { u64 last_update_time; u64 load_sum; u32 util_sum; u32 period_contrib; unsigned long load_avg; unsigned long util_avg; } ; 1286 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 
nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ; 1321 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ; 1358 struct rt_rq ; 1358 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; unsigned short on_rq; unsigned short on_list; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ; 1376 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ; 1440 struct tlbflush_unmap_batch { struct cpumask cpumask; bool flush_required; bool writable; } ; 1459 struct sched_class ; 1459 struct files_struct ; 1459 struct compat_robust_list_head ; 1459 struct numa_group ; 1459 struct kcov ; 1459 struct task_struct { volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; unsigned int wakee_flips; unsigned long wakee_flip_decay_ts; struct task_struct *last_wakee; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; unsigned long rcu_tasks_nvcsw; bool rcu_tasks_holdout; struct list_head rcu_tasks_holdout_list; int rcu_tasks_idle_cpu; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; u32 vmacache_seqnum; struct vm_area_struct *vmacache[4U]; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned long jobctl; unsigned int personality; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; unsigned char sched_migrated; unsigned char sched_remote_wakeup; unsigned char; unsigned char in_execve; unsigned char in_iowait; unsigned char restore_sigmask; unsigned char memcg_may_oom; unsigned char memcg_kmem_skip_account; unsigned char brk_randomized; unsigned long atomic_flags; struct restart_block restart_block; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t utimescaled; cputime_t stimescaled; cputime_t gtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; u64 start_time; u64 real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *real_cred; const struct 
cred *cred; char comm[16U]; struct nameidata *nameidata; struct sysv_sem sysvsem; struct sysv_shm sysvshm; unsigned long last_switch_count; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; unsigned int sas_ss_flags; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct wake_q_node wake_q; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; unsigned int in_ubsan; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t acct_timexpd; nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults; unsigned long total_numa_faults; unsigned long numa_faults_locality[3U]; unsigned long numa_pages_migrated; struct tlbflush_unmap_batch tlb_ubc; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; u64 timer_slack_ns; u64 default_timer_slack_ns; unsigned int kasan_depth; unsigned long trace; unsigned long trace_recursion; enum kcov_mode kcov_mode; unsigned int kcov_size; void *kcov_area; struct kcov *kcov; struct mem_cgroup *memcg_in_oom; gfp_t memcg_oom_gfp_mask; int memcg_oom_order; unsigned int memcg_nr_pages_over_high; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; unsigned long task_state_change; int pagefault_disabled; struct task_struct *oom_reaper_list; struct thread_struct thread; } ; 63 struct exception_table_entry { int insn; int 
fixup; int handler; } ; 161 struct in6_addr ; 145 struct sk_buff ; 184 struct klist_node ; 37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ; 93 struct hlist_bl_node ; 93 struct hlist_bl_head { struct hlist_bl_node *first; } ; 36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ; 114 struct __anonstruct____missing_field_name_346 { spinlock_t lock; int count; } ; 114 union __anonunion____missing_field_name_345 { struct __anonstruct____missing_field_name_346 __annonCompField65; } ; 114 struct lockref { union __anonunion____missing_field_name_345 __annonCompField66; } ; 77 struct path ; 78 struct vfsmount ; 79 struct __anonstruct____missing_field_name_348 { u32 hash; u32 len; } ; 79 union __anonunion____missing_field_name_347 { struct __anonstruct____missing_field_name_348 __annonCompField67; u64 hash_len; } ; 79 struct qstr { union __anonunion____missing_field_name_347 __annonCompField68; const unsigned char *name; } ; 65 struct dentry_operations ; 65 union __anonunion____missing_field_name_349 { struct list_head d_lru; wait_queue_head_t *d_wait; } ; 65 union __anonunion_d_u_350 { struct hlist_node d_alias; struct hlist_bl_node d_in_lookup_hash; struct callback_head d_rcu; } ; 65 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; union __anonunion____missing_field_name_349 __annonCompField69; struct list_head d_child; struct list_head d_subdirs; union __anonunion_d_u_350 d_u; } ; 121 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); int (*d_init)(struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(struct dentry *, bool ); struct dentry * (*d_real)(struct dentry *, const struct inode *, unsigned int); } ; 591 struct path { struct vfsmount *mnt; struct dentry *dentry; } ; 19 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; int nid; struct mem_cgroup *memcg; } ; 27 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ; 80 struct list_lru_one { struct list_head list; long nr_items; } ; 32 struct list_lru_memcg { struct list_lru_one *lru[0U]; } ; 37 struct list_lru_node { spinlock_t lock; struct list_lru_one lru; struct list_lru_memcg *memcg_lrus; } ; 47 struct list_lru { struct list_lru_node *node; struct list_head list; } ; 63 struct __anonstruct____missing_field_name_352 { struct radix_tree_node *parent; void *private_data; } ; 63 union __anonunion____missing_field_name_351 { struct __anonstruct____missing_field_name_352 __annonCompField70; struct callback_head callback_head; } ; 63 struct radix_tree_node { unsigned char shift; unsigned char offset; unsigned int count; union 
__anonunion____missing_field_name_351 __annonCompField71; struct list_head private_list; void *slots[64U]; unsigned long tags[3U][1U]; } ; 106 struct radix_tree_root { gfp_t gfp_mask; struct radix_tree_node *rnode; } ; 45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ; 38 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ; 44 struct bio_vec { struct page *bv_page; unsigned int bv_len; unsigned int bv_offset; } ; 34 struct bvec_iter { sector_t bi_sector; unsigned int bi_size; unsigned int bi_idx; unsigned int bi_bvec_done; } ; 84 struct bio_set ; 85 struct bio ; 86 struct bio_integrity_payload ; 87 struct block_device ; 18 typedef void bio_end_io_t(struct bio *); 20 union __anonunion____missing_field_name_359 { struct bio_integrity_payload *bi_integrity; } ; 20 struct bio { struct bio *bi_next; struct block_device *bi_bdev; int bi_error; unsigned int bi_opf; unsigned short bi_flags; unsigned short bi_ioprio; struct bvec_iter bi_iter; unsigned int bi_phys_segments; unsigned int bi_seg_front_size; unsigned int bi_seg_back_size; atomic_t __bi_remaining; bio_end_io_t *bi_end_io; void *bi_private; struct io_context *bi_ioc; struct cgroup_subsys_state *bi_css; union __anonunion____missing_field_name_359 __annonCompField72; unsigned short bi_vcnt; unsigned short bi_max_vecs; atomic_t __bi_cnt; struct bio_vec *bi_io_vec; struct bio_set *bi_pool; struct bio_vec bi_inline_vecs[0U]; } ; 266 struct delayed_call { void (*fn)(void *); void *arg; } ; 261 struct bdi_writeback ; 262 struct export_operations ; 264 struct kstatfs ; 265 struct swap_info_struct ; 266 struct fscrypt_info ; 267 struct fscrypt_operations ; 76 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ; 261 struct percpu_counter { raw_spinlock_t lock; s64 count; struct list_head list; s32 *counters; } ; 213 struct dquot ; 214 struct kqid ; 19 typedef __kernel_uid32_t projid_t; 23 struct __anonstruct_kprojid_t_360 { projid_t val; } ; 23 typedef struct __anonstruct_kprojid_t_360 kprojid_t; 181 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ; 66 typedef long long qsize_t; 67 union __anonunion____missing_field_name_361 { kuid_t uid; kgid_t gid; kprojid_t projid; } ; 67 struct kqid { union __anonunion____missing_field_name_361 __annonCompField73; enum quota_type type; } ; 194 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time64_t dqb_btime; time64_t dqb_itime; } ; 216 struct quota_format_type ; 217 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_max_spc_limit; qsize_t dqi_max_ino_limit; void *dqi_priv; } ; 282 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ; 309 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct 
super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); int (*get_next_id)(struct super_block *, struct kqid *); } ; 321 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); int (*get_projid)(struct inode *, kprojid_t *); int (*get_next_id)(struct super_block *, struct kqid *); } ; 338 struct qc_dqblk { int d_fieldmask; u64 d_spc_hardlimit; u64 d_spc_softlimit; u64 d_ino_hardlimit; u64 d_ino_softlimit; u64 d_space; u64 d_ino_count; s64 d_ino_timer; s64 d_spc_timer; int d_ino_warns; int d_spc_warns; u64 d_rt_spc_hardlimit; u64 d_rt_spc_softlimit; u64 d_rt_space; s64 d_rt_spc_timer; int d_rt_spc_warns; } ; 361 struct qc_type_state { unsigned int flags; unsigned int spc_timelimit; unsigned int ino_timelimit; unsigned int rt_spc_timelimit; unsigned int spc_warnlimit; unsigned int ino_warnlimit; unsigned int rt_spc_warnlimit; unsigned long long ino; blkcnt_t blocks; blkcnt_t nextents; } ; 407 struct qc_state { unsigned int s_incoredqs; struct qc_type_state s_state[3U]; } ; 418 struct qc_info { int i_fieldmask; unsigned int i_flags; unsigned int i_spc_timelimit; unsigned int i_ino_timelimit; unsigned int i_rt_spc_timelimit; unsigned int i_spc_warnlimit; unsigned int i_ino_warnlimit; unsigned int i_rt_spc_warnlimit; } ; 431 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, struct path *); int (*quota_off)(struct super_block *, int); int (*quota_enable)(struct super_block *, unsigned int); int (*quota_disable)(struct super_block *, unsigned int); int (*quota_sync)(struct super_block *, int); int (*set_info)(struct super_block *, int, struct qc_info *); int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); } ; 447 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ; 511 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct mutex dqonoff_mutex; struct inode *files[3U]; struct mem_dqinfo info[3U]; const struct quota_format_ops *ops[3U]; } ; 541 struct writeback_control ; 542 struct kiocb { struct file *ki_filp; loff_t ki_pos; void (*ki_complete)(struct kiocb *, long, long); void *private; int ki_flags; } ; 367 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page 
*, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); bool (*isolate_page)(struct page *, isolate_mode_t ); void (*putback_page)(struct page *); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ; 426 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; atomic_t i_mmap_writable; struct rb_root i_mmap; struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; unsigned long nrexceptional; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; spinlock_t private_lock; struct list_head private_list; void *private_data; } ; 447 struct request_queue ; 448 struct hd_struct ; 448 struct gendisk ; 448 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct list_head bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; } ; 563 struct posix_acl ; 589 struct inode_operations ; 589 union __anonunion____missing_field_name_366 { const unsigned int i_nlink; unsigned int __i_nlink; } ; 589 union __anonunion____missing_field_name_367 { struct hlist_head i_dentry; struct callback_head i_rcu; } ; 589 struct file_lock_context ; 589 struct cdev ; 589 union __anonunion____missing_field_name_368 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; unsigned int i_dir_seq; } ; 589 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion____missing_field_name_366 __annonCompField74; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct rw_semaphore i_rwsem; unsigned long dirtied_when; unsigned long dirtied_time_when; struct hlist_node i_hash; struct list_head i_io_list; struct bdi_writeback *i_wb; int i_wb_frn_winner; u16 i_wb_frn_avg_time; u16 i_wb_frn_history; struct list_head i_lru; struct list_head i_sb_list; struct list_head i_wb_list; union __anonunion____missing_field_name_367 __annonCompField75; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; const struct file_operations *i_fop; struct file_lock_context *i_flctx; struct address_space i_data; struct list_head i_devices; union __anonunion____missing_field_name_368 __annonCompField76; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head 
i_fsnotify_marks; struct fscrypt_info *i_crypt_info; void *i_private; } ; 843 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ; 851 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ; 874 union __anonunion_f_u_369 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ; 874 struct file { union __anonunion_f_u_369 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; } ; 959 typedef void *fl_owner_t; 960 struct file_lock ; 961 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ; 967 struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); fl_owner_t (*lm_get_owner)(fl_owner_t ); void (*lm_put_owner)(fl_owner_t ); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, int); bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); } ; 994 struct nlm_lockowner ; 995 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ; 14 struct nfs4_lock_state ; 15 struct nfs4_lock_info { struct nfs4_lock_state *owner; } ; 19 struct __anonstruct_afs_371 { struct list_head link; int state; } ; 19 union __anonunion_fl_u_370 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_371 afs; } ; 19 struct file_lock { struct file_lock *fl_next; struct list_head fl_list; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_370 fl_u; } ; 1047 struct file_lock_context { spinlock_t flc_lock; struct list_head flc_flock; struct list_head flc_posix; struct list_head flc_lease; } ; 1255 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ; 1290 struct sb_writers { int frozen; wait_queue_head_t wait_unfrozen; struct percpu_rw_semaphore rw_sem[3U]; } ; 1320 struct super_operations ; 1320 struct xattr_handler ; 1320 struct mtd_info ; 1320 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_iflags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void 
*s_security; const struct xattr_handler **s_xattr; const struct fscrypt_operations *s_cop; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; unsigned int s_quota_types; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; struct user_namespace *s_user_ns; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; struct work_struct destroy_work; struct mutex s_sync_lock; int s_stack_depth; spinlock_t s_inode_list_lock; struct list_head s_inodes; spinlock_t s_inode_wblist_lock; struct list_head s_inodes_wb; } ; 1603 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ; 1616 struct dir_context ; 1641 struct dir_context { int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ; 1648 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iterate)(struct file *, struct dir_context *); int (*iterate_shared)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*aio_fsync)(struct kiocb *, int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **, void **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); void (*show_fdinfo)(struct seq_file *, struct file *); ssize_t (*copy_file_range)(struct file *, loff_t , struct file *, loff_t , size_t , unsigned int); int (*clone_file_range)(struct file *, loff_t , struct file *, loff_t , u64 ); ssize_t (*dedupe_file_range)(struct file *, u64 , u64 , struct file *, u64 ); } ; 1717 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); int (*permission)(struct inode *, int); struct posix_acl * 
(*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *); int (*rename2)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *); int (*setxattr)(struct dentry *, struct inode *, const char *, const void *, size_t , int); ssize_t (*getxattr)(struct dentry *, struct inode *, const char *, void *, size_t ); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*removexattr)(struct dentry *, const char *); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ; 1774 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_super)(struct super_block *); int (*freeze_fs)(struct super_block *); int (*thaw_super)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); struct dquot ** (*get_dquots)(struct inode *); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ; 2018 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ; 3193 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; const struct file *file; void *private; 
} ; 30 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ; 222 struct pinctrl ; 223 struct pinctrl_state ; 194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *init_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ; 76 struct dma_map_ops ; 76 struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; } ; 24 struct device_private ; 25 struct device_driver ; 26 struct driver_private ; 27 struct class ; 28 struct subsys_private ; 29 struct bus_type ; 30 struct device_node ; 31 struct fwnode_handle ; 32 struct iommu_ops ; 33 struct iommu_group ; 61 struct device_attribute ; 61 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ; 142 struct device_type ; 201 enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ; 207 struct of_device_id ; 207 struct acpi_device_id ; 207 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; enum probe_type probe_type; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ; 357 struct class_attribute ; 357 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ; 450 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ; 518 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ; 546 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const 
char *, size_t ); } ; 699 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ; 708 struct irq_domain ; 708 struct dma_coherent_mem ; 708 struct cma ; 708 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct irq_domain *msi_domain; struct dev_pin_info *pins; struct list_head msi_list; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct fwnode_handle *fwnode; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; bool offline_disabled; bool offline; } ; 862 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ; 1327 struct scatterlist ; 89 enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ; 143 union __anonunion___u_373 { unsigned long __val; char __c[1U]; } ; 273 struct vm_fault { unsigned int flags; gfp_t gfp_mask; unsigned long pgoff; void *virtual_address; struct page *cow_page; struct page *page; void *entry; } ; 308 struct fault_env { struct vm_area_struct *vma; unsigned long address; unsigned int flags; pmd_t *pmd; pte_t *pte; spinlock_t *ptl; pgtable_t prealloc_pte; } ; 335 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*mremap)(struct vm_area_struct *); int (*fault)(struct vm_area_struct *, struct vm_fault *); int (*pmd_fault)(struct vm_area_struct *, unsigned long, pmd_t *, unsigned int); void (*map_pages)(struct fault_env *, unsigned long, unsigned long); int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ; 2451 struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; } ; 21 struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; } ; 406 struct dma_map_ops { void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , unsigned long); void (*free)(struct device *, size_t , void *, dma_addr_t , unsigned long); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , unsigned long); int 
(*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , unsigned long); dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , unsigned long); void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction ); int (*mapping_error)(struct device *, dma_addr_t ); int (*dma_supported)(struct device *, u64 ); int (*set_dma_mask)(struct device *, u64 ); int is_phys; } ; 15 typedef u64 netdev_features_t; 70 union __anonunion_in6_u_382 { __u8 u6_addr8[16U]; __be16 u6_addr16[8U]; __be32 u6_addr32[4U]; } ; 70 struct in6_addr { union __anonunion_in6_u_382 in6_u; } ; 46 struct ethhdr { unsigned char h_dest[6U]; unsigned char h_source[6U]; __be16 h_proto; } ; 199 struct pipe_buf_operations ; 199 struct pipe_buffer { struct page *page; unsigned int offset; unsigned int len; const struct pipe_buf_operations *ops; unsigned int flags; unsigned long private; } ; 27 struct pipe_inode_info { struct mutex mutex; wait_queue_head_t wait; unsigned int nrbufs; unsigned int curbuf; unsigned int buffers; unsigned int readers; unsigned int writers; unsigned int files; unsigned int waiting_writers; unsigned int r_counter; unsigned int w_counter; struct page *tmp_page; struct fasync_struct *fasync_readers; struct fasync_struct *fasync_writers; struct pipe_buffer *bufs; struct user_struct *user; } ; 63 struct pipe_buf_operations { int can_merge; int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); void (*release)(struct pipe_inode_info *, struct pipe_buffer *); int (*steal)(struct pipe_inode_info *, struct pipe_buffer *); void (*get)(struct pipe_inode_info *, struct pipe_buffer *); } ; 295 struct flowi_tunnel { __be64 tun_id; } ; 26 struct flowi_common { int flowic_oif; int flowic_iif; __u32 flowic_mark; __u8 flowic_tos; __u8 flowic_scope; __u8 flowic_proto; __u8 flowic_flags; __u32 flowic_secid; struct flowi_tunnel flowic_tun_key; } ; 42 struct __anonstruct_ports_389 { __be16 dport; __be16 sport; } ; 42 struct __anonstruct_icmpt_390 { __u8 type; __u8 code; } ; 42 struct __anonstruct_dnports_391 { __le16 dport; __le16 sport; } ; 42 struct __anonstruct_mht_392 { __u8 type; } ; 42 union flowi_uli { struct __anonstruct_ports_389 ports; struct __anonstruct_icmpt_390 icmpt; struct __anonstruct_dnports_391 dnports; __be32 spi; __be32 gre_key; struct __anonstruct_mht_392 mht; } ; 66 struct flowi4 { struct flowi_common __fl_common; __be32 saddr; __be32 daddr; union flowi_uli uli; } ; 123 struct flowi6 { struct flowi_common __fl_common; struct in6_addr daddr; struct in6_addr saddr; __be32 flowlabel; union flowi_uli uli; } ; 141 struct flowidn { struct flowi_common __fl_common; __le16 daddr; __le16 saddr; union flowi_uli uli; } ; 161 union __anonunion_u_393 { struct flowi_common __fl_common; struct flowi4 ip4; struct flowi6 ip6; struct flowidn dn; } ; 161 struct flowi { union __anonunion_u_393 u; } ; 265 struct napi_struct ; 266 struct 
nf_conntrack { atomic_t use; } ; 254 union __anonunion____missing_field_name_394 { __be32 ipv4_daddr; struct in6_addr ipv6_daddr; char neigh_header[8U]; } ; 254 struct nf_bridge_info { atomic_t use; unsigned char orig_proto; unsigned char pkt_otherhost; unsigned char in_prerouting; unsigned char bridged_dnat; __u16 frag_max_size; struct net_device *physindev; struct net_device *physoutdev; union __anonunion____missing_field_name_394 __annonCompField82; } ; 278 struct sk_buff_head { struct sk_buff *next; struct sk_buff *prev; __u32 qlen; spinlock_t lock; } ; 310 struct skb_frag_struct ; 310 typedef struct skb_frag_struct skb_frag_t; 311 struct __anonstruct_page_395 { struct page *p; } ; 311 struct skb_frag_struct { struct __anonstruct_page_395 page; __u32 page_offset; __u32 size; } ; 344 struct skb_shared_hwtstamps { ktime_t hwtstamp; } ; 410 struct skb_shared_info { unsigned char nr_frags; __u8 tx_flags; unsigned short gso_size; unsigned short gso_segs; unsigned short gso_type; struct sk_buff *frag_list; struct skb_shared_hwtstamps hwtstamps; u32 tskey; __be32 ip6_frag_id; atomic_t dataref; void *destructor_arg; skb_frag_t frags[17U]; } ; 500 typedef unsigned int sk_buff_data_t; 501 struct __anonstruct____missing_field_name_397 { u32 stamp_us; u32 stamp_jiffies; } ; 501 union __anonunion____missing_field_name_396 { u64 v64; struct __anonstruct____missing_field_name_397 __annonCompField83; } ; 501 struct skb_mstamp { union __anonunion____missing_field_name_396 __annonCompField84; } ; 564 union __anonunion____missing_field_name_400 { ktime_t tstamp; struct skb_mstamp skb_mstamp; } ; 564 struct __anonstruct____missing_field_name_399 { struct sk_buff *next; struct sk_buff *prev; union __anonunion____missing_field_name_400 __annonCompField85; } ; 564 union __anonunion____missing_field_name_398 { struct __anonstruct____missing_field_name_399 __annonCompField86; struct rb_node rbnode; } ; 564 struct sec_path ; 564 struct __anonstruct____missing_field_name_402 { __u16 csum_start; __u16 csum_offset; } ; 564 union __anonunion____missing_field_name_401 { __wsum csum; struct __anonstruct____missing_field_name_402 __annonCompField88; } ; 564 union __anonunion____missing_field_name_403 { unsigned int napi_id; unsigned int sender_cpu; } ; 564 union __anonunion____missing_field_name_404 { __u32 secmark; __u32 offload_fwd_mark; } ; 564 union __anonunion____missing_field_name_405 { __u32 mark; __u32 reserved_tailroom; } ; 564 union __anonunion____missing_field_name_406 { __be16 inner_protocol; __u8 inner_ipproto; } ; 564 struct sk_buff { union __anonunion____missing_field_name_398 __annonCompField87; struct sock *sk; struct net_device *dev; char cb[48U]; unsigned long _skb_refdst; void (*destructor)(struct sk_buff *); struct sec_path *sp; struct nf_conntrack *nfct; struct nf_bridge_info *nf_bridge; unsigned int len; unsigned int data_len; __u16 mac_len; __u16 hdr_len; __u16 queue_mapping; unsigned char cloned; unsigned char nohdr; unsigned char fclone; unsigned char peeked; unsigned char head_frag; unsigned char xmit_more; __u32 headers_start[0U]; __u8 __pkt_type_offset[0U]; unsigned char pkt_type; unsigned char pfmemalloc; unsigned char ignore_df; unsigned char nfctinfo; unsigned char nf_trace; unsigned char ip_summed; unsigned char ooo_okay; unsigned char l4_hash; unsigned char sw_hash; unsigned char wifi_acked_valid; unsigned char wifi_acked; unsigned char no_fcs; unsigned char encapsulation; unsigned char encap_hdr_csum; unsigned char csum_valid; unsigned char csum_complete_sw; unsigned char csum_level; 
unsigned char csum_bad; unsigned char ndisc_nodetype; unsigned char ipvs_property; unsigned char inner_protocol_type; unsigned char remcsum_offload; __u16 tc_index; __u16 tc_verd; union __anonunion____missing_field_name_401 __annonCompField89; __u32 priority; int skb_iif; __u32 hash; __be16 vlan_proto; __u16 vlan_tci; union __anonunion____missing_field_name_403 __annonCompField90; union __anonunion____missing_field_name_404 __annonCompField91; union __anonunion____missing_field_name_405 __annonCompField92; union __anonunion____missing_field_name_406 __annonCompField93; __u16 inner_transport_header; __u16 inner_network_header; __u16 inner_mac_header; __be16 protocol; __u16 transport_header; __u16 network_header; __u16 mac_header; __u32 headers_end[0U]; sk_buff_data_t tail; sk_buff_data_t end; unsigned char *head; unsigned char *data; unsigned int truesize; atomic_t users; } ; 831 struct dst_entry ; 880 struct rtable ; 1012 enum pkt_hash_types { PKT_HASH_TYPE_NONE = 0, PKT_HASH_TYPE_L2 = 1, PKT_HASH_TYPE_L3 = 2, PKT_HASH_TYPE_L4 = 3 } ; 3815 struct iphdr { unsigned char ihl; unsigned char version; __u8 tos; __be16 tot_len; __be16 id; __be16 frag_off; __u8 ttl; __u8 protocol; __sum16 check; __be32 saddr; __be32 daddr; } ; 1402 struct dql { unsigned int num_queued; unsigned int adj_limit; unsigned int last_obj_cnt; unsigned int limit; unsigned int num_completed; unsigned int prev_ovlimit; unsigned int prev_num_queued; unsigned int prev_last_obj_cnt; unsigned int lowest_slack; unsigned long slack_start_time; unsigned int max_limit; unsigned int min_limit; unsigned int slack_hold_time; } ; 43 struct __anonstruct_sync_serial_settings_409 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; } ; 43 typedef struct __anonstruct_sync_serial_settings_409 sync_serial_settings; 50 struct __anonstruct_te1_settings_410 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; unsigned int slot_map; } ; 50 typedef struct __anonstruct_te1_settings_410 te1_settings; 55 struct __anonstruct_raw_hdlc_proto_411 { unsigned short encoding; unsigned short parity; } ; 55 typedef struct __anonstruct_raw_hdlc_proto_411 raw_hdlc_proto; 65 struct __anonstruct_fr_proto_412 { unsigned int t391; unsigned int t392; unsigned int n391; unsigned int n392; unsigned int n393; unsigned short lmi; unsigned short dce; } ; 65 typedef struct __anonstruct_fr_proto_412 fr_proto; 69 struct __anonstruct_fr_proto_pvc_413 { unsigned int dlci; } ; 69 typedef struct __anonstruct_fr_proto_pvc_413 fr_proto_pvc; 74 struct __anonstruct_fr_proto_pvc_info_414 { unsigned int dlci; char master[16U]; } ; 74 typedef struct __anonstruct_fr_proto_pvc_info_414 fr_proto_pvc_info; 79 struct __anonstruct_cisco_proto_415 { unsigned int interval; unsigned int timeout; } ; 79 typedef struct __anonstruct_cisco_proto_415 cisco_proto; 117 struct ifmap { unsigned long mem_start; unsigned long mem_end; unsigned short base_addr; unsigned char irq; unsigned char dma; unsigned char port; } ; 197 union __anonunion_ifs_ifsu_416 { raw_hdlc_proto *raw_hdlc; cisco_proto *cisco; fr_proto *fr; fr_proto_pvc *fr_pvc; fr_proto_pvc_info *fr_pvc_info; sync_serial_settings *sync; te1_settings *te1; } ; 197 struct if_settings { unsigned int type; unsigned int size; union __anonunion_ifs_ifsu_416 ifs_ifsu; } ; 216 union __anonunion_ifr_ifrn_417 { char ifrn_name[16U]; } ; 216 union __anonunion_ifr_ifru_418 { struct sockaddr ifru_addr; struct sockaddr ifru_dstaddr; struct sockaddr ifru_broadaddr; struct sockaddr ifru_netmask; struct sockaddr 
ifru_hwaddr; short ifru_flags; int ifru_ivalue; int ifru_mtu; struct ifmap ifru_map; char ifru_slave[16U]; char ifru_newname[16U]; void *ifru_data; struct if_settings ifru_settings; } ; 216 struct ifreq { union __anonunion_ifr_ifrn_417 ifr_ifrn; union __anonunion_ifr_ifru_418 ifr_ifru; } ; 18 typedef s32 compat_time_t; 39 typedef s32 compat_long_t; 45 typedef u32 compat_uptr_t; 46 struct compat_timespec { compat_time_t tv_sec; s32 tv_nsec; } ; 278 struct compat_robust_list { compat_uptr_t next; } ; 282 struct compat_robust_list_head { struct compat_robust_list list; compat_long_t futex_offset; compat_uptr_t list_op_pending; } ; 39 struct ethtool_cmd { __u32 cmd; __u32 supported; __u32 advertising; __u16 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 transceiver; __u8 autoneg; __u8 mdio_support; __u32 maxtxpkt; __u32 maxrxpkt; __u16 speed_hi; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __u32 lp_advertising; __u32 reserved[2U]; } ; 131 struct ethtool_drvinfo { __u32 cmd; char driver[32U]; char version[32U]; char fw_version[32U]; char bus_info[32U]; char erom_version[32U]; char reserved2[12U]; __u32 n_priv_flags; __u32 n_stats; __u32 testinfo_len; __u32 eedump_len; __u32 regdump_len; } ; 195 struct ethtool_wolinfo { __u32 cmd; __u32 supported; __u32 wolopts; __u8 sopass[6U]; } ; 239 struct ethtool_tunable { __u32 cmd; __u32 id; __u32 type_id; __u32 len; void *data[0U]; } ; 251 struct ethtool_regs { __u32 cmd; __u32 version; __u32 len; __u8 data[0U]; } ; 273 struct ethtool_eeprom { __u32 cmd; __u32 magic; __u32 offset; __u32 len; __u8 data[0U]; } ; 299 struct ethtool_eee { __u32 cmd; __u32 supported; __u32 advertised; __u32 lp_advertised; __u32 eee_active; __u32 eee_enabled; __u32 tx_lpi_enabled; __u32 tx_lpi_timer; __u32 reserved[2U]; } ; 328 struct ethtool_modinfo { __u32 cmd; __u32 type; __u32 eeprom_len; __u32 reserved[8U]; } ; 345 struct ethtool_coalesce { __u32 cmd; __u32 rx_coalesce_usecs; __u32 rx_max_coalesced_frames; __u32 rx_coalesce_usecs_irq; __u32 rx_max_coalesced_frames_irq; __u32 tx_coalesce_usecs; __u32 tx_max_coalesced_frames; __u32 tx_coalesce_usecs_irq; __u32 tx_max_coalesced_frames_irq; __u32 stats_block_coalesce_usecs; __u32 use_adaptive_rx_coalesce; __u32 use_adaptive_tx_coalesce; __u32 pkt_rate_low; __u32 rx_coalesce_usecs_low; __u32 rx_max_coalesced_frames_low; __u32 tx_coalesce_usecs_low; __u32 tx_max_coalesced_frames_low; __u32 pkt_rate_high; __u32 rx_coalesce_usecs_high; __u32 rx_max_coalesced_frames_high; __u32 tx_coalesce_usecs_high; __u32 tx_max_coalesced_frames_high; __u32 rate_sample_interval; } ; 444 struct ethtool_ringparam { __u32 cmd; __u32 rx_max_pending; __u32 rx_mini_max_pending; __u32 rx_jumbo_max_pending; __u32 tx_max_pending; __u32 rx_pending; __u32 rx_mini_pending; __u32 rx_jumbo_pending; __u32 tx_pending; } ; 481 struct ethtool_channels { __u32 cmd; __u32 max_rx; __u32 max_tx; __u32 max_other; __u32 max_combined; __u32 rx_count; __u32 tx_count; __u32 other_count; __u32 combined_count; } ; 509 struct ethtool_pauseparam { __u32 cmd; __u32 autoneg; __u32 rx_pause; __u32 tx_pause; } ; 613 struct ethtool_test { __u32 cmd; __u32 flags; __u32 reserved; __u32 len; __u64 data[0U]; } ; 645 struct ethtool_stats { __u32 cmd; __u32 n_stats; __u64 data[0U]; } ; 687 struct ethtool_tcpip4_spec { __be32 ip4src; __be32 ip4dst; __be16 psrc; __be16 pdst; __u8 tos; } ; 720 struct ethtool_ah_espip4_spec { __be32 ip4src; __be32 ip4dst; __be32 spi; __u8 tos; } ; 736 struct ethtool_usrip4_spec { __be32 ip4src; __be32 ip4dst; __be32 l4_4_bytes; __u8 tos; __u8 ip_ver; 
__u8 proto; } ; 756 struct ethtool_tcpip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be16 psrc; __be16 pdst; __u8 tclass; } ; 774 struct ethtool_ah_espip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be32 spi; __u8 tclass; } ; 790 struct ethtool_usrip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be32 l4_4_bytes; __u8 tclass; __u8 l4_proto; } ; 806 union ethtool_flow_union { struct ethtool_tcpip4_spec tcp_ip4_spec; struct ethtool_tcpip4_spec udp_ip4_spec; struct ethtool_tcpip4_spec sctp_ip4_spec; struct ethtool_ah_espip4_spec ah_ip4_spec; struct ethtool_ah_espip4_spec esp_ip4_spec; struct ethtool_usrip4_spec usr_ip4_spec; struct ethtool_tcpip6_spec tcp_ip6_spec; struct ethtool_tcpip6_spec udp_ip6_spec; struct ethtool_tcpip6_spec sctp_ip6_spec; struct ethtool_ah_espip6_spec ah_ip6_spec; struct ethtool_ah_espip6_spec esp_ip6_spec; struct ethtool_usrip6_spec usr_ip6_spec; struct ethhdr ether_spec; __u8 hdata[52U]; } ; 823 struct ethtool_flow_ext { __u8 padding[2U]; unsigned char h_dest[6U]; __be16 vlan_etype; __be16 vlan_tci; __be32 data[2U]; } ; 842 struct ethtool_rx_flow_spec { __u32 flow_type; union ethtool_flow_union h_u; struct ethtool_flow_ext h_ext; union ethtool_flow_union m_u; struct ethtool_flow_ext m_ext; __u64 ring_cookie; __u32 location; } ; 892 struct ethtool_rxnfc { __u32 cmd; __u32 flow_type; __u64 data; struct ethtool_rx_flow_spec fs; __u32 rule_cnt; __u32 rule_locs[0U]; } ; 1063 struct ethtool_flash { __u32 cmd; __u32 region; char data[128U]; } ; 1071 struct ethtool_dump { __u32 cmd; __u32 version; __u32 flag; __u32 len; __u8 data[0U]; } ; 1147 struct ethtool_ts_info { __u32 cmd; __u32 so_timestamping; __s32 phc_index; __u32 tx_types; __u32 tx_reserved[3U]; __u32 rx_filters; __u32 rx_reserved[3U]; } ; 1515 struct ethtool_link_settings { __u32 cmd; __u32 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 autoneg; __u8 mdio_support; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __s8 link_mode_masks_nwords; __u32 reserved[8U]; __u32 link_mode_masks[0U]; } ; 39 enum ethtool_phys_id_state { ETHTOOL_ID_INACTIVE = 0, ETHTOOL_ID_ACTIVE = 1, ETHTOOL_ID_ON = 2, ETHTOOL_ID_OFF = 3 } ; 97 struct __anonstruct_link_modes_438 { unsigned long supported[1U]; unsigned long advertising[1U]; unsigned long lp_advertising[1U]; } ; 97 struct ethtool_link_ksettings { struct ethtool_link_settings base; struct __anonstruct_link_modes_438 link_modes; } ; 158 struct ethtool_ops { int (*get_settings)(struct net_device *, struct ethtool_cmd *); int (*set_settings)(struct net_device *, struct ethtool_cmd *); void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); int (*get_regs_len)(struct net_device *); void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); u32 (*get_msglevel)(struct net_device *); void (*set_msglevel)(struct net_device *, u32 ); int (*nway_reset)(struct net_device *); u32 (*get_link)(struct net_device *); int (*get_eeprom_len)(struct net_device *); int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); void (*get_pauseparam)(struct net_device *, struct 
ethtool_pauseparam *); int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *); void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); void (*get_strings)(struct net_device *, u32 , u8 *); int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state ); void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); int (*begin)(struct net_device *); void (*complete)(struct net_device *); u32 (*get_priv_flags)(struct net_device *); int (*set_priv_flags)(struct net_device *, u32 ); int (*get_sset_count)(struct net_device *, int); int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *); int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); int (*flash_device)(struct net_device *, struct ethtool_flash *); int (*reset)(struct net_device *, u32 *); u32 (*get_rxfh_key_size)(struct net_device *); u32 (*get_rxfh_indir_size)(struct net_device *); int (*get_rxfh)(struct net_device *, u32 *, u8 *, u8 *); int (*set_rxfh)(struct net_device *, const u32 *, const u8 *, const u8 ); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); int (*set_dump)(struct net_device *, struct ethtool_dump *); int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *); int (*get_module_info)(struct net_device *, struct ethtool_modinfo *); int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_eee)(struct net_device *, struct ethtool_eee *); int (*set_eee)(struct net_device *, struct ethtool_eee *); int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *); int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); int (*get_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *); int (*set_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *); int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *); int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); } ; 375 struct prot_inuse ; 376 struct netns_core { struct ctl_table_header *sysctl_hdr; int sysctl_somaxconn; struct prot_inuse *inuse; } ; 38 struct u64_stats_sync { } ; 160 struct ipstats_mib { u64 mibs[36U]; struct u64_stats_sync syncp; } ; 61 struct icmp_mib { unsigned long mibs[28U]; } ; 67 struct icmpmsg_mib { atomic_long_t mibs[512U]; } ; 72 struct icmpv6_mib { unsigned long mibs[6U]; } ; 79 struct icmpv6_mib_device { atomic_long_t mibs[6U]; } ; 83 struct icmpv6msg_mib { atomic_long_t mibs[512U]; } ; 89 struct icmpv6msg_mib_device { atomic_long_t mibs[512U]; } ; 93 struct tcp_mib { unsigned long mibs[16U]; } ; 100 struct udp_mib { unsigned long mibs[9U]; } ; 106 struct linux_mib { unsigned long mibs[117U]; } ; 112 struct linux_xfrm_mib { unsigned long mibs[29U]; } ; 118 struct proc_dir_entry ; 118 struct netns_mib { struct tcp_mib *tcp_statistics; struct ipstats_mib *ip_statistics; struct linux_mib *net_statistics; struct udp_mib *udp_statistics; struct udp_mib *udplite_statistics; struct icmp_mib *icmp_statistics; struct icmpmsg_mib *icmpmsg_statistics; struct proc_dir_entry *proc_net_devsnmp6; struct udp_mib *udp_stats_in6; struct udp_mib *udplite_stats_in6; struct ipstats_mib *ipv6_statistics; struct icmpv6_mib *icmpv6_statistics; struct icmpv6msg_mib *icmpv6msg_statistics; struct 
linux_xfrm_mib *xfrm_statistics; } ; 26 struct netns_unix { int sysctl_max_dgram_qlen; struct ctl_table_header *ctl; } ; 12 struct netns_packet { struct mutex sklist_lock; struct hlist_head sklist; } ; 14 struct netns_frags { struct percpu_counter mem; int timeout; int high_thresh; int low_thresh; int max_dist; } ; 187 struct ipv4_devconf ; 188 struct fib_rules_ops ; 189 struct fib_table ; 190 struct local_ports { seqlock_t lock; int range[2U]; bool warned; } ; 24 struct ping_group_range { seqlock_t lock; kgid_t range[2U]; } ; 29 struct inet_peer_base ; 29 struct xt_table ; 29 struct netns_ipv4 { struct ctl_table_header *forw_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *ipv4_hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *xfrm4_hdr; struct ipv4_devconf *devconf_all; struct ipv4_devconf *devconf_dflt; struct fib_rules_ops *rules_ops; bool fib_has_custom_rules; struct fib_table *fib_local; struct fib_table *fib_main; struct fib_table *fib_default; int fib_num_tclassid_users; struct hlist_head *fib_table_hash; bool fib_offload_disabled; struct sock *fibnl; struct sock **icmp_sk; struct sock *mc_autojoin_sk; struct inet_peer_base *peers; struct sock **tcp_sk; struct netns_frags frags; struct xt_table *iptable_filter; struct xt_table *iptable_mangle; struct xt_table *iptable_raw; struct xt_table *arptable_filter; struct xt_table *iptable_security; struct xt_table *nat_table; int sysctl_icmp_echo_ignore_all; int sysctl_icmp_echo_ignore_broadcasts; int sysctl_icmp_ignore_bogus_error_responses; int sysctl_icmp_ratelimit; int sysctl_icmp_ratemask; int sysctl_icmp_errors_use_inbound_ifaddr; struct local_ports ip_local_ports; int sysctl_tcp_ecn; int sysctl_tcp_ecn_fallback; int sysctl_ip_default_ttl; int sysctl_ip_no_pmtu_disc; int sysctl_ip_fwd_use_pmtu; int sysctl_ip_nonlocal_bind; int sysctl_ip_dynaddr; int sysctl_ip_early_demux; int sysctl_fwmark_reflect; int sysctl_tcp_fwmark_accept; int sysctl_tcp_l3mdev_accept; int sysctl_tcp_mtu_probing; int sysctl_tcp_base_mss; int sysctl_tcp_probe_threshold; u32 sysctl_tcp_probe_interval; int sysctl_tcp_keepalive_time; int sysctl_tcp_keepalive_probes; int sysctl_tcp_keepalive_intvl; int sysctl_tcp_syn_retries; int sysctl_tcp_synack_retries; int sysctl_tcp_syncookies; int sysctl_tcp_reordering; int sysctl_tcp_retries1; int sysctl_tcp_retries2; int sysctl_tcp_orphan_retries; int sysctl_tcp_fin_timeout; unsigned int sysctl_tcp_notsent_lowat; int sysctl_igmp_max_memberships; int sysctl_igmp_max_msf; int sysctl_igmp_llm_reports; int sysctl_igmp_qrv; struct ping_group_range ping_group_range; atomic_t dev_addr_genid; unsigned long *sysctl_local_reserved_ports; struct list_head mr_tables; struct fib_rules_ops *mr_rules_ops; int sysctl_fib_multipath_use_neigh; atomic_t rt_genid; } ; 142 struct neighbour ; 142 struct dst_ops { unsigned short family; unsigned int gc_thresh; int (*gc)(struct dst_ops *); struct dst_entry * (*check)(struct dst_entry *, __u32 ); unsigned int (*default_advmss)(const struct dst_entry *); unsigned int (*mtu)(const struct dst_entry *); u32 * (*cow_metrics)(struct dst_entry *, unsigned long); void (*destroy)(struct dst_entry *); void (*ifdown)(struct dst_entry *, struct net_device *, int); struct dst_entry * (*negative_advice)(struct dst_entry *); void (*link_failure)(struct sk_buff *); void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32 ); void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *); int (*local_out)(struct net *, struct sock *, struct sk_buff *); struct 
neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *); struct kmem_cache *kmem_cachep; struct percpu_counter pcpuc_entries; } ; 73 struct netns_sysctl_ipv6 { struct ctl_table_header *hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *icmp_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *xfrm6_hdr; int bindv6only; int flush_delay; int ip6_rt_max_size; int ip6_rt_gc_min_interval; int ip6_rt_gc_timeout; int ip6_rt_gc_interval; int ip6_rt_gc_elasticity; int ip6_rt_mtu_expires; int ip6_rt_min_advmss; int flowlabel_consistency; int auto_flowlabels; int icmpv6_time; int anycast_src_echo_reply; int ip_nonlocal_bind; int fwmark_reflect; int idgen_retries; int idgen_delay; int flowlabel_state_ranges; } ; 40 struct ipv6_devconf ; 40 struct rt6_info ; 40 struct rt6_statistics ; 40 struct fib6_table ; 40 struct netns_ipv6 { struct netns_sysctl_ipv6 sysctl; struct ipv6_devconf *devconf_all; struct ipv6_devconf *devconf_dflt; struct inet_peer_base *peers; struct netns_frags frags; struct xt_table *ip6table_filter; struct xt_table *ip6table_mangle; struct xt_table *ip6table_raw; struct xt_table *ip6table_security; struct xt_table *ip6table_nat; struct rt6_info *ip6_null_entry; struct rt6_statistics *rt6_stats; struct timer_list ip6_fib_timer; struct hlist_head *fib_table_hash; struct fib6_table *fib6_main_tbl; struct list_head fib6_walkers; struct dst_ops ip6_dst_ops; rwlock_t fib6_walker_lock; spinlock_t fib6_gc_lock; unsigned int ip6_rt_gc_expire; unsigned long ip6_rt_last_gc; struct rt6_info *ip6_prohibit_entry; struct rt6_info *ip6_blk_hole_entry; struct fib6_table *fib6_local_tbl; struct fib_rules_ops *fib6_rules_ops; struct sock **icmp_sk; struct sock *ndisc_sk; struct sock *tcp_sk; struct sock *igmp_sk; struct sock *mc_autojoin_sk; struct list_head mr6_tables; struct fib_rules_ops *mr6_rules_ops; atomic_t dev_addr_genid; atomic_t fib6_sernum; } ; 89 struct netns_nf_frag { struct netns_sysctl_ipv6 sysctl; struct netns_frags frags; } ; 95 struct netns_sysctl_lowpan { struct ctl_table_header *frags_hdr; } ; 14 struct netns_ieee802154_lowpan { struct netns_sysctl_lowpan sysctl; struct netns_frags frags; } ; 20 struct sctp_mib ; 21 struct netns_sctp { struct sctp_mib *sctp_statistics; struct proc_dir_entry *proc_net_sctp; struct ctl_table_header *sysctl_header; struct sock *ctl_sock; struct list_head local_addr_list; struct list_head addr_waitq; struct timer_list addr_wq_timer; struct list_head auto_asconf_splist; spinlock_t addr_wq_lock; spinlock_t local_addr_lock; unsigned int rto_initial; unsigned int rto_min; unsigned int rto_max; int rto_alpha; int rto_beta; int max_burst; int cookie_preserve_enable; char *sctp_hmac_alg; unsigned int valid_cookie_life; unsigned int sack_timeout; unsigned int hb_interval; int max_retrans_association; int max_retrans_path; int max_retrans_init; int pf_retrans; int pf_enable; int sndbuf_policy; int rcvbuf_policy; int default_auto_asconf; int addip_enable; int addip_noauth; int prsctp_enable; int auth_enable; int scope_policy; int rwnd_upd_shift; unsigned long max_autoclose; } ; 141 struct netns_dccp { struct sock *v4_ctl_sk; struct sock *v6_ctl_sk; } ; 79 struct nf_logger ; 80 struct nf_queue_handler ; 81 struct netns_nf { struct proc_dir_entry *proc_netfilter; const struct nf_queue_handler *queue_handler; const struct nf_logger *nf_loggers[13U]; struct ctl_table_header *nf_log_dir_header; struct list_head hooks[13U][8U]; } ; 21 struct ebt_table ; 22 struct netns_xt { struct list_head tables[13U]; bool 
notrack_deprecated_warning; bool clusterip_deprecated_warning; struct ebt_table *broute_table; struct ebt_table *frame_filter; struct ebt_table *frame_nat; } ; 19 struct hlist_nulls_node ; 19 struct hlist_nulls_head { struct hlist_nulls_node *first; } ; 23 struct hlist_nulls_node { struct hlist_nulls_node *next; struct hlist_nulls_node **pprev; } ; 32 struct nf_proto_net { struct ctl_table_header *ctl_table_header; struct ctl_table *ctl_table; struct ctl_table_header *ctl_compat_header; struct ctl_table *ctl_compat_table; unsigned int users; } ; 25 struct nf_generic_net { struct nf_proto_net pn; unsigned int timeout; } ; 30 struct nf_tcp_net { struct nf_proto_net pn; unsigned int timeouts[14U]; unsigned int tcp_loose; unsigned int tcp_be_liberal; unsigned int tcp_max_retrans; } ; 44 struct nf_udp_net { struct nf_proto_net pn; unsigned int timeouts[2U]; } ; 49 struct nf_icmp_net { struct nf_proto_net pn; unsigned int timeout; } ; 54 struct nf_ip_net { struct nf_generic_net generic; struct nf_tcp_net tcp; struct nf_udp_net udp; struct nf_icmp_net icmp; struct nf_icmp_net icmpv6; struct ctl_table_header *ctl_table_header; struct ctl_table *ctl_table; } ; 65 struct ct_pcpu { spinlock_t lock; struct hlist_nulls_head unconfirmed; struct hlist_nulls_head dying; } ; 72 struct ip_conntrack_stat ; 72 struct nf_ct_event_notifier ; 72 struct nf_exp_event_notifier ; 72 struct netns_ct { atomic_t count; unsigned int expect_count; struct delayed_work ecache_dwork; bool ecache_dwork_pending; struct ctl_table_header *sysctl_header; struct ctl_table_header *acct_sysctl_header; struct ctl_table_header *tstamp_sysctl_header; struct ctl_table_header *event_sysctl_header; struct ctl_table_header *helper_sysctl_header; unsigned int sysctl_log_invalid; int sysctl_events; int sysctl_acct; int sysctl_auto_assign_helper; bool auto_assign_helper_warned; int sysctl_tstamp; int sysctl_checksum; struct ct_pcpu *pcpu_lists; struct ip_conntrack_stat *stat; struct nf_ct_event_notifier *nf_conntrack_event_cb; struct nf_exp_event_notifier *nf_expect_event_cb; struct nf_ip_net nf_ct_proto; unsigned int labels_used; u8 label_words; } ; 104 struct nft_af_info ; 105 struct netns_nftables { struct list_head af_info; struct list_head commit_list; struct nft_af_info *ipv4; struct nft_af_info *ipv6; struct nft_af_info *inet; struct nft_af_info *arp; struct nft_af_info *bridge; struct nft_af_info *netdev; unsigned int base_seq; u8 gencursor; } ; 509 enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 } ; 16 typedef enum irqreturn irqreturn_t; 486 struct tasklet_struct { struct tasklet_struct *next; unsigned long state; atomic_t count; void (*func)(unsigned long); unsigned long data; } ; 708 struct flow_cache_percpu { struct hlist_head *hash_table; int hash_count; u32 hash_rnd; int hash_rnd_recalc; struct tasklet_struct flush_tasklet; } ; 16 struct flow_cache { u32 hash_shift; struct flow_cache_percpu *percpu; struct notifier_block hotcpu_notifier; int low_watermark; int high_watermark; struct timer_list rnd_timer; } ; 25 struct xfrm_policy_hash { struct hlist_head *table; unsigned int hmask; u8 dbits4; u8 sbits4; u8 dbits6; u8 sbits6; } ; 21 struct xfrm_policy_hthresh { struct work_struct work; seqlock_t lock; u8 lbits4; u8 rbits4; u8 lbits6; u8 rbits6; } ; 30 struct netns_xfrm { struct list_head state_all; struct hlist_head *state_bydst; struct hlist_head *state_bysrc; struct hlist_head *state_byspi; unsigned int state_hmask; unsigned int state_num; struct work_struct state_hash_work; struct hlist_head state_gc_list; 
struct work_struct state_gc_work; struct list_head policy_all; struct hlist_head *policy_byidx; unsigned int policy_idx_hmask; struct hlist_head policy_inexact[3U]; struct xfrm_policy_hash policy_bydst[3U]; unsigned int policy_count[6U]; struct work_struct policy_hash_work; struct xfrm_policy_hthresh policy_hthresh; struct sock *nlsk; struct sock *nlsk_stash; u32 sysctl_aevent_etime; u32 sysctl_aevent_rseqth; int sysctl_larval_drop; u32 sysctl_acq_expires; struct ctl_table_header *sysctl_hdr; struct dst_ops xfrm4_dst_ops; struct dst_ops xfrm6_dst_ops; spinlock_t xfrm_state_lock; rwlock_t xfrm_policy_lock; struct mutex xfrm_cfg_mutex; struct flow_cache flow_cache_global; atomic_t flow_cache_genid; struct list_head flow_cache_gc_list; atomic_t flow_cache_gc_count; spinlock_t flow_cache_gc_lock; struct work_struct flow_cache_gc_work; struct work_struct flow_cache_flush_work; struct mutex flow_flush_sem; } ; 89 struct mpls_route ; 90 struct netns_mpls { size_t platform_labels; struct mpls_route **platform_label; struct ctl_table_header *ctl; } ; 16 struct proc_ns_operations ; 17 struct ns_common { atomic_long_t stashed; const struct proc_ns_operations *ops; unsigned int inum; } ; 11 struct net_generic ; 12 struct netns_ipvs ; 13 struct net { atomic_t passive; atomic_t count; spinlock_t rules_mod_lock; atomic64_t cookie_gen; struct list_head list; struct list_head cleanup_list; struct list_head exit_list; struct user_namespace *user_ns; spinlock_t nsid_lock; struct idr netns_ids; struct ns_common ns; struct proc_dir_entry *proc_net; struct proc_dir_entry *proc_net_stat; struct ctl_table_set sysctls; struct sock *rtnl; struct sock *genl_sock; struct list_head dev_base_head; struct hlist_head *dev_name_head; struct hlist_head *dev_index_head; unsigned int dev_base_seq; int ifindex; unsigned int dev_unreg_count; struct list_head rules_ops; struct net_device *loopback_dev; struct netns_core core; struct netns_mib mib; struct netns_packet packet; struct netns_unix unx; struct netns_ipv4 ipv4; struct netns_ipv6 ipv6; struct netns_ieee802154_lowpan ieee802154_lowpan; struct netns_sctp sctp; struct netns_dccp dccp; struct netns_nf nf; struct netns_xt xt; struct netns_ct ct; struct netns_nftables nft; struct netns_nf_frag nf_frag; struct sock *nfnl; struct sock *nfnl_stash; struct list_head nfnl_acct_list; struct list_head nfct_timeout_list; struct sk_buff_head wext_nlevents; struct net_generic *gen; struct netns_xfrm xfrm; struct netns_ipvs *ipvs; struct netns_mpls mpls; struct sock *diag_nlsk; atomic_t fnhe_genid; } ; 247 struct __anonstruct_possible_net_t_453 { struct net *net; } ; 247 typedef struct __anonstruct_possible_net_t_453 possible_net_t; 13 typedef unsigned long kernel_ulong_t; 14 struct pci_device_id { __u32 vendor; __u32 device; __u32 subvendor; __u32 subdevice; __u32 class; __u32 class_mask; kernel_ulong_t driver_data; } ; 186 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; __u32 cls; __u32 cls_msk; } ; 229 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ; 674 enum fwnode_type { FWNODE_INVALID = 0, FWNODE_OF = 1, FWNODE_ACPI = 2, FWNODE_ACPI_DATA = 3, FWNODE_PDATA = 4, FWNODE_IRQCHIP = 5 } ; 683 struct fwnode_handle { enum fwnode_type type; struct fwnode_handle *secondary; } ; 32 typedef u32 phandle; 34 struct property { char *name; int length; void *value; struct property *next; unsigned long _flags; unsigned int unique_id; struct bin_attribute attr; } ; 44 struct device_node { const char *name; const char *type; phandle 
phandle; const char *full_name; struct fwnode_handle fwnode; struct property *properties; struct property *deadprops; struct device_node *parent; struct device_node *child; struct device_node *sibling; struct kobject kobj; unsigned long _flags; void *data; } ; 296 struct mii_bus ; 303 struct mdio_device { struct device dev; const struct dev_pm_ops *pm_ops; struct mii_bus *bus; int (*bus_match)(struct device *, struct device_driver *); void (*device_free)(struct mdio_device *); void (*device_remove)(struct mdio_device *); int addr; int flags; } ; 41 struct mdio_driver_common { struct device_driver driver; int flags; } ; 244 struct phy_device ; 245 enum ldv_30630 { PHY_INTERFACE_MODE_NA = 0, PHY_INTERFACE_MODE_MII = 1, PHY_INTERFACE_MODE_GMII = 2, PHY_INTERFACE_MODE_SGMII = 3, PHY_INTERFACE_MODE_TBI = 4, PHY_INTERFACE_MODE_REVMII = 5, PHY_INTERFACE_MODE_RMII = 6, PHY_INTERFACE_MODE_RGMII = 7, PHY_INTERFACE_MODE_RGMII_ID = 8, PHY_INTERFACE_MODE_RGMII_RXID = 9, PHY_INTERFACE_MODE_RGMII_TXID = 10, PHY_INTERFACE_MODE_RTBI = 11, PHY_INTERFACE_MODE_SMII = 12, PHY_INTERFACE_MODE_XGMII = 13, PHY_INTERFACE_MODE_MOCA = 14, PHY_INTERFACE_MODE_QSGMII = 15, PHY_INTERFACE_MODE_MAX = 16 } ; 84 typedef enum ldv_30630 phy_interface_t; 130 enum ldv_30681 { MDIOBUS_ALLOCATED = 1, MDIOBUS_REGISTERED = 2, MDIOBUS_UNREGISTERED = 3, MDIOBUS_RELEASED = 4 } ; 137 struct mii_bus { struct module *owner; const char *name; char id[17U]; void *priv; int (*read)(struct mii_bus *, int, int); int (*write)(struct mii_bus *, int, int, u16 ); int (*reset)(struct mii_bus *); struct mutex mdio_lock; struct device *parent; enum ldv_30681 state; struct device dev; struct mdio_device *mdio_map[32U]; u32 phy_mask; u32 phy_ignore_ta_mask; int irq[32U]; } ; 218 enum phy_state { PHY_DOWN = 0, PHY_STARTING = 1, PHY_READY = 2, PHY_PENDING = 3, PHY_UP = 4, PHY_AN = 5, PHY_RUNNING = 6, PHY_NOLINK = 7, PHY_FORCING = 8, PHY_CHANGELINK = 9, PHY_HALTED = 10, PHY_RESUMING = 11 } ; 233 struct phy_c45_device_ids { u32 devices_in_package; u32 device_ids[8U]; } ; 326 struct phy_driver ; 326 struct phy_device { struct mdio_device mdio; struct phy_driver *drv; u32 phy_id; struct phy_c45_device_ids c45_ids; bool is_c45; bool is_internal; bool is_pseudo_fixed_link; bool has_fixups; bool suspended; enum phy_state state; u32 dev_flags; phy_interface_t interface; int speed; int duplex; int pause; int asym_pause; int link; u32 interrupts; u32 supported; u32 advertising; u32 lp_advertising; int autoneg; int link_timeout; int irq; void *priv; struct work_struct phy_queue; struct delayed_work state_queue; atomic_t irq_disable; struct mutex lock; struct net_device *attached_dev; u8 mdix; void (*adjust_link)(struct net_device *); } ; 428 struct phy_driver { struct mdio_driver_common mdiodrv; u32 phy_id; char *name; unsigned int phy_id_mask; u32 features; u32 flags; const void *driver_data; int (*soft_reset)(struct phy_device *); int (*config_init)(struct phy_device *); int (*probe)(struct phy_device *); int (*suspend)(struct phy_device *); int (*resume)(struct phy_device *); int (*config_aneg)(struct phy_device *); int (*aneg_done)(struct phy_device *); int (*read_status)(struct phy_device *); int (*ack_interrupt)(struct phy_device *); int (*config_intr)(struct phy_device *); int (*did_interrupt)(struct phy_device *); void (*remove)(struct phy_device *); int (*match_phy_device)(struct phy_device *); int (*ts_info)(struct phy_device *, struct ethtool_ts_info *); int (*hwtstamp)(struct phy_device *, struct ifreq *); bool (*rxtstamp)(struct phy_device *, struct 
sk_buff *, int); void (*txtstamp)(struct phy_device *, struct sk_buff *, int); int (*set_wol)(struct phy_device *, struct ethtool_wolinfo *); void (*get_wol)(struct phy_device *, struct ethtool_wolinfo *); void (*link_change_notify)(struct phy_device *); int (*read_mmd_indirect)(struct phy_device *, int, int, int); void (*write_mmd_indirect)(struct phy_device *, int, int, int, u32 ); int (*module_info)(struct phy_device *, struct ethtool_modinfo *); int (*module_eeprom)(struct phy_device *, struct ethtool_eeprom *, u8 *); int (*get_sset_count)(struct phy_device *); void (*get_strings)(struct phy_device *, u8 *); void (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); } ; 841 struct fixed_phy_status { int link; int speed; int duplex; int pause; int asym_pause; } ; 27 enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = 0, DSA_TAG_PROTO_DSA = 1, DSA_TAG_PROTO_TRAILER = 2, DSA_TAG_PROTO_EDSA = 3, DSA_TAG_PROTO_BRCM = 4, DSA_TAG_LAST = 5 } ; 36 struct dsa_chip_data { struct device *host_dev; int sw_addr; int eeprom_len; struct device_node *of_node; char *port_names[12U]; struct device_node *port_dn[12U]; s8 rtable[4U]; } ; 70 struct dsa_platform_data { struct device *netdev; struct net_device *of_netdev; int nr_chips; struct dsa_chip_data *chip; } ; 86 struct packet_type ; 87 struct dsa_switch ; 87 struct dsa_device_ops ; 87 struct dsa_switch_tree { struct list_head list; u32 tree; struct kref refcount; bool applied; struct dsa_platform_data *pd; struct net_device *master_netdev; int (*rcv)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); struct ethtool_ops master_ethtool_ops; const struct ethtool_ops *master_orig_ethtool_ops; s8 cpu_switch; s8 cpu_port; struct dsa_switch *ds[4U]; const struct dsa_device_ops *tag_ops; } ; 140 struct dsa_port { struct net_device *netdev; struct device_node *dn; unsigned int ageing_time; } ; 146 struct dsa_switch_driver ; 146 struct dsa_switch { struct device *dev; struct dsa_switch_tree *dst; int index; void *priv; struct dsa_chip_data *cd; struct dsa_switch_driver *drv; s8 rtable[4U]; char hwmon_name[24U]; struct device *hwmon_dev; struct net_device *master_netdev; u32 dsa_port_mask; u32 cpu_port_mask; u32 enabled_port_mask; u32 phys_mii_mask; struct dsa_port ports[12U]; struct mii_bus *slave_mii_bus; } ; 233 struct switchdev_trans ; 234 struct switchdev_obj ; 235 struct switchdev_obj_port_fdb ; 236 struct switchdev_obj_port_vlan ; 237 struct dsa_switch_driver { struct list_head list; enum dsa_tag_protocol tag_protocol; const char * (*probe)(struct device *, struct device *, int, void **); int (*setup)(struct dsa_switch *); int (*set_addr)(struct dsa_switch *, u8 *); u32 (*get_phy_flags)(struct dsa_switch *, int); int (*phy_read)(struct dsa_switch *, int, int); int (*phy_write)(struct dsa_switch *, int, int, u16 ); void (*adjust_link)(struct dsa_switch *, int, struct phy_device *); void (*fixed_link_update)(struct dsa_switch *, int, struct fixed_phy_status *); void (*get_strings)(struct dsa_switch *, int, uint8_t *); void (*get_ethtool_stats)(struct dsa_switch *, int, uint64_t *); int (*get_sset_count)(struct dsa_switch *); void (*get_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *); int (*set_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *); int (*suspend)(struct dsa_switch *); int (*resume)(struct dsa_switch *); int (*port_enable)(struct dsa_switch *, int, struct phy_device *); void (*port_disable)(struct dsa_switch *, int, struct phy_device *); int (*set_eee)(struct dsa_switch *, int, struct 
phy_device *, struct ethtool_eee *); int (*get_eee)(struct dsa_switch *, int, struct ethtool_eee *); int (*get_temp)(struct dsa_switch *, int *); int (*get_temp_limit)(struct dsa_switch *, int *); int (*set_temp_limit)(struct dsa_switch *, int); int (*get_temp_alarm)(struct dsa_switch *, bool *); int (*get_eeprom_len)(struct dsa_switch *); int (*get_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *); int (*get_regs_len)(struct dsa_switch *, int); void (*get_regs)(struct dsa_switch *, int, struct ethtool_regs *, void *); int (*set_ageing_time)(struct dsa_switch *, unsigned int); int (*port_bridge_join)(struct dsa_switch *, int, struct net_device *); void (*port_bridge_leave)(struct dsa_switch *, int); void (*port_stp_state_set)(struct dsa_switch *, int, u8 ); int (*port_vlan_filtering)(struct dsa_switch *, int, bool ); int (*port_vlan_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *); void (*port_vlan_add)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *); int (*port_vlan_del)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *); int (*port_vlan_dump)(struct dsa_switch *, int, struct switchdev_obj_port_vlan *, int (*)(struct switchdev_obj *)); int (*port_fdb_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *); void (*port_fdb_add)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *); int (*port_fdb_del)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *); int (*port_fdb_dump)(struct dsa_switch *, int, struct switchdev_obj_port_fdb *, int (*)(struct switchdev_obj *)); } ; 389 struct ieee_ets { __u8 willing; __u8 ets_cap; __u8 cbs; __u8 tc_tx_bw[8U]; __u8 tc_rx_bw[8U]; __u8 tc_tsa[8U]; __u8 prio_tc[8U]; __u8 tc_reco_bw[8U]; __u8 tc_reco_tsa[8U]; __u8 reco_prio_tc[8U]; } ; 69 struct ieee_maxrate { __u64 tc_maxrate[8U]; } ; 87 struct ieee_qcn { __u8 rpg_enable[8U]; __u32 rppp_max_rps[8U]; __u32 rpg_time_reset[8U]; __u32 rpg_byte_reset[8U]; __u32 rpg_threshold[8U]; __u32 rpg_max_rate[8U]; __u32 rpg_ai_rate[8U]; __u32 rpg_hai_rate[8U]; __u32 rpg_gd[8U]; __u32 rpg_min_dec_fac[8U]; __u32 rpg_min_rate[8U]; __u32 cndd_state_machine[8U]; } ; 132 struct ieee_qcn_stats { __u64 rppp_rp_centiseconds[8U]; __u32 rppp_created_rps[8U]; } ; 144 struct ieee_pfc { __u8 pfc_cap; __u8 pfc_en; __u8 mbc; __u16 delay; __u64 requests[8U]; __u64 indications[8U]; } ; 164 struct cee_pg { __u8 willing; __u8 error; __u8 pg_en; __u8 tcs_supported; __u8 pg_bw[8U]; __u8 prio_pg[8U]; } ; 187 struct cee_pfc { __u8 willing; __u8 error; __u8 pfc_en; __u8 tcs_supported; } ; 202 struct dcb_app { __u8 selector; __u8 priority; __u16 protocol; } ; 236 struct dcb_peer_app_info { __u8 willing; __u8 error; } ; 40 struct dcbnl_rtnl_ops { int (*ieee_getets)(struct net_device *, struct ieee_ets *); int (*ieee_setets)(struct net_device *, struct ieee_ets *); int (*ieee_getmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_setmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_getqcn)(struct net_device *, struct ieee_qcn *); int (*ieee_setqcn)(struct net_device *, struct ieee_qcn *); int (*ieee_getqcnstats)(struct net_device *, struct ieee_qcn_stats *); int (*ieee_getpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_setpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_getapp)(struct net_device *, struct dcb_app *); int 
(*ieee_setapp)(struct net_device *, struct dcb_app *); int (*ieee_delapp)(struct net_device *, struct dcb_app *); int (*ieee_peer_getets)(struct net_device *, struct ieee_ets *); int (*ieee_peer_getpfc)(struct net_device *, struct ieee_pfc *); u8 (*getstate)(struct net_device *); u8 (*setstate)(struct net_device *, u8 ); void (*getpermhwaddr)(struct net_device *, u8 *); void (*setpgtccfgtx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgtx)(struct net_device *, int, u8 ); void (*setpgtccfgrx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgrx)(struct net_device *, int, u8 ); void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgtx)(struct net_device *, int, u8 *); void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgrx)(struct net_device *, int, u8 *); void (*setpfccfg)(struct net_device *, int, u8 ); void (*getpfccfg)(struct net_device *, int, u8 *); u8 (*setall)(struct net_device *); u8 (*getcap)(struct net_device *, int, u8 *); int (*getnumtcs)(struct net_device *, int, u8 *); int (*setnumtcs)(struct net_device *, int, u8 ); u8 (*getpfcstate)(struct net_device *); void (*setpfcstate)(struct net_device *, u8 ); void (*getbcncfg)(struct net_device *, int, u32 *); void (*setbcncfg)(struct net_device *, int, u32 ); void (*getbcnrp)(struct net_device *, int, u8 *); void (*setbcnrp)(struct net_device *, int, u8 ); int (*setapp)(struct net_device *, u8 , u16 , u8 ); int (*getapp)(struct net_device *, u8 , u16 ); u8 (*getfeatcfg)(struct net_device *, int, u8 *); u8 (*setfeatcfg)(struct net_device *, int, u8 ); u8 (*getdcbx)(struct net_device *); u8 (*setdcbx)(struct net_device *, u8 ); int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, u16 *); int (*peer_getapptable)(struct net_device *, struct dcb_app *); int (*cee_peer_getpg)(struct net_device *, struct cee_pg *); int (*cee_peer_getpfc)(struct net_device *, struct cee_pfc *); } ; 105 struct taskstats { __u16 version; __u32 ac_exitcode; __u8 ac_flag; __u8 ac_nice; __u64 cpu_count; __u64 cpu_delay_total; __u64 blkio_count; __u64 blkio_delay_total; __u64 swapin_count; __u64 swapin_delay_total; __u64 cpu_run_real_total; __u64 cpu_run_virtual_total; char ac_comm[32U]; __u8 ac_sched; __u8 ac_pad[3U]; __u32 ac_uid; __u32 ac_gid; __u32 ac_pid; __u32 ac_ppid; __u32 ac_btime; __u64 ac_etime; __u64 ac_utime; __u64 ac_stime; __u64 ac_minflt; __u64 ac_majflt; __u64 coremem; __u64 virtmem; __u64 hiwater_rss; __u64 hiwater_vm; __u64 read_char; __u64 write_char; __u64 read_syscalls; __u64 write_syscalls; __u64 read_bytes; __u64 write_bytes; __u64 cancelled_write_bytes; __u64 nvcsw; __u64 nivcsw; __u64 ac_utimescaled; __u64 ac_stimescaled; __u64 cpu_scaled_run_real_total; __u64 freepages_count; __u64 freepages_delay_total; } ; 58 struct mnt_namespace ; 59 struct ipc_namespace ; 60 struct cgroup_namespace ; 61 struct nsproxy { atomic_t count; struct uts_namespace *uts_ns; struct ipc_namespace *ipc_ns; struct mnt_namespace *mnt_ns; struct pid_namespace *pid_ns_for_children; struct net *net_ns; struct cgroup_namespace *cgroup_ns; } ; 86 struct uid_gid_extent { u32 first; u32 lower_first; u32 count; } ; 19 struct uid_gid_map { u32 nr_extents; struct uid_gid_extent extent[5U]; } ; 20 struct user_namespace { struct uid_gid_map uid_map; struct uid_gid_map gid_map; struct uid_gid_map projid_map; atomic_t count; struct user_namespace *parent; int level; kuid_t owner; kgid_t group; struct ns_common ns; unsigned long flags; struct key 
*persistent_keyring_register; struct rw_semaphore persistent_keyring_register_sem; } ; 609 struct cgroup_namespace { atomic_t count; struct ns_common ns; struct user_namespace *user_ns; struct css_set *root_cset; } ; 663 struct netprio_map { struct callback_head rcu; u32 priomap_len; u32 priomap[]; } ; 99 struct xfrm_policy ; 100 struct xfrm_state ; 116 struct request_sock ; 41 struct nlmsghdr { __u32 nlmsg_len; __u16 nlmsg_type; __u16 nlmsg_flags; __u32 nlmsg_seq; __u32 nlmsg_pid; } ; 143 struct nlattr { __u16 nla_len; __u16 nla_type; } ; 105 struct netlink_callback { struct sk_buff *skb; const struct nlmsghdr *nlh; int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); void *data; struct module *module; u16 family; u16 min_dump_alloc; unsigned int prev_seq; unsigned int seq; long args[6U]; } ; 183 struct ndmsg { __u8 ndm_family; __u8 ndm_pad1; __u16 ndm_pad2; __s32 ndm_ifindex; __u16 ndm_state; __u8 ndm_flags; __u8 ndm_type; } ; 41 struct rtnl_link_stats64 { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 rx_errors; __u64 tx_errors; __u64 rx_dropped; __u64 tx_dropped; __u64 multicast; __u64 collisions; __u64 rx_length_errors; __u64 rx_over_errors; __u64 rx_crc_errors; __u64 rx_frame_errors; __u64 rx_fifo_errors; __u64 rx_missed_errors; __u64 tx_aborted_errors; __u64 tx_carrier_errors; __u64 tx_fifo_errors; __u64 tx_heartbeat_errors; __u64 tx_window_errors; __u64 rx_compressed; __u64 tx_compressed; __u64 rx_nohandler; } ; 840 struct ifla_vf_stats { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 broadcast; __u64 multicast; } ; 16 struct ifla_vf_info { __u32 vf; __u8 mac[32U]; __u32 vlan; __u32 qos; __u32 spoofchk; __u32 linkstate; __u32 min_tx_rate; __u32 max_tx_rate; __u32 rss_query_en; __u32 trusted; } ; 118 struct tc_stats { __u64 bytes; __u32 packets; __u32 drops; __u32 overlimits; __u32 bps; __u32 pps; __u32 qlen; __u32 backlog; } ; 96 struct tc_sizespec { unsigned char cell_log; unsigned char size_log; short cell_align; int overhead; unsigned int linklayer; unsigned int mpu; unsigned int mtu; unsigned int tsize; } ; 486 struct netpoll_info ; 487 struct wireless_dev ; 488 struct wpan_dev ; 489 struct mpls_dev ; 490 struct udp_tunnel_info ; 491 struct bpf_prog ; 69 enum netdev_tx { __NETDEV_TX_MIN = -2147483648, NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 16 } ; 112 typedef enum netdev_tx netdev_tx_t; 131 struct net_device_stats { unsigned long rx_packets; unsigned long tx_packets; unsigned long rx_bytes; unsigned long tx_bytes; unsigned long rx_errors; unsigned long tx_errors; unsigned long rx_dropped; unsigned long tx_dropped; unsigned long multicast; unsigned long collisions; unsigned long rx_length_errors; unsigned long rx_over_errors; unsigned long rx_crc_errors; unsigned long rx_frame_errors; unsigned long rx_fifo_errors; unsigned long rx_missed_errors; unsigned long tx_aborted_errors; unsigned long tx_carrier_errors; unsigned long tx_fifo_errors; unsigned long tx_heartbeat_errors; unsigned long tx_window_errors; unsigned long rx_compressed; unsigned long tx_compressed; } ; 194 struct neigh_parms ; 195 struct netdev_hw_addr { struct list_head list; unsigned char addr[32U]; unsigned char type; bool global_use; int sync_cnt; int refcount; int synced; struct callback_head callback_head; } ; 215 struct netdev_hw_addr_list { struct list_head list; int count; } ; 220 struct hh_cache { u16 hh_len; u16 __pad; seqlock_t hh_lock; unsigned long hh_data[16U]; } ; 
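A minimal sketch (editorial illustration only, not part of the traced source): how a driver-side transmit hook would use the netdev_tx_t return type and the per-device struct net_device_stats counters declared above, and how it would be hooked up through struct net_device_ops (declared below). The demo_* names are hypothetical.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical transmit hook: accounts the frame, drops it, reports success. */
static netdev_tx_t demo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev->stats.tx_packets++;          /* struct net_device_stats, see above */
	dev->stats.tx_bytes += skb->len;

	/* A real driver would hand skb to hardware; here it is simply freed. */
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;              /* enum netdev_tx, see above */
}

/* Wired into the device through struct net_device_ops (declared below). */
static const struct net_device_ops demo_netdev_ops = {
	.ndo_start_xmit = demo_start_xmit,
};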
249 struct header_ops { int (*create)(struct sk_buff *, struct net_device *, unsigned short, const void *, const void *, unsigned int); int (*parse)(const struct sk_buff *, unsigned char *); int (*cache)(const struct neighbour *, struct hh_cache *, __be16 ); void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *); bool (*validate)(const char *, unsigned int); } ; 300 struct napi_struct { struct list_head poll_list; unsigned long state; int weight; unsigned int gro_count; int (*poll)(struct napi_struct *, int); spinlock_t poll_lock; int poll_owner; struct net_device *dev; struct sk_buff *gro_list; struct sk_buff *skb; struct hrtimer timer; struct list_head dev_list; struct hlist_node napi_hash_node; unsigned int napi_id; } ; 337 enum gro_result { GRO_MERGED = 0, GRO_MERGED_FREE = 1, GRO_HELD = 2, GRO_NORMAL = 3, GRO_DROP = 4 } ; 345 typedef enum gro_result gro_result_t; 346 enum rx_handler_result { RX_HANDLER_CONSUMED = 0, RX_HANDLER_ANOTHER = 1, RX_HANDLER_EXACT = 2, RX_HANDLER_PASS = 3 } ; 394 typedef enum rx_handler_result rx_handler_result_t; 395 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **); 540 struct Qdisc ; 540 struct netdev_queue { struct net_device *dev; struct Qdisc *qdisc; struct Qdisc *qdisc_sleeping; struct kobject kobj; int numa_node; unsigned long tx_maxrate; unsigned long trans_timeout; spinlock_t _xmit_lock; int xmit_lock_owner; unsigned long trans_start; unsigned long state; struct dql dql; } ; 611 struct rps_map { unsigned int len; struct callback_head rcu; u16 cpus[0U]; } ; 623 struct rps_dev_flow { u16 cpu; u16 filter; unsigned int last_qtail; } ; 635 struct rps_dev_flow_table { unsigned int mask; struct callback_head rcu; struct rps_dev_flow flows[0U]; } ; 687 struct netdev_rx_queue { struct rps_map *rps_map; struct rps_dev_flow_table *rps_flow_table; struct kobject kobj; struct net_device *dev; } ; 710 struct xps_map { unsigned int len; unsigned int alloc_len; struct callback_head rcu; u16 queues[0U]; } ; 723 struct xps_dev_maps { struct callback_head rcu; struct xps_map *cpu_map[0U]; } ; 734 struct netdev_tc_txq { u16 count; u16 offset; } ; 745 struct netdev_fcoe_hbainfo { char manufacturer[64U]; char serial_number[64U]; char hardware_version[64U]; char driver_version[64U]; char optionrom_version[64U]; char firmware_version[64U]; char model[256U]; char model_description[256U]; } ; 761 struct netdev_phys_item_id { unsigned char id[32U]; unsigned char id_len; } ; 788 struct tc_cls_u32_offload ; 789 struct tc_cls_flower_offload ; 789 struct tc_cls_matchall_offload ; 789 union __anonunion____missing_field_name_469 { u8 tc; struct tc_cls_u32_offload *cls_u32; struct tc_cls_flower_offload *cls_flower; struct tc_cls_matchall_offload *cls_mall; } ; 789 struct tc_to_netdev { unsigned int type; union __anonunion____missing_field_name_469 __annonCompField106; } ; 804 enum xdp_netdev_command { XDP_SETUP_PROG = 0, XDP_QUERY_PROG = 1 } ; 809 union __anonunion____missing_field_name_470 { struct bpf_prog *prog; bool prog_attached; } ; 809 struct netdev_xdp { enum xdp_netdev_command command; union __anonunion____missing_field_name_470 __annonCompField107; } ; 832 struct net_device_ops { int (*ndo_init)(struct net_device *); void (*ndo_uninit)(struct net_device *); int (*ndo_open)(struct net_device *); int (*ndo_stop)(struct net_device *); netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *); netdev_features_t (*ndo_features_check)(struct sk_buff *, struct net_device *, netdev_features_t ); u16 
(*ndo_select_queue)(struct net_device *, struct sk_buff *, void *, u16 (*)(struct net_device *, struct sk_buff *)); void (*ndo_change_rx_flags)(struct net_device *, int); void (*ndo_set_rx_mode)(struct net_device *); int (*ndo_set_mac_address)(struct net_device *, void *); int (*ndo_validate_addr)(struct net_device *); int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int); int (*ndo_set_config)(struct net_device *, struct ifmap *); int (*ndo_change_mtu)(struct net_device *, int); int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *); void (*ndo_tx_timeout)(struct net_device *); struct rtnl_link_stats64 * (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *); struct net_device_stats * (*ndo_get_stats)(struct net_device *); int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16 , u16 ); int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16 , u16 ); void (*ndo_poll_controller)(struct net_device *); int (*ndo_netpoll_setup)(struct net_device *, struct netpoll_info *); void (*ndo_netpoll_cleanup)(struct net_device *); int (*ndo_busy_poll)(struct napi_struct *); int (*ndo_set_vf_mac)(struct net_device *, int, u8 *); int (*ndo_set_vf_vlan)(struct net_device *, int, u16 , u8 ); int (*ndo_set_vf_rate)(struct net_device *, int, int, int); int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool ); int (*ndo_set_vf_trust)(struct net_device *, int, bool ); int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *); int (*ndo_set_vf_link_state)(struct net_device *, int, int); int (*ndo_get_vf_stats)(struct net_device *, int, struct ifla_vf_stats *); int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **); int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *); int (*ndo_set_vf_guid)(struct net_device *, int, u64 , int); int (*ndo_set_vf_rss_query_en)(struct net_device *, int, bool ); int (*ndo_setup_tc)(struct net_device *, u32 , __be16 , struct tc_to_netdev *); int (*ndo_fcoe_enable)(struct net_device *); int (*ndo_fcoe_disable)(struct net_device *); int (*ndo_fcoe_ddp_setup)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_ddp_done)(struct net_device *, u16 ); int (*ndo_fcoe_ddp_target)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_get_hbainfo)(struct net_device *, struct netdev_fcoe_hbainfo *); int (*ndo_fcoe_get_wwn)(struct net_device *, u64 *, int); int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16 , u32 ); int (*ndo_add_slave)(struct net_device *, struct net_device *); int (*ndo_del_slave)(struct net_device *, struct net_device *); netdev_features_t (*ndo_fix_features)(struct net_device *, netdev_features_t ); int (*ndo_set_features)(struct net_device *, netdev_features_t ); int (*ndo_neigh_construct)(struct net_device *, struct neighbour *); void (*ndo_neigh_destroy)(struct net_device *, struct neighbour *); int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 , u16 ); int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 ); int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, struct net_device *, int); int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *, u16 ); int (*ndo_bridge_getlink)(struct sk_buff *, u32 , u32 , struct net_device *, u32 , int); int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *, u16 ); int (*ndo_change_carrier)(struct net_device *, bool ); int 
(*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_item_id *); int (*ndo_get_phys_port_name)(struct net_device *, char *, size_t ); void (*ndo_udp_tunnel_add)(struct net_device *, struct udp_tunnel_info *); void (*ndo_udp_tunnel_del)(struct net_device *, struct udp_tunnel_info *); void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *); void (*ndo_dfwd_del_station)(struct net_device *, void *); netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *, struct net_device *, void *); int (*ndo_get_lock_subclass)(struct net_device *); int (*ndo_set_tx_maxrate)(struct net_device *, int, u32 ); int (*ndo_get_iflink)(const struct net_device *); int (*ndo_change_proto_down)(struct net_device *, bool ); int (*ndo_fill_metadata_dst)(struct net_device *, struct sk_buff *); void (*ndo_set_rx_headroom)(struct net_device *, int); int (*ndo_xdp)(struct net_device *, struct netdev_xdp *); } ; 1354 struct __anonstruct_adj_list_471 { struct list_head upper; struct list_head lower; } ; 1354 struct __anonstruct_all_adj_list_472 { struct list_head upper; struct list_head lower; } ; 1354 struct iw_handler_def ; 1354 struct iw_public_data ; 1354 struct switchdev_ops ; 1354 struct l3mdev_ops ; 1354 struct ndisc_ops ; 1354 struct vlan_info ; 1354 struct tipc_bearer ; 1354 struct in_device ; 1354 struct dn_dev ; 1354 struct inet6_dev ; 1354 struct tcf_proto ; 1354 struct cpu_rmap ; 1354 struct pcpu_lstats ; 1354 struct pcpu_sw_netstats ; 1354 struct pcpu_dstats ; 1354 struct pcpu_vstats ; 1354 union __anonunion____missing_field_name_473 { void *ml_priv; struct pcpu_lstats *lstats; struct pcpu_sw_netstats *tstats; struct pcpu_dstats *dstats; struct pcpu_vstats *vstats; } ; 1354 struct garp_port ; 1354 struct mrp_port ; 1354 struct rtnl_link_ops ; 1354 struct net_device { char name[16U]; struct hlist_node name_hlist; char *ifalias; unsigned long mem_end; unsigned long mem_start; unsigned long base_addr; int irq; atomic_t carrier_changes; unsigned long state; struct list_head dev_list; struct list_head napi_list; struct list_head unreg_list; struct list_head close_list; struct list_head ptype_all; struct list_head ptype_specific; struct __anonstruct_adj_list_471 adj_list; struct __anonstruct_all_adj_list_472 all_adj_list; netdev_features_t features; netdev_features_t hw_features; netdev_features_t wanted_features; netdev_features_t vlan_features; netdev_features_t hw_enc_features; netdev_features_t mpls_features; netdev_features_t gso_partial_features; int ifindex; int group; struct net_device_stats stats; atomic_long_t rx_dropped; atomic_long_t tx_dropped; atomic_long_t rx_nohandler; const struct iw_handler_def *wireless_handlers; struct iw_public_data *wireless_data; const struct net_device_ops *netdev_ops; const struct ethtool_ops *ethtool_ops; const struct switchdev_ops *switchdev_ops; const struct l3mdev_ops *l3mdev_ops; const struct ndisc_ops *ndisc_ops; const struct header_ops *header_ops; unsigned int flags; unsigned int priv_flags; unsigned short gflags; unsigned short padded; unsigned char operstate; unsigned char link_mode; unsigned char if_port; unsigned char dma; unsigned int mtu; unsigned short type; unsigned short hard_header_len; unsigned short needed_headroom; unsigned short needed_tailroom; unsigned char perm_addr[32U]; unsigned char addr_assign_type; unsigned char addr_len; unsigned short neigh_priv_len; unsigned short dev_id; unsigned short dev_port; spinlock_t addr_list_lock; unsigned char name_assign_type; bool uc_promisc; struct netdev_hw_addr_list uc; struct 
netdev_hw_addr_list mc; struct netdev_hw_addr_list dev_addrs; struct kset *queues_kset; unsigned int promiscuity; unsigned int allmulti; struct vlan_info *vlan_info; struct dsa_switch_tree *dsa_ptr; struct tipc_bearer *tipc_ptr; void *atalk_ptr; struct in_device *ip_ptr; struct dn_dev *dn_ptr; struct inet6_dev *ip6_ptr; void *ax25_ptr; struct wireless_dev *ieee80211_ptr; struct wpan_dev *ieee802154_ptr; struct mpls_dev *mpls_ptr; unsigned long last_rx; unsigned char *dev_addr; struct netdev_rx_queue *_rx; unsigned int num_rx_queues; unsigned int real_num_rx_queues; unsigned long gro_flush_timeout; rx_handler_func_t *rx_handler; void *rx_handler_data; struct tcf_proto *ingress_cl_list; struct netdev_queue *ingress_queue; struct list_head nf_hooks_ingress; unsigned char broadcast[32U]; struct cpu_rmap *rx_cpu_rmap; struct hlist_node index_hlist; struct netdev_queue *_tx; unsigned int num_tx_queues; unsigned int real_num_tx_queues; struct Qdisc *qdisc; unsigned long tx_queue_len; spinlock_t tx_global_lock; int watchdog_timeo; struct xps_dev_maps *xps_maps; struct tcf_proto *egress_cl_list; u32 offload_fwd_mark; struct timer_list watchdog_timer; int *pcpu_refcnt; struct list_head todo_list; struct list_head link_watch_list; unsigned char reg_state; bool dismantle; unsigned short rtnl_link_state; void (*destructor)(struct net_device *); struct netpoll_info *npinfo; possible_net_t nd_net; union __anonunion____missing_field_name_473 __annonCompField108; struct garp_port *garp_port; struct mrp_port *mrp_port; struct device dev; const struct attribute_group *sysfs_groups[4U]; const struct attribute_group *sysfs_rx_queue_group; const struct rtnl_link_ops *rtnl_link_ops; unsigned int gso_max_size; u16 gso_max_segs; const struct dcbnl_rtnl_ops *dcbnl_ops; u8 num_tc; struct netdev_tc_txq tc_to_txq[16U]; u8 prio_tc_map[16U]; unsigned int fcoe_ddp_xid; struct netprio_map *priomap; struct phy_device *phydev; struct lock_class_key *qdisc_tx_busylock; struct lock_class_key *qdisc_running_key; bool proto_down; } ; 2165 struct packet_type { __be16 type; struct net_device *dev; int (*func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); bool (*id_match)(struct packet_type *, struct sock *); void *af_packet_priv; struct list_head list; } ; 2195 struct pcpu_sw_netstats { u64 rx_packets; u64 rx_bytes; u64 tx_packets; u64 tx_bytes; struct u64_stats_sync syncp; } ; 3206 enum skb_free_reason { SKB_REASON_CONSUMED = 0, SKB_REASON_DROPPED = 1 } ; 103 struct page_counter { atomic_long_t count; unsigned long limit; struct page_counter *parent; unsigned long watermark; unsigned long failcnt; } ; 33 struct eventfd_ctx ; 41 struct vmpressure { unsigned long scanned; unsigned long reclaimed; unsigned long tree_scanned; unsigned long tree_reclaimed; struct spinlock sr_lock; struct list_head events; struct mutex events_lock; struct work_struct work; } ; 44 struct fprop_global { struct percpu_counter events; unsigned int period; seqcount_t sequence; } ; 72 struct fprop_local_percpu { struct percpu_counter events; unsigned int period; raw_spinlock_t lock; } ; 32 typedef int congested_fn(void *, int); 41 struct bdi_writeback_congested { unsigned long state; atomic_t refcnt; struct backing_dev_info *bdi; int blkcg_id; struct rb_node rb_node; } ; 60 union __anonunion____missing_field_name_478 { struct work_struct release_work; struct callback_head rcu; } ; 60 struct bdi_writeback { struct backing_dev_info *bdi; unsigned long state; unsigned long last_old_flush; struct list_head b_dirty; struct 
list_head b_io; struct list_head b_more_io; struct list_head b_dirty_time; spinlock_t list_lock; struct percpu_counter stat[4U]; struct bdi_writeback_congested *congested; unsigned long bw_time_stamp; unsigned long dirtied_stamp; unsigned long written_stamp; unsigned long write_bandwidth; unsigned long avg_write_bandwidth; unsigned long dirty_ratelimit; unsigned long balanced_dirty_ratelimit; struct fprop_local_percpu completions; int dirty_exceeded; spinlock_t work_lock; struct list_head work_list; struct delayed_work dwork; struct list_head bdi_node; struct percpu_ref refcnt; struct fprop_local_percpu memcg_completions; struct cgroup_subsys_state *memcg_css; struct cgroup_subsys_state *blkcg_css; struct list_head memcg_node; struct list_head blkcg_node; union __anonunion____missing_field_name_478 __annonCompField109; } ; 134 struct backing_dev_info { struct list_head bdi_list; unsigned long ra_pages; unsigned int capabilities; congested_fn *congested_fn; void *congested_data; char *name; unsigned int min_ratio; unsigned int max_ratio; unsigned int max_prop_frac; atomic_long_t tot_write_bandwidth; struct bdi_writeback wb; struct list_head wb_list; struct radix_tree_root cgwb_tree; struct rb_root cgwb_congested_tree; atomic_t usage_cnt; wait_queue_head_t wb_waitq; struct device *dev; struct device *owner; struct timer_list laptop_mode_wb_timer; struct dentry *debug_dir; struct dentry *debug_stats; } ; 14 enum writeback_sync_modes { WB_SYNC_NONE = 0, WB_SYNC_ALL = 1 } ; 31 struct writeback_control { long nr_to_write; long pages_skipped; loff_t range_start; loff_t range_end; enum writeback_sync_modes sync_mode; unsigned char for_kupdate; unsigned char for_background; unsigned char tagged_writepages; unsigned char for_reclaim; unsigned char range_cyclic; unsigned char for_sync; struct bdi_writeback *wb; struct inode *inode; int wb_id; int wb_lcand_id; int wb_tcand_id; size_t wb_bytes; size_t wb_lcand_bytes; size_t wb_tcand_bytes; } ; 101 struct wb_domain { spinlock_t lock; struct fprop_global completions; struct timer_list period_timer; unsigned long period_time; unsigned long dirty_limit_tstamp; unsigned long dirty_limit; } ; 12 typedef void * mempool_alloc_t(gfp_t , void *); 13 typedef void mempool_free_t(void *, void *); 14 struct mempool_s { spinlock_t lock; int min_nr; int curr_nr; void **elements; void *pool_data; mempool_alloc_t *alloc; mempool_free_t *free; wait_queue_head_t wait; } ; 25 typedef struct mempool_s mempool_t; 79 union __anonunion____missing_field_name_479 { struct list_head q_node; struct kmem_cache *__rcu_icq_cache; } ; 79 union __anonunion____missing_field_name_480 { struct hlist_node ioc_node; struct callback_head __rcu_head; } ; 79 struct io_cq { struct request_queue *q; struct io_context *ioc; union __anonunion____missing_field_name_479 __annonCompField110; union __anonunion____missing_field_name_480 __annonCompField111; unsigned int flags; } ; 92 struct io_context { atomic_long_t refcount; atomic_t active_ref; atomic_t nr_tasks; spinlock_t lock; unsigned short ioprio; int nr_batch_requests; unsigned long last_waited; struct radix_tree_root icq_tree; struct io_cq *icq_hint; struct hlist_head icq_list; struct work_struct release_work; } ; 295 struct bio_integrity_payload { struct bio *bip_bio; struct bvec_iter bip_iter; bio_end_io_t *bip_end_io; unsigned short bip_slab; unsigned short bip_vcnt; unsigned short bip_max_vcnt; unsigned short bip_flags; struct work_struct bip_work; struct bio_vec *bip_vec; struct bio_vec bip_inline_vecs[0U]; } ; 529 struct bio_list { 
struct bio *head; struct bio *tail; } ; 661 struct bio_set { struct kmem_cache *bio_slab; unsigned int front_pad; mempool_t *bio_pool; mempool_t *bvec_pool; mempool_t *bio_integrity_pool; mempool_t *bvec_integrity_pool; spinlock_t rescue_lock; struct bio_list rescue_list; struct work_struct rescue_work; struct workqueue_struct *rescue_workqueue; } ; 87 struct mem_cgroup_id { int id; atomic_t ref; } ; 104 struct mem_cgroup_stat_cpu { long count[11U]; unsigned long events[8U]; unsigned long nr_page_events; unsigned long targets[3U]; } ; 111 struct mem_cgroup_reclaim_iter { struct mem_cgroup *position; unsigned int generation; } ; 117 struct mem_cgroup_per_node { struct lruvec lruvec; unsigned long lru_size[5U]; struct mem_cgroup_reclaim_iter iter[13U]; struct rb_node tree_node; unsigned long usage_in_excess; bool on_tree; struct mem_cgroup *memcg; } ; 133 struct mem_cgroup_threshold { struct eventfd_ctx *eventfd; unsigned long threshold; } ; 139 struct mem_cgroup_threshold_ary { int current_threshold; unsigned int size; struct mem_cgroup_threshold entries[0U]; } ; 149 struct mem_cgroup_thresholds { struct mem_cgroup_threshold_ary *primary; struct mem_cgroup_threshold_ary *spare; } ; 160 enum memcg_kmem_state { KMEM_NONE = 0, KMEM_ALLOCATED = 1, KMEM_ONLINE = 2 } ; 166 struct mem_cgroup { struct cgroup_subsys_state css; struct mem_cgroup_id id; struct page_counter memory; struct page_counter swap; struct page_counter memsw; struct page_counter kmem; struct page_counter tcpmem; unsigned long low; unsigned long high; struct work_struct high_work; unsigned long soft_limit; struct vmpressure vmpressure; bool use_hierarchy; bool oom_lock; int under_oom; int swappiness; int oom_kill_disable; struct cgroup_file events_file; struct mutex thresholds_lock; struct mem_cgroup_thresholds thresholds; struct mem_cgroup_thresholds memsw_thresholds; struct list_head oom_notify; unsigned long move_charge_at_immigrate; atomic_t moving_account; spinlock_t move_lock; struct task_struct *move_lock_task; unsigned long move_lock_flags; struct mem_cgroup_stat_cpu *stat; unsigned long socket_pressure; bool tcpmem_active; int tcpmem_pressure; int kmemcg_id; enum memcg_kmem_state kmem_state; int last_scanned_node; nodemask_t scan_nodes; atomic_t numainfo_events; atomic_t numainfo_updating; struct list_head cgwb_list; struct wb_domain cgwb_domain; struct list_head event_list; spinlock_t event_list_lock; struct mem_cgroup_per_node *nodeinfo[0U]; } ; 27 struct gnet_stats_basic_packed { __u64 bytes; __u32 packets; } ; 41 struct gnet_stats_rate_est64 { __u64 bps; __u64 pps; } ; 51 struct gnet_stats_queue { __u32 qlen; __u32 backlog; __u32 drops; __u32 requeues; __u32 overlimits; } ; 519 struct tcmsg { unsigned char tcm_family; unsigned char tcm__pad1; unsigned short tcm__pad2; int tcm_ifindex; __u32 tcm_handle; __u32 tcm_parent; __u32 tcm_info; } ; 122 struct gnet_stats_basic_cpu { struct gnet_stats_basic_packed bstats; struct u64_stats_sync syncp; } ; 13 struct gnet_dump { spinlock_t *lock; struct sk_buff *skb; struct nlattr *tail; int compat_tc_stats; int compat_xstats; int padattr; void *xstats; int xstats_len; struct tc_stats tc_stats; } ; 87 struct nla_policy { u16 type; u16 len; } ; 25 struct rtnl_link_ops { struct list_head list; const char *kind; size_t priv_size; void (*setup)(struct net_device *); int maxtype; const struct nla_policy *policy; int (*validate)(struct nlattr **, struct nlattr **); int (*newlink)(struct net *, struct net_device *, struct nlattr **, struct nlattr **); int (*changelink)(struct net_device 
*, struct nlattr **, struct nlattr **); void (*dellink)(struct net_device *, struct list_head *); size_t (*get_size)(const struct net_device *); int (*fill_info)(struct sk_buff *, const struct net_device *); size_t (*get_xstats_size)(const struct net_device *); int (*fill_xstats)(struct sk_buff *, const struct net_device *); unsigned int (*get_num_tx_queues)(); unsigned int (*get_num_rx_queues)(); int slave_maxtype; const struct nla_policy *slave_policy; int (*slave_validate)(struct nlattr **, struct nlattr **); int (*slave_changelink)(struct net_device *, struct net_device *, struct nlattr **, struct nlattr **); size_t (*get_slave_size)(const struct net_device *, const struct net_device *); int (*fill_slave_info)(struct sk_buff *, const struct net_device *, const struct net_device *); struct net * (*get_link_net)(const struct net_device *); size_t (*get_linkxstats_size)(const struct net_device *, int); int (*fill_linkxstats)(struct sk_buff *, const struct net_device *, int *, int); } ; 158 struct Qdisc_ops ; 159 struct qdisc_walker ; 160 struct tcf_walker ; 30 struct qdisc_size_table { struct callback_head rcu; struct list_head list; struct tc_sizespec szopts; int refcnt; u16 data[]; } ; 38 struct Qdisc { int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); struct sk_buff * (*dequeue)(struct Qdisc *); unsigned int flags; u32 limit; const struct Qdisc_ops *ops; struct qdisc_size_table *stab; struct list_head list; u32 handle; u32 parent; void *u32_node; struct netdev_queue *dev_queue; struct gnet_stats_rate_est64 rate_est; struct gnet_stats_basic_cpu *cpu_bstats; struct gnet_stats_queue *cpu_qstats; struct sk_buff *gso_skb; struct sk_buff_head q; struct gnet_stats_basic_packed bstats; seqcount_t running; struct gnet_stats_queue qstats; unsigned long state; struct Qdisc *next_sched; struct sk_buff *skb_bad_txq; struct callback_head callback_head; int padded; atomic_t refcnt; spinlock_t busylock; } ; 126 struct Qdisc_class_ops { struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); int (*graft)(struct Qdisc *, unsigned long, struct Qdisc *, struct Qdisc **); struct Qdisc * (*leaf)(struct Qdisc *, unsigned long); void (*qlen_notify)(struct Qdisc *, unsigned long); unsigned long int (*get)(struct Qdisc *, u32 ); void (*put)(struct Qdisc *, unsigned long); int (*change)(struct Qdisc *, u32 , u32 , struct nlattr **, unsigned long *); int (*delete)(struct Qdisc *, unsigned long); void (*walk)(struct Qdisc *, struct qdisc_walker *); struct tcf_proto ** (*tcf_chain)(struct Qdisc *, unsigned long); bool (*tcf_cl_offload)(u32 ); unsigned long int (*bind_tcf)(struct Qdisc *, unsigned long, u32 ); void (*unbind_tcf)(struct Qdisc *, unsigned long); int (*dump)(struct Qdisc *, unsigned long, struct sk_buff *, struct tcmsg *); int (*dump_stats)(struct Qdisc *, unsigned long, struct gnet_dump *); } ; 158 struct Qdisc_ops { struct Qdisc_ops *next; const struct Qdisc_class_ops *cl_ops; char id[16U]; int priv_size; int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); struct sk_buff * (*dequeue)(struct Qdisc *); struct sk_buff * (*peek)(struct Qdisc *); int (*init)(struct Qdisc *, struct nlattr *); void (*reset)(struct Qdisc *); void (*destroy)(struct Qdisc *); int (*change)(struct Qdisc *, struct nlattr *); void (*attach)(struct Qdisc *); int (*dump)(struct Qdisc *, struct sk_buff *); int (*dump_stats)(struct Qdisc *, struct gnet_dump *); struct module *owner; } ; 183 struct tcf_result { unsigned long class; u32 classid; } ; 189 struct tcf_proto_ops { struct 
list_head head; char kind[16U]; int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); int (*init)(struct tcf_proto *); bool (*destroy)(struct tcf_proto *, bool ); unsigned long int (*get)(struct tcf_proto *, u32 ); int (*change)(struct net *, struct sk_buff *, struct tcf_proto *, unsigned long, u32 , struct nlattr **, unsigned long *, bool ); int (*delete)(struct tcf_proto *, unsigned long); void (*walk)(struct tcf_proto *, struct tcf_walker *); int (*dump)(struct net *, struct tcf_proto *, unsigned long, struct sk_buff *, struct tcmsg *); struct module *owner; } ; 214 struct tcf_proto { struct tcf_proto *next; void *root; int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); __be16 protocol; u32 prio; u32 classid; struct Qdisc *q; void *data; const struct tcf_proto_ops *ops; struct callback_head rcu; } ; 806 struct sock_filter { __u16 code; __u8 jt; __u8 jf; __u32 k; } ; 49 struct bpf_insn { __u8 code; unsigned char dst_reg; unsigned char src_reg; __s16 off; __s32 imm; } ; 88 enum bpf_prog_type { BPF_PROG_TYPE_UNSPEC = 0, BPF_PROG_TYPE_SOCKET_FILTER = 1, BPF_PROG_TYPE_KPROBE = 2, BPF_PROG_TYPE_SCHED_CLS = 3, BPF_PROG_TYPE_SCHED_ACT = 4, BPF_PROG_TYPE_TRACEPOINT = 5, BPF_PROG_TYPE_XDP = 6 } ; 472 struct bpf_prog_aux ; 323 struct sock_fprog_kern { u16 len; struct sock_filter *filter; } ; 334 union __anonunion____missing_field_name_504 { struct sock_filter insns[0U]; struct bpf_insn insnsi[0U]; } ; 334 struct bpf_prog { u16 pages; unsigned char jited; unsigned char gpl_compatible; unsigned char cb_access; unsigned char dst_needed; u32 len; enum bpf_prog_type type; struct bpf_prog_aux *aux; struct sock_fprog_kern *orig_prog; unsigned int (*bpf_func)(const struct sk_buff *, const struct bpf_insn *); union __anonunion____missing_field_name_504 __annonCompField118; } ; 355 struct sk_filter { atomic_t refcnt; struct callback_head rcu; struct bpf_prog *prog; } ; 138 struct pollfd { int fd; short events; short revents; } ; 32 struct poll_table_struct { void (*_qproc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); unsigned long _key; } ; 187 struct neigh_table ; 187 struct neigh_parms { possible_net_t net; struct net_device *dev; struct list_head list; int (*neigh_setup)(struct neighbour *); void (*neigh_cleanup)(struct neighbour *); struct neigh_table *tbl; void *sysctl_table; int dead; atomic_t refcnt; struct callback_head callback_head; int reachable_time; int data[13U]; unsigned long data_state[1U]; } ; 110 struct neigh_statistics { unsigned long allocs; unsigned long destroys; unsigned long hash_grows; unsigned long res_failed; unsigned long lookups; unsigned long hits; unsigned long rcv_probes_mcast; unsigned long rcv_probes_ucast; unsigned long periodic_gc_runs; unsigned long forced_gc_runs; unsigned long unres_discards; unsigned long table_fulls; } ; 130 struct neigh_ops ; 130 struct neighbour { struct neighbour *next; struct neigh_table *tbl; struct neigh_parms *parms; unsigned long confirmed; unsigned long updated; rwlock_t lock; atomic_t refcnt; struct sk_buff_head arp_queue; unsigned int arp_queue_len_bytes; struct timer_list timer; unsigned long used; atomic_t probes; __u8 flags; __u8 nud_state; __u8 type; __u8 dead; seqlock_t ha_lock; unsigned char ha[32U]; struct hh_cache hh; int (*output)(struct neighbour *, struct sk_buff *); const struct neigh_ops *ops; struct callback_head rcu; struct net_device *dev; u8 primary_key[0U]; } ; 159 struct neigh_ops { int family; void (*solicit)(struct neighbour *, struct 
sk_buff *); void (*error_report)(struct neighbour *, struct sk_buff *); int (*output)(struct neighbour *, struct sk_buff *); int (*connected_output)(struct neighbour *, struct sk_buff *); } ; 167 struct pneigh_entry { struct pneigh_entry *next; possible_net_t net; struct net_device *dev; u8 flags; u8 key[0U]; } ; 175 struct neigh_hash_table { struct neighbour **hash_buckets; unsigned int hash_shift; __u32 hash_rnd[4U]; struct callback_head rcu; } ; 188 struct neigh_table { int family; int entry_size; int key_len; __be16 protocol; __u32 (*hash)(const void *, const struct net_device *, __u32 *); bool (*key_eq)(const struct neighbour *, const void *); int (*constructor)(struct neighbour *); int (*pconstructor)(struct pneigh_entry *); void (*pdestructor)(struct pneigh_entry *); void (*proxy_redo)(struct sk_buff *); char *id; struct neigh_parms parms; struct list_head parms_list; int gc_interval; int gc_thresh1; int gc_thresh2; int gc_thresh3; unsigned long last_flush; struct delayed_work gc_work; struct timer_list proxy_timer; struct sk_buff_head proxy_queue; atomic_t entries; rwlock_t lock; unsigned long last_rand; struct neigh_statistics *stats; struct neigh_hash_table *nht; struct pneigh_entry **phash_buckets; } ; 520 struct lwtunnel_state ; 520 struct dn_route ; 520 union __anonunion____missing_field_name_520 { struct dst_entry *next; struct rtable *rt_next; struct rt6_info *rt6_next; struct dn_route *dn_next; } ; 520 struct dst_entry { struct callback_head callback_head; struct dst_entry *child; struct net_device *dev; struct dst_ops *ops; unsigned long _metrics; unsigned long expires; struct dst_entry *path; struct dst_entry *from; struct xfrm_state *xfrm; int (*input)(struct sk_buff *); int (*output)(struct net *, struct sock *, struct sk_buff *); unsigned short flags; unsigned short pending_confirm; short error; short obsolete; unsigned short header_len; unsigned short trailer_len; __u32 tclassid; long __pad_to_align_refcnt[2U]; atomic_t __refcnt; int __use; unsigned long lastuse; struct lwtunnel_state *lwtstate; union __anonunion____missing_field_name_520 __annonCompField119; } ; 110 struct __anonstruct_socket_lock_t_521 { spinlock_t slock; int owned; wait_queue_head_t wq; struct lockdep_map dep_map; } ; 110 typedef struct __anonstruct_socket_lock_t_521 socket_lock_t; 110 struct proto ; 116 typedef __u32 __portpair; 117 typedef __u64 __addrpair; 118 struct __anonstruct____missing_field_name_523 { __be32 skc_daddr; __be32 skc_rcv_saddr; } ; 118 union __anonunion____missing_field_name_522 { __addrpair skc_addrpair; struct __anonstruct____missing_field_name_523 __annonCompField120; } ; 118 union __anonunion____missing_field_name_524 { unsigned int skc_hash; __u16 skc_u16hashes[2U]; } ; 118 struct __anonstruct____missing_field_name_526 { __be16 skc_dport; __u16 skc_num; } ; 118 union __anonunion____missing_field_name_525 { __portpair skc_portpair; struct __anonstruct____missing_field_name_526 __annonCompField123; } ; 118 union __anonunion____missing_field_name_527 { struct hlist_node skc_bind_node; struct hlist_node skc_portaddr_node; } ; 118 struct inet_timewait_death_row ; 118 union __anonunion____missing_field_name_528 { unsigned long skc_flags; struct sock *skc_listener; struct inet_timewait_death_row *skc_tw_dr; } ; 118 union __anonunion____missing_field_name_529 { struct hlist_node skc_node; struct hlist_nulls_node skc_nulls_node; } ; 118 union __anonunion____missing_field_name_530 { int skc_incoming_cpu; u32 skc_rcv_wnd; u32 skc_tw_rcv_nxt; } ; 118 union 
__anonunion____missing_field_name_531 { u32 skc_rxhash; u32 skc_window_clamp; u32 skc_tw_snd_nxt; } ; 118 struct sock_common { union __anonunion____missing_field_name_522 __annonCompField121; union __anonunion____missing_field_name_524 __annonCompField122; union __anonunion____missing_field_name_525 __annonCompField124; unsigned short skc_family; volatile unsigned char skc_state; unsigned char skc_reuse; unsigned char skc_reuseport; unsigned char skc_ipv6only; unsigned char skc_net_refcnt; int skc_bound_dev_if; union __anonunion____missing_field_name_527 __annonCompField125; struct proto *skc_prot; possible_net_t skc_net; struct in6_addr skc_v6_daddr; struct in6_addr skc_v6_rcv_saddr; atomic64_t skc_cookie; union __anonunion____missing_field_name_528 __annonCompField126; int skc_dontcopy_begin[0U]; union __anonunion____missing_field_name_529 __annonCompField127; int skc_tx_queue_mapping; union __anonunion____missing_field_name_530 __annonCompField128; atomic_t skc_refcnt; int skc_dontcopy_end[0U]; union __anonunion____missing_field_name_531 __annonCompField129; } ; 230 struct __anonstruct_sk_backlog_532 { atomic_t rmem_alloc; int len; struct sk_buff *head; struct sk_buff *tail; } ; 230 union __anonunion____missing_field_name_533 { struct socket_wq *sk_wq; struct socket_wq *sk_wq_raw; } ; 230 struct sock_reuseport ; 230 struct sock { struct sock_common __sk_common; socket_lock_t sk_lock; struct sk_buff_head sk_receive_queue; struct __anonstruct_sk_backlog_532 sk_backlog; int sk_forward_alloc; __u32 sk_txhash; unsigned int sk_napi_id; unsigned int sk_ll_usec; atomic_t sk_drops; int sk_rcvbuf; struct sk_filter *sk_filter; union __anonunion____missing_field_name_533 __annonCompField130; struct xfrm_policy *sk_policy[2U]; struct dst_entry *sk_rx_dst; struct dst_entry *sk_dst_cache; atomic_t sk_wmem_alloc; atomic_t sk_omem_alloc; int sk_sndbuf; struct sk_buff_head sk_write_queue; unsigned char sk_padding; unsigned char sk_no_check_tx; unsigned char sk_no_check_rx; unsigned char sk_userlocks; unsigned char sk_protocol; unsigned short sk_type; int sk_wmem_queued; gfp_t sk_allocation; u32 sk_pacing_rate; u32 sk_max_pacing_rate; netdev_features_t sk_route_caps; netdev_features_t sk_route_nocaps; int sk_gso_type; unsigned int sk_gso_max_size; u16 sk_gso_max_segs; int sk_rcvlowat; unsigned long sk_lingertime; struct sk_buff_head sk_error_queue; struct proto *sk_prot_creator; rwlock_t sk_callback_lock; int sk_err; int sk_err_soft; u32 sk_ack_backlog; u32 sk_max_ack_backlog; __u32 sk_priority; __u32 sk_mark; struct pid *sk_peer_pid; const struct cred *sk_peer_cred; long sk_rcvtimeo; long sk_sndtimeo; struct timer_list sk_timer; ktime_t sk_stamp; u16 sk_tsflags; u8 sk_shutdown; u32 sk_tskey; struct socket *sk_socket; void *sk_user_data; struct page_frag sk_frag; struct sk_buff *sk_send_head; __s32 sk_peek_off; int sk_write_pending; void *sk_security; struct sock_cgroup_data sk_cgrp_data; struct mem_cgroup *sk_memcg; void (*sk_state_change)(struct sock *); void (*sk_data_ready)(struct sock *); void (*sk_write_space)(struct sock *); void (*sk_error_report)(struct sock *); int (*sk_backlog_rcv)(struct sock *, struct sk_buff *); void (*sk_destruct)(struct sock *); struct sock_reuseport *sk_reuseport_cb; struct callback_head sk_rcu; } ; 948 struct request_sock_ops ; 949 struct timewait_sock_ops ; 950 struct inet_hashinfo ; 951 struct raw_hashinfo ; 965 struct udp_table ; 965 union __anonunion_h_544 { struct inet_hashinfo *hashinfo; struct udp_table *udp_table; struct raw_hashinfo *raw_hash; } ; 965 struct 
proto { void (*close)(struct sock *, long); int (*connect)(struct sock *, struct sockaddr *, int); int (*disconnect)(struct sock *, int); struct sock * (*accept)(struct sock *, int, int *); int (*ioctl)(struct sock *, int, unsigned long); int (*init)(struct sock *); void (*destroy)(struct sock *); void (*shutdown)(struct sock *, int); int (*setsockopt)(struct sock *, int, int, char *, unsigned int); int (*getsockopt)(struct sock *, int, int, char *, int *); int (*compat_setsockopt)(struct sock *, int, int, char *, unsigned int); int (*compat_getsockopt)(struct sock *, int, int, char *, int *); int (*compat_ioctl)(struct sock *, unsigned int, unsigned long); int (*sendmsg)(struct sock *, struct msghdr *, size_t ); int (*recvmsg)(struct sock *, struct msghdr *, size_t , int, int, int *); int (*sendpage)(struct sock *, struct page *, int, size_t , int); int (*bind)(struct sock *, struct sockaddr *, int); int (*backlog_rcv)(struct sock *, struct sk_buff *); void (*release_cb)(struct sock *); int (*hash)(struct sock *); void (*unhash)(struct sock *); void (*rehash)(struct sock *); int (*get_port)(struct sock *, unsigned short); void (*clear_sk)(struct sock *, int); unsigned int inuse_idx; bool (*stream_memory_free)(const struct sock *); void (*enter_memory_pressure)(struct sock *); atomic_long_t *memory_allocated; struct percpu_counter *sockets_allocated; int *memory_pressure; long *sysctl_mem; int *sysctl_wmem; int *sysctl_rmem; int max_header; bool no_autobind; struct kmem_cache *slab; unsigned int obj_size; int slab_flags; struct percpu_counter *orphan_count; struct request_sock_ops *rsk_prot; struct timewait_sock_ops *twsk_prot; union __anonunion_h_544 h; struct module *owner; char name[32U]; struct list_head node; int (*diag_destroy)(struct sock *, int); } ; 2266 struct request_sock_ops { int family; int obj_size; struct kmem_cache *slab; char *slab_name; int (*rtx_syn_ack)(const struct sock *, struct request_sock *); void (*send_ack)(const struct sock *, struct sk_buff *, struct request_sock *); void (*send_reset)(const struct sock *, struct sk_buff *); void (*destructor)(struct request_sock *); void (*syn_ack_timeout)(const struct request_sock *); } ; 46 struct request_sock { struct sock_common __req_common; struct request_sock *dl_next; u16 mss; u8 num_retrans; unsigned char cookie_ts; unsigned char num_timeout; u32 ts_recent; struct timer_list rsk_timer; const struct request_sock_ops *rsk_ops; struct sock *sk; u32 *saved_syn; u32 secid; u32 peer_secid; } ; 18 struct fib_rule_hdr { __u8 family; __u8 dst_len; __u8 src_len; __u8 tos; __u8 table; __u8 res1; __u8 res2; __u8 action; __u32 flags; } ; 68 struct fib_rule { struct list_head list; int iifindex; int oifindex; u32 mark; u32 mark_mask; u32 flags; u32 table; u8 action; u8 l3mdev; u32 target; __be64 tun_id; struct fib_rule *ctarget; struct net *fr_net; atomic_t refcnt; u32 pref; int suppress_ifgroup; int suppress_prefixlen; char iifname[16U]; char oifname[16U]; struct callback_head rcu; } ; 35 struct fib_lookup_arg { void *lookup_ptr; void *result; struct fib_rule *rule; u32 table; int flags; } ; 43 struct fib_rules_ops { int family; struct list_head list; int rule_size; int addr_size; int unresolved_rules; int nr_goto_rules; int (*action)(struct fib_rule *, struct flowi *, int, struct fib_lookup_arg *); bool (*suppress)(struct fib_rule *, struct fib_lookup_arg *); int (*match)(struct fib_rule *, struct flowi *, int); int (*configure)(struct fib_rule *, struct sk_buff *, struct fib_rule_hdr *, struct nlattr **); int (*delete)(struct 
fib_rule *); int (*compare)(struct fib_rule *, struct fib_rule_hdr *, struct nlattr **); int (*fill)(struct fib_rule *, struct sk_buff *, struct fib_rule_hdr *); size_t (*nlmsg_payload)(struct fib_rule *); void (*flush_cache)(struct fib_rules_ops *); int nlgroup; const struct nla_policy *policy; struct list_head rules_list; struct module *owner; struct net *fro_net; struct callback_head rcu; } ; 140 struct l3mdev_ops { u32 (*l3mdev_fib_table)(const struct net_device *); struct sk_buff * (*l3mdev_l3_rcv)(struct net_device *, struct sk_buff *, u16 ); struct rtable * (*l3mdev_get_rtable)(const struct net_device *, const struct flowi4 *); int (*l3mdev_get_saddr)(struct net_device *, struct flowi4 *); struct dst_entry * (*l3mdev_get_rt6_dst)(const struct net_device *, struct flowi6 *); int (*l3mdev_get_saddr6)(struct net_device *, const struct sock *, struct flowi6 *); } ; 106 struct ipv6hdr { unsigned char priority; unsigned char version; __u8 flow_lbl[3U]; __be16 payload_len; __u8 nexthdr; __u8 hop_limit; struct in6_addr saddr; struct in6_addr daddr; } ; 180 struct ipv6_stable_secret { bool initialized; struct in6_addr secret; } ; 64 struct ipv6_devconf { __s32 forwarding; __s32 hop_limit; __s32 mtu6; __s32 accept_ra; __s32 accept_redirects; __s32 autoconf; __s32 dad_transmits; __s32 rtr_solicits; __s32 rtr_solicit_interval; __s32 rtr_solicit_delay; __s32 force_mld_version; __s32 mldv1_unsolicited_report_interval; __s32 mldv2_unsolicited_report_interval; __s32 use_tempaddr; __s32 temp_valid_lft; __s32 temp_prefered_lft; __s32 regen_max_retry; __s32 max_desync_factor; __s32 max_addresses; __s32 accept_ra_defrtr; __s32 accept_ra_min_hop_limit; __s32 accept_ra_pinfo; __s32 ignore_routes_with_linkdown; __s32 accept_ra_rtr_pref; __s32 rtr_probe_interval; __s32 accept_ra_rt_info_max_plen; __s32 proxy_ndp; __s32 accept_source_route; __s32 accept_ra_from_local; __s32 optimistic_dad; __s32 use_optimistic; __s32 mc_forwarding; __s32 disable_ipv6; __s32 drop_unicast_in_l2_multicast; __s32 accept_dad; __s32 force_tllao; __s32 ndisc_notify; __s32 suppress_frag_ndisc; __s32 accept_ra_mtu; __s32 drop_unsolicited_na; struct ipv6_stable_secret stable_secret; __s32 use_oif_addrs_only; __s32 keep_addr_on_down; struct ctl_table_header *sysctl_header; } ; 328 struct timewait_sock_ops { struct kmem_cache *twsk_slab; char *twsk_slab_name; unsigned int twsk_obj_size; int (*twsk_unique)(struct sock *, struct sock *, void *); void (*twsk_destructor)(struct sock *); } ; 39 struct inet_timewait_death_row { atomic_t tw_count; struct inet_hashinfo *hashinfo; int sysctl_tw_recycle; int sysctl_max_tw_buckets; } ; 142 struct tcphdr { __be16 source; __be16 dest; __be32 seq; __be32 ack_seq; unsigned char res1; unsigned char doff; unsigned char fin; unsigned char syn; unsigned char rst; unsigned char psh; unsigned char ack; unsigned char urg; unsigned char ece; unsigned char cwr; __be16 window; __sum16 check; __be16 urg_ptr; } ; 100 struct ip6_sf_list { struct ip6_sf_list *sf_next; struct in6_addr sf_addr; unsigned long sf_count[2U]; unsigned char sf_gsresp; unsigned char sf_oldin; unsigned char sf_crcount; } ; 109 struct ifmcaddr6 { struct in6_addr mca_addr; struct inet6_dev *idev; struct ifmcaddr6 *next; struct ip6_sf_list *mca_sources; struct ip6_sf_list *mca_tomb; unsigned int mca_sfmode; unsigned char mca_crcount; unsigned long mca_sfcount[2U]; struct timer_list mca_timer; unsigned int mca_flags; int mca_users; atomic_t mca_refcnt; spinlock_t mca_lock; unsigned long mca_cstamp; unsigned long mca_tstamp; } ; 141 struct 
ifacaddr6 { struct in6_addr aca_addr; struct inet6_dev *aca_idev; struct rt6_info *aca_rt; struct ifacaddr6 *aca_next; int aca_users; atomic_t aca_refcnt; unsigned long aca_cstamp; unsigned long aca_tstamp; } ; 152 struct ipv6_devstat { struct proc_dir_entry *proc_dir_entry; struct ipstats_mib *ipv6; struct icmpv6_mib_device *icmpv6dev; struct icmpv6msg_mib_device *icmpv6msgdev; } ; 163 struct inet6_dev { struct net_device *dev; struct list_head addr_list; struct ifmcaddr6 *mc_list; struct ifmcaddr6 *mc_tomb; spinlock_t mc_lock; unsigned char mc_qrv; unsigned char mc_gq_running; unsigned char mc_ifc_count; unsigned char mc_dad_count; unsigned long mc_v1_seen; unsigned long mc_qi; unsigned long mc_qri; unsigned long mc_maxdelay; struct timer_list mc_gq_timer; struct timer_list mc_ifc_timer; struct timer_list mc_dad_timer; struct ifacaddr6 *ac_list; rwlock_t lock; atomic_t refcnt; __u32 if_flags; int dead; u8 rndid[8U]; struct timer_list regen_timer; struct list_head tempaddr_list; struct in6_addr token; struct neigh_parms *nd_parms; struct ipv6_devconf cnf; struct ipv6_devstat stats; struct timer_list rs_timer; __u8 rs_probes; __u8 addr_gen_mode; unsigned long tstamp; struct callback_head rcu; } ; 127 struct arphdr { __be16 ar_hrd; __be16 ar_pro; unsigned char ar_hln; unsigned char ar_pln; __be16 ar_op; } ; 47 struct prefix_info ; 98 struct nd_opt_hdr { __u8 nd_opt_type; __u8 nd_opt_len; } ; 103 struct ndisc_options { struct nd_opt_hdr *nd_opt_array[6U]; struct nd_opt_hdr *nd_opts_ri; struct nd_opt_hdr *nd_opts_ri_end; struct nd_opt_hdr *nd_useropts; struct nd_opt_hdr *nd_useropts_end; struct nd_opt_hdr *nd_802154_opt_array[3U]; } ; 134 struct ndisc_ops { int (*is_useropt)(u8 ); int (*parse_options)(const struct net_device *, struct nd_opt_hdr *, struct ndisc_options *); void (*update)(const struct net_device *, struct neighbour *, u32 , u8 , const struct ndisc_options *); int (*opt_addr_space)(const struct net_device *, u8 , struct neighbour *, u8 *, u8 **); void (*fill_addr_option)(const struct net_device *, struct sk_buff *, u8 , const u8 *); void (*prefix_rcv_add_addr)(struct net *, struct net_device *, const struct prefix_info *, struct inet6_dev *, struct in6_addr *, int, u32 , bool , bool , __u32 , u32 , bool ); } ; 1047 struct ipv4_addr_key { __be32 addr; int vif; } ; 23 union __anonunion____missing_field_name_583 { struct ipv4_addr_key a4; struct in6_addr a6; u32 key[4U]; } ; 23 struct inetpeer_addr { union __anonunion____missing_field_name_583 __annonCompField133; __u16 family; } ; 34 union __anonunion____missing_field_name_584 { struct list_head gc_list; struct callback_head gc_rcu; } ; 34 struct __anonstruct____missing_field_name_586 { atomic_t rid; } ; 34 union __anonunion____missing_field_name_585 { struct __anonstruct____missing_field_name_586 __annonCompField135; struct callback_head rcu; struct inet_peer *gc_next; } ; 34 struct inet_peer { struct inet_peer *avl_left; struct inet_peer *avl_right; struct inetpeer_addr daddr; __u32 avl_height; u32 metrics[16U]; u32 rate_tokens; unsigned long rate_last; union __anonunion____missing_field_name_584 __annonCompField134; union __anonunion____missing_field_name_585 __annonCompField136; __u32 dtime; atomic_t refcnt; } ; 65 struct inet_peer_base { struct inet_peer *root; seqlock_t lock; int total; } ; 174 struct fib_table { struct hlist_node tb_hlist; u32 tb_id; int tb_num_default; struct callback_head rcu; unsigned long *tb_data; unsigned long __data[0U]; } ; 48 struct uncached_list ; 49 struct rtable { struct dst_entry dst; int 
rt_genid; unsigned int rt_flags; __u16 rt_type; __u8 rt_is_input; __u8 rt_uses_gateway; int rt_iif; __be32 rt_gateway; u32 rt_pmtu; u32 rt_table_id; struct list_head rt_uncached; struct uncached_list *rt_uncached_list; } ; 213 struct in_ifaddr ; 70 struct hotplug_slot ; 70 struct pci_slot { struct pci_bus *bus; struct list_head list; struct hotplug_slot *hotplug; unsigned char number; struct kobject kobj; } ; 108 typedef int pci_power_t; 135 typedef unsigned int pci_channel_state_t; 136 enum pci_channel_state { pci_channel_io_normal = 1, pci_channel_io_frozen = 2, pci_channel_io_perm_failure = 3 } ; 161 typedef unsigned short pci_dev_flags_t; 188 typedef unsigned short pci_bus_flags_t; 245 struct pcie_link_state ; 246 struct pci_vpd ; 247 struct pci_sriov ; 249 struct pci_driver ; 249 union __anonunion____missing_field_name_594 { struct pci_sriov *sriov; struct pci_dev *physfn; } ; 249 struct pci_dev { struct list_head bus_list; struct pci_bus *bus; struct pci_bus *subordinate; void *sysdata; struct proc_dir_entry *procent; struct pci_slot *slot; unsigned int devfn; unsigned short vendor; unsigned short device; unsigned short subsystem_vendor; unsigned short subsystem_device; unsigned int class; u8 revision; u8 hdr_type; u8 pcie_cap; u8 msi_cap; u8 msix_cap; unsigned char pcie_mpss; u8 rom_base_reg; u8 pin; u16 pcie_flags_reg; unsigned long *dma_alias_mask; struct pci_driver *driver; u64 dma_mask; struct device_dma_parameters dma_parms; pci_power_t current_state; u8 pm_cap; unsigned char pme_support; unsigned char pme_interrupt; unsigned char pme_poll; unsigned char d1_support; unsigned char d2_support; unsigned char no_d1d2; unsigned char no_d3cold; unsigned char bridge_d3; unsigned char d3cold_allowed; unsigned char mmio_always_on; unsigned char wakeup_prepared; unsigned char runtime_d3cold; unsigned char ignore_hotplug; unsigned int d3_delay; unsigned int d3cold_delay; struct pcie_link_state *link_state; pci_channel_state_t error_state; struct device dev; int cfg_size; unsigned int irq; struct cpumask *irq_affinity; struct resource resource[17U]; bool match_driver; unsigned char transparent; unsigned char multifunction; unsigned char is_added; unsigned char is_busmaster; unsigned char no_msi; unsigned char no_64bit_msi; unsigned char block_cfg_access; unsigned char broken_parity_status; unsigned char irq_reroute_variant; unsigned char msi_enabled; unsigned char msix_enabled; unsigned char ari_enabled; unsigned char ats_enabled; unsigned char is_managed; unsigned char needs_freset; unsigned char state_saved; unsigned char is_physfn; unsigned char is_virtfn; unsigned char reset_fn; unsigned char is_hotplug_bridge; unsigned char __aer_firmware_first_valid; unsigned char __aer_firmware_first; unsigned char broken_intx_masking; unsigned char io_window_1k; unsigned char irq_managed; unsigned char has_secondary_link; unsigned char non_compliant_bars; pci_dev_flags_t dev_flags; atomic_t enable_cnt; u32 saved_config_space[16U]; struct hlist_head saved_cap_space; struct bin_attribute *rom_attr; int rom_attr_enabled; struct bin_attribute *res_attr[17U]; struct bin_attribute *res_attr_wc[17U]; const struct attribute_group **msi_irq_groups; struct pci_vpd *vpd; union __anonunion____missing_field_name_594 __annonCompField138; u16 ats_cap; u8 ats_stu; atomic_t ats_ref_cnt; phys_addr_t rom; size_t romlen; char *driver_override; } ; 452 struct pci_ops ; 452 struct msi_controller ; 452 struct pci_bus { struct list_head node; struct pci_bus *parent; struct list_head children; struct list_head devices; 
struct pci_dev *self; struct list_head slots; struct resource *resource[4U]; struct list_head resources; struct resource busn_res; struct pci_ops *ops; struct msi_controller *msi; void *sysdata; struct proc_dir_entry *procdir; unsigned char number; unsigned char primary; unsigned char max_bus_speed; unsigned char cur_bus_speed; char name[48U]; unsigned short bridge_ctl; pci_bus_flags_t bus_flags; struct device *bridge; struct device dev; struct bin_attribute *legacy_io; struct bin_attribute *legacy_mem; unsigned char is_added; } ; 576 struct pci_ops { int (*add_bus)(struct pci_bus *); void (*remove_bus)(struct pci_bus *); void * (*map_bus)(struct pci_bus *, unsigned int, int); int (*read)(struct pci_bus *, unsigned int, int, int, u32 *); int (*write)(struct pci_bus *, unsigned int, int, int, u32 ); } ; 606 struct pci_dynids { spinlock_t lock; struct list_head list; } ; 620 typedef unsigned int pci_ers_result_t; 630 struct pci_error_handlers { pci_ers_result_t (*error_detected)(struct pci_dev *, enum pci_channel_state ); pci_ers_result_t (*mmio_enabled)(struct pci_dev *); pci_ers_result_t (*link_reset)(struct pci_dev *); pci_ers_result_t (*slot_reset)(struct pci_dev *); void (*reset_notify)(struct pci_dev *, bool ); void (*resume)(struct pci_dev *); } ; 663 struct pci_driver { struct list_head node; const char *name; const struct pci_device_id *id_table; int (*probe)(struct pci_dev *, const struct pci_device_id *); void (*remove)(struct pci_dev *); int (*suspend)(struct pci_dev *, pm_message_t ); int (*suspend_late)(struct pci_dev *, pm_message_t ); int (*resume_early)(struct pci_dev *); int (*resume)(struct pci_dev *); void (*shutdown)(struct pci_dev *); int (*sriov_configure)(struct pci_dev *, int); const struct pci_error_handlers *err_handler; struct device_driver driver; struct pci_dynids dynids; } ; 41 struct msix_entry { u32 vector; u16 entry; } ; 63 struct vlan_hdr { __be16 h_vlan_TCI; __be16 h_vlan_encapsulated_proto; } ; 646 struct ipv4_devconf { void *sysctl; int data[31U]; unsigned long state[1U]; } ; 20 struct ip_mc_list ; 20 struct in_device { struct net_device *dev; atomic_t refcnt; int dead; struct in_ifaddr *ifa_list; struct ip_mc_list *mc_list; struct ip_mc_list **mc_hash; int mc_count; spinlock_t mc_tomb_lock; struct ip_mc_list *mc_tomb; unsigned long mr_v1_seen; unsigned long mr_v2_seen; unsigned long mr_maxdelay; unsigned char mr_qrv; unsigned char mr_gq_running; unsigned char mr_ifc_count; struct timer_list mr_gq_timer; struct timer_list mr_ifc_timer; struct neigh_parms *arp_parms; struct ipv4_devconf cnf; struct callback_head callback_head; } ; 71 struct in_ifaddr { struct hlist_node hash; struct in_ifaddr *ifa_next; struct in_device *ifa_dev; struct callback_head callback_head; __be32 ifa_local; __be32 ifa_address; __be32 ifa_mask; __be32 ifa_broadcast; unsigned char ifa_scope; unsigned char ifa_prefixlen; __u32 ifa_flags; char ifa_label[16U]; __u32 ifa_valid_lft; __u32 ifa_preferred_lft; unsigned long ifa_cstamp; unsigned long ifa_tstamp; } ; 205 union __anonunion___u_601 { struct in_device *__val; char __c[1U]; } ; 259 struct UPT1_TxStats { u64 TSOPktsTxOK; u64 TSOBytesTxOK; u64 ucastPktsTxOK; u64 ucastBytesTxOK; u64 mcastPktsTxOK; u64 mcastBytesTxOK; u64 bcastPktsTxOK; u64 bcastBytesTxOK; u64 pktsTxError; u64 pktsTxDiscard; } ; 42 struct UPT1_RxStats { u64 LROPktsRxOK; u64 LROBytesRxOK; u64 ucastPktsRxOK; u64 ucastBytesRxOK; u64 mcastPktsRxOK; u64 mcastBytesRxOK; u64 bcastPktsRxOK; u64 bcastBytesRxOK; u64 pktsRxOutOfBuf; u64 pktsRxError; } ; 75 struct UPT1_RSSConf 
{ u16 hashType; u16 hashFunc; u16 hashKeySize; u16 indTableSize; u8 hashKey[40U]; u8 indTable[128U]; } ; 147 struct Vmxnet3_TxDesc { __le64 addr; unsigned short len; unsigned char gen; unsigned char rsvd; unsigned char dtype; unsigned char ext1; unsigned short msscof; unsigned short hlen; unsigned char om; unsigned char eop; unsigned char cq; unsigned char ext2; unsigned char ti; unsigned short tci; } ; 155 struct Vmxnet3_TxDataDesc { u8 data[128U]; } ; 180 typedef u8 Vmxnet3_RxDataDesc; 181 struct Vmxnet3_TxCompDesc { unsigned short txdIdx; unsigned int ext1; __le32 ext2; __le32 ext3; unsigned int rsvd; unsigned char type; unsigned char gen; } ; 199 struct Vmxnet3_RxDesc { __le64 addr; unsigned short len; unsigned char btype; unsigned char dtype; unsigned short rsvd; unsigned char gen; u32 ext1; } ; 218 struct Vmxnet3_RxCompDesc { unsigned short rxdIdx; unsigned char ext1; unsigned char eop; unsigned char sop; unsigned short rqID; unsigned char rssType; unsigned char cnc; unsigned char ext2; __le32 rssHash; unsigned short len; unsigned char err; unsigned char ts; unsigned short tci; unsigned short csum; unsigned char tuc; unsigned char udp; unsigned char tcp; unsigned char ipc; unsigned char v6; unsigned char v4; unsigned char frg; unsigned char fcs; unsigned char type; unsigned char gen; } ; 288 struct Vmxnet3_RxCompDescExt { __le32 dword1; u8 segCnt; u8 dupAckCnt; __le16 tsDelta; __le32 dword2; unsigned short mss; unsigned char tuc; unsigned char udp; unsigned char tcp; unsigned char ipc; unsigned char v6; unsigned char v4; unsigned char frg; unsigned char fcs; unsigned char type; unsigned char gen; } ; 329 union Vmxnet3_GenericDesc { __le64 qword[2U]; __le32 dword[4U]; __le16 word[8U]; struct Vmxnet3_TxDesc txd; struct Vmxnet3_RxDesc rxd; struct Vmxnet3_TxCompDesc tcd; struct Vmxnet3_RxCompDesc rcd; struct Vmxnet3_RxCompDescExt rcdExt; } ; 376 struct Vmxnet3_GOSInfo { unsigned char gosBits; unsigned char gosType; unsigned short gosVer; unsigned short gosMisc; } ; 445 struct Vmxnet3_DriverInfo { __le32 version; struct Vmxnet3_GOSInfo gos; __le32 vmxnet3RevSpt; __le32 uptVerSpt; } ; 453 struct Vmxnet3_MiscConf { struct Vmxnet3_DriverInfo driverInfo; __le64 uptFeatures; __le64 ddPA; __le64 queueDescPA; __le32 ddLen; __le32 queueDescLen; __le32 mtu; __le16 maxNumRxSG; u8 numTxQueues; u8 numRxQueues; __le32 reserved[4U]; } ; 479 struct Vmxnet3_TxQueueConf { __le64 txRingBasePA; __le64 dataRingBasePA; __le64 compRingBasePA; __le64 ddPA; __le64 reserved; __le32 txRingSize; __le32 dataRingSize; __le32 compRingSize; __le32 ddLen; u8 intrIdx; u8 _pad1[1U]; __le16 txDataRingDescSize; u8 _pad2[4U]; } ; 496 struct Vmxnet3_RxQueueConf { __le64 rxRingBasePA[2U]; __le64 compRingBasePA; __le64 ddPA; __le64 rxDataRingBasePA; __le32 rxRingSize[2U]; __le32 compRingSize; __le32 ddLen; u8 intrIdx; u8 _pad1[1U]; __le16 rxDataRingDescSize; u8 _pad2[4U]; } ; 511 enum vmxnet3_intr_mask_mode { VMXNET3_IMM_AUTO = 0, VMXNET3_IMM_ACTIVE = 1, VMXNET3_IMM_LAZY = 2 } ; 517 enum vmxnet3_intr_type { VMXNET3_IT_AUTO = 0, VMXNET3_IT_INTX = 1, VMXNET3_IT_MSI = 2, VMXNET3_IT_MSIX = 3 } ; 524 struct Vmxnet3_IntrConf { bool autoMask; u8 numIntrs; u8 eventIntrIdx; u8 modLevels[25U]; __le32 intrCtrl; __le32 reserved[2U]; } ; 544 struct Vmxnet3_QueueStatus { bool stopped; u8 _pad[3U]; __le32 error; } ; 554 struct Vmxnet3_TxQueueCtrl { __le32 txNumDeferred; __le32 txThreshold; __le64 reserved; } ; 561 struct Vmxnet3_RxQueueCtrl { bool updateRxProd; u8 _pad[7U]; __le64 reserved; } ; 576 struct Vmxnet3_RxFilterConf { __le32 rxMode; 
__le16 mfTableLen; __le16 _pad1; __le64 mfTablePA; __le32 vfTable[128U]; } ; 584 struct Vmxnet3_PM_PktFilter { u8 maskSize; u8 patternSize; u8 mask[16U]; u8 pattern[128U]; u8 pad[6U]; } ; 602 struct Vmxnet3_PMConf { __le16 wakeUpEvents; u8 numFilters; u8 pad[5U]; struct Vmxnet3_PM_PktFilter filters[6U]; } ; 610 struct Vmxnet3_VariableLenConfDesc { __le32 confVer; __le32 confLen; __le64 confPA; } ; 617 struct Vmxnet3_TxQueueDesc { struct Vmxnet3_TxQueueCtrl ctrl; struct Vmxnet3_TxQueueConf conf; struct Vmxnet3_QueueStatus status; struct UPT1_TxStats stats; u8 _pad[88U]; } ; 628 struct Vmxnet3_RxQueueDesc { struct Vmxnet3_RxQueueCtrl ctrl; struct Vmxnet3_RxQueueConf conf; struct Vmxnet3_QueueStatus status; struct UPT1_RxStats stats; u8 __pad[88U]; } ; 638 struct Vmxnet3_SetPolling { u8 enablePolling; } ; 642 enum Vmxnet3_CoalesceMode { VMXNET3_COALESCE_DISABLED = 0, VMXNET3_COALESCE_ADAPT = 1, VMXNET3_COALESCE_STATIC = 2, VMXNET3_COALESCE_RBC = 3 } ; 649 struct Vmxnet3_CoalesceRbc { u32 rbc_rate; } ; 657 struct Vmxnet3_CoalesceStatic { u32 tx_depth; u32 tx_comp_depth; u32 rx_depth; } ; 663 union __anonunion_coalPara_602 { struct Vmxnet3_CoalesceRbc coalRbc; struct Vmxnet3_CoalesceStatic coalStatic; } ; 663 struct Vmxnet3_CoalesceScheme { enum Vmxnet3_CoalesceMode coalMode; union __anonunion_coalPara_602 coalPara; } ; 687 union Vmxnet3_CmdInfo { struct Vmxnet3_VariableLenConfDesc varConf; struct Vmxnet3_SetPolling setPolling; __le64 data[2U]; } ; 696 struct Vmxnet3_DSDevRead { struct Vmxnet3_MiscConf misc; struct Vmxnet3_IntrConf intrConf; struct Vmxnet3_RxFilterConf rxFilterConf; struct Vmxnet3_VariableLenConfDesc rssConfDesc; struct Vmxnet3_VariableLenConfDesc pmConfDesc; struct Vmxnet3_VariableLenConfDesc pluginConfDesc; } ; 706 union __anonunion_cu_603 { __le32 reserved1[4U]; union Vmxnet3_CmdInfo cmdInfo; } ; 706 struct Vmxnet3_DriverShared { __le32 magic; __le32 pad; struct Vmxnet3_DSDevRead devRead; __le32 ecr; __le32 reserved; union __anonunion_cu_603 cu; } ; 749 struct vmxnet3_cmd_ring { union Vmxnet3_GenericDesc *base; u32 size; u32 next2fill; u32 next2comp; u8 gen; dma_addr_t basePA; } ; 159 struct vmxnet3_comp_ring { union Vmxnet3_GenericDesc *base; u32 size; u32 next2proc; u8 gen; u8 intr_idx; dma_addr_t basePA; } ; 178 struct vmxnet3_tx_data_ring { struct Vmxnet3_TxDataDesc *base; u32 size; dma_addr_t basePA; } ; 191 struct vmxnet3_tx_buf_info { u32 map_type; u16 len; u16 sop_idx; dma_addr_t dma_addr; struct sk_buff *skb; } ; 199 struct vmxnet3_tq_driver_stats { u64 drop_total; u64 drop_too_many_frags; u64 drop_oversized_hdr; u64 drop_hdr_inspect_err; u64 drop_tso; u64 tx_ring_full; u64 linearized; u64 copy_skb_header; u64 oversized_hdr; } ; 215 struct vmxnet3_tx_ctx { bool ipv4; bool ipv6; u16 mss; u32 eth_ip_hdr_size; u32 l4_hdr_size; u32 copy_size; union Vmxnet3_GenericDesc *sop_txd; union Vmxnet3_GenericDesc *eop_txd; } ; 228 struct vmxnet3_adapter ; 228 struct vmxnet3_tx_queue { char name[24U]; struct vmxnet3_adapter *adapter; spinlock_t tx_lock; struct vmxnet3_cmd_ring tx_ring; struct vmxnet3_tx_buf_info *buf_info; dma_addr_t buf_info_pa; struct vmxnet3_tx_data_ring data_ring; struct vmxnet3_comp_ring comp_ring; struct Vmxnet3_TxQueueCtrl *shared; struct vmxnet3_tq_driver_stats stats; bool stopped; int num_stop; int qid; u16 txdata_desc_size; } ; 246 enum vmxnet3_rx_buf_type { VMXNET3_RX_BUF_NONE = 0, VMXNET3_RX_BUF_SKB = 1, VMXNET3_RX_BUF_PAGE = 2 } ; 252 union __anonunion____missing_field_name_604 { struct sk_buff *skb; struct page *page; } ; 252 struct 
vmxnet3_rx_buf_info { enum vmxnet3_rx_buf_type buf_type; u16 len; union __anonunion____missing_field_name_604 __annonCompField139; dma_addr_t dma_addr; } ; 262 struct vmxnet3_rx_ctx { struct sk_buff *skb; u32 sop_idx; } ; 267 struct vmxnet3_rq_driver_stats { u64 drop_total; u64 drop_err; u64 drop_fcs; u64 rx_buf_alloc_failure; } ; 274 struct vmxnet3_rx_data_ring { Vmxnet3_RxDataDesc *base; dma_addr_t basePA; u16 desc_size; } ; 280 struct vmxnet3_rx_queue { char name[24U]; struct vmxnet3_adapter *adapter; struct napi_struct napi; struct vmxnet3_cmd_ring rx_ring[2U]; struct vmxnet3_rx_data_ring data_ring; struct vmxnet3_comp_ring comp_ring; struct vmxnet3_rx_ctx rx_ctx; u32 qid; u32 qid2; u32 dataRingQid; struct vmxnet3_rx_buf_info *buf_info[2U]; dma_addr_t buf_info_pa; struct Vmxnet3_RxQueueCtrl *shared; struct vmxnet3_rq_driver_stats stats; } ; 297 struct vmxnet3_intr { enum vmxnet3_intr_mask_mode mask_mode; enum vmxnet3_intr_type type; u8 num_intrs; u8 event_intr_idx; u8 mod_levels[17U]; char event_msi_vector_name[27U]; struct msix_entry msix_entries[17U]; } ; 319 struct vmxnet3_adapter { struct vmxnet3_tx_queue tx_queue[8U]; struct vmxnet3_rx_queue rx_queue[8U]; unsigned long active_vlans[64U]; struct vmxnet3_intr intr; spinlock_t cmd_lock; struct Vmxnet3_DriverShared *shared; struct Vmxnet3_PMConf *pm_conf; struct Vmxnet3_TxQueueDesc *tqd_start; struct Vmxnet3_RxQueueDesc *rqd_start; struct net_device *netdev; struct pci_dev *pdev; u8 *hw_addr0; u8 *hw_addr1; u8 version; bool rxcsum; bool lro; struct UPT1_RSSConf *rss_conf; bool rss; u32 num_rx_queues; u32 num_tx_queues; unsigned int skb_buf_size; int rx_buf_per_pkt; dma_addr_t shared_pa; dma_addr_t queue_desc_pa; dma_addr_t coal_conf_pa; u32 wol; u32 link_speed; u64 tx_timeout_count; u32 tx_ring_size; u32 rx_ring_size; u32 rx_ring2_size; u16 txdata_desc_size; u16 rxdata_desc_size; bool rxdataring_enabled; struct work_struct work; unsigned long state; int share_intr; struct Vmxnet3_CoalesceScheme *coal_conf; bool default_coal_mode; dma_addr_t adapter_pa; dma_addr_t pm_conf_pa; dma_addr_t rss_conf_pa; } ; 1220 union __anonunion_hdr_606 { void *ptr; struct ethhdr *eth; struct iphdr *ipv4; struct ipv6hdr *ipv6; struct tcphdr *tcp; } ; 473 struct vmxnet3_stat_desc { char desc[32U]; int offset; } ; 1 void * __builtin_memcpy(void *, const void *, unsigned long); 1 long int __builtin_expect(long, long); 218 void __read_once_size(const volatile void *p, void *res, int size); 243 void __write_once_size(volatile void *p, void *res, int size); 33 extern struct module __this_module; 72 void set_bit(long nr, volatile unsigned long *addr); 110 void clear_bit(long nr, volatile unsigned long *addr); 204 bool test_and_set_bit(long nr, volatile unsigned long *addr); 308 bool constant_test_bit(long nr, const volatile unsigned long *addr); 479 int fls64(__u64 x); 14 unsigned long int find_next_bit(const unsigned long *, unsigned long, unsigned long); 42 unsigned long int find_first_bit(const unsigned long *, unsigned long); 46 __u16 __fswab16(__u16 val); 148 void le32_add_cpu(__le32 *var, u32 val); 187 unsigned int fls_long(unsigned long l); 70 unsigned long int __rounddown_pow_of_two(unsigned long n); 278 void __pr_info(const char *, ...); 63 void __dynamic_dev_dbg(struct _ddebug *, const struct device *, const char *, ...); 69 void __dynamic_netdev_dbg(struct _ddebug *, const struct net_device *, const char *, ...); 411 int sprintf(char *, const char *, ...); 8 void ldv_dma_map_page(); 9 void ldv_dma_mapping_error(); 25 void INIT_LIST_HEAD(struct 
list_head *list); 87 void __bad_percpu_size(); 295 void __bad_size_call_parameter(); 71 void warn_slowpath_null(const char *, const int); 7 extern unsigned long page_offset_base; 23 unsigned long int __phys_addr(unsigned long); 31 void * __memcpy(void *, const void *, size_t ); 56 void * __memset(void *, int, size_t ); 114 int __bitmap_weight(const unsigned long *, unsigned int); 311 int bitmap_weight(const unsigned long *src, unsigned int nbits); 37 extern int nr_cpu_ids; 89 extern struct cpumask __cpu_online_mask; 478 unsigned int cpumask_weight(const struct cpumask *srcp); 24 int atomic_read(const atomic_t *v); 89 void atomic_inc(atomic_t *v); 115 bool atomic_dec_and_test(atomic_t *v); 170 int static_key_count(struct static_key *key); 180 bool static_key_false(struct static_key *key); 8 extern int __preempt_count; 67 void __preempt_count_add(int val); 72 void __preempt_count_sub(int val); 7 void __local_bh_disable_ip(unsigned long, unsigned int); 16 void local_bh_disable(); 22 void __local_bh_enable_ip(unsigned long, unsigned int); 29 void local_bh_enable(); 281 void lockdep_init_map(struct lockdep_map *, const char *, struct lock_class_key *, int); 334 void lock_acquire(struct lockdep_map *, unsigned int, int, int, int, struct lockdep_map *, unsigned long); 338 void lock_release(struct lockdep_map *, int, unsigned long); 571 void lockdep_rcu_suspicious(const char *, const int, const char *); 93 void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *); 22 void _raw_spin_lock(raw_spinlock_t *); 34 unsigned long int _raw_spin_lock_irqsave(raw_spinlock_t *); 41 void _raw_spin_unlock(raw_spinlock_t *); 45 void _raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long); 289 raw_spinlock_t * spinlock_check(spinlock_t *lock); 300 void spin_lock(spinlock_t *lock); 345 void spin_unlock(spinlock_t *lock); 360 void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags); 11 void dump_page(struct page *, const char *); 307 void __rcu_read_lock(); 313 void __rcu_read_unlock(); 110 bool rcu_is_watching(); 486 void rcu_lock_acquire(struct lockdep_map *map); 491 void rcu_lock_release(struct lockdep_map *map); 496 extern struct lockdep_map rcu_lock_map; 500 int debug_lockdep_rcu_enabled(); 502 int rcu_read_lock_held(); 866 void rcu_read_lock(); 920 void rcu_read_unlock(); 181 void __init_work(struct work_struct *, int); 353 extern struct workqueue_struct *system_wq; 430 bool queue_work_on(int, struct workqueue_struct *, struct work_struct *); 445 bool cancel_work_sync(struct work_struct *); 470 bool queue_work(struct workqueue_struct *wq, struct work_struct *work); 529 bool schedule_work(struct work_struct *work); 58 unsigned int readl(const volatile void *addr); 66 void writel(unsigned int val, volatile void *addr); 181 void * ioremap_nocache(resource_size_t , unsigned long); 192 void * ioremap(resource_size_t offset, unsigned long size); 197 void iounmap(volatile void *); 28 extern int cpu_number; 464 struct page * alloc_pages_current(gfp_t , unsigned int); 467 struct page * alloc_pages(gfp_t gfp_mask, unsigned int order); 131 void kmemcheck_mark_initialized(void *address, unsigned int n); 154 void kfree(const void *); 318 void * __kmalloc(size_t , gfp_t ); 466 void * kmalloc(size_t size, gfp_t flags); 22 __sum16 csum_fold(__wsum sum); 87 __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __wsum sum); 112 __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __wsum sum); 179 __sum16 csum_ipv6_magic(const struct in6_addr 
*, const struct in6_addr *, __u32 , __u8 , __wsum ); 912 void * dev_get_drvdata(const struct device *dev); 917 void dev_set_drvdata(struct device *dev, void *data); 1135 void dev_err(const struct device *, const char *, ...); 1141 void _dev_info(const struct device *, const char *, ...); 37 void debug_dma_map_page(struct device *, struct page *, size_t , size_t , int, dma_addr_t , bool ); 42 void debug_dma_mapping_error(struct device *, dma_addr_t ); 44 void debug_dma_unmap_page(struct device *, dma_addr_t , size_t , int, bool ); 53 void debug_dma_alloc_coherent(struct device *, size_t , dma_addr_t , void *); 141 struct page * compound_head(struct page *page); 11 extern struct tracepoint __tracepoint_page_ref_mod_and_test; 30 void __page_ref_mod_and_test(struct page *, int, int); 64 int page_ref_count(struct page *page); 136 int page_ref_dec_and_test(struct page *page); 443 int put_page_testzero(struct page *page); 560 void __put_page(struct page *); 752 void put_zone_device_page(struct page *page); 755 bool is_zone_device_page(const struct page *page); 775 void put_page(struct page *page); 1003 void * lowmem_page_address(const struct page *page); 125 int valid_dma_direction(int dma_direction); 28 extern struct dma_map_ops *dma_ops; 30 struct dma_map_ops * get_dma_ops(struct device *dev); 42 bool arch_dma_alloc_attrs(struct device **, gfp_t *); 46 int dma_supported(struct device *, u64 ); 169 dma_addr_t ldv_dma_map_single_attrs_5(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs); 169 dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs); 192 void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs); 239 dma_addr_t ldv_dma_map_page_6(struct device *dev, struct page *page, size_t offset, size_t size, enum dma_data_direction dir); 239 dma_addr_t dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, enum dma_data_direction dir); 258 void dma_unmap_page(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir); 404 void * dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs); 445 void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag); 451 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle); 471 int ldv_dma_mapping_error_7(struct device *dev, dma_addr_t dma_addr); 471 int dma_mapping_error(struct device *dev, dma_addr_t dma_addr); 503 int dma_set_mask(struct device *dev, u64 mask); 527 int dma_set_coherent_mask(struct device *dev, u64 mask); 608 void * dma_zalloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag); 325 unsigned int skb_frag_size(const skb_frag_t *frag); 330 void skb_frag_size_set(skb_frag_t *frag, unsigned int size); 897 void consume_skb(struct sk_buff *); 974 int pskb_expand_head(struct sk_buff *, int, int, gfp_t ); 1061 void __skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4); 1069 void skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type); 1183 unsigned char * skb_end_pointer(const struct sk_buff *skb); 1334 int skb_header_cloned(const struct sk_buff *skb); 1784 bool skb_is_nonlinear(const struct sk_buff *skb); 1789 unsigned int skb_headlen(const struct sk_buff *skb); 1905 unsigned char * skb_put(struct sk_buff *, unsigned int); 1936 unsigned char * 
__pskb_pull_tail(struct sk_buff *, int); 1952 int pskb_may_pull(struct sk_buff *skb, unsigned int len); 1967 unsigned int skb_headroom(const struct sk_buff *skb); 2126 unsigned char * skb_transport_header(const struct sk_buff *skb); 2143 unsigned char * skb_network_header(const struct sk_buff *skb); 2159 unsigned char * skb_mac_header(const struct sk_buff *skb); 2208 int skb_checksum_start_offset(const struct sk_buff *skb); 2218 int skb_transport_offset(const struct sk_buff *skb); 2391 struct sk_buff * __netdev_alloc_skb(struct net_device *, unsigned int, gfp_t ); 2427 struct sk_buff * __netdev_alloc_skb_ip_align(struct net_device *dev, unsigned int length, gfp_t gfp); 2437 struct sk_buff * netdev_alloc_skb_ip_align(struct net_device *dev, unsigned int length); 2527 struct page * skb_frag_page(const skb_frag_t *frag); 2613 void __skb_frag_set_page(skb_frag_t *frag, struct page *page); 2645 dma_addr_t skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, size_t offset, size_t size, enum dma_data_direction dir); 2807 int __skb_linearize(struct sk_buff *skb); 2819 int skb_linearize(struct sk_buff *skb); 3746 void skb_checksum_none_assert(const struct sk_buff *skb); 23 struct iphdr * ip_hdr(const struct sk_buff *skb); 46 void msleep(unsigned int); 26 struct ethhdr * eth_hdr(const struct sk_buff *skb); 93 u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings); 139 int request_threaded_irq(unsigned int, irqreturn_t (*)(int, void *), irqreturn_t (*)(int, void *), unsigned long, const char *, void *); 144 int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *), unsigned long flags, const char *name, void *dev); 158 void free_irq(unsigned int, void *); 397 void __napi_schedule(struct napi_struct *); 400 bool napi_disable_pending(struct napi_struct *n); 414 bool napi_schedule_prep(struct napi_struct *n); 427 void napi_schedule(struct napi_struct *n); 464 void napi_complete(struct napi_struct *n); 501 void napi_disable(struct napi_struct *); 510 void napi_enable(struct napi_struct *n); 1946 struct netdev_queue * netdev_get_tx_queue(const struct net_device *dev, unsigned int index); 2041 void * netdev_priv(const struct net_device *dev); 2072 void netif_napi_add(struct net_device *, struct napi_struct *, int (*)(struct napi_struct *, int), int); 2392 int dev_close(struct net_device *); 2407 void free_netdev(struct net_device *); 2828 void netif_tx_start_queue(struct netdev_queue *dev_queue); 2854 void netif_tx_wake_queue(struct netdev_queue *); 2863 void netif_wake_queue(struct net_device *dev); 2878 void netif_tx_stop_queue(struct netdev_queue *dev_queue); 2895 void netif_tx_stop_all_queues(struct net_device *); 3082 bool netif_running(const struct net_device *dev); 3101 void netif_start_subqueue(struct net_device *dev, u16 queue_index); 3115 void netif_stop_subqueue(struct net_device *dev, u16 queue_index); 3142 void netif_wake_subqueue(struct net_device *, u16 ); 3180 int netif_set_real_num_tx_queues(struct net_device *, unsigned int); 3183 int netif_set_real_num_rx_queues(struct net_device *, unsigned int); 3212 void __dev_kfree_skb_irq(struct sk_buff *, enum skb_free_reason ); 3213 void __dev_kfree_skb_any(struct sk_buff *, enum skb_free_reason ); 3234 void dev_kfree_skb_irq(struct sk_buff *skb); 3244 void dev_kfree_skb_any(struct sk_buff *skb); 3256 int netif_receive_skb(struct sk_buff *); 3257 gro_result_t napi_gro_receive(struct napi_struct *, struct sk_buff *); 3352 bool netif_carrier_ok(const struct net_device *dev); 3361 void netif_carrier_on(struct net_device *); 
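The ldv_dma_map_single_attrs_5(), ldv_dma_map_page_6() and ldv_dma_mapping_error_7() wrappers declared above, together with the LDV_DMA_MAP_CALLS counter, ldv_error() and ldv_check_final_state() listed further down, are the verifier's instrumentation for the DMA-mapping rule this bug report concerns. Their bodies are not reproduced in this part of the trace, so the following is only a minimal sketch, assuming the usual shape of a counter-based LDV rule model, of how such instrumentation flags a mapping that is created but never checked with dma_mapping_error() before the scenario ends; the function bodies below are illustrative, not the tool's actual sources.

/* Illustrative sketch of a counter-based rule model (assumed, not taken
 * from this trace): map calls raise the counter, a dma_mapping_error()
 * check lowers it, and the final state must leave it at zero. */
extern void ldv_error(void);          /* verifier's "property violated" sink */
int LDV_DMA_MAP_CALLS = 0;

void ldv_dma_map_page(void)
{
        /* a second mapping while one is still unchecked is a violation */
        if (LDV_DMA_MAP_CALLS != 0)
                ldv_error();
        LDV_DMA_MAP_CALLS++;
}

void ldv_dma_mapping_error(void)
{
        /* checking without a preceding map call is also a violation */
        if (LDV_DMA_MAP_CALLS == 0)
                ldv_error();
        LDV_DMA_MAP_CALLS--;
}

void ldv_check_final_state(void)
{
        /* reached at the end of every simulated scenario: every mapping
         * must have been checked by now, otherwise the trace ends here */
        if (LDV_DMA_MAP_CALLS != 0)
                ldv_error();
}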
3363 void netif_carrier_off(struct net_device *); 3430 void netif_device_detach(struct net_device *); 3432 void netif_device_attach(struct net_device *); 3483 void __netif_tx_lock(struct netdev_queue *txq, int cpu); 3503 void __netif_tx_unlock(struct netdev_queue *txq); 3604 void netif_tx_disable(struct net_device *dev); 3674 int register_netdev(struct net_device *); 3675 void unregister_netdev(struct net_device *); 3892 void netdev_rss_key_fill(void *, size_t ); 4259 void netdev_err(const struct net_device *, const char *, ...); 4263 void netdev_notice(const struct net_device *, const char *, ...); 4265 void netdev_info(const struct net_device *, const char *, ...); 27 void rtnl_lock(); 28 void rtnl_unlock(); 27 struct tcphdr * tcp_hdr(const struct sk_buff *skb); 32 unsigned int __tcp_hdrlen(const struct tcphdr *th); 37 unsigned int tcp_hdrlen(const struct sk_buff *skb); 81 struct ipv6hdr * ipv6_hdr(const struct sk_buff *skb); 994 int pci_enable_device(struct pci_dev *); 996 int pci_enable_device_mem(struct pci_dev *); 1011 void pci_disable_device(struct pci_dev *); 1014 void pci_set_master(struct pci_dev *); 1067 int pci_save_state(struct pci_dev *); 1068 void pci_restore_state(struct pci_dev *); 1081 int pci_set_power_state(struct pci_dev *, pci_power_t ); 1082 pci_power_t pci_choose_state(struct pci_dev *, pm_message_t ); 1085 int __pci_enable_wake(struct pci_dev *, pci_power_t , bool , bool ); 1096 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable); 1145 int pci_request_selected_regions(struct pci_dev *, int, const char *); 1147 void pci_release_selected_regions(struct pci_dev *, int); 1194 int __pci_register_driver(struct pci_driver *, struct module *, const char *); 1203 void pci_unregister_driver(struct pci_driver *); 1281 void pci_disable_msi(struct pci_dev *); 1285 void pci_disable_msix(struct pci_dev *); 1287 int pci_msi_enabled(); 1288 int pci_enable_msi_range(struct pci_dev *, int, int); 1289 int pci_enable_msi_exact(struct pci_dev *dev, int nvec); 1296 int pci_enable_msix_range(struct pci_dev *, struct msix_entry *, int, int); 1606 void * pci_get_drvdata(struct pci_dev *pdev); 1611 void pci_set_drvdata(struct pci_dev *pdev, void *data); 113 int pci_set_dma_mask(struct pci_dev *dev, u64 mask); 118 int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask); 36 __be16 eth_type_trans(struct sk_buff *, struct net_device *); 52 struct net_device * alloc_etherdev_mqs(int, unsigned int, unsigned int); 409 void __vlan_hwaccel_put_tag(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); 481 __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, int *depth); 525 __be16 vlan_get_protocol(struct sk_buff *skb); 203 struct in_device * __in_dev_get_rcu(const struct net_device *dev); 208 struct in_device * in_dev_get(const struct net_device *dev); 232 void in_dev_finish_destroy(struct in_device *); 234 void in_dev_put(struct in_device *idev); 138 void vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring); 148 void vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring); 154 int vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring); 170 void vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring); 442 int vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter); 445 int vmxnet3_activate_dev(struct vmxnet3_adapter *adapter); 448 void vmxnet3_force_close(struct vmxnet3_adapter *adapter); 451 void vmxnet3_reset_dev(struct vmxnet3_adapter *adapter); 454 void vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter); 457 void 
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter); 460 int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features); 463 int vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size, u16 txdata_desc_size, u16 rxdata_desc_size); 467 void vmxnet3_set_ethtool_ops(struct net_device *netdev); 470 struct rtnl_link_stats64 * vmxnet3_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats); 472 char vmxnet3_driver_name[8U]; 33 char vmxnet3_driver_name[8U] = { 'v', 'm', 'x', 'n', 'e', 't', '3', '\x0' }; 40 const struct pci_device_id vmxnet3_pciid_table[2U] = { { 5549U, 1968U, 4294967295U, 4294967295U, 0U, 0U, 0UL }, { 0U, 0U, 0U, 0U, 0U, 0U, 0UL } }; 45 const struct pci_device_id __mod_pci__vmxnet3_pciid_table_device_table[2U] = { }; 47 int enable_mq = 1; 50 void vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac); 56 void vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned int intr_idx); 63 void vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned int intr_idx); 73 void vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter); 85 void vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter); 97 void vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events); 104 bool vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter); 111 void vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter); 119 void vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter); 127 void vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter); 139 void vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue); 173 void vmxnet3_process_events(struct vmxnet3_adapter *adapter); 313 void vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi, struct pci_dev *pdev); 330 int vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq, struct pci_dev *pdev, struct vmxnet3_adapter *adapter); 365 int vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter); 396 void vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter); 429 void vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter); 471 void vmxnet3_tq_init(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter); 501 int vmxnet3_tq_create(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter); 547 void vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter); 562 int vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, int num_to_alloc, struct vmxnet3_adapter *adapter); 653 void vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd, struct vmxnet3_rx_buf_info *rbi); 671 int vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, struct vmxnet3_tx_queue *tq, struct pci_dev *pdev, struct vmxnet3_adapter *adapter); 808 void vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter); 835 int vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, struct vmxnet3_tx_ctx *ctx, struct vmxnet3_adapter *adapter); 912 void vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, struct vmxnet3_tx_ctx *ctx, struct vmxnet3_adapter *adapter); 928 void vmxnet3_prepare_tso(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx); 947 int txd_estimate(const struct sk_buff *skb); 974 int vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter, struct net_device *netdev); 1140 netdev_tx_t 
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev); 1152 void vmxnet3_rx_csum(struct vmxnet3_adapter *adapter, struct sk_buff *skb, union Vmxnet3_GenericDesc *gdesc); 1183 void vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd, struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter); 1210 u32 vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb, union Vmxnet3_GenericDesc *gdesc); 1256 int vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter, int quota); 1545 void vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter); 1585 void vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter); 1594 void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter); 1644 void vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter); 1664 int vmxnet3_rq_init(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter); 1716 int vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter); 1735 int vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter); 1799 int vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter); 1828 int vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget); 1844 int vmxnet3_poll(struct napi_struct *napi, int budget); 1865 int vmxnet3_poll_rx_only(struct napi_struct *napi, int budget); 1899 irqreturn_t vmxnet3_msix_tx(int irq, void *data); 1929 irqreturn_t vmxnet3_msix_rx(int irq, void *data); 1956 irqreturn_t vmxnet3_msix_event(int irq, void *data); 1978 irqreturn_t vmxnet3_intr(int irq, void *dev_id); 2004 void vmxnet3_netpoll(struct net_device *netdev); 2027 int vmxnet3_request_irqs(struct vmxnet3_adapter *adapter); 2148 void vmxnet3_free_irqs(struct vmxnet3_adapter *adapter); 2192 void vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter); 2206 int vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid); 2228 int vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid); 2250 u8 * vmxnet3_copy_mc(struct net_device *netdev); 2273 void vmxnet3_set_mc(struct net_device *netdev); 2361 void vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter); 2496 void vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter); 2659 int vmxnet3_set_mac_addr(struct net_device *netdev, void *p); 2674 int vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64); 2744 void vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter); 2756 void vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter); 2867 int vmxnet3_open(struct net_device *netdev); 2924 int vmxnet3_close(struct net_device *netdev); 2966 int vmxnet3_change_mtu(struct net_device *netdev, int new_mtu); 3017 void vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64); 3035 void vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac); 3059 int vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec); 3087 void vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter); 3169 void vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter); 3181 void vmxnet3_tx_timeout(struct net_device *netdev); 3193 void vmxnet3_reset_work(struct work_struct *data); 3220 int vmxnet3_probe_device(struct pci_dev *pdev, const struct pci_device_id *id); 3493 void vmxnet3_remove_device(struct pci_dev *pdev); 3539 void vmxnet3_shutdown_device(struct pci_dev *pdev); 3569 int vmxnet3_suspend(struct device *device); 3680 int vmxnet3_resume(struct device *device); 3728 const struct 
dev_pm_ops vmxnet3_pm_ops = { 0, 0, &vmxnet3_suspend, &vmxnet3_resume, &vmxnet3_suspend, 0, 0, &vmxnet3_resume, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; 3736 struct pci_driver vmxnet3_driver = { { 0, 0 }, (const char *)(&vmxnet3_driver_name), (const struct pci_device_id *)(&vmxnet3_pciid_table), &vmxnet3_probe_device, &vmxnet3_remove_device, 0, 0, 0, 0, &vmxnet3_shutdown_device, 0, 0, { 0, 0, 0, 0, (_Bool)0, 0, 0, 0, 0, 0, 0, 0, 0, 0, &vmxnet3_pm_ops, 0 }, { { { { { { 0 } }, 0U, 0U, 0, { 0, { 0, 0 }, 0, 0, 0UL } } } }, { 0, 0 } } }; 3749 int vmxnet3_init_module(); 3760 void vmxnet3_exit_module(); 3788 void ldv_check_final_state(); 3791 void ldv_check_return_value(int); 3794 void ldv_check_return_value_probe(int); 3797 void ldv_initialize(); 3800 void ldv_handler_precall(); 3803 int nondet_int(); 3806 int LDV_IN_INTERRUPT = 0; 3809 void ldv_main0_sequence_infinite_withcheck_stateful(); 27 size_t strlcpy(char *, const char *, size_t ); 87 const char * kobject_name(const struct kobject *kobj); 104 int device_set_wakeup_enable(struct device *, bool ); 865 const char * dev_name(const struct device *dev); 119 void ethtool_cmd_speed_set(struct ethtool_cmd *ep, __u32 speed); 83 u32 ethtool_op_get_link(struct net_device *); 1619 const char * pci_name(const struct pci_dev *pdev); 39 const struct vmxnet3_stat_desc vmxnet3_tq_dev_stats[11U] = { { { 'T', 'x', ' ', 'Q', 'u', 'e', 'u', 'e', '#', '\x0' }, 0 }, { { ' ', ' ', 'T', 'S', 'O', ' ', 'p', 'k', 't', 's', ' ', 't', 'x', '\x0' }, 0 }, { { ' ', ' ', 'T', 'S', 'O', ' ', 'b', 'y', 't', 'e', 's', ' ', 't', 'x', '\x0' }, 8 }, { { ' ', ' ', 'u', 'c', 'a', 's', 't', ' ', 'p', 'k', 't', 's', ' ', 't', 'x', '\x0' }, 16 }, { { ' ', ' ', 'u', 'c', 'a', 's', 't', ' ', 'b', 'y', 't', 'e', 's', ' ', 't', 'x', '\x0' }, 24 }, { { ' ', ' ', 'm', 'c', 'a', 's', 't', ' ', 'p', 'k', 't', 's', ' ', 't', 'x', '\x0' }, 32 }, { { ' ', ' ', 'm', 'c', 'a', 's', 't', ' ', 'b', 'y', 't', 'e', 's', ' ', 't', 'x', '\x0' }, 40 }, { { ' ', ' ', 'b', 'c', 'a', 's', 't', ' ', 'p', 'k', 't', 's', ' ', 't', 'x', '\x0' }, 48 }, { { ' ', ' ', 'b', 'c', 'a', 's', 't', ' ', 'b', 'y', 't', 'e', 's', ' ', 't', 'x', '\x0' }, 56 }, { { ' ', ' ', 'p', 'k', 't', 's', ' ', 't', 'x', ' ', 'e', 'r', 'r', '\x0' }, 64 }, { { ' ', ' ', 'p', 'k', 't', 's', ' ', 't', 'x', ' ', 'd', 'i', 's', 'c', 'a', 'r', 'd', '\x0' }, 72 } }; 56 const struct vmxnet3_stat_desc vmxnet3_tq_driver_stats[9U] = { { { ' ', ' ', 'd', 'r', 'v', ' ', 'd', 'r', 'o', 'p', 'p', 'e', 'd', ' ', 't', 'x', ' ', 't', 'o', 't', 'a', 'l', '\x0' }, 0 }, { { ' ', ' ', ' ', ' ', ' ', 't', 'o', 'o', ' ', 'm', 'a', 'n', 'y', ' ', 'f', 'r', 'a', 'g', 's', '\x0' }, 8 }, { { ' ', ' ', ' ', ' ', ' ', 'g', 'i', 'a', 'n', 't', ' ', 'h', 'd', 'r', '\x0' }, 16 }, { { ' ', ' ', ' ', ' ', ' ', 'h', 'd', 'r', ' ', 'e', 'r', 'r', '\x0' }, 24 }, { { ' ', ' ', ' ', ' ', ' ', 't', 's', 'o', '\x0' }, 32 }, { { ' ', ' ', 'r', 'i', 'n', 'g', ' ', 'f', 'u', 'l', 'l', '\x0' }, 40 }, { { ' ', ' ', 'p', 'k', 't', 's', ' ', 'l', 'i', 'n', 'e', 'a', 'r', 'i', 'z', 'e', 'd', '\x0' }, 48 }, { { ' ', ' ', 'h', 'd', 'r', ' ', 'c', 'l', 'o', 'n', 'e', 'd', '\x0' }, 56 }, { { ' ', ' ', 'g', 'i', 'a', 'n', 't', ' ', 'h', 'd', 'r', '\x0' }, 64 } }; 80 const struct vmxnet3_stat_desc vmxnet3_rq_dev_stats[11U] = { { { 'R', 'x', ' ', 'Q', 'u', 'e', 'u', 'e', '#', '\x0' }, 0 }, { { ' ', ' ', 'L', 'R', 'O', ' ', 'p', 'k', 't', 's', ' ', 'r', 'x', '\x0' }, 0 }, { { ' ', ' ', 'L', 'R', 'O', ' ', 'b', 'y', 't', 'e', ' ', 'r', 'x', '\x0' }, 8 }, { { ' ', ' ', 'u', 'c', 'a', 
's', 't', ' ', 'p', 'k', 't', 's', ' ', 'r', 'x', '\x0' }, 16 }, { { ' ', ' ', 'u', 'c', 'a', 's', 't', ' ', 'b', 'y', 't', 'e', 's', ' ', 'r', 'x', '\x0' }, 24 }, { { ' ', ' ', 'm', 'c', 'a', 's', 't', ' ', 'p', 'k', 't', 's', ' ', 'r', 'x', '\x0' }, 32 }, { { ' ', ' ', 'm', 'c', 'a', 's', 't', ' ', 'b', 'y', 't', 'e', 's', ' ', 'r', 'x', '\x0' }, 40 }, { { ' ', ' ', 'b', 'c', 'a', 's', 't', ' ', 'p', 'k', 't', 's', ' ', 'r', 'x', '\x0' }, 48 }, { { ' ', ' ', 'b', 'c', 'a', 's', 't', ' ', 'b', 'y', 't', 'e', 's', ' ', 'r', 'x', '\x0' }, 56 }, { { ' ', ' ', 'p', 'k', 't', 's', ' ', 'r', 'x', ' ', 'O', 'O', 'B', '\x0' }, 64 }, { { ' ', ' ', 'p', 'k', 't', 's', ' ', 'r', 'x', ' ', 'e', 'r', 'r', '\x0' }, 72 } }; 96 const struct vmxnet3_stat_desc vmxnet3_rq_driver_stats[4U] = { { { ' ', ' ', 'd', 'r', 'v', ' ', 'd', 'r', 'o', 'p', 'p', 'e', 'd', ' ', 'r', 'x', ' ', 't', 'o', 't', 'a', 'l', '\x0' }, 0 }, { { ' ', ' ', ' ', ' ', ' ', 'e', 'r', 'r', '\x0' }, 8 }, { { ' ', ' ', ' ', ' ', ' ', 'f', 'c', 's', '\x0' }, 16 }, { { ' ', ' ', 'r', 'x', ' ', 'b', 'u', 'f', ' ', 'a', 'l', 'l', 'o', 'c', ' ', 'f', 'a', 'i', 'l', '\x0' }, 24 } }; 110 const struct vmxnet3_stat_desc vmxnet3_global_stats[1U] = { { { 't', 'x', ' ', 't', 'i', 'm', 'e', 'o', 'u', 't', ' ', 'c', 'o', 'u', 'n', 't', '\x0' }, 8080 } }; 169 int vmxnet3_get_sset_count(struct net_device *netdev, int sset); 194 int vmxnet3_get_regs_len(struct net_device *netdev); 207 void vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo); 222 void vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf); 302 void vmxnet3_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *buf); 355 void vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p); 449 void vmxnet3_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol); 459 int vmxnet3_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol); 477 int vmxnet3_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd); 499 void vmxnet3_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *param); 519 int vmxnet3_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *param); 662 int vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info, u32 *rules); 676 u32 vmxnet3_get_rss_indir_size(struct net_device *netdev); 685 int vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc); 702 int vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key, const u8 hfunc); 730 int vmxnet3_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec); 765 int vmxnet3_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec); 885 const struct ethtool_ops vmxnet3_ethtool_ops = { &vmxnet3_get_settings, 0, &vmxnet3_get_drvinfo, &vmxnet3_get_regs_len, &vmxnet3_get_regs, &vmxnet3_get_wol, &vmxnet3_set_wol, 0, 0, 0, &ethtool_op_get_link, 0, 0, 0, &vmxnet3_get_coalesce, &vmxnet3_set_coalesce, &vmxnet3_get_ringparam, &vmxnet3_set_ringparam, 0, 0, 0, &vmxnet3_get_strings, 0, &vmxnet3_get_ethtool_stats, 0, 0, 0, 0, &vmxnet3_get_sset_count, &vmxnet3_get_rxnfc, 0, 0, 0, 0, &vmxnet3_get_rss_indir_size, &vmxnet3_get_rss, &vmxnet3_set_rss, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; 950 void ldv_main1_sequence_infinite_withcheck_stateful(); 10 void ldv_error(); 7 bool ldv_is_err(const void *ptr); 14 void * ldv_err_ptr(long error); 21 long int ldv_ptr_err(const void *ptr); 28 bool ldv_is_err_or_null(const void *ptr); 5 int
LDV_DMA_MAP_CALLS = 0; return ; } { 3811 struct net_device *var_group1; 3812 int res_vmxnet3_open_73; 3813 int res_vmxnet3_close_74; 3814 struct sk_buff *var_group2; 3815 void *var_vmxnet3_set_mac_addr_68_p1; 3816 int var_vmxnet3_change_mtu_76_p1; 3817 unsigned short var_vmxnet3_vlan_rx_add_vid_57_p1; 3818 unsigned short var_vmxnet3_vlan_rx_add_vid_57_p2; 3819 unsigned short var_vmxnet3_vlan_rx_kill_vid_58_p1; 3820 unsigned short var_vmxnet3_vlan_rx_kill_vid_58_p2; 3821 struct device *var_group3; 3822 struct pci_dev *var_group4; 3823 const struct pci_device_id *var_vmxnet3_probe_device_84_p1; 3824 int res_vmxnet3_probe_device_84; 3825 int var_vmxnet3_intr_52_p0; 3826 void *var_vmxnet3_intr_52_p1; 3827 int var_vmxnet3_msix_event_51_p0; 3828 void *var_vmxnet3_msix_event_51_p1; 3829 int var_vmxnet3_msix_rx_50_p0; 3830 void *var_vmxnet3_msix_rx_50_p1; 3831 int var_vmxnet3_msix_tx_49_p0; 3832 void *var_vmxnet3_msix_tx_49_p1; 3833 int ldv_s_vmxnet3_netdev_ops_net_device_ops; 3834 int ldv_s_vmxnet3_driver_pci_driver; 3835 int tmp; 3836 int tmp___0; 3837 int tmp___1; 5571 ldv_s_vmxnet3_netdev_ops_net_device_ops = 0; 5576 ldv_s_vmxnet3_driver_pci_driver = 0; 5472 LDV_IN_INTERRUPT = 1; 5481 ldv_initialize() { /* Function call is skipped due to function is undefined */} 5568 ldv_handler_precall() { /* Function call is skipped due to function is undefined */} { 3751 int tmp; 3751 __pr_info("%s - version %s\n", (char *)"VMware vmxnet3 virtual NIC driver", (char *)"1.4.9.0-k-NAPI") { /* Function call is skipped due to function is undefined */} 3753 tmp = __pci_register_driver(&vmxnet3_driver, &__this_module, "vmxnet3") { /* Function call is skipped due to function is undefined */} } 5582 goto ldv_61620; 5582 tmp___1 = nondet_int() { /* Function call is skipped due to function is undefined */} 5586 goto ldv_61619; 5583 ldv_61619:; 5587 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */} 5587 switch (tmp___0); 6779 ldv_handler_precall() { /* Function call is skipped due to function is undefined */} { } 3682 int err; 3683 unsigned long flags; 3684 struct pci_dev *pdev; 3685 const struct device *__mptr; 3686 struct net_device *netdev; 3687 void *tmp; 3688 struct vmxnet3_adapter *adapter; 3689 void *tmp___0; 3690 _Bool tmp___1; 3691 int tmp___2; 3692 raw_spinlock_t *tmp___3; 3684 __mptr = (const struct device *)device; 3684 pdev = ((struct pci_dev *)__mptr) + 18446744073709551456UL; { 1608 void *tmp; { 914 void *__CPAchecker_TMP_0 = (void *)(dev->driver_data); 914 return __CPAchecker_TMP_0;; } 1608 return tmp;; } 3685 netdev = (struct net_device *)tmp; { 2043 return ((void *)dev) + 3072U;; } 3686 adapter = (struct vmxnet3_adapter *)tmp___0; { 3084 _Bool tmp; { 310 return (((int)(((unsigned long)(*(addr + ((unsigned long)(nr >> 6))))) >> (((int)nr) & 63))) & 1) != 0;; } 3084 return ((int)tmp) != 0;; } 3688 tmp___2 = 0; 3691 pci_set_power_state(pdev, 0) { /* Function call is skipped due to function is undefined */} 3692 pci_restore_state(pdev) { /* Function call is skipped due to function is undefined */} 3693 err = pci_enable_device_mem(pdev) { /* Function call is skipped due to function is undefined */} { 1098 int tmp; 1099 tmp = __pci_enable_wake(dev, state, 0, (int)enable) { /* Function call is skipped due to function is undefined */} 1099 return tmp;; } { 3089 unsigned int cfg; 3090 unsigned long flags; 3091 raw_spinlock_t *tmp; 3092 int i; 3093 int nvec; 3094 int tmp___0; { 291 return &(lock->__annonCompField20.rlock);; } 3093 flags = _raw_spin_lock_irqsave(tmp) { /* 
Function call is skipped due to function is undefined */} 3094 volatile void *__CPAchecker_TMP_0 = (volatile void *)(adapter->hw_addr1); { 66 Ignored inline assembler code 67 return ;; } 3096 const volatile void *__CPAchecker_TMP_1 = (const volatile void *)(adapter->hw_addr1); { 60 unsigned int ret; 58 Ignored inline assembler code 58 return ret;; } { 362 _raw_spin_unlock_irqrestore(&(lock->__annonCompField20.rlock), flags) { /* Function call is skipped due to function is undefined */} 363 return ;; } 3098 adapter->intr.type = (enum vmxnet3_intr_type )(cfg & 3U); 3099 adapter->intr.mask_mode = (enum vmxnet3_intr_mask_mode )((cfg >> 2) & 3U); 3158 adapter->num_rx_queues = 1U; 3159 _dev_info((const struct device *)(&(adapter->netdev->dev)), "Using INTx interrupt, #Rx queues: 1.\n") { /* Function call is skipped due to function is undefined */} 3161 adapter->intr.type = 1; 3164 adapter->intr.num_intrs = 1U; } { 291 return &(lock->__annonCompField20.rlock);; } 3708 flags = _raw_spin_lock_irqsave(tmp___3) { /* Function call is skipped due to function is undefined */} 3709 volatile void *__CPAchecker_TMP_0 = (volatile void *)(adapter->hw_addr1); { 66 Ignored inline assembler code 67 return ;; } { 362 _raw_spin_unlock_irqrestore(&(lock->__annonCompField20.rlock), flags) { /* Function call is skipped due to function is undefined */} 363 return ;; } { 1587 int i; 1589 i = 0; 1589 goto ldv_60958; 1591 goto ldv_60957; 1590 ldv_60957:; 1590 -vmxnet3_rq_cleanup(((struct vmxnet3_rx_queue *)(&(adapter->rx_queue))) + ((unsigned long)i), adapter) { 1547 unsigned int i; 1548 unsigned int ring_idx; 1549 struct Vmxnet3_RxDesc *rxd; 1550 unsigned int tmp; 1551 ring_idx = 0U; 1551 goto ldv_60951; 1553 goto ldv_60950; 1552 ldv_60950:; 1552 i = 0U; 1552 goto ldv_60948; 1574 ((rq->rx_ring)[ring_idx]).gen = 1U; 1575 tmp = 0U; 1575 ((rq->rx_ring)[ring_idx]).next2comp = tmp; 1575 ((rq->rx_ring)[ring_idx]).next2fill = tmp; 1551 ring_idx = ring_idx + 1U; 1552 ldv_60951:; 1553 goto ldv_60950; 1552 ldv_60950:; 1552 i = 0U; 1552 goto ldv_60948; 1574 ((rq->rx_ring)[ring_idx]).gen = 1U; 1575 tmp = 0U; 1575 ((rq->rx_ring)[ring_idx]).next2comp = tmp; 1575 ((rq->rx_ring)[ring_idx]).next2fill = tmp; 1551 ring_idx = ring_idx + 1U; 1552 ldv_60951:; 1579 rq->comp_ring.gen = 1U; 1580 rq->comp_ring.next2proc = 0U; } 1589 i = i + 1; 1590 ldv_60958:; } { 2610 unsigned long flags; 2611 raw_spinlock_t *tmp; { 291 return &(lock->__annonCompField20.rlock);; } 2611 flags = _raw_spin_lock_irqsave(tmp) { /* Function call is skipped due to function is undefined */} 2612 volatile void *__CPAchecker_TMP_0 = (volatile void *)(adapter->hw_addr1); { 66 Ignored inline assembler code 67 return ;; } { } 362 _raw_spin_unlock_irqrestore(&(lock->__annonCompField20.rlock), flags) { /* Function call is skipped due to function is undefined */} 363 return ;; } { } 2525 int err; 2526 int i; 2527 unsigned int ret; 2528 unsigned long flags; 2529 struct _ddebug descriptor; 2530 long tmp; 2531 raw_spinlock_t *tmp___0; 2529 descriptor.modname = "vmxnet3"; 2529 descriptor.function = "vmxnet3_activate_dev"; 2529 descriptor.filename = "/home/ldvuser/ldv/ref_launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/11688/dscv_tempdir/dscv/ri/331_1a/drivers/net/vmxnet3/vmxnet3_drv.c"; 2529 descriptor.format = "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes %u %u %u\n"; 2529 descriptor.lineno = 2534U; 2529 descriptor.flags = 0U; 2529 tmp = __builtin_expect(((long)(descriptor.flags)) & 1L, 0L) { /* 
Function call is skipped due to function is undefined */} 2529 const struct net_device *__CPAchecker_TMP_0 = (const struct net_device *)(adapter->netdev); 2529 __dynamic_netdev_dbg(&descriptor, __CPAchecker_TMP_0, "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes %u %u %u\n", (char *)(&(adapter->netdev->name)), adapter->skb_buf_size, adapter->rx_buf_per_pkt, ((adapter->tx_queue)[0]).tx_ring.size, ((((adapter->rx_queue)[0]).rx_ring)[0]).size, ((((adapter->rx_queue)[0]).rx_ring)[1]).size) { /* Function call is skipped due to function is undefined */} { 1718 int i; 1719 int err; 1720 long tmp; 1718 err = 0; 1720 i = 0; 1720 goto ldv_61006; 1726 ldv_61004:; } { 2029 struct vmxnet3_intr *intr; 2030 int err; 2031 int i; 2032 int vector; 2033 int tmp; 2034 int tmp___0; 2035 struct vmxnet3_rx_queue *rq; 2029 intr = &(adapter->intr); 2030 err = 0; 2031 vector = 0; 2101 unsigned int __CPAchecker_TMP_2 = (unsigned int)(intr->type); 2107 adapter->num_rx_queues = 1U; 2108 void *__CPAchecker_TMP_4 = (void *)(adapter->netdev); 2108 -request_irq(adapter->pdev->irq, &vmxnet3_intr, 128UL, (const char *)(&(adapter->netdev->name)), __CPAchecker_TMP_4) { 147 int tmp; 147 tmp = request_threaded_irq(irq, handler, (irqreturn_t (*)(int, void *))0, flags, name, dev) { /* Function call is skipped due to function is undefined */} 147 return tmp;; } 2114 intr->num_intrs = ((unsigned int)((u8 )vector)) + 1U; 2121 i = 0; 2121 goto ldv_61117; 2123 goto ldv_61116; 2122 ldv_61116:; 2122 rq = ((struct vmxnet3_rx_queue *)(&(adapter->rx_queue))) + ((unsigned long)i); 2123 rq->qid = (u32 )i; 2124 rq->qid2 = (adapter->num_rx_queues) + ((u32 )i); 2125 rq->dataRingQid = ((adapter->num_rx_queues) * 2U) + ((u32 )i); 2121 i = i + 1; 2122 ldv_61117:; 2129 i = 0; 2129 goto ldv_61120; 2129 int __CPAchecker_TMP_7 = (int)(intr->num_intrs); 2131 goto ldv_61119; 2130 ldv_61119:; 2130 (intr->mod_levels)[i] = 8U; 2129 i = i + 1; 2130 ldv_61120:; 2129 int __CPAchecker_TMP_7 = (int)(intr->num_intrs); 2132 adapter->intr.event_intr_idx = 0U; 2133 i = 0; 2133 goto ldv_61123; 2135 ((adapter->rx_queue)[0]).comp_ring.intr_idx = 0U; 2138 const struct net_device *__CPAchecker_TMP_8 = (const struct net_device *)(adapter->netdev); 2138 unsigned int __CPAchecker_TMP_9 = (unsigned int)(intr->type); 2138 unsigned int __CPAchecker_TMP_10 = (unsigned int)(intr->mask_mode); 2138 int __CPAchecker_TMP_11 = (int)(intr->num_intrs); 2138 netdev_info(__CPAchecker_TMP_8, "intr type %u, mode %u, %u vectors allocated\n", __CPAchecker_TMP_9, __CPAchecker_TMP_10, __CPAchecker_TMP_11) { /* Function call is skipped due to function is undefined */} } { 2363 struct Vmxnet3_DriverShared *shared; 2364 struct Vmxnet3_DSDevRead *devRead; 2365 struct Vmxnet3_TxQueueConf *tqc; 2366 struct Vmxnet3_RxQueueConf *rqc; 2367 int i; 2368 struct vmxnet3_tx_queue *tq; 2369 long tmp; 2370 struct vmxnet3_rx_queue *rq; 2371 struct UPT1_RSSConf *rssConf; 2372 unsigned int tmp___0; 2363 shared = adapter->shared; 2364 devRead = &(shared->devRead); 2369 __memset((void *)shared, 0, 720UL) { /* Function call is skipped due to function is undefined */} 2372 shared->magic = 3133079265U; 2373 devRead->misc.driverInfo.version = 17041664U; 2375 devRead->misc.driverInfo.gos.gosBits = 2U; 2377 devRead->misc.driverInfo.gos.gosType = 1U; 2378 *((u32 *)(&(devRead->misc.driverInfo.gos))) = *((u32 *)(&(devRead->misc.driverInfo.gos))); 2380 devRead->misc.driverInfo.vmxnet3RevSpt = 1U; 2381 devRead->misc.driverInfo.uptVerSpt = 1U; 2383 devRead->misc.ddPA = adapter->adapter_pa; 2384 devRead->misc.ddLen = 
8256U; 2388 devRead->misc.uptFeatures = (devRead->misc.uptFeatures) | 1ULL; 2391 devRead->misc.uptFeatures = (devRead->misc.uptFeatures) | 8ULL; 2392 devRead->misc.maxNumRxSG = 18U; 2397 devRead->misc.mtu = adapter->netdev->mtu; 2398 devRead->misc.queueDescPA = adapter->queue_desc_pa; 2399 devRead->misc.queueDescLen = ((adapter->num_tx_queues) + (adapter->num_rx_queues)) * 256U; 2404 u8 __CPAchecker_TMP_0 = (u8 )(adapter->num_tx_queues); 2404 devRead->misc.numTxQueues = __CPAchecker_TMP_0; 2405 i = 0; 2405 goto ldv_61221; 2424 u8 __CPAchecker_TMP_1 = (u8 )(adapter->num_rx_queues); 2424 devRead->misc.numRxQueues = __CPAchecker_TMP_1; 2425 i = 0; 2425 goto ldv_61225; 2449 void *__CPAchecker_TMP_3 = (void *)(adapter->rss_conf); 2449 __memset(__CPAchecker_TMP_3, 0, 176UL) { /* Function call is skipped due to function is undefined */} 2451 int __CPAchecker_TMP_4 = (int)(adapter->rss); 2478 devRead->intrConf.autoMask = ((unsigned int)(adapter->intr.mask_mode)) == 0U; 2480 devRead->intrConf.numIntrs = adapter->intr.num_intrs; 2481 i = 0; 2481 goto ldv_61232; 2484 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx; 2485 devRead->intrConf.intrCtrl = (devRead->intrConf.intrCtrl) | 1U; 2488 devRead->rxFilterConf.rxMode = 0U; { 2194 u32 *vfTable; 2195 unsigned short vid; 2196 unsigned long tmp; 2197 unsigned long tmp___0; 2194 vfTable = (u32 *)(&(adapter->shared->devRead.rxFilterConf.vfTable)); 2198 *vfTable = (*vfTable) | 1U; 2200 tmp = find_first_bit((const unsigned long *)(&(adapter->active_vlans)), 4096UL) { /* Function call is skipped due to function is undefined */} 2200 vid = (u16 )tmp; 2200 goto ldv_61151; } { } 2648 unsigned int tmp; 2650 tmp = *((u32 *)mac); 2651 volatile void *__CPAchecker_TMP_0 = (volatile void *)(adapter->hw_addr1); { 66 Ignored inline assembler code 67 return ;; } 2653 tmp = (u32 )((((int)(*(mac + 5UL))) << 8) | ((int)(*(mac + 4UL)))); 2654 volatile void *__CPAchecker_TMP_1 = (volatile void *)(adapter->hw_addr1); { }66 Ignored inline assembler code 67 return ;; } 2553 unsigned int __CPAchecker_TMP_3 = (unsigned int)(adapter->shared_pa); 2553 volatile void *__CPAchecker_TMP_4 = (volatile void *)(adapter->hw_addr1); { 66 Ignored inline assembler code 67 return ;; } 2555 volatile void *__CPAchecker_TMP_5 = (volatile void *)(adapter->hw_addr1); { 66 Ignored inline assembler code 67 return ;; } { 291 return &(lock->__annonCompField20.rlock);; } 2557 flags = _raw_spin_lock_irqsave(tmp___0) { /* Function call is skipped due to function is undefined */} 2558 volatile void *__CPAchecker_TMP_6 = (volatile void *)(adapter->hw_addr1); { 66 Ignored inline assembler code 67 return ;; } 2560 const volatile void *__CPAchecker_TMP_7 = (const volatile void *)(adapter->hw_addr1); { 60 unsigned int ret; 58 Ignored inline assembler code 58 return ret;; } { 362 _raw_spin_unlock_irqrestore(&(lock->__annonCompField20.rlock), flags) { /* Function call is skipped due to function is undefined */} 363 return ;; } { 2498 struct Vmxnet3_DriverShared *shared; 2499 union Vmxnet3_CmdInfo *cmdInfo; 2500 unsigned long flags; 2501 raw_spinlock_t *tmp; 2498 shared = adapter->shared; 2499 cmdInfo = &(shared->cu.cmdInfo); 2502 unsigned int __CPAchecker_TMP_0 = (unsigned int)(adapter->version); { 291 return &(lock->__annonCompField20.rlock);; } 2505 flags = _raw_spin_lock_irqsave(tmp) { /* Function call is skipped due to function is undefined */} 2506 cmdInfo->varConf.confVer = 1U; 2507 cmdInfo->varConf.confLen = 16U; 2509 cmdInfo->varConf.confPA = adapter->coal_conf_pa; 2511 int __CPAchecker_TMP_1 
= (int)(adapter->default_coal_mode); 2512 volatile void *__CPAchecker_TMP_2 = (volatile void *)(adapter->hw_addr1); { 66 Ignored inline assembler code 67 return ;; } { } 362 _raw_spin_unlock_irqrestore(&(lock->__annonCompField20.rlock), flags) { /* Function call is skipped due to function is undefined */} 363 return ;; } 2572 i = 0; 2572 goto ldv_61259; { } 2275 struct vmxnet3_adapter *adapter; 2276 void *tmp; 2277 unsigned long flags; 2278 struct Vmxnet3_RxFilterConf *rxConf; 2279 u8 *new_table; 2280 unsigned long long new_table_pa; 2281 unsigned int new_mode; 2282 u32 *vfTable; 2283 unsigned long sz; 2284 int tmp___0; 2285 raw_spinlock_t *tmp___1; { 2043 return ((void *)dev) + 3072U;; } 2275 adapter = (struct vmxnet3_adapter *)tmp; 2277 rxConf = &(adapter->shared->devRead.rxFilterConf); 2279 new_table = (u8 *)0U; 2280 new_table_pa = 0ULL; 2281 new_mode = 1U; { 2194 u32 *vfTable; 2195 unsigned short vid; 2196 unsigned long tmp; 2197 unsigned long tmp___0; 2194 vfTable = (u32 *)(&(adapter->shared->devRead.rxFilterConf.vfTable)); 2198 *vfTable = (*vfTable) | 1U; 2200 tmp = find_first_bit((const unsigned long *)(&(adapter->active_vlans)), 4096UL) { /* Function call is skipped due to function is undefined */} 2200 vid = (u16 )tmp; 2200 goto ldv_61151; } { } 2252 u8 *buf; 2253 unsigned int sz; 2254 void *tmp; 2255 struct netdev_hw_addr *ha; 2256 int i; 2257 const struct list_head *__mptr; 2258 int tmp___0; 2259 const struct list_head *__mptr___0; 2252 buf = (u8 *)0U; 2253 sz = (u32 )((netdev->mc.count) * 6); } | Source code
1 #ifndef _ASM_X86_BITOPS_H
2 #define _ASM_X86_BITOPS_H
3
4 /*
5 * Copyright 1992, Linus Torvalds.
6 *
7 * Note: inlines with more than a single statement should be marked
8 * __always_inline to avoid problems with older gcc's inlining heuristics.
9 */
10
11 #ifndef _LINUX_BITOPS_H
12 #error only <linux/bitops.h> can be included directly
13 #endif
14
15 #include <linux/compiler.h>
16 #include <asm/alternative.h>
17 #include <asm/rmwcc.h>
18 #include <asm/barrier.h>
19
20 #if BITS_PER_LONG == 32
21 # define _BITOPS_LONG_SHIFT 5
22 #elif BITS_PER_LONG == 64
23 # define _BITOPS_LONG_SHIFT 6
24 #else
25 # error "Unexpected BITS_PER_LONG"
26 #endif
27
28 #define BIT_64(n) (U64_C(1) << (n))
29
30 /*
31 * These have to be done with inline assembly: that way the bit-setting
32 * is guaranteed to be atomic. All bit operations return 0 if the bit
33 * was cleared before the operation and != 0 if it was not.
34 *
35 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
36 */
37
38 #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
39 /* Technically wrong, but this avoids compilation errors on some gcc
40 versions. */
41 #define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
42 #else
43 #define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
44 #endif
45
46 #define ADDR BITOP_ADDR(addr)
47
48 /*
49 * We do the locked ops that don't return the old value as
50 * a mask operation on a byte.
51 */
52 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
53 #define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
54 #define CONST_MASK(nr) (1 << ((nr) & 7))
55
56 /**
57 * set_bit - Atomically set a bit in memory
58 * @nr: the bit to set
59 * @addr: the address to start counting from
60 *
61 * This function is atomic and may not be reordered. See __set_bit()
62 * if you do not require the atomic guarantees.
63 *
64 * Note: there are no guarantees that this function will not be reordered
65 * on non x86 architectures, so if you are writing portable code,
66 * make sure not to rely on its reordering guarantees.
67 *
68 * Note that @nr may be almost arbitrarily large; this function is not
69 * restricted to acting on a single-word quantity.
70 */
71 static __always_inline void
72 set_bit(long nr, volatile unsigned long *addr)
73 {
74 if (IS_IMMEDIATE(nr)) {
75 asm volatile(LOCK_PREFIX "orb %1,%0"
76 : CONST_MASK_ADDR(nr, addr)
77 : "iq" ((u8)CONST_MASK(nr))
78 : "memory");
79 } else {
80 asm volatile(LOCK_PREFIX "bts %1,%0"
81 : BITOP_ADDR(addr) : "Ir" (nr) : "memory");
82 }
83 }
84
85 /**
86 * __set_bit - Set a bit in memory
87 * @nr: the bit to set
88 * @addr: the address to start counting from
89 *
90 * Unlike set_bit(), this function is non-atomic and may be reordered.
91 * If it's called on the same region of memory simultaneously, the effect
92 * may be that only one operation succeeds.
93 */
94 static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
95 {
96 asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
97 }
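/*
 * Illustrative usage sketch (not from this header; the flags word and
 * helper names are hypothetical): the kernel-doc above is the rule for
 * choosing between the atomic set_bit() and the non-atomic __set_bit().
 */
#include <linux/bitops.h>

static unsigned long example_flags;		/* hypothetical driver state word */

static void example_mark_busy(void)
{
	set_bit(0, &example_flags);		/* atomic: other CPUs may touch example_flags */
}

static void example_init_flags(void)
{
	__set_bit(1, &example_flags);		/* non-atomic: only before the word is shared */
}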
98
99 /**
100 * clear_bit - Clears a bit in memory
101 * @nr: Bit to clear
102 * @addr: Address to start counting from
103 *
104 * clear_bit() is atomic and may not be reordered. However, it does
105 * not contain a memory barrier, so if it is used for locking purposes,
106 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
107 * in order to ensure changes are visible on other processors.
108 */
109 static __always_inline void
110 clear_bit(long nr, volatile unsigned long *addr)
111 {
112 if (IS_IMMEDIATE(nr)) {
113 asm volatile(LOCK_PREFIX "andb %1,%0"
114 : CONST_MASK_ADDR(nr, addr)
115 : "iq" ((u8)~CONST_MASK(nr)));
116 } else {
117 asm volatile(LOCK_PREFIX "btr %1,%0"
118 : BITOP_ADDR(addr)
119 : "Ir" (nr));
120 }
121 }
122
123 /*
124 * clear_bit_unlock - Clears a bit in memory
125 * @nr: Bit to clear
126 * @addr: Address to start counting from
127 *
128 * clear_bit() is atomic and implies release semantics before the memory
129 * operation. It can be used for an unlock.
130 */
131 static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
132 {
133 barrier();
134 clear_bit(nr, addr);
135 }
136
137 static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
138 {
139 asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
140 }
141
142 /*
143 * __clear_bit_unlock - Clears a bit in memory
144 * @nr: Bit to clear
145 * @addr: Address to start counting from
146 *
147 * __clear_bit() is non-atomic and implies release semantics before the memory
148 * operation. It can be used for an unlock if no other CPUs can concurrently
149 * modify other bits in the word.
150 *
151 * No memory barrier is required here, because x86 cannot reorder stores past
152 * older loads. Same principle as spin_unlock.
153 */
154 static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
155 {
156 barrier();
157 __clear_bit(nr, addr);
158 }
159
160 /**
161 * __change_bit - Toggle a bit in memory
162 * @nr: the bit to change
163 * @addr: the address to start counting from
164 *
165 * Unlike change_bit(), this function is non-atomic and may be reordered.
166 * If it's called on the same region of memory simultaneously, the effect
167 * may be that only one operation succeeds.
168 */
169 static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
170 {
171 asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
172 }
173
174 /**
175 * change_bit - Toggle a bit in memory
176 * @nr: Bit to change
177 * @addr: Address to start counting from
178 *
179 * change_bit() is atomic and may not be reordered.
180 * Note that @nr may be almost arbitrarily large; this function is not
181 * restricted to acting on a single-word quantity.
182 */
183 static __always_inline void change_bit(long nr, volatile unsigned long *addr)
184 {
185 if (IS_IMMEDIATE(nr)) {
186 asm volatile(LOCK_PREFIX "xorb %1,%0"
187 : CONST_MASK_ADDR(nr, addr)
188 : "iq" ((u8)CONST_MASK(nr)));
189 } else {
190 asm volatile(LOCK_PREFIX "btc %1,%0"
191 : BITOP_ADDR(addr)
192 : "Ir" (nr));
193 }
194 }
195
196 /**
197 * test_and_set_bit - Set a bit and return its old value
198 * @nr: Bit to set
199 * @addr: Address to count from
200 *
201 * This operation is atomic and cannot be reordered.
202 * It also implies a memory barrier.
203 */
204 static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
205 {
206 GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c);
207 }
208
209 /**
210 * test_and_set_bit_lock - Set a bit and return its old value for lock
211 * @nr: Bit to set
212 * @addr: Address to count from
213 *
214 * This is the same as test_and_set_bit on x86.
215 */
216 static __always_inline bool
217 test_and_set_bit_lock(long nr, volatile unsigned long *addr)
218 {
219 return test_and_set_bit(nr, addr);
220 }
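/*
 * Hedged sketch (hypothetical names): test_and_set_bit_lock() and
 * clear_bit_unlock(), both defined in this header, pair up as an
 * acquire/release guard around a single resource bit.
 */
static unsigned long example_state;		/* bit 0 guards a hypothetical resource */

static bool example_try_claim(void)
{
	/* old value 0 means we now own the bit */
	return !test_and_set_bit_lock(0, &example_state);
}

static void example_release(void)
{
	clear_bit_unlock(0, &example_state);	/* release semantics */
}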
221
222 /**
223 * __test_and_set_bit - Set a bit and return its old value
224 * @nr: Bit to set
225 * @addr: Address to count from
226 *
227 * This operation is non-atomic and can be reordered.
228 * If two examples of this operation race, one can appear to succeed
229 * but actually fail. You must protect multiple accesses with a lock.
230 */
231 static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
232 {
233 bool oldbit;
234
235 asm("bts %2,%1\n\t"
236 CC_SET(c)
237 : CC_OUT(c) (oldbit), ADDR
238 : "Ir" (nr));
239 return oldbit;
240 }
241
242 /**
243 * test_and_clear_bit - Clear a bit and return its old value
244 * @nr: Bit to clear
245 * @addr: Address to count from
246 *
247 * This operation is atomic and cannot be reordered.
248 * It also implies a memory barrier.
249 */
250 static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
251 {
252 GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c);
253 }
254
255 /**
256 * __test_and_clear_bit - Clear a bit and return its old value
257 * @nr: Bit to clear
258 * @addr: Address to count from
259 *
260 * This operation is non-atomic and can be reordered.
261 * If two examples of this operation race, one can appear to succeed
262 * but actually fail. You must protect multiple accesses with a lock.
263 *
264 * Note: the operation is performed atomically with respect to
265 * the local CPU, but not other CPUs. Portable code should not
266 * rely on this behaviour.
267 * KVM relies on this behaviour on x86 for modifying memory that is also
268 * accessed from a hypervisor on the same CPU if running in a VM: don't change
269 * this without also updating arch/x86/kernel/kvm.c
270 */
271 static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
272 {
273 bool oldbit;
274
275 asm volatile("btr %2,%1\n\t"
276 CC_SET(c)
277 : CC_OUT(c) (oldbit), ADDR
278 : "Ir" (nr));
279 return oldbit;
280 }
281
282 /* WARNING: non atomic and it can be reordered! */
283 static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
284 {
285 bool oldbit;
286
287 asm volatile("btc %2,%1\n\t"
288 CC_SET(c)
289 : CC_OUT(c) (oldbit), ADDR
290 : "Ir" (nr) : "memory");
291
292 return oldbit;
293 }
294
295 /**
296 * test_and_change_bit - Change a bit and return its old value
297 * @nr: Bit to change
298 * @addr: Address to count from
299 *
300 * This operation is atomic and cannot be reordered.
301 * It also implies a memory barrier.
302 */
303 static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
304 {
305 GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c);
306 }
307
308 static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
309 {
310 return ((1UL << (nr & (BITS_PER_LONG-1))) &
311 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
312 }
313
314 static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
315 {
316 bool oldbit;
317
318 asm volatile("bt %2,%1\n\t"
319 CC_SET(c)
320 : CC_OUT(c) (oldbit)
321 : "m" (*(unsigned long *)addr), "Ir" (nr));
322
323 return oldbit;
324 }
325
326 #if 0 /* Fool kernel-doc since it doesn't do macros yet */
327 /**
328 * test_bit - Determine whether a bit is set
329 * @nr: bit number to test
330 * @addr: Address to start counting from
331 */
332 static bool test_bit(int nr, const volatile unsigned long *addr);
333 #endif
334
335 #define test_bit(nr, addr) \
336 (__builtin_constant_p((nr)) \
337 ? constant_test_bit((nr), (addr)) \
338 : variable_test_bit((nr), (addr)))
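/*
 * Illustrative sketch (hypothetical bitmap and function): because test_bit()
 * is a macro, the constant_test_bit()/variable_test_bit() split above is
 * resolved at compile time.
 */
static unsigned long example_bitmap[2];		/* hypothetical 128-bit map */

static bool example_query(int runtime_nr)
{
	bool fixed = test_bit(5, example_bitmap);		/* constant nr -> constant_test_bit() */
	bool moving = test_bit(runtime_nr, example_bitmap);	/* variable nr -> variable_test_bit() */

	return fixed || moving;
}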
339
340 /**
341 * __ffs - find first set bit in word
342 * @word: The word to search
343 *
344 * Undefined if no bit exists, so code should check against 0 first.
345 */
346 static __always_inline unsigned long __ffs(unsigned long word)
347 {
348 asm("rep; bsf %1,%0"
349 : "=r" (word)
350 : "rm" (word));
351 return word;
352 }
353
354 /**
355 * ffz - find first zero bit in word
356 * @word: The word to search
357 *
358 * Undefined if no zero exists, so code should check against ~0UL first.
359 */
360 static __always_inline unsigned long ffz(unsigned long word)
361 {
362 asm("rep; bsf %1,%0"
363 : "=r" (word)
364 : "r" (~word));
365 return word;
366 }
367
368 /*
369 * __fls: find last set bit in word
370 * @word: The word to search
371 *
372 * Undefined if no set bit exists, so code should check against 0 first.
373 */
374 static __always_inline unsigned long __fls(unsigned long word)
375 {
376 asm("bsr %1,%0"
377 : "=r" (word)
378 : "rm" (word));
379 return word;
380 }
381
382 #undef ADDR
383
384 #ifdef __KERNEL__
385 /**
386 * ffs - find first set bit in word
387 * @x: the word to search
388 *
389 * This is defined the same way as the libc and compiler builtin ffs
390 * routines, therefore differs in spirit from the other bitops.
391 *
392 * ffs(value) returns 0 if value is 0 or the position of the first
393 * set bit if value is nonzero. The first (least significant) bit
394 * is at position 1.
395 */
396 static __always_inline int ffs(int x)
397 {
398 int r;
399
400 #ifdef CONFIG_X86_64
401 /*
402 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
403 * dest reg is undefined if x==0, but their CPU architect says its
404 * value is written to set it to the same as before, except that the
405 * top 32 bits will be cleared.
406 *
407 * We cannot do this on 32 bits because at the very least some
408 * 486 CPUs did not behave this way.
409 */
410 asm("bsfl %1,%0"
411 : "=r" (r)
412 : "rm" (x), "0" (-1));
413 #elif defined(CONFIG_X86_CMOV)
414 asm("bsfl %1,%0\n\t"
415 "cmovzl %2,%0"
416 : "=&r" (r) : "rm" (x), "r" (-1));
417 #else
418 asm("bsfl %1,%0\n\t"
419 "jnz 1f\n\t"
420 "movl $-1,%0\n"
421 "1:" : "=r" (r) : "rm" (x));
422 #endif
423 return r + 1;
424 }
425
426 /**
427 * fls - find last set bit in word
428 * @x: the word to search
429 *
430 * This is defined in a similar way as the libc and compiler builtin
431 * ffs, but returns the position of the most significant set bit.
432 *
433 * fls(value) returns 0 if value is 0 or the position of the last
434 * set bit if value is nonzero. The last (most significant) bit is
435 * at position 32.
436 */
437 static __always_inline int fls(int x)
438 {
439 int r;
440
441 #ifdef CONFIG_X86_64
442 /*
443 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
444 * dest reg is undefined if x==0, but their CPU architect says its
445 * value is written to set it to the same as before, except that the
446 * top 32 bits will be cleared.
447 *
448 * We cannot do this on 32 bits because at the very least some
449 * 486 CPUs did not behave this way.
450 */
451 asm("bsrl %1,%0"
452 : "=r" (r)
453 : "rm" (x), "0" (-1));
454 #elif defined(CONFIG_X86_CMOV)
455 asm("bsrl %1,%0\n\t"
456 "cmovzl %2,%0"
457 : "=&r" (r) : "rm" (x), "rm" (-1));
458 #else
459 asm("bsrl %1,%0\n\t"
460 "jnz 1f\n\t"
461 "movl $-1,%0\n"
462 "1:" : "=r" (r) : "rm" (x));
463 #endif
464 return r + 1;
465 }
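/*
 * A few sample values to make the 1-based convention above concrete
 * (illustrative only, not part of the header):
 *
 *   ffs(0)          == 0	no bit set
 *   ffs(0x1)        == 1	least significant bit is position 1
 *   ffs(0x8000)     == 16
 *   fls(0)          == 0
 *   fls(0x1)        == 1
 *   fls(0x80000000) == 32	most significant bit is position 32
 */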
466
467 /**
468 * fls64 - find last set bit in a 64-bit word
469 * @x: the word to search
470 *
471 * This is defined in a similar way as the libc and compiler builtin
472 * ffsll, but returns the position of the most significant set bit.
473 *
474 * fls64(value) returns 0 if value is 0 or the position of the last
475 * set bit if value is nonzero. The last (most significant) bit is
476 * at position 64.
477 */
478 #ifdef CONFIG_X86_64
479 static __always_inline int fls64(__u64 x)
480 {
481 int bitpos = -1;
482 /*
483 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
484 * dest reg is undefined if x==0, but their CPU architect says its
485 * value is written to set it to the same as before.
486 */
487 asm("bsrq %1,%q0"
488 : "+r" (bitpos)
489 : "rm" (x));
490 return bitpos + 1;
491 }
492 #else
493 #include <asm-generic/bitops/fls64.h>
494 #endif
495
496 #include <asm-generic/bitops/find.h>
497
498 #include <asm-generic/bitops/sched.h>
499
500 #include <asm/arch_hweight.h>
501
502 #include <asm-generic/bitops/const_hweight.h>
503
504 #include <asm-generic/bitops/le.h>
505
506 #include <asm-generic/bitops/ext2-atomic-setbit.h>
507
508 #endif /* __KERNEL__ */
509 #endif /* _ASM_X86_BITOPS_H */
1 #ifndef _ASM_X86_IO_H
2 #define _ASM_X86_IO_H
3
4 /*
5 * This file contains the definitions for the x86 IO instructions
6 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
7 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
8 * versions of the single-IO instructions (inb_p/inw_p/..).
9 *
10 * This file is not meant to be obfuscating: it's just complicated
11 * to (a) handle it all in a way that makes gcc able to optimize it
12 * as well as possible and (b) trying to avoid writing the same thing
13 * over and over again with slight variations and possibly making a
14 * mistake somewhere.
15 */
16
17 /*
18 * Thanks to James van Artsdalen for a better timing-fix than
19 * the two short jumps: using outb's to a nonexistent port seems
20 * to guarantee better timings even on fast machines.
21 *
22 * On the other hand, I'd like to be sure of a non-existent port:
23 * I feel a bit unsafe about using 0x80 (should be safe, though)
24 *
25 * Linus
26 */
27
28 /*
29 * Bit simplified and optimized by Jan Hubicka
30 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
31 *
32 * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
33 * isa_read[wl] and isa_write[wl] fixed
34 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
35 */
36
37 #define ARCH_HAS_IOREMAP_WC
38 #define ARCH_HAS_IOREMAP_WT
39
40 #include <linux/string.h>
41 #include <linux/compiler.h>
42 #include <asm/page.h>
43 #include <asm/early_ioremap.h>
44 #include <asm/pgtable_types.h>
45
46 #define build_mmio_read(name, size, type, reg, barrier) \
47 static inline type name(const volatile void __iomem *addr) \
48 { type ret; asm volatile("mov" size " %1,%0":reg (ret) \
49 :"m" (*(volatile type __force *)addr) barrier); return ret; }
50
51 #define build_mmio_write(name, size, type, reg, barrier) \
52 static inline void name(type val, volatile void __iomem *addr) \
53 { asm volatile("mov" size " %0,%1": :reg (val), \
54 "m" (*(volatile type __force *)addr) barrier); }
55
56 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
57 build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
58 build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
59
60 build_mmio_read(__readb, "b", unsigned char, "=q", )
61 build_mmio_read(__readw, "w", unsigned short, "=r", )
62 build_mmio_read(__readl, "l", unsigned int, "=r", )
63
64 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
65 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
66 build_mmio_write(writel, "l", unsigned int, "r", :"memory")
67
68 build_mmio_write(__writeb, "b", unsigned char, "q", )
69 build_mmio_write(__writew, "w", unsigned short, "r", )
70 build_mmio_write(__writel, "l", unsigned int, "r", )
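/*
 * For orientation only: the build_mmio_read(readl, ...) invocation above
 * expands to roughly the accessor below. This expansion is written out as
 * an illustration and is not literally present in the header.
 */
static inline unsigned int readl(const volatile void __iomem *addr)
{
	unsigned int ret;

	asm volatile("movl %1,%0"
		     : "=r" (ret)
		     : "m" (*(volatile unsigned int __force *)addr)
		     : "memory");
	return ret;
}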
71
72 #define readb_relaxed(a) __readb(a)
73 #define readw_relaxed(a) __readw(a)
74 #define readl_relaxed(a) __readl(a)
75 #define __raw_readb __readb
76 #define __raw_readw __readw
77 #define __raw_readl __readl
78
79 #define writeb_relaxed(v, a) __writeb(v, a)
80 #define writew_relaxed(v, a) __writew(v, a)
81 #define writel_relaxed(v, a) __writel(v, a)
82 #define __raw_writeb __writeb
83 #define __raw_writew __writew
84 #define __raw_writel __writel
85
86 #define mmiowb() barrier()
87
88 #ifdef CONFIG_X86_64
89
90 build_mmio_read(readq, "q", unsigned long, "=r", :"memory")
91 build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
92
93 #define readq_relaxed(a) readq(a)
94 #define writeq_relaxed(v, a) writeq(v, a)
95
96 #define __raw_readq(a) readq(a)
97 #define __raw_writeq(val, addr) writeq(val, addr)
98
99 /* Let people know that we have them */
100 #define readq readq
101 #define writeq writeq
102
103 #endif
104
105 /**
106 * virt_to_phys - map virtual addresses to physical
107 * @address: address to remap
108 *
109 * The returned physical address is the physical (CPU) mapping for
110 * the memory address given. It is only valid to use this function on
111 * addresses directly mapped or allocated via kmalloc.
112 *
113 * This function does not give bus mappings for DMA transfers. In
114 * almost all conceivable cases a device driver should not be using
115 * this function
116 */
117
118 static inline phys_addr_t virt_to_phys(volatile void *address)
119 {
120 return __pa(address);
121 }
122
123 /**
124 * phys_to_virt - map physical address to virtual
125 * @address: address to remap
126 *
127 * The returned virtual address is a current CPU mapping for
128 * the memory address given. It is only valid to use this function on
129 * addresses that have a kernel mapping
130 *
131 * This function does not handle bus mappings for DMA transfers. In
132 * almost all conceivable cases a device driver should not be using
133 * this function
134 */
135
136 static inline void *phys_to_virt(phys_addr_t address)
137 {
138 return __va(address);
139 }
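/*
 * Hedged sketch (hypothetical function; assumes the usual <linux/slab.h>
 * helpers): as the comments above stress, both conversions are only
 * meaningful for directly mapped memory such as kmalloc() allocations.
 */
static int example_phys_roundtrip(void)
{
	void *buf = kmalloc(64, GFP_KERNEL);	/* directly mapped allocation */
	phys_addr_t pa;

	if (!buf)
		return -ENOMEM;
	pa = virt_to_phys(buf);			/* CPU physical address of buf */
	WARN_ON(phys_to_virt(pa) != buf);	/* round-trips for lowmem addresses */
	kfree(buf);
	return 0;
}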
140
141 /*
142 * Change "struct page" to physical address.
143 */
144 #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
145
146 /*
147 * ISA I/O bus memory addresses are 1:1 with the physical address.
148 * However, we truncate the address to unsigned int to avoid undesirable
149 * promotions in legacy drivers.
150 */
151 static inline unsigned int isa_virt_to_bus(volatile void *address)
152 {
153 return (unsigned int)virt_to_phys(address);
154 }
155 #define isa_page_to_bus(page) ((unsigned int)page_to_phys(page))
156 #define isa_bus_to_virt phys_to_virt
157
158 /*
159 * However PCI ones are not necessarily 1:1 and therefore these interfaces
160 * are forbidden in portable PCI drivers.
161 *
162 * Allow them on x86 for legacy drivers, though.
163 */
164 #define virt_to_bus virt_to_phys
165 #define bus_to_virt phys_to_virt
166
167 /**
168 * ioremap - map bus memory into CPU space
169 * @offset: bus address of the memory
170 * @size: size of the resource to map
171 *
172 * ioremap performs a platform specific sequence of operations to
173 * make bus memory CPU accessible via the readb/readw/readl/writeb/
174 * writew/writel functions and the other mmio helpers. The returned
175 * address is not guaranteed to be usable directly as a virtual
176 * address.
177 *
178 * If the area you are trying to map is a PCI BAR you should have a
179 * look at pci_iomap().
180 */
181 extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
182 extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
183 #define ioremap_uc ioremap_uc
184
185 extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
186 extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
187 unsigned long prot_val);
188
189 /*
190 * The default ioremap() behavior is non-cached:
191 */
192 static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
193 {
194 return ioremap_nocache(offset, size);
195 }
196
197 extern void iounmap(volatile void __iomem *addr);
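/*
 * Minimal hedged sketch (BAR address, length and register offset are
 * hypothetical): ioremap() makes device memory usable with the
 * readl()/writel() helpers defined earlier in this header.
 */
static int example_map_and_poke(resource_size_t bar_start, unsigned long bar_len)
{
	void __iomem *regs = ioremap(bar_start, bar_len);
	u32 status;

	if (!regs)
		return -ENOMEM;
	status = readl(regs + 0x10);		/* hypothetical register at offset 0x10 */
	writel(status | 0x1, regs + 0x10);
	iounmap(regs);
	return 0;
}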
198
199 extern void set_iounmap_nonlazy(void);
200
201 #ifdef __KERNEL__
202
203 #include <asm-generic/iomap.h>
204
205 /*
206 * Convert a virtual cached pointer to an uncached pointer
207 */
208 #define xlate_dev_kmem_ptr(p) p
209
210 static inline void
211 memset_io(volatile void __iomem *addr, unsigned char val, size_t count)
212 {
213 memset((void __force *)addr, val, count);
214 }
215
216 static inline void
217 memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count)
218 {
219 memcpy(dst, (const void __force *)src, count);
220 }
221
222 static inline void
223 memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
224 {
225 memcpy((void __force *)dst, src, count);
226 }
227
228 /*
229 * ISA space is 'always mapped' on a typical x86 system, no need to
230 * explicitly ioremap() it. The fact that the ISA IO space is mapped
231 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
232 * are physical addresses. The following constant pointer can be
233 * used as the IO-area pointer (it can be iounmapped as well, so the
234 * analogy with PCI is quite large):
235 */
236 #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
237
238 /*
239 * Cache management
240 *
241 * This needed for two cases
242 * 1. Out of order aware processors
243 * 2. Accidentally out of order processors (PPro errata #51)
244 */
245
246 static inline void flush_write_buffers(void)
247 {
248 #if defined(CONFIG_X86_PPRO_FENCE)
249 asm volatile("lock; addl $0,0(%%esp)": : :"memory");
250 #endif
251 }
252
253 #endif /* __KERNEL__ */
254
255 extern void native_io_delay(void);
256
257 extern int io_delay_type;
258 extern void io_delay_init(void);
259
260 #if defined(CONFIG_PARAVIRT)
261 #include <asm/paravirt.h>
262 #else
263
264 static inline void slow_down_io(void)
265 {
266 native_io_delay();
267 #ifdef REALLY_SLOW_IO
268 native_io_delay();
269 native_io_delay();
270 native_io_delay();
271 #endif
272 }
273
274 #endif
275
276 #define BUILDIO(bwl, bw, type) \
277 static inline void out##bwl(unsigned type value, int port) \
278 { \
279 asm volatile("out" #bwl " %" #bw "0, %w1" \
280 : : "a"(value), "Nd"(port)); \
281 } \
282 \
283 static inline unsigned type in##bwl(int port) \
284 { \
285 unsigned type value; \
286 asm volatile("in" #bwl " %w1, %" #bw "0" \
287 : "=a"(value) : "Nd"(port)); \
288 return value; \
289 } \
290 \
291 static inline void out##bwl##_p(unsigned type value, int port) \
292 { \
293 out##bwl(value, port); \
294 slow_down_io(); \
295 } \
296 \
297 static inline unsigned type in##bwl##_p(int port) \
298 { \
299 unsigned type value = in##bwl(port); \
300 slow_down_io(); \
301 return value; \
302 } \
303 \
304 static inline void outs##bwl(int port, const void *addr, unsigned long count) \
305 { \
306 asm volatile("rep; outs" #bwl \
307 : "+S"(addr), "+c"(count) : "d"(port)); \
308 } \
309 \
310 static inline void ins##bwl(int port, void *addr, unsigned long count) \
311 { \
312 asm volatile("rep; ins" #bwl \
313 : "+D"(addr), "+c"(count) : "d"(port)); \
314 }
315
316 BUILDIO(b, b, char)
317 BUILDIO(w, w, short)
318 BUILDIO(l, , int)
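/*
 * Illustrative usage (hypothetical port number): BUILDIO() above stamps out
 * outb/inb, outw/inw and outl/inl plus the pausing _p and string variants.
 */
#define EXAMPLE_PORT	0x3f8			/* hypothetical ISA-style port */

static unsigned char example_poke_port(void)
{
	outb(0x55, EXAMPLE_PORT);		/* write one byte to the port */
	return inb(EXAMPLE_PORT);		/* read one byte back */
}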
319
320 extern void *xlate_dev_mem_ptr(phys_addr_t phys);
321 extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
322
323 extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
324 enum page_cache_mode pcm);
325 extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
326 extern void __iomem *ioremap_wt(resource_size_t offset, unsigned long size);
327
328 extern bool is_early_ioremap_ptep(pte_t *ptep);
329
330 #ifdef CONFIG_XEN
331 #include <xen/xen.h>
332 struct bio_vec;
333
334 extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
335 const struct bio_vec *vec2);
336
337 #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
338 (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
339 (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
340 #endif /* CONFIG_XEN */
341
342 #define IO_SPACE_LIMIT 0xffff
343
344 #ifdef CONFIG_MTRR
345 extern int __must_check arch_phys_wc_index(int handle);
346 #define arch_phys_wc_index arch_phys_wc_index
347
348 extern int __must_check arch_phys_wc_add(unsigned long base,
349 unsigned long size);
350 extern void arch_phys_wc_del(int handle);
351 #define arch_phys_wc_add arch_phys_wc_add
352 #endif
353
354 #endif /* _ASM_X86_IO_H */
1
2 /*
3 * Linux driver for VMware's vmxnet3 ethernet NIC.
4 *
5 * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; version 2 of the License and no later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
15 * details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * The full GNU General Public License is included in this distribution in
22 * the file called "COPYING".
23 *
24 * Maintained by: pv-drivers@vmware.com
25 *
26 */
27
28 #include <linux/module.h>
29 #include <net/ip6_checksum.h>
30
31 #include "vmxnet3_int.h"
32
33 char vmxnet3_driver_name[] = "vmxnet3";
34 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
35
36 /*
37 * PCI Device ID Table
38 * Last entry must be all 0s
39 */
40 static const struct pci_device_id vmxnet3_pciid_table[] = {
41 {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
42 {0}
43 };
44
45 MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
46
47 static int enable_mq = 1;
48
49 static void
50 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
51
52 /*
53 * Enable/Disable the given intr
54 */
55 static void
56 vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
57 {
58 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
59 }
60
61
62 static void
63 vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
64 {
65 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
66 }
67
68
69 /*
70 * Enable/Disable all intrs used by the device
71 */
72 static void
73 vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
74 {
75 int i;
76
77 for (i = 0; i < adapter->intr.num_intrs; i++)
78 vmxnet3_enable_intr(adapter, i);
79 adapter->shared->devRead.intrConf.intrCtrl &=
80 cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
81 }
82
83
84 static void
85 vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
86 {
87 int i;
88
89 adapter->shared->devRead.intrConf.intrCtrl |=
90 cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
91 for (i = 0; i < adapter->intr.num_intrs; i++)
92 vmxnet3_disable_intr(adapter, i);
93 }
94
95
96 static void
97 vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
98 {
99 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
100 }
101
102
103 static bool
104 vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
105 {
106 return tq->stopped;
107 }
108
109
110 static void
111 vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
112 {
113 tq->stopped = false;
114 netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
115 }
116
117
118 static void
119 vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
120 {
121 tq->stopped = false;
122 netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
123 }
124
125
126 static void
127 vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
128 {
129 tq->stopped = true;
130 tq->num_stop++;
131 netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
132 }
133
134
135 /*
136 * Check the link state. This may start or stop the tx queue.
137 */
138 static void
139 vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
140 {
141 u32 ret;
142 int i;
143 unsigned long flags;
144
145 spin_lock_irqsave(&adapter->cmd_lock, flags);
146 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
147 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
148 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
149
150 adapter->link_speed = ret >> 16;
151 if (ret & 1) { /* Link is up. */
152 netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
153 adapter->link_speed);
154 netif_carrier_on(adapter->netdev);
155
156 if (affectTxQueue) {
157 for (i = 0; i < adapter->num_tx_queues; i++)
158 vmxnet3_tq_start(&adapter->tx_queue[i],
159 adapter);
160 }
161 } else {
162 netdev_info(adapter->netdev, "NIC Link is Down\n");
163 netif_carrier_off(adapter->netdev);
164
165 if (affectTxQueue) {
166 for (i = 0; i < adapter->num_tx_queues; i++)
167 vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
168 }
169 }
170 }
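/*
 * Worked decode of the GET_LINK result above (the value is hypothetical):
 * ret == 0x27100001 gives link_speed = ret >> 16 = 0x2710 = 10000 Mbps,
 * and (ret & 1) == 1 means the link is up, so the tx queues are started.
 */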
171
172 static void
173 vmxnet3_process_events(struct vmxnet3_adapter *adapter)
174 {
175 int i;
176 unsigned long flags;
177 u32 events = le32_to_cpu(adapter->shared->ecr);
178 if (!events)
179 return;
180
181 vmxnet3_ack_events(adapter, events);
182
183 /* Check if link state has changed */
184 if (events & VMXNET3_ECR_LINK)
185 vmxnet3_check_link(adapter, true);
186
187 /* Check if there is an error on xmit/recv queues */
188 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
189 spin_lock_irqsave(&adapter->cmd_lock, flags);
190 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
191 VMXNET3_CMD_GET_QUEUE_STATUS);
192 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
193
194 for (i = 0; i < adapter->num_tx_queues; i++)
195 if (adapter->tqd_start[i].status.stopped)
196 dev_err(&adapter->netdev->dev,
197 "%s: tq[%d] error 0x%x\n",
198 adapter->netdev->name, i, le32_to_cpu(
199 adapter->tqd_start[i].status.error));
200 for (i = 0; i < adapter->num_rx_queues; i++)
201 if (adapter->rqd_start[i].status.stopped)
202 dev_err(&adapter->netdev->dev,
203 "%s: rq[%d] error 0x%x\n",
204 adapter->netdev->name, i,
205 adapter->rqd_start[i].status.error);
206
207 schedule_work(&adapter->work);
208 }
209 }
210
211 #ifdef __BIG_ENDIAN_BITFIELD
212 /*
213 * The device expects the bitfields in shared structures to be written in
214 * little endian. When CPU is big endian, the following routines are used to
215 * correctly read and write into ABI.
216 * The general technique used here is : double word bitfields are defined in
217 * opposite order for big endian architecture. Then before reading them in
218 * driver the complete double word is translated using le32_to_cpu. Similarly
219 * After the driver writes into bitfields, cpu_to_le32 is used to translate the
220 * double words into required format.
221 * In order to avoid touching bits in shared structure more than once, temporary
222 * descriptors are used. These are passed as srcDesc to following functions.
223 */
224 static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
225 struct Vmxnet3_RxDesc *dstDesc)
226 {
227 u32 *src = (u32 *)srcDesc + 2;
228 u32 *dst = (u32 *)dstDesc + 2;
229 dstDesc->addr = le64_to_cpu(srcDesc->addr);
230 *dst = le32_to_cpu(*src);
231 dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
232 }
233
234 static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
235 struct Vmxnet3_TxDesc *dstDesc)
236 {
237 int i;
238 u32 *src = (u32 *)(srcDesc + 1);
239 u32 *dst = (u32 *)(dstDesc + 1);
240
241 /* Working backwards so that the gen bit is set at the end. */
242 for (i = 2; i > 0; i--) {
243 src--;
244 dst--;
245 *dst = cpu_to_le32(*src);
246 }
247 }
248
249
250 static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
251 struct Vmxnet3_RxCompDesc *dstDesc)
252 {
253 int i = 0;
254 u32 *src = (u32 *)srcDesc;
255 u32 *dst = (u32 *)dstDesc;
256 for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
257 *dst = le32_to_cpu(*src);
258 src++;
259 dst++;
260 }
261 }
262
263
264 /* Used to read bitfield values from double words. */
265 static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
266 {
267 u32 temp = le32_to_cpu(*bitfield);
268 u32 mask = ((1 << size) - 1) << pos;
269 temp &= mask;
270 temp >>= pos;
271 return temp;
272 }
273
274
275
276 #endif /* __BIG_ENDIAN_BITFIELD */
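/*
 * Worked example for get_bitfield32() (illustrative, not part of the
 * driver): extracting a 1-bit field at position 7 from a descriptor word.
 *
 *   le32_to_cpu(*bitfield) == 0x000000a5, pos == 7, size == 1
 *   mask = ((1 << 1) - 1) << 7  = 0x80
 *   temp = 0xa5 & 0x80          = 0x80
 *   temp >>= 7                  -> 1
 *
 * The extracted field value is therefore 1.
 */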
277
278 #ifdef __BIG_ENDIAN_BITFIELD
279
280 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
281 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
282 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
283 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
284 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
285 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
286 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
287 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
288 VMXNET3_TCD_GEN_SIZE)
289 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
290 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
291 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
292 (dstrcd) = (tmp); \
293 vmxnet3_RxCompToCPU((rcd), (tmp)); \
294 } while (0)
295 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
296 (dstrxd) = (tmp); \
297 vmxnet3_RxDescToCPU((rxd), (tmp)); \
298 } while (0)
299
300 #else
301
302 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
303 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
304 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
305 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
306 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
307 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
308
309 #endif /* __BIG_ENDIAN_BITFIELD */
310
311
312 static void
313 vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
314 struct pci_dev *pdev)
315 {
316 if (tbi->map_type == VMXNET3_MAP_SINGLE)
317 dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
318 PCI_DMA_TODEVICE);
319 else if (tbi->map_type == VMXNET3_MAP_PAGE)
320 dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
321 PCI_DMA_TODEVICE);
322 else
323 BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
324
325 tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
326 }
327
328
329 static int
330 vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
331 struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
332 {
333 struct sk_buff *skb;
334 int entries = 0;
335
336 /* no out of order completion */
337 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
338 BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
339
340 skb = tq->buf_info[eop_idx].skb;
341 BUG_ON(skb == NULL);
342 tq->buf_info[eop_idx].skb = NULL;
343
344 VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
345
346 while (tq->tx_ring.next2comp != eop_idx) {
347 vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
348 pdev);
349
350 /* update next2comp w/o tx_lock. Since we are marking more,
351 * instead of less, tx ring entries avail, the worst case is
352 * that the tx routine incorrectly re-queues a pkt due to
353 * insufficient tx ring entries.
354 */
355 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
356 entries++;
357 }
358
359 dev_kfree_skb_any(skb);
360 return entries;
361 }
362
363
364 static int
365 vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
366 struct vmxnet3_adapter *adapter)
367 {
368 int completed = 0;
369 union Vmxnet3_GenericDesc *gdesc;
370
371 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
372 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
373 completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
374 &gdesc->tcd), tq, adapter->pdev,
375 adapter);
376
377 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
378 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
379 }
380
381 if (completed) {
382 spin_lock(&tq->tx_lock);
383 if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
384 vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
385 VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
386 netif_carrier_ok(adapter->netdev))) {
387 vmxnet3_tq_wake(tq, adapter);
388 }
389 spin_unlock(&tq->tx_lock);
390 }
391 return completed;
392 }
393
394
395 static void
396 vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
397 struct vmxnet3_adapter *adapter)
398 {
399 int i;
400
401 while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
402 struct vmxnet3_tx_buf_info *tbi;
403
404 tbi = tq->buf_info + tq->tx_ring.next2comp;
405
406 vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
407 if (tbi->skb) {
408 dev_kfree_skb_any(tbi->skb);
409 tbi->skb = NULL;
410 }
411 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
412 }
413
414 /* sanity check, verify all buffers are indeed unmapped and freed */
415 for (i = 0; i < tq->tx_ring.size; i++) {
416 BUG_ON(tq->buf_info[i].skb != NULL ||
417 tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
418 }
419
420 tq->tx_ring.gen = VMXNET3_INIT_GEN;
421 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
422
423 tq->comp_ring.gen = VMXNET3_INIT_GEN;
424 tq->comp_ring.next2proc = 0;
425 }
426
427
428 static void
429 vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
430 struct vmxnet3_adapter *adapter)
431 {
432 if (tq->tx_ring.base) {
433 dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
434 sizeof(struct Vmxnet3_TxDesc),
435 tq->tx_ring.base, tq->tx_ring.basePA);
436 tq->tx_ring.base = NULL;
437 }
438 if (tq->data_ring.base) {
439 dma_free_coherent(&adapter->pdev->dev,
440 tq->data_ring.size * tq->txdata_desc_size,
441 tq->data_ring.base, tq->data_ring.basePA);
442 tq->data_ring.base = NULL;
443 }
444 if (tq->comp_ring.base) {
445 dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
446 sizeof(struct Vmxnet3_TxCompDesc),
447 tq->comp_ring.base, tq->comp_ring.basePA);
448 tq->comp_ring.base = NULL;
449 }
450 if (tq->buf_info) {
451 dma_free_coherent(&adapter->pdev->dev,
452 tq->tx_ring.size * sizeof(tq->buf_info[0]),
453 tq->buf_info, tq->buf_info_pa);
454 tq->buf_info = NULL;
455 }
456 }
457
458
459 /* Destroy all tx queues */
460 void
461 vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
462 {
463 int i;
464
465 for (i = 0; i < adapter->num_tx_queues; i++)
466 vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
467 }
468
469
470 static void
471 vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
472 struct vmxnet3_adapter *adapter)
473 {
474 int i;
475
476 /* reset the tx ring contents to 0 and reset the tx ring states */
477 memset(tq->tx_ring.base, 0, tq->tx_ring.size *
478 sizeof(struct Vmxnet3_TxDesc));
479 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
480 tq->tx_ring.gen = VMXNET3_INIT_GEN;
481
482 memset(tq->data_ring.base, 0,
483 tq->data_ring.size * tq->txdata_desc_size);
484
485 /* reset the tx comp ring contents to 0 and reset comp ring states */
486 memset(tq->comp_ring.base, 0, tq->comp_ring.size *
487 sizeof(struct Vmxnet3_TxCompDesc));
488 tq->comp_ring.next2proc = 0;
489 tq->comp_ring.gen = VMXNET3_INIT_GEN;
490
491 /* reset the bookkeeping data */
492 memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
493 for (i = 0; i < tq->tx_ring.size; i++)
494 tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
495
496 /* stats are not reset */
497 }
498
499
500 static int
501 vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
502 struct vmxnet3_adapter *adapter)
503 {
504 size_t sz;
505
506 BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
507 tq->comp_ring.base || tq->buf_info);
508
509 tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
510 tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
511 &tq->tx_ring.basePA, GFP_KERNEL);
512 if (!tq->tx_ring.base) {
513 netdev_err(adapter->netdev, "failed to allocate tx ring\n");
514 goto err;
515 }
516
517 tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
518 tq->data_ring.size * tq->txdata_desc_size,
519 &tq->data_ring.basePA, GFP_KERNEL);
520 if (!tq->data_ring.base) {
521 netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
522 goto err;
523 }
524
525 tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
526 tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
527 &tq->comp_ring.basePA, GFP_KERNEL);
528 if (!tq->comp_ring.base) {
529 netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
530 goto err;
531 }
532
533 sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
534 tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz,
535 &tq->buf_info_pa, GFP_KERNEL);
536 if (!tq->buf_info)
537 goto err;
538
539 return 0;
540
541 err:
542 vmxnet3_tq_destroy(tq, adapter);
543 return -ENOMEM;
544 }
545
546 static void
547 vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
548 {
549 int i;
550
551 for (i = 0; i < adapter->num_tx_queues; i++)
552 vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
553 }
554
555 /*
556 * Starting from ring->next2fill, allocate rx buffers for the given ring
557 * of the rx queue and update the rx descriptors. Stop after @num_to_alloc buffers
558 * are allocated or allocation fails.
559 */
560
561 static int
562 vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
563 int num_to_alloc, struct vmxnet3_adapter *adapter)
564 {
565 int num_allocated = 0;
566 struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
567 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
568 u32 val;
569
570 while (num_allocated <= num_to_alloc) {
571 struct vmxnet3_rx_buf_info *rbi;
572 union Vmxnet3_GenericDesc *gd;
573
574 rbi = rbi_base + ring->next2fill;
575 gd = ring->base + ring->next2fill;
576
577 if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
578 if (rbi->skb == NULL) {
579 rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
580 rbi->len,
581 GFP_KERNEL);
582 if (unlikely(rbi->skb == NULL)) {
583 rq->stats.rx_buf_alloc_failure++;
584 break;
585 }
586
587 rbi->dma_addr = dma_map_single(
588 &adapter->pdev->dev,
589 rbi->skb->data, rbi->len,
590 PCI_DMA_FROMDEVICE);
591 if (dma_mapping_error(&adapter->pdev->dev,
592 rbi->dma_addr)) {
593 dev_kfree_skb_any(rbi->skb);
594 rq->stats.rx_buf_alloc_failure++;
595 break;
596 }
597 } else {
598 /* rx buffer skipped by the device */
599 }
600 val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
601 } else {
602 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
603 rbi->len != PAGE_SIZE);
604
605 if (rbi->page == NULL) {
606 rbi->page = alloc_page(GFP_ATOMIC);
607 if (unlikely(rbi->page == NULL)) {
608 rq->stats.rx_buf_alloc_failure++;
609 break;
610 }
611 rbi->dma_addr = dma_map_page(
612 &adapter->pdev->dev,
613 rbi->page, 0, PAGE_SIZE,
614 PCI_DMA_FROMDEVICE);
615 if (dma_mapping_error(&adapter->pdev->dev,
616 rbi->dma_addr)) {
617 put_page(rbi->page);
618 rq->stats.rx_buf_alloc_failure++;
619 break;
620 }
621 } else {
622 /* rx buffers skipped by the device */
623 }
624 val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
625 }
626
627 gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
628 gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
629 | val | rbi->len);
630
631 /* Fill the last buffer but don't mark it ready, or else the
632 * device will think that the queue is full */
633 if (num_allocated == num_to_alloc)
634 break;
635
636 gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
637 num_allocated++;
638 vmxnet3_cmd_ring_adv_next2fill(ring);
639 }
640
641 netdev_dbg(adapter->netdev,
642 "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
643 num_allocated, ring->next2fill, ring->next2comp);
644
645 /* so that the device can distinguish a full ring from an empty ring */
646 BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
647
648 return num_allocated;
649 }
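/* --- Illustrative aside (not part of the driver source or this trace) ---
 * A minimal sketch of the generation-bit ownership scheme used by the rings
 * above: the producer stamps each descriptor with the ring's current gen
 * value and flips gen whenever next2fill wraps around, so "descriptor gen ==
 * expected gen" means the descriptor has been handed over to the consumer.
 * Names below are hypothetical.
 */
struct demo_ring {
	unsigned int next2fill;	/* next slot the producer will write */
	unsigned int size;	/* number of descriptors in the ring */
	unsigned int gen;	/* current generation bit, 0 or 1    */
};

static void demo_ring_adv_next2fill(struct demo_ring *ring)
{
	if (++ring->next2fill == ring->size) {
		ring->next2fill = 0;
		ring->gen ^= 1;	/* flip the ownership encoding on wrap-around */
	}
}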
650
651
652 static void
653 vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
654 struct vmxnet3_rx_buf_info *rbi)
655 {
656 struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
657 skb_shinfo(skb)->nr_frags;
658
659 BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
660
661 __skb_frag_set_page(frag, rbi->page);
662 frag->page_offset = 0;
663 skb_frag_size_set(frag, rcd->len);
664 skb->data_len += rcd->len;
665 skb->truesize += PAGE_SIZE;
666 skb_shinfo(skb)->nr_frags++;
667 }
668
669
670 static int
671 vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
672 struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
673 struct vmxnet3_adapter *adapter)
674 {
675 u32 dw2, len;
676 unsigned long buf_offset;
677 int i;
678 union Vmxnet3_GenericDesc *gdesc;
679 struct vmxnet3_tx_buf_info *tbi = NULL;
680
681 BUG_ON(ctx->copy_size > skb_headlen(skb));
682
683 /* use the previous gen bit for the SOP desc */
684 dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
685
686 ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
687 gdesc = ctx->sop_txd; /* both loops below can be skipped */
688
689 /* no need to map the buffer if headers are copied */
690 if (ctx->copy_size) {
691 ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
692 tq->tx_ring.next2fill *
693 tq->txdata_desc_size);
694 ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
695 ctx->sop_txd->dword[3] = 0;
696
697 tbi = tq->buf_info + tq->tx_ring.next2fill;
698 tbi->map_type = VMXNET3_MAP_NONE;
699
700 netdev_dbg(adapter->netdev,
701 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
702 tq->tx_ring.next2fill,
703 le64_to_cpu(ctx->sop_txd->txd.addr),
704 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
705 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
706
707 /* use the right gen for non-SOP desc */
708 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
709 }
710
711 /* linear part can use multiple tx desc if it's big */
712 len = skb_headlen(skb) - ctx->copy_size;
713 buf_offset = ctx->copy_size;
714 while (len) {
715 u32 buf_size;
716
717 if (len < VMXNET3_MAX_TX_BUF_SIZE) {
718 buf_size = len;
719 dw2 |= len;
720 } else {
721 buf_size = VMXNET3_MAX_TX_BUF_SIZE;
722 /* spec says that for TxDesc.len, 0 == 2^14 */
723 }
724
725 tbi = tq->buf_info + tq->tx_ring.next2fill;
726 tbi->map_type = VMXNET3_MAP_SINGLE;
727 tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
728 skb->data + buf_offset, buf_size,
729 PCI_DMA_TODEVICE);
730 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
731 return -EFAULT;
732
733 tbi->len = buf_size;
734
735 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
736 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
737
738 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
739 gdesc->dword[2] = cpu_to_le32(dw2);
740 gdesc->dword[3] = 0;
741
742 netdev_dbg(adapter->netdev,
743 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
744 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
745 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
746 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
747 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
748
749 len -= buf_size;
750 buf_offset += buf_size;
751 }
752
753 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
754 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
755 u32 buf_size;
756
757 buf_offset = 0;
758 len = skb_frag_size(frag);
759 while (len) {
760 tbi = tq->buf_info + tq->tx_ring.next2fill;
761 if (len < VMXNET3_MAX_TX_BUF_SIZE) {
762 buf_size = len;
763 dw2 |= len;
764 } else {
765 buf_size = VMXNET3_MAX_TX_BUF_SIZE;
766 /* spec says that for TxDesc.len, 0 == 2^14 */
767 }
768 tbi->map_type = VMXNET3_MAP_PAGE;
769 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
770 buf_offset, buf_size,
771 DMA_TO_DEVICE);
772 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
773 return -EFAULT;
774
775 tbi->len = buf_size;
776
777 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
778 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
779
780 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
781 gdesc->dword[2] = cpu_to_le32(dw2);
782 gdesc->dword[3] = 0;
783
784 netdev_dbg(adapter->netdev,
785 "txd[%u]: 0x%llx %u %u\n",
786 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
787 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
788 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
789 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
790
791 len -= buf_size;
792 buf_offset += buf_size;
793 }
794 }
795
796 ctx->eop_txd = gdesc;
797
798 /* set the last buf_info for the pkt */
799 tbi->skb = skb;
800 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
801
802 return 0;
803 }
804
805
806 /* Init all tx queues */
807 static void
808 vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
809 {
810 int i;
811
812 for (i = 0; i < adapter->num_tx_queues; i++)
813 vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
814 }
815
816
817 /*
818 * parse relevant protocol headers:
819 * For a tso pkt, relevant headers are L2/3/4 including options
820 * For a pkt requesting csum offloading, they are L2/3 and may include L4
821 * if it's a TCP/UDP pkt
822 *
823 * Returns:
824 * -1: an error occurred during parsing
825 * 0: protocol headers parsed, but too big to be copied
826 * 1: protocol headers parsed and copied
827 *
828 * Other effects:
829 * 1. related *ctx fields are updated.
830 * 2. ctx->copy_size is # of bytes copied
831 * 3. the portion to be copied is guaranteed to be in the linear part
832 *
833 */
834 static int
835 vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
836 struct vmxnet3_tx_ctx *ctx,
837 struct vmxnet3_adapter *adapter)
838 {
839 u8 protocol = 0;
840
841 if (ctx->mss) { /* TSO */
842 ctx->eth_ip_hdr_size = skb_transport_offset(skb);
843 ctx->l4_hdr_size = tcp_hdrlen(skb);
844 ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
845 } else {
846 if (skb->ip_summed == CHECKSUM_PARTIAL) {
847 ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
848
849 if (ctx->ipv4) {
850 const struct iphdr *iph = ip_hdr(skb);
851
852 protocol = iph->protocol;
853 } else if (ctx->ipv6) {
854 const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
855
856 protocol = ipv6h->nexthdr;
857 }
858
859 switch (protocol) {
860 case IPPROTO_TCP:
861 ctx->l4_hdr_size = tcp_hdrlen(skb);
862 break;
863 case IPPROTO_UDP:
864 ctx->l4_hdr_size = sizeof(struct udphdr);
865 break;
866 default:
867 ctx->l4_hdr_size = 0;
868 break;
869 }
870
871 ctx->copy_size = min(ctx->eth_ip_hdr_size +
872 ctx->l4_hdr_size, skb->len);
873 } else {
874 ctx->eth_ip_hdr_size = 0;
875 ctx->l4_hdr_size = 0;
876 /* copy as much as allowed */
877 ctx->copy_size = min_t(unsigned int,
878 tq->txdata_desc_size,
879 skb_headlen(skb));
880 }
881
882 if (skb->len <= VMXNET3_HDR_COPY_SIZE)
883 ctx->copy_size = skb->len;
884
885 /* make sure headers are accessible directly */
886 if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
887 goto err;
888 }
889
890 if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
891 tq->stats.oversized_hdr++;
892 ctx->copy_size = 0;
893 return 0;
894 }
895
896 return 1;
897 err:
898 return -1;
899 }
900
901 /*
902 * copy relevant protocol headers to the transmit ring:
903 * For a tso pkt, relevant headers are L2/3/4 including options
904 * For a pkt requesting csum offloading, they are L2/3 and may include L4
905 * if it's a TCP/UDP pkt
906 *
907 *
908 * Note that this requires vmxnet3_parse_hdr to be called first to set the
909 * appropriate fields in ctx
910 */
911 static void
912 vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
913 struct vmxnet3_tx_ctx *ctx,
914 struct vmxnet3_adapter *adapter)
915 {
916 struct Vmxnet3_TxDataDesc *tdd;
917
918 tdd = tq->data_ring.base + tq->tx_ring.next2fill;
919
920 memcpy(tdd->data, skb->data, ctx->copy_size);
921 netdev_dbg(adapter->netdev,
922 "copy %u bytes to dataRing[%u]\n",
923 ctx->copy_size, tq->tx_ring.next2fill);
924 }
925
926
927 static void
928 vmxnet3_prepare_tso(struct sk_buff *skb,
929 struct vmxnet3_tx_ctx *ctx)
930 {
931 struct tcphdr *tcph = tcp_hdr(skb);
932
933 if (ctx->ipv4) {
934 struct iphdr *iph = ip_hdr(skb);
935
936 iph->check = 0;
937 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
938 IPPROTO_TCP, 0);
939 } else if (ctx->ipv6) {
940 struct ipv6hdr *iph = ipv6_hdr(skb);
941
942 tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
943 IPPROTO_TCP, 0);
944 }
945 }
946
947 static int txd_estimate(const struct sk_buff *skb)
948 {
949 int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
950 int i;
951
952 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
953 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
954
955 count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
956 }
957 return count;
958 }
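/* --- Illustrative aside (not part of the driver source or this trace) ---
 * Rough arithmetic behind txd_estimate(), assuming the per-descriptor limit
 * is 2^14 = 16384 bytes, as implied by the "0 == 2^14" comments above.
 * Hypothetical helper, for illustration only.
 */
static unsigned int demo_txd_needed(unsigned int bytes)
{
	return (bytes + 16384 - 1) / 16384;	/* ceil(bytes / 16384) */
}
/* e.g. demo_txd_needed(40000) == 3, so a packet with a 40000-byte linear
 * area and no page fragments would be estimated at 3 + 1 == 4 descriptors
 * (the extra one covers the headers copied into the data ring). */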
959
960 /*
961 * Transmits a pkt through a given tq
962 * Returns:
963 * NETDEV_TX_OK: descriptors are set up successfully
964 * NETDEV_TX_OK: error occurred, the pkt is dropped
965 * NETDEV_TX_BUSY: tx ring is full, queue is stopped
966 *
967 * Side-effects:
968 * 1. tx ring may be changed
969 * 2. tq stats may be updated accordingly
970 * 3. shared->txNumDeferred may be updated
971 */
972
973 static int
974 vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
975 struct vmxnet3_adapter *adapter, struct net_device *netdev)
976 {
977 int ret;
978 u32 count;
979 unsigned long flags;
980 struct vmxnet3_tx_ctx ctx;
981 union Vmxnet3_GenericDesc *gdesc;
982 #ifdef __BIG_ENDIAN_BITFIELD
983 /* Use temporary descriptor to avoid touching bits multiple times */
984 union Vmxnet3_GenericDesc tempTxDesc;
985 #endif
986
987 count = txd_estimate(skb);
988
989 ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
990 ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));
991
992 ctx.mss = skb_shinfo(skb)->gso_size;
993 if (ctx.mss) {
994 if (skb_header_cloned(skb)) {
995 if (unlikely(pskb_expand_head(skb, 0, 0,
996 GFP_ATOMIC) != 0)) {
997 tq->stats.drop_tso++;
998 goto drop_pkt;
999 }
1000 tq->stats.copy_skb_header++;
1001 }
1002 vmxnet3_prepare_tso(skb, &ctx);
1003 } else {
1004 if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
1005
1006 /* non-tso pkts must not use more than
1007 * VMXNET3_MAX_TXD_PER_PKT entries
1008 */
1009 if (skb_linearize(skb) != 0) {
1010 tq->stats.drop_too_many_frags++;
1011 goto drop_pkt;
1012 }
1013 tq->stats.linearized++;
1014
1015 /* recalculate the # of descriptors to use */
1016 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1017 }
1018 }
1019
1020 ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
1021 if (ret >= 0) {
1022 BUG_ON(ret <= 0 && ctx.copy_size != 0);
1023 /* hdrs parsed, check against other limits */
1024 if (ctx.mss) {
1025 if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
1026 VMXNET3_MAX_TX_BUF_SIZE)) {
1027 tq->stats.drop_oversized_hdr++;
1028 goto drop_pkt;
1029 }
1030 } else {
1031 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1032 if (unlikely(ctx.eth_ip_hdr_size +
1033 skb->csum_offset >
1034 VMXNET3_MAX_CSUM_OFFSET)) {
1035 tq->stats.drop_oversized_hdr++;
1036 goto drop_pkt;
1037 }
1038 }
1039 }
1040 } else {
1041 tq->stats.drop_hdr_inspect_err++;
1042 goto drop_pkt;
1043 }
1044
1045 spin_lock_irqsave(&tq->tx_lock, flags);
1046
1047 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
1048 tq->stats.tx_ring_full++;
1049 netdev_dbg(adapter->netdev,
1050 "tx queue stopped on %s, next2comp %u"
1051 " next2fill %u\n", adapter->netdev->name,
1052 tq->tx_ring.next2comp, tq->tx_ring.next2fill);
1053
1054 vmxnet3_tq_stop(tq, adapter);
1055 spin_unlock_irqrestore(&tq->tx_lock, flags);
1056 return NETDEV_TX_BUSY;
1057 }
1058
1059
1060 vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
1061
1062 /* fill tx descs related to addr & len */
1063 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
1064 goto unlock_drop_pkt;
1065
1066 /* set up the EOP desc */
1067 ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
1068
1069 /* set up the SOP desc */
1070 #ifdef __BIG_ENDIAN_BITFIELD
1071 gdesc = &tempTxDesc;
1072 gdesc->dword[2] = ctx.sop_txd->dword[2];
1073 gdesc->dword[3] = ctx.sop_txd->dword[3];
1074 #else
1075 gdesc = ctx.sop_txd;
1076 #endif
1077 if (ctx.mss) {
1078 gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
1079 gdesc->txd.om = VMXNET3_OM_TSO;
1080 gdesc->txd.msscof = ctx.mss;
1081 le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
1082 gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
1083 } else {
1084 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1085 gdesc->txd.hlen = ctx.eth_ip_hdr_size;
1086 gdesc->txd.om = VMXNET3_OM_CSUM;
1087 gdesc->txd.msscof = ctx.eth_ip_hdr_size +
1088 skb->csum_offset;
1089 } else {
1090 gdesc->txd.om = 0;
1091 gdesc->txd.msscof = 0;
1092 }
1093 le32_add_cpu(&tq->shared->txNumDeferred, 1);
1094 }
1095
1096 if (skb_vlan_tag_present(skb)) {
1097 gdesc->txd.ti = 1;
1098 gdesc->txd.tci = skb_vlan_tag_get(skb);
1099 }
1100
1101 /* finally, flip the GEN bit of the SOP desc. */
1102 gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
1103 VMXNET3_TXD_GEN);
1104 #ifdef __BIG_ENDIAN_BITFIELD
1105 /* Finished updating the bitfields of the Tx Desc, so write them back to
1106 * their original place.
1107 */
1108 vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
1109 (struct Vmxnet3_TxDesc *)ctx.sop_txd);
1110 gdesc = ctx.sop_txd;
1111 #endif
1112 netdev_dbg(adapter->netdev,
1113 "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
1114 (u32)(ctx.sop_txd -
1115 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
1116 le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
1117
1118 spin_unlock_irqrestore(&tq->tx_lock, flags);
1119
1120 if (le32_to_cpu(tq->shared->txNumDeferred) >=
1121 le32_to_cpu(tq->shared->txThreshold)) {
1122 tq->shared->txNumDeferred = 0;
1123 VMXNET3_WRITE_BAR0_REG(adapter,
1124 VMXNET3_REG_TXPROD + tq->qid * 8,
1125 tq->tx_ring.next2fill);
1126 }
1127
1128 return NETDEV_TX_OK;
1129
1130 unlock_drop_pkt:
1131 spin_unlock_irqrestore(&tq->tx_lock, flags);
1132 drop_pkt:
1133 tq->stats.drop_total++;
1134 dev_kfree_skb_any(skb);
1135 return NETDEV_TX_OK;
1136 }
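/* --- Illustrative aside (not part of the driver source or this trace) ---
 * The txNumDeferred update for TSO above uses the usual integer ceiling
 * division: (len - hlen + mss - 1) / mss is the number of MSS-sized segments
 * the device will emit for the packet. Hypothetical helper:
 */
static unsigned int demo_tso_segments(unsigned int len, unsigned int hlen,
				      unsigned int mss)
{
	return (len - hlen + mss - 1) / mss;
}
/* e.g. demo_tso_segments(66000, 66, 1448) == 46: 65934 payload bytes split
 * into 1448-byte segments round up to 46 segments. */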
1137
1138
1139 static netdev_tx_t
1140 vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1141 {
1142 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1143
1144 BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
1145 return vmxnet3_tq_xmit(skb,
1146 &adapter->tx_queue[skb->queue_mapping],
1147 adapter, netdev);
1148 }
1149
1150
1151 static void
1152 vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1153 struct sk_buff *skb,
1154 union Vmxnet3_GenericDesc *gdesc)
1155 {
1156 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
1157 if (gdesc->rcd.v4 &&
1158 (le32_to_cpu(gdesc->dword[3]) &
1159 VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
1160 skb->ip_summed = CHECKSUM_UNNECESSARY;
1161 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
1162 BUG_ON(gdesc->rcd.frg);
1163 } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
1164 (1 << VMXNET3_RCD_TUC_SHIFT))) {
1165 skb->ip_summed = CHECKSUM_UNNECESSARY;
1166 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
1167 BUG_ON(gdesc->rcd.frg);
1168 } else {
1169 if (gdesc->rcd.csum) {
1170 skb->csum = htons(gdesc->rcd.csum);
1171 skb->ip_summed = CHECKSUM_PARTIAL;
1172 } else {
1173 skb_checksum_none_assert(skb);
1174 }
1175 }
1176 } else {
1177 skb_checksum_none_assert(skb);
1178 }
1179 }
1180
1181
1182 static void
1183 vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
1184 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
1185 {
1186 rq->stats.drop_err++;
1187 if (!rcd->fcs)
1188 rq->stats.drop_fcs++;
1189
1190 rq->stats.drop_total++;
1191
1192 /*
1193 * We do not unmap and chain the rx buffer to the skb.
1194 * We basically pretend this buffer is not used and will be recycled
1195 * by vmxnet3_rq_alloc_rx_buf()
1196 */
1197
1198 /*
1199 * ctx->skb may be NULL if this is the first and only
1200 * desc for the pkt
1201 */
1202 if (ctx->skb)
1203 dev_kfree_skb_irq(ctx->skb);
1204
1205 ctx->skb = NULL;
1206 }
1207
1208
1209 static u32
1210 vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
1211 union Vmxnet3_GenericDesc *gdesc)
1212 {
1213 u32 hlen, maplen;
1214 union {
1215 void *ptr;
1216 struct ethhdr *eth;
1217 struct iphdr *ipv4;
1218 struct ipv6hdr *ipv6;
1219 struct tcphdr *tcp;
1220 } hdr;
1221 BUG_ON(gdesc->rcd.tcp == 0);
1222
1223 maplen = skb_headlen(skb);
1224 if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
1225 return 0;
1226
1227 hdr.eth = eth_hdr(skb);
1228 if (gdesc->rcd.v4) {
1229 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP));
1230 hdr.ptr += sizeof(struct ethhdr);
1231 BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
1232 hlen = hdr.ipv4->ihl << 2;
1233 hdr.ptr += hdr.ipv4->ihl << 2;
1234 } else if (gdesc->rcd.v6) {
1235 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6));
1236 hdr.ptr += sizeof(struct ethhdr);
1237 /* Use an estimated value, since we also need to handle
1238 * the TSO case.
1239 */
1240 if (hdr.ipv6->nexthdr != IPPROTO_TCP)
1241 return sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1242 hlen = sizeof(struct ipv6hdr);
1243 hdr.ptr += sizeof(struct ipv6hdr);
1244 } else {
1245 /* Non-IP pkt, don't estimate header length */
1246 return 0;
1247 }
1248
1249 if (hlen + sizeof(struct tcphdr) > maplen)
1250 return 0;
1251
1252 return (hlen + (hdr.tcp->doff << 2));
1253 }
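/* --- Illustrative aside (not part of the driver source or this trace) ---
 * Worked instance of the header-length arithmetic above: ihl and doff both
 * count 32-bit words, so the shifts by 2 convert them to bytes.
 */
static unsigned int demo_hdr_len(unsigned int ihl, unsigned int doff)
{
	return (ihl << 2) + (doff << 2);
}
/* e.g. demo_hdr_len(5, 8) == 20 + 32 == 52: a 20-byte IPv4 header followed
 * by a 32-byte TCP header (20 bytes base + 12 bytes of options). */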
1254
1255 static int
1256 vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1257 struct vmxnet3_adapter *adapter, int quota)
1258 {
1259 static const u32 rxprod_reg[2] = {
1260 VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
1261 };
1262 u32 num_pkts = 0;
1263 bool skip_page_frags = false;
1264 struct Vmxnet3_RxCompDesc *rcd;
1265 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
1266 u16 segCnt = 0, mss = 0;
1267 #ifdef __BIG_ENDIAN_BITFIELD
1268 struct Vmxnet3_RxDesc rxCmdDesc;
1269 struct Vmxnet3_RxCompDesc rxComp;
1270 #endif
1271 vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
1272 &rxComp);
1273 while (rcd->gen == rq->comp_ring.gen) {
1274 struct vmxnet3_rx_buf_info *rbi;
1275 struct sk_buff *skb, *new_skb = NULL;
1276 struct page *new_page = NULL;
1277 dma_addr_t new_dma_addr;
1278 int num_to_alloc;
1279 struct Vmxnet3_RxDesc *rxd;
1280 u32 idx, ring_idx;
1281 struct vmxnet3_cmd_ring *ring = NULL;
1282 if (num_pkts >= quota) {
1283 /* we may stop even before we see the EOP desc of
1284 * the current pkt
1285 */
1286 break;
1287 }
1288 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
1289 rcd->rqID != rq->dataRingQid);
1290 idx = rcd->rxdIdx;
1291 ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID);
1292 ring = rq->rx_ring + ring_idx;
1293 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
1294 &rxCmdDesc);
1295 rbi = rq->buf_info[ring_idx] + idx;
1296
1297 BUG_ON(rxd->addr != rbi->dma_addr ||
1298 rxd->len != rbi->len);
1299
1300 if (unlikely(rcd->eop && rcd->err)) {
1301 vmxnet3_rx_error(rq, rcd, ctx, adapter);
1302 goto rcd_done;
1303 }
1304
1305 if (rcd->sop) { /* first buf of the pkt */
1306 bool rxDataRingUsed;
1307 u16 len;
1308
1309 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
1310 (rcd->rqID != rq->qid &&
1311 rcd->rqID != rq->dataRingQid));
1312
1313 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
1314 BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
1315
1316 if (unlikely(rcd->len == 0)) {
1317 /* Pretend the rx buffer is skipped. */
1318 BUG_ON(!(rcd->sop && rcd->eop));
1319 netdev_dbg(adapter->netdev,
1320 "rxRing[%u][%u] 0 length\n",
1321 ring_idx, idx);
1322 goto rcd_done;
1323 }
1324
1325 skip_page_frags = false;
1326 ctx->skb = rbi->skb;
1327
1328 rxDataRingUsed =
1329 VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
1330 len = rxDataRingUsed ? rcd->len : rbi->len;
1331 new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
1332 len);
1333 if (new_skb == NULL) {
1334 /* Skb allocation failed, do not hand over this
1335 * skb to the stack. Reuse it. Drop the existing pkt.
1336 */
1337 rq->stats.rx_buf_alloc_failure++;
1338 ctx->skb = NULL;
1339 rq->stats.drop_total++;
1340 skip_page_frags = true;
1341 goto rcd_done;
1342 }
1343
1344 if (rxDataRingUsed) {
1345 size_t sz;
1346
1347 BUG_ON(rcd->len > rq->data_ring.desc_size);
1348
1349 ctx->skb = new_skb;
1350 sz = rcd->rxdIdx * rq->data_ring.desc_size;
1351 memcpy(new_skb->data,
1352 &rq->data_ring.base[sz], rcd->len);
1353 } else {
1354 ctx->skb = rbi->skb;
1355
1356 new_dma_addr =
1357 dma_map_single(&adapter->pdev->dev,
1358 new_skb->data, rbi->len,
1359 PCI_DMA_FROMDEVICE);
1360 if (dma_mapping_error(&adapter->pdev->dev,
1361 new_dma_addr)) {
1362 dev_kfree_skb(new_skb);
1363 /* Skb allocation failed, do not
1364 * hand over this skb to the stack. Reuse
1365 * it. Drop the existing pkt.
1366 */
1367 rq->stats.rx_buf_alloc_failure++;
1368 ctx->skb = NULL;
1369 rq->stats.drop_total++;
1370 skip_page_frags = true;
1371 goto rcd_done;
1372 }
1373
1374 dma_unmap_single(&adapter->pdev->dev,
1375 rbi->dma_addr,
1376 rbi->len,
1377 PCI_DMA_FROMDEVICE);
1378
1379 /* Immediate refill */
1380 rbi->skb = new_skb;
1381 rbi->dma_addr = new_dma_addr;
1382 rxd->addr = cpu_to_le64(rbi->dma_addr);
1383 rxd->len = rbi->len;
1384 }
1385
1386 #ifdef VMXNET3_RSS
1387 if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
1388 (adapter->netdev->features & NETIF_F_RXHASH))
1389 skb_set_hash(ctx->skb,
1390 le32_to_cpu(rcd->rssHash),
1391 PKT_HASH_TYPE_L3);
1392 #endif
1393 skb_put(ctx->skb, rcd->len);
1394
1395 if (VMXNET3_VERSION_GE_2(adapter) &&
1396 rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
1397 struct Vmxnet3_RxCompDescExt *rcdlro;
1398 rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
1399
1400 segCnt = rcdlro->segCnt;
1401 WARN_ON_ONCE(segCnt == 0);
1402 mss = rcdlro->mss;
1403 if (unlikely(segCnt <= 1))
1404 segCnt = 0;
1405 } else {
1406 segCnt = 0;
1407 }
1408 } else {
1409 BUG_ON(ctx->skb == NULL && !skip_page_frags);
1410
1411 /* non-SOP buffer must be type 1 in most cases */
1412 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
1413 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
1414
1415 /* If an SOP buffer was dropped, skip all
1416 * following non-SOP fragments. They will be reused.
1417 */
1418 if (skip_page_frags)
1419 goto rcd_done;
1420
1421 if (rcd->len) {
1422 new_page = alloc_page(GFP_ATOMIC);
1423 /* Replacement page frag could not be allocated.
1424 * Reuse this page. Drop the pkt and free the
1425 * skb which contained this page as a frag. Skip
1426 * processing all the following non-sop frags.
1427 */
1428 if (unlikely(!new_page)) {
1429 rq->stats.rx_buf_alloc_failure++;
1430 dev_kfree_skb(ctx->skb);
1431 ctx->skb = NULL;
1432 skip_page_frags = true;
1433 goto rcd_done;
1434 }
1435 new_dma_addr = dma_map_page(&adapter->pdev->dev,
1436 new_page,
1437 0, PAGE_SIZE,
1438 PCI_DMA_FROMDEVICE);
1439 if (dma_mapping_error(&adapter->pdev->dev,
1440 new_dma_addr)) {
1441 put_page(new_page);
1442 rq->stats.rx_buf_alloc_failure++;
1443 dev_kfree_skb(ctx->skb);
1444 ctx->skb = NULL;
1445 skip_page_frags = true;
1446 goto rcd_done;
1447 }
1448
1449 dma_unmap_page(&adapter->pdev->dev,
1450 rbi->dma_addr, rbi->len,
1451 PCI_DMA_FROMDEVICE);
1452
1453 vmxnet3_append_frag(ctx->skb, rcd, rbi);
1454
1455 /* Immediate refill */
1456 rbi->page = new_page;
1457 rbi->dma_addr = new_dma_addr;
1458 rxd->addr = cpu_to_le64(rbi->dma_addr);
1459 rxd->len = rbi->len;
1460 }
1461 }
1462
1463
1464 skb = ctx->skb;
1465 if (rcd->eop) {
1466 u32 mtu = adapter->netdev->mtu;
1467 skb->len += skb->data_len;
1468
1469 vmxnet3_rx_csum(adapter, skb,
1470 (union Vmxnet3_GenericDesc *)rcd);
1471 skb->protocol = eth_type_trans(skb, adapter->netdev);
1472 if (!rcd->tcp || !adapter->lro)
1473 goto not_lro;
1474
1475 if (segCnt != 0 && mss != 0) {
1476 skb_shinfo(skb)->gso_type = rcd->v4 ?
1477 SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
1478 skb_shinfo(skb)->gso_size = mss;
1479 skb_shinfo(skb)->gso_segs = segCnt;
1480 } else if (segCnt != 0 || skb->len > mtu) {
1481 u32 hlen;
1482
1483 hlen = vmxnet3_get_hdr_len(adapter, skb,
1484 (union Vmxnet3_GenericDesc *)rcd);
1485 if (hlen == 0)
1486 goto not_lro;
1487
1488 skb_shinfo(skb)->gso_type =
1489 rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
1490 if (segCnt != 0) {
1491 skb_shinfo(skb)->gso_segs = segCnt;
1492 skb_shinfo(skb)->gso_size =
1493 DIV_ROUND_UP(skb->len -
1494 hlen, segCnt);
1495 } else {
1496 skb_shinfo(skb)->gso_size = mtu - hlen;
1497 }
1498 }
1499 not_lro:
1500 if (unlikely(rcd->ts))
1501 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
1502
1503 if (adapter->netdev->features & NETIF_F_LRO)
1504 netif_receive_skb(skb);
1505 else
1506 napi_gro_receive(&rq->napi, skb);
1507
1508 ctx->skb = NULL;
1509 num_pkts++;
1510 }
1511
1512 rcd_done:
1513 /* device may have skipped some rx descs */
1514 ring->next2comp = idx;
1515 num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
1516 ring = rq->rx_ring + ring_idx;
1517 while (num_to_alloc) {
1518 vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
1519 &rxCmdDesc);
1520 BUG_ON(!rxd->addr);
1521
1522 /* Recv desc is ready to be used by the device */
1523 rxd->gen = ring->gen;
1524 vmxnet3_cmd_ring_adv_next2fill(ring);
1525 num_to_alloc--;
1526 }
1527
1528 /* if needed, update the register */
1529 if (unlikely(rq->shared->updateRxProd)) {
1530 VMXNET3_WRITE_BAR0_REG(adapter,
1531 rxprod_reg[ring_idx] + rq->qid * 8,
1532 ring->next2fill);
1533 }
1534
1535 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
1536 vmxnet3_getRxComp(rcd,
1537 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
1538 }
1539
1540 return num_pkts;
1541 }
1542
1543
1544 static void
1545 vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1546 struct vmxnet3_adapter *adapter)
1547 {
1548 u32 i, ring_idx;
1549 struct Vmxnet3_RxDesc *rxd;
1550
1551 for (ring_idx = 0; ring_idx < 2; ring_idx++) {
1552 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
1553 #ifdef __BIG_ENDIAN_BITFIELD
1554 struct Vmxnet3_RxDesc rxDesc;
1555 #endif
1556 vmxnet3_getRxDesc(rxd,
1557 &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
1558
1559 if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
1560 rq->buf_info[ring_idx][i].skb) {
1561 dma_unmap_single(&adapter->pdev->dev, rxd->addr,
1562 rxd->len, PCI_DMA_FROMDEVICE);
1563 dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
1564 rq->buf_info[ring_idx][i].skb = NULL;
1565 } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
1566 rq->buf_info[ring_idx][i].page) {
1567 dma_unmap_page(&adapter->pdev->dev, rxd->addr,
1568 rxd->len, PCI_DMA_FROMDEVICE);
1569 put_page(rq->buf_info[ring_idx][i].page);
1570 rq->buf_info[ring_idx][i].page = NULL;
1571 }
1572 }
1573
1574 rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
1575 rq->rx_ring[ring_idx].next2fill =
1576 rq->rx_ring[ring_idx].next2comp = 0;
1577 }
1578
1579 rq->comp_ring.gen = VMXNET3_INIT_GEN;
1580 rq->comp_ring.next2proc = 0;
1581 }
1582
1583
1584 static void
1585 vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
1586 {
1587 int i;
1588
1589 for (i = 0; i < adapter->num_rx_queues; i++)
1590 vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
1591 }
1592
1593
1594 static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1595 struct vmxnet3_adapter *adapter)
1596 {
1597 int i;
1598 int j;
1599
1600 /* all rx buffers must have already been freed */
1601 for (i = 0; i < 2; i++) {
1602 if (rq->buf_info[i]) {
1603 for (j = 0; j < rq->rx_ring[i].size; j++)
1604 BUG_ON(rq->buf_info[i][j].page != NULL);
1605 }
1606 }
1607
1608
1609 for (i = 0; i < 2; i++) {
1610 if (rq->rx_ring[i].base) {
1611 dma_free_coherent(&adapter->pdev->dev,
1612 rq->rx_ring[i].size
1613 * sizeof(struct Vmxnet3_RxDesc),
1614 rq->rx_ring[i].base,
1615 rq->rx_ring[i].basePA);
1616 rq->rx_ring[i].base = NULL;
1617 }
1618 rq->buf_info[i] = NULL;
1619 }
1620
1621 if (rq->data_ring.base) {
1622 dma_free_coherent(&adapter->pdev->dev,
1623 rq->rx_ring[0].size * rq->data_ring.desc_size,
1624 rq->data_ring.base, rq->data_ring.basePA);
1625 rq->data_ring.base = NULL;
1626 }
1627
1628 if (rq->comp_ring.base) {
1629 dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
1630 * sizeof(struct Vmxnet3_RxCompDesc),
1631 rq->comp_ring.base, rq->comp_ring.basePA);
1632 rq->comp_ring.base = NULL;
1633 }
1634
1635 if (rq->buf_info[0]) {
1636 size_t sz = sizeof(struct vmxnet3_rx_buf_info) *
1637 (rq->rx_ring[0].size + rq->rx_ring[1].size);
1638 dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
1639 rq->buf_info_pa);
1640 }
1641 }
1642
1643 void
1644 vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
1645 {
1646 int i;
1647
1648 for (i = 0; i < adapter->num_rx_queues; i++) {
1649 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
1650
1651 if (rq->data_ring.base) {
1652 dma_free_coherent(&adapter->pdev->dev,
1653 (rq->rx_ring[0].size *
1654 rq->data_ring.desc_size),
1655 rq->data_ring.base,
1656 rq->data_ring.basePA);
1657 rq->data_ring.base = NULL;
1658 rq->data_ring.desc_size = 0;
1659 }
1660 }
1661 }
1662
1663 static int
1664 vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
1665 struct vmxnet3_adapter *adapter)
1666 {
1667 int i;
1668
1669 /* initialize buf_info */
1670 for (i = 0; i < rq->rx_ring[0].size; i++) {
1671
1672 /* 1st buf for a pkt is skbuff */
1673 if (i % adapter->rx_buf_per_pkt == 0) {
1674 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
1675 rq->buf_info[0][i].len = adapter->skb_buf_size;
1676 } else { /* subsequent bufs for a pkt are frags */
1677 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
1678 rq->buf_info[0][i].len = PAGE_SIZE;
1679 }
1680 }
1681 for (i = 0; i < rq->rx_ring[1].size; i++) {
1682 rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
1683 rq->buf_info[1][i].len = PAGE_SIZE;
1684 }
1685
1686 /* reset internal state and allocate buffers for both rings */
1687 for (i = 0; i < 2; i++) {
1688 rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
1689
1690 memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
1691 sizeof(struct Vmxnet3_RxDesc));
1692 rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
1693 }
1694 if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
1695 adapter) == 0) {
1696 /* need at least 1 rx buffer for the 1st ring */
1697 return -ENOMEM;
1698 }
1699 vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
1700
1701 /* reset the comp ring */
1702 rq->comp_ring.next2proc = 0;
1703 memset(rq->comp_ring.base, 0, rq->comp_ring.size *
1704 sizeof(struct Vmxnet3_RxCompDesc));
1705 rq->comp_ring.gen = VMXNET3_INIT_GEN;
1706
1707 /* reset rxctx */
1708 rq->rx_ctx.skb = NULL;
1709
1710 /* stats are not reset */
1711 return 0;
1712 }
1713
1714
1715 static int
1716 vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
1717 {
1718 int i, err = 0;
1719
1720 for (i = 0; i < adapter->num_rx_queues; i++) {
1721 err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
1722 if (unlikely(err)) {
1723 dev_err(&adapter->netdev->dev, "%s: failed to "
1724 "initialize rx queue%i\n",
1725 adapter->netdev->name, i);
1726 break;
1727 }
1728 }
1729 return err;
1730
1731 }
1732
1733
1734 static int
1735 vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1736 {
1737 int i;
1738 size_t sz;
1739 struct vmxnet3_rx_buf_info *bi;
1740
1741 for (i = 0; i < 2; i++) {
1742
1743 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
1744 rq->rx_ring[i].base = dma_alloc_coherent(
1745 &adapter->pdev->dev, sz,
1746 &rq->rx_ring[i].basePA,
1747 GFP_KERNEL);
1748 if (!rq->rx_ring[i].base) {
1749 netdev_err(adapter->netdev,
1750 "failed to allocate rx ring %d\n", i);
1751 goto err;
1752 }
1753 }
1754
1755 if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
1756 sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
1757 rq->data_ring.base =
1758 dma_alloc_coherent(&adapter->pdev->dev, sz,
1759 &rq->data_ring.basePA,
1760 GFP_KERNEL);
1761 if (!rq->data_ring.base) {
1762 netdev_err(adapter->netdev,
1763 "rx data ring will be disabled\n");
1764 adapter->rxdataring_enabled = false;
1765 }
1766 } else {
1767 rq->data_ring.base = NULL;
1768 rq->data_ring.desc_size = 0;
1769 }
1770
1771 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
1772 rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
1773 &rq->comp_ring.basePA,
1774 GFP_KERNEL);
1775 if (!rq->comp_ring.base) {
1776 netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
1777 goto err;
1778 }
1779
1780 sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
1781 rq->rx_ring[1].size);
1782 bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
1783 GFP_KERNEL);
1784 if (!bi)
1785 goto err;
1786
1787 rq->buf_info[0] = bi;
1788 rq->buf_info[1] = bi + rq->rx_ring[0].size;
1789
1790 return 0;
1791
1792 err:
1793 vmxnet3_rq_destroy(rq, adapter);
1794 return -ENOMEM;
1795 }
1796
1797
1798 static int
1799 vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
1800 {
1801 int i, err = 0;
1802
1803 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
1804
1805 for (i = 0; i < adapter->num_rx_queues; i++) {
1806 err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
1807 if (unlikely(err)) {
1808 dev_err(&adapter->netdev->dev,
1809 "%s: failed to create rx queue%i\n",
1810 adapter->netdev->name, i);
1811 goto err_out;
1812 }
1813 }
1814
1815 if (!adapter->rxdataring_enabled)
1816 vmxnet3_rq_destroy_all_rxdataring(adapter);
1817
1818 return err;
1819 err_out:
1820 vmxnet3_rq_destroy_all(adapter);
1821 return err;
1822
1823 }
1824
1825 /* Multiple-queue-aware polling function for tx and rx */
1826
1827 static int
1828 vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
1829 {
1830 int rcd_done = 0, i;
1831 if (unlikely(adapter->shared->ecr))
1832 vmxnet3_process_events(adapter);
1833 for (i = 0; i < adapter->num_tx_queues; i++)
1834 vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
1835
1836 for (i = 0; i < adapter->num_rx_queues; i++)
1837 rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
1838 adapter, budget);
1839 return rcd_done;
1840 }
1841
1842
1843 static int
1844 vmxnet3_poll(struct napi_struct *napi, int budget)
1845 {
1846 struct vmxnet3_rx_queue *rx_queue = container_of(napi,
1847 struct vmxnet3_rx_queue, napi);
1848 int rxd_done;
1849
1850 rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
1851
1852 if (rxd_done < budget) {
1853 napi_complete(napi);
1854 vmxnet3_enable_all_intrs(rx_queue->adapter);
1855 }
1856 return rxd_done;
1857 }
1858
1859 /*
1860 * NAPI polling function for MSI-X mode with multiple Rx queues
1861 * Returns the amount of NAPI credit consumed (# of rx descriptors processed)
1862 */
1863
1864 static int
1865 vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
1866 {
1867 struct vmxnet3_rx_queue *rq = container_of(napi,
1868 struct vmxnet3_rx_queue, napi);
1869 struct vmxnet3_adapter *adapter = rq->adapter;
1870 int rxd_done;
1871
1872 /* When sharing interrupt with corresponding tx queue, process
1873 * tx completions in that queue as well
1874 */
1875 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
1876 struct vmxnet3_tx_queue *tq =
1877 &adapter->tx_queue[rq - adapter->rx_queue];
1878 vmxnet3_tq_tx_complete(tq, adapter);
1879 }
1880
1881 rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
1882
1883 if (rxd_done < budget) {
1884 napi_complete(napi);
1885 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
1886 }
1887 return rxd_done;
1888 }
1889
1890
1891 #ifdef CONFIG_PCI_MSI
1892
1893 /*
1894 * Handle completion interrupts on tx queues
1895 * Returns whether or not the intr is handled
1896 */
1897
1898 static irqreturn_t
1899 vmxnet3_msix_tx(int irq, void *data)
1900 {
1901 struct vmxnet3_tx_queue *tq = data;
1902 struct vmxnet3_adapter *adapter = tq->adapter;
1903
1904 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1905 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
1906
1907 /* Handle the case where only one irq is allocated for all tx queues */
1908 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
1909 int i;
1910 for (i = 0; i < adapter->num_tx_queues; i++) {
1911 struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
1912 vmxnet3_tq_tx_complete(txq, adapter);
1913 }
1914 } else {
1915 vmxnet3_tq_tx_complete(tq, adapter);
1916 }
1917 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
1918
1919 return IRQ_HANDLED;
1920 }
1921
1922
1923 /*
1924 * Handle completion interrupts on rx queues. Returns whether or not the
1925 * intr is handled
1926 */
1927
1928 static irqreturn_t
1929 vmxnet3_msix_rx(int irq, void *data)
1930 {
1931 struct vmxnet3_rx_queue *rq = data;
1932 struct vmxnet3_adapter *adapter = rq->adapter;
1933
1934 /* disable intr if needed */
1935 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1936 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
1937 napi_schedule(&rq->napi);
1938
1939 return IRQ_HANDLED;
1940 }
1941
1942 /*
1943 *----------------------------------------------------------------------------
1944 *
1945 * vmxnet3_msix_event --
1946 *
1947 * vmxnet3 msix event intr handler
1948 *
1949 * Result:
1950 * whether or not the intr is handled
1951 *
1952 *----------------------------------------------------------------------------
1953 */
1954
1955 static irqreturn_t
1956 vmxnet3_msix_event(int irq, void *data)
1957 {
1958 struct net_device *dev = data;
1959 struct vmxnet3_adapter *adapter = netdev_priv(dev);
1960
1961 /* disable intr if needed */
1962 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1963 vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
1964
1965 if (adapter->shared->ecr)
1966 vmxnet3_process_events(adapter);
1967
1968 vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
1969
1970 return IRQ_HANDLED;
1971 }
1972
1973 #endif /* CONFIG_PCI_MSI */
1974
1975
1976 /* Interrupt handler for vmxnet3 */
1977 static irqreturn_t
1978 vmxnet3_intr(int irq, void *dev_id)
1979 {
1980 struct net_device *dev = dev_id;
1981 struct vmxnet3_adapter *adapter = netdev_priv(dev);
1982
1983 if (adapter->intr.type == VMXNET3_IT_INTX) {
1984 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
1985 if (unlikely(icr == 0))
1986 /* not ours */
1987 return IRQ_NONE;
1988 }
1989
1990
1991 /* disable intr if needed */
1992 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1993 vmxnet3_disable_all_intrs(adapter);
1994
1995 napi_schedule(&adapter->rx_queue[0].napi);
1996
1997 return IRQ_HANDLED;
1998 }
1999
2000 #ifdef CONFIG_NET_POLL_CONTROLLER
2001
2002 /* netpoll callback. */
2003 static void
2004 vmxnet3_netpoll(struct net_device *netdev)
2005 {
2006 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2007
2008 switch (adapter->intr.type) {
2009 #ifdef CONFIG_PCI_MSI
2010 case VMXNET3_IT_MSIX: {
2011 int i;
2012 for (i = 0; i < adapter->num_rx_queues; i++)
2013 vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
2014 break;
2015 }
2016 #endif
2017 case VMXNET3_IT_MSI:
2018 default:
2019 vmxnet3_intr(0, adapter->netdev);
2020 break;
2021 }
2022
2023 }
2024 #endif /* CONFIG_NET_POLL_CONTROLLER */
2025
2026 static int
2027 vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
2028 {
2029 struct vmxnet3_intr *intr = &adapter->intr;
2030 int err = 0, i;
2031 int vector = 0;
2032
2033 #ifdef CONFIG_PCI_MSI
2034 if (adapter->intr.type == VMXNET3_IT_MSIX) {
2035 for (i = 0; i < adapter->num_tx_queues; i++) {
2036 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2037 sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
2038 adapter->netdev->name, vector);
2039 err = request_irq(
2040 intr->msix_entries[vector].vector,
2041 vmxnet3_msix_tx, 0,
2042 adapter->tx_queue[i].name,
2043 &adapter->tx_queue[i]);
2044 } else {
2045 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
2046 adapter->netdev->name, vector);
2047 }
2048 if (err) {
2049 dev_err(&adapter->netdev->dev,
2050 "Failed to request irq for MSIX, %s, "
2051 "error %d\n",
2052 adapter->tx_queue[i].name, err);
2053 return err;
2054 }
2055
2056 /* Handle the case where only 1 MSI-X vector was allocated for
2057 * all tx queues */
2058 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
2059 for (; i < adapter->num_tx_queues; i++)
2060 adapter->tx_queue[i].comp_ring.intr_idx
2061 = vector;
2062 vector++;
2063 break;
2064 } else {
2065 adapter->tx_queue[i].comp_ring.intr_idx
2066 = vector++;
2067 }
2068 }
2069 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
2070 vector = 0;
2071
2072 for (i = 0; i < adapter->num_rx_queues; i++) {
2073 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
2074 sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
2075 adapter->netdev->name, vector);
2076 else
2077 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
2078 adapter->netdev->name, vector);
2079 err = request_irq(intr->msix_entries[vector].vector,
2080 vmxnet3_msix_rx, 0,
2081 adapter->rx_queue[i].name,
2082 &(adapter->rx_queue[i]));
2083 if (err) {
2084 netdev_err(adapter->netdev,
2085 "Failed to request irq for MSIX, "
2086 "%s, error %d\n",
2087 adapter->rx_queue[i].name, err);
2088 return err;
2089 }
2090
2091 adapter->rx_queue[i].comp_ring.intr_idx = vector++;
2092 }
2093
2094 sprintf(intr->event_msi_vector_name, "%s-event-%d",
2095 adapter->netdev->name, vector);
2096 err = request_irq(intr->msix_entries[vector].vector,
2097 vmxnet3_msix_event, 0,
2098 intr->event_msi_vector_name, adapter->netdev);
2099 intr->event_intr_idx = vector;
2100
2101 } else if (intr->type == VMXNET3_IT_MSI) {
2102 adapter->num_rx_queues = 1;
2103 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
2104 adapter->netdev->name, adapter->netdev);
2105 } else {
2106 #endif
2107 adapter->num_rx_queues = 1;
2108 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
2109 IRQF_SHARED, adapter->netdev->name,
2110 adapter->netdev);
2111 #ifdef CONFIG_PCI_MSI
2112 }
2113 #endif
2114 intr->num_intrs = vector + 1;
2115 if (err) {
2116 netdev_err(adapter->netdev,
2117 "Failed to request irq (intr type:%d), error %d\n",
2118 intr->type, err);
2119 } else {
2120 /* Number of rx queues will not change after this */
2121 for (i = 0; i < adapter->num_rx_queues; i++) {
2122 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2123 rq->qid = i;
2124 rq->qid2 = i + adapter->num_rx_queues;
2125 rq->dataRingQid = i + 2 * adapter->num_rx_queues;
2126 }
2127
2128 /* init our intr settings */
2129 for (i = 0; i < intr->num_intrs; i++)
2130 intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
2131 if (adapter->intr.type != VMXNET3_IT_MSIX) {
2132 adapter->intr.event_intr_idx = 0;
2133 for (i = 0; i < adapter->num_tx_queues; i++)
2134 adapter->tx_queue[i].comp_ring.intr_idx = 0;
2135 adapter->rx_queue[0].comp_ring.intr_idx = 0;
2136 }
2137
2138 netdev_info(adapter->netdev,
2139 "intr type %u, mode %u, %u vectors allocated\n",
2140 intr->type, intr->mask_mode, intr->num_intrs);
2141 }
2142
2143 return err;
2144 }
2145
2146
2147 static void
2148 vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
2149 {
2150 struct vmxnet3_intr *intr = &adapter->intr;
2151 BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
2152
2153 switch (intr->type) {
2154 #ifdef CONFIG_PCI_MSI
2155 case VMXNET3_IT_MSIX:
2156 {
2157 int i, vector = 0;
2158
2159 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2160 for (i = 0; i < adapter->num_tx_queues; i++) {
2161 free_irq(intr->msix_entries[vector++].vector,
2162 &(adapter->tx_queue[i]));
2163 if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
2164 break;
2165 }
2166 }
2167
2168 for (i = 0; i < adapter->num_rx_queues; i++) {
2169 free_irq(intr->msix_entries[vector++].vector,
2170 &(adapter->rx_queue[i]));
2171 }
2172
2173 free_irq(intr->msix_entries[vector].vector,
2174 adapter->netdev);
2175 BUG_ON(vector >= intr->num_intrs);
2176 break;
2177 }
2178 #endif
2179 case VMXNET3_IT_MSI:
2180 free_irq(adapter->pdev->irq, adapter->netdev);
2181 break;
2182 case VMXNET3_IT_INTX:
2183 free_irq(adapter->pdev->irq, adapter->netdev);
2184 break;
2185 default:
2186 BUG();
2187 }
2188 }
2189
2190
2191 static void
2192 vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
2193 {
2194 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2195 u16 vid;
2196
2197 /* allow untagged pkts */
2198 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
2199
2200 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2201 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2202 }
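/* --- Illustrative aside (not part of the driver source or this trace) ---
 * A sketch of the kind of bitmap VMXNET3_SET_VFTABLE_ENTRY fills in: 4096
 * VLAN IDs packed one bit per ID into an array of u32 words. The helper
 * below is hypothetical; the real macro may differ in detail.
 */
static void demo_vftable_set(unsigned int *table, unsigned int vid)
{
	table[vid >> 5] |= 1u << (vid & 31);	/* word = vid / 32, bit = vid % 32 */
}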
2203
2204
2205 static int
2206 vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
2207 {
2208 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2209
2210 if (!(netdev->flags & IFF_PROMISC)) {
2211 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2212 unsigned long flags;
2213
2214 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2215 spin_lock_irqsave(&adapter->cmd_lock, flags);
2216 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2217 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2218 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2219 }
2220
2221 set_bit(vid, adapter->active_vlans);
2222
2223 return 0;
2224 }
2225
2226
2227 static int
2228 vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
2229 {
2230 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2231
2232 if (!(netdev->flags & IFF_PROMISC)) {
2233 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2234 unsigned long flags;
2235
2236 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
2237 spin_lock_irqsave(&adapter->cmd_lock, flags);
2238 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2239 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2240 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2241 }
2242
2243 clear_bit(vid, adapter->active_vlans);
2244
2245 return 0;
2246 }
2247
2248
2249 static u8 *
2250 vmxnet3_copy_mc(struct net_device *netdev)
2251 {
2252 u8 *buf = NULL;
2253 u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
2254
2255 /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
2256 if (sz <= 0xffff) {
2257 /* We may be called with BH disabled */
2258 buf = kmalloc(sz, GFP_ATOMIC);
2259 if (buf) {
2260 struct netdev_hw_addr *ha;
2261 int i = 0;
2262
2263 netdev_for_each_mc_addr(ha, netdev)
2264 memcpy(buf + i++ * ETH_ALEN, ha->addr,
2265 ETH_ALEN);
2266 }
2267 }
2268 return buf;
2269 }
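/* --- Illustrative aside (not part of the driver source or this trace) ---
 * Capacity implied by the u16 mfTableLen check above: with 6-byte Ethernet
 * addresses (ETH_ALEN), sz <= 0xffff allows at most 0xffff / 6 == 10922
 * multicast addresses; beyond that, vmxnet3_copy_mc() returns NULL and no
 * table is copied.
 */
enum { DEMO_MAX_MC_ADDRS = 0xffff / 6 };	/* == 10922 */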
2270
2271
2272 static void
2273 vmxnet3_set_mc(struct net_device *netdev)
2274 {
2275 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2276 unsigned long flags;
2277 struct Vmxnet3_RxFilterConf *rxConf =
2278 &adapter->shared->devRead.rxFilterConf;
2279 u8 *new_table = NULL;
2280 dma_addr_t new_table_pa = 0;
2281 u32 new_mode = VMXNET3_RXM_UCAST;
2282
2283 if (netdev->flags & IFF_PROMISC) {
2284 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2285 memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
2286
2287 new_mode |= VMXNET3_RXM_PROMISC;
2288 } else {
2289 vmxnet3_restore_vlan(adapter);
2290 }
2291
2292 if (netdev->flags & IFF_BROADCAST)
2293 new_mode |= VMXNET3_RXM_BCAST;
2294
2295 if (netdev->flags & IFF_ALLMULTI)
2296 new_mode |= VMXNET3_RXM_ALL_MULTI;
2297 else
2298 if (!netdev_mc_empty(netdev)) {
2299 new_table = vmxnet3_copy_mc(netdev);
2300 if (new_table) {
2301 size_t sz = netdev_mc_count(netdev) * ETH_ALEN;
2302
2303 rxConf->mfTableLen = cpu_to_le16(sz);
2304 new_table_pa = dma_map_single(
2305 &adapter->pdev->dev,
2306 new_table,
2307 sz,
2308 PCI_DMA_TODEVICE);
2309 }
2310
2311 if (!dma_mapping_error(&adapter->pdev->dev,
2312 new_table_pa)) {
2313 new_mode |= VMXNET3_RXM_MCAST;
2314 rxConf->mfTablePA = cpu_to_le64(new_table_pa);
2315 } else {
2316 netdev_info(netdev,
2317 "failed to copy mcast list, setting ALL_MULTI\n");
2318 new_mode |= VMXNET3_RXM_ALL_MULTI;
2319 }
2320 }
2321
2322 if (!(new_mode & VMXNET3_RXM_MCAST)) {
2323 rxConf->mfTableLen = 0;
2324 rxConf->mfTablePA = 0;
2325 }
2326
2327 spin_lock_irqsave(&adapter->cmd_lock, flags);
2328 if (new_mode != rxConf->rxMode) {
2329 rxConf->rxMode = cpu_to_le32(new_mode);
2330 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2331 VMXNET3_CMD_UPDATE_RX_MODE);
2332 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2333 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2334 }
2335
2336 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2337 VMXNET3_CMD_UPDATE_MAC_FILTERS);
2338 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2339
2340 if (new_table_pa)
2341 dma_unmap_single(&adapter->pdev->dev, new_table_pa,
2342 rxConf->mfTableLen, PCI_DMA_TODEVICE);
2343 kfree(new_table);
2344 }
2345
2346 void
2347 vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
2348 {
2349 int i;
2350
2351 for (i = 0; i < adapter->num_rx_queues; i++)
2352 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
2353 }
2354
2355
2356 /*
2357 * Set up driver_shared based on settings in adapter.
2358 */
2359
2360 static void
2361 vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2362 {
2363 struct Vmxnet3_DriverShared *shared = adapter->shared;
2364 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
2365 struct Vmxnet3_TxQueueConf *tqc;
2366 struct Vmxnet3_RxQueueConf *rqc;
2367 int i;
2368
2369 memset(shared, 0, sizeof(*shared));
2370
2371 /* driver settings */
2372 shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
2373 devRead->misc.driverInfo.version = cpu_to_le32(
2374 VMXNET3_DRIVER_VERSION_NUM);
2375 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
2376 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
2377 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
2378 *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
2379 *((u32 *)&devRead->misc.driverInfo.gos));
2380 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2381 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
2382
2383 devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
2384 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
2385
2386 /* set up feature flags */
2387 if (adapter->netdev->features & NETIF_F_RXCSUM)
2388 devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
2389
2390 if (adapter->netdev->features & NETIF_F_LRO) {
2391 devRead->misc.uptFeatures |= UPT1_F_LRO;
2392 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2393 }
2394 if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2395 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2396
2397 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2398 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2399 devRead->misc.queueDescLen = cpu_to_le32(
2400 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
2401 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
2402
2403 /* tx queue settings */
2404 devRead->misc.numTxQueues = adapter->num_tx_queues;
2405 for (i = 0; i < adapter->num_tx_queues; i++) {
2406 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2407 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
2408 tqc = &adapter->tqd_start[i].conf;
2409 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
2410 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
2411 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
2412 tqc->ddPA = cpu_to_le64(tq->buf_info_pa);
2413 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
2414 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
2415 tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
2416 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
2417 tqc->ddLen = cpu_to_le32(
2418 sizeof(struct vmxnet3_tx_buf_info) *
2419 tqc->txRingSize);
2420 tqc->intrIdx = tq->comp_ring.intr_idx;
2421 }
2422
2423 /* rx queue settings */
2424 devRead->misc.numRxQueues = adapter->num_rx_queues;
2425 for (i = 0; i < adapter->num_rx_queues; i++) {
2426 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2427 rqc = &adapter->rqd_start[i].conf;
2428 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
2429 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
2430 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
2431 rqc->ddPA = cpu_to_le64(rq->buf_info_pa);
2432 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
2433 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
2434 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
2435 rqc->ddLen = cpu_to_le32(
2436 sizeof(struct vmxnet3_rx_buf_info) *
2437 (rqc->rxRingSize[0] +
2438 rqc->rxRingSize[1]));
2439 rqc->intrIdx = rq->comp_ring.intr_idx;
2440 if (VMXNET3_VERSION_GE_3(adapter)) {
2441 rqc->rxDataRingBasePA =
2442 cpu_to_le64(rq->data_ring.basePA);
2443 rqc->rxDataRingDescSize =
2444 cpu_to_le16(rq->data_ring.desc_size);
2445 }
2446 }
2447
2448 #ifdef VMXNET3_RSS
2449 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
2450
2451 if (adapter->rss) {
2452 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
2453
2454 devRead->misc.uptFeatures |= UPT1_F_RSS;
2455 devRead->misc.numRxQueues = adapter->num_rx_queues;
2456 rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
2457 UPT1_RSS_HASH_TYPE_IPV4 |
2458 UPT1_RSS_HASH_TYPE_TCP_IPV6 |
2459 UPT1_RSS_HASH_TYPE_IPV6;
2460 rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
2461 rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
2462 rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
2463 netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));
2464
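/* Added note: the loop below uses the stock ethtool default mapping, so RSS
 * flows are spread evenly across the active rx queues. */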
2465 for (i = 0; i < rssConf->indTableSize; i++)
2466 rssConf->indTable[i] = ethtool_rxfh_indir_default(
2467 i, adapter->num_rx_queues);
2468
2469 devRead->rssConfDesc.confVer = 1;
2470 devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
2471 devRead->rssConfDesc.confPA =
2472 cpu_to_le64(adapter->rss_conf_pa);
2473 }
2474
2475 #endif /* VMXNET3_RSS */
2476
2477 /* intr settings */
2478 devRead->intrConf.autoMask = adapter->intr.mask_mode ==
2479 VMXNET3_IMM_AUTO;
2480 devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2481 for (i = 0; i < adapter->intr.num_intrs; i++)
2482 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
2483
2484 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
2485 devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2486
2487 /* rx filter settings */
2488 devRead->rxFilterConf.rxMode = 0;
2489 vmxnet3_restore_vlan(adapter);
2490 vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
2491
2492 /* the rest are already zeroed */
2493 }
2494
2495 static void
2496 vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
2497 {
2498 struct Vmxnet3_DriverShared *shared = adapter->shared;
2499 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2500 unsigned long flags;
2501
2502 if (!VMXNET3_VERSION_GE_3(adapter))
2503 return;
2504
2505 spin_lock_irqsave(&adapter->cmd_lock, flags);
2506 cmdInfo->varConf.confVer = 1;
2507 cmdInfo->varConf.confLen =
2508 cpu_to_le32(sizeof(*adapter->coal_conf));
2509 cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);
2510
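/* Added note: in the default coalescing mode the driver queries the scheme the
 * device is already using; otherwise it pushes the scheme stored in coal_conf. */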
2511 if (adapter->default_coal_mode) {
2512 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2513 VMXNET3_CMD_GET_COALESCE);
2514 } else {
2515 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2516 VMXNET3_CMD_SET_COALESCE);
2517 }
2518
2519 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2520 }
2521
2522 int
2523 vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2524 {
2525 int err, i;
2526 u32 ret;
2527 unsigned long flags;
2528
2529 netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
2530 " ring sizes %u %u %u\n", adapter->netdev->name,
2531 adapter->skb_buf_size, adapter->rx_buf_per_pkt,
2532 adapter->tx_queue[0].tx_ring.size,
2533 adapter->rx_queue[0].rx_ring[0].size,
2534 adapter->rx_queue[0].rx_ring[1].size);
2535
2536 vmxnet3_tq_init_all(adapter);
2537 err = vmxnet3_rq_init_all(adapter);
2538 if (err) {
2539 netdev_err(adapter->netdev,
2540 "Failed to init rx queue error %d\n", err);
2541 goto rq_err;
2542 }
2543
2544 err = vmxnet3_request_irqs(adapter);
2545 if (err) {
2546 netdev_err(adapter->netdev,
2547 "Failed to setup irq: error %d\n", err);
2548 goto irq_err;
2549 }
2550
2551 vmxnet3_setup_driver_shared(adapter);
2552
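/* Added note: hand the shared-area physical address to the device, then issue
 * ACTIVATE_DEV; a non-zero value read back from the command register means the
 * activation failed. */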
2553 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
2554 adapter->shared_pa));
2555 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
2556 adapter->shared_pa));
2557 spin_lock_irqsave(&adapter->cmd_lock, flags);
2558 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2559 VMXNET3_CMD_ACTIVATE_DEV);
2560 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2561 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2562
2563 if (ret != 0) {
2564 netdev_err(adapter->netdev,
2565 "Failed to activate dev: error %u\n", ret);
2566 err = -EINVAL;
2567 goto activate_err;
2568 }
2569
2570 vmxnet3_init_coalesce(adapter);
2571
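/* Added note: publish the initial fill level of both rx rings through the
 * RXPROD/RXPROD2 registers so the device can start placing packets. */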
2572 for (i = 0; i < adapter->num_rx_queues; i++) {
2573 VMXNET3_WRITE_BAR0_REG(adapter,
2574 VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
2575 adapter->rx_queue[i].rx_ring[0].next2fill);
2576 VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
2577 (i * VMXNET3_REG_ALIGN)),
2578 adapter->rx_queue[i].rx_ring[1].next2fill);
2579 }
2580
2581 /* Apply the rx filter settings last. */
2582 vmxnet3_set_mc(adapter->netdev);
2583
2584 /*
2585 * Check link state when first activating device. It will start the
2586 * tx queue if the link is up.
2587 */
2588 vmxnet3_check_link(adapter, true);
2589 for (i = 0; i < adapter->num_rx_queues; i++)
2590 napi_enable(&adapter->rx_queue[i].napi);
2591 vmxnet3_enable_all_intrs(adapter);
2592 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2593 return 0;
2594
2595 activate_err:
2596 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
2597 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
2598 vmxnet3_free_irqs(adapter);
2599 irq_err:
2600 rq_err:
2601 /* free up buffers we allocated */
2602 vmxnet3_rq_cleanup_all(adapter);
2603 return err;
2604 }
2605
2606
2607 void
2608 vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
2609 {
2610 unsigned long flags;
2611 spin_lock_irqsave(&adapter->cmd_lock, flags);
2612 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
2613 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2614 }
2615
2616
2617 int
2618 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
2619 {
2620 int i;
2621 unsigned long flags;
2622 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
2623 return 0;
2624
2625
2626 spin_lock_irqsave(&adapter->cmd_lock, flags);
2627 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2628 VMXNET3_CMD_QUIESCE_DEV);
2629 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2630 vmxnet3_disable_all_intrs(adapter);
2631
2632 for (i = 0; i < adapter->num_rx_queues; i++)
2633 napi_disable(&adapter->rx_queue[i].napi);
2634 netif_tx_disable(adapter->netdev);
2635 adapter->link_speed = 0;
2636 netif_carrier_off(adapter->netdev);
2637
2638 vmxnet3_tq_cleanup_all(adapter);
2639 vmxnet3_rq_cleanup_all(adapter);
2640 vmxnet3_free_irqs(adapter);
2641 return 0;
2642 }
2643
2644
2645 static void
2646 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2647 {
2648 u32 tmp;
2649
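/* Added note: the first four bytes of the MAC address are written to MACL; the
 * remaining two bytes are packed into the low half of MACH. */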
2650 tmp = *(u32 *)mac;
2651 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
2652
2653 tmp = (mac[5] << 8) | mac[4];
2654 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
2655 }
2656
2657
2658 static int
2659 vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
2660 {
2661 struct sockaddr *addr = p;
2662 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2663
2664 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2665 vmxnet3_write_mac_addr(adapter, addr->sa_data);
2666
2667 return 0;
2668 }
2669
2670
2671 /* ==================== initialization and cleanup routines ============ */
2672
2673 static int
2674 vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
2675 {
2676 int err;
2677 unsigned long mmio_start, mmio_len;
2678 struct pci_dev *pdev = adapter->pdev;
2679
2680 err = pci_enable_device(pdev);
2681 if (err) {
2682 dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
2683 return err;
2684 }
2685
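/* Added note: prefer a 64-bit DMA mask and fall back to 32-bit if the platform
 * cannot provide it; *dma64 tells the caller which mask was used. */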
2686 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
2687 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
2688 dev_err(&pdev->dev,
2689 "pci_set_consistent_dma_mask failed\n");
2690 err = -EIO;
2691 goto err_set_mask;
2692 }
2693 *dma64 = true;
2694 } else {
2695 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
2696 dev_err(&pdev->dev,
2697 "pci_set_dma_mask failed\n");
2698 err = -EIO;
2699 goto err_set_mask;
2700 }
2701 *dma64 = false;
2702 }
2703
2704 err = pci_request_selected_regions(pdev, (1 << 2) - 1,
2705 vmxnet3_driver_name);
2706 if (err) {
2707 dev_err(&pdev->dev,
2708 "Failed to request region for adapter: error %d\n", err);
2709 goto err_set_mask;
2710 }
2711
2712 pci_set_master(pdev);
2713
2714 mmio_start = pci_resource_start(pdev, 0);
2715 mmio_len = pci_resource_len(pdev, 0);
2716 adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
2717 if (!adapter->hw_addr0) {
2718 dev_err(&pdev->dev, "Failed to map bar0\n");
2719 err = -EIO;
2720 goto err_ioremap;
2721 }
2722
2723 mmio_start = pci_resource_start(pdev, 1);
2724 mmio_len = pci_resource_len(pdev, 1);
2725 adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
2726 if (!adapter->hw_addr1) {
2727 dev_err(&pdev->dev, "Failed to map bar1\n");
2728 err = -EIO;
2729 goto err_bar1;
2730 }
2731 return 0;
2732
2733 err_bar1:
2734 iounmap(adapter->hw_addr0);
2735 err_ioremap:
2736 pci_release_selected_regions(pdev, (1 << 2) - 1);
2737 err_set_mask:
2738 pci_disable_device(pdev);
2739 return err;
2740 }
2741
2742
2743 static void
2744 vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
2745 {
2746 BUG_ON(!adapter->pdev);
2747
2748 iounmap(adapter->hw_addr0);
2749 iounmap(adapter->hw_addr1);
2750 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
2751 pci_disable_device(adapter->pdev);
2752 }
2753
2754
2755 static void
2756 vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2757 {
2758 size_t sz, i, ring0_size, ring1_size, comp_size;
2759 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0];
2760
2761
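/* Added note: if the MTU fits in a single skb buffer (after the ethernet
 * header), one rx buffer per packet is enough; otherwise the remainder is
 * spread over additional page-sized buffers. */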
2762 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
2763 VMXNET3_MAX_ETH_HDR_SIZE) {
2764 adapter->skb_buf_size = adapter->netdev->mtu +
2765 VMXNET3_MAX_ETH_HDR_SIZE;
2766 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
2767 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
2768
2769 adapter->rx_buf_per_pkt = 1;
2770 } else {
2771 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
2772 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
2773 VMXNET3_MAX_ETH_HDR_SIZE;
2774 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
2775 }
2776
2777 /*
2778 * for simplicity, force the ring0 size to be a multiple of
2779 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
2780 */
2781 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
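/* Added note: (size + sz - 1) / sz * sz rounds a requested ring size up to the
 * next multiple of sz; min_t() then clamps it to the largest such multiple
 * that still fits the hardware maximum. */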
2782 ring0_size = adapter->rx_queue[0].rx_ring[0].size;
2783 ring0_size = (ring0_size + sz - 1) / sz * sz;
2784 ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
2785 sz * sz);
2786 ring1_size = adapter->rx_queue[0].rx_ring[1].size;
2787 ring1_size = (ring1_size + sz - 1) / sz * sz;
2788 ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
2789 sz * sz);
2790 comp_size = ring0_size + ring1_size;
2791
2792 for (i = 0; i < adapter->num_rx_queues; i++) {
2793 rq = &adapter->rx_queue[i];
2794 rq->rx_ring[0].size = ring0_size;
2795 rq->rx_ring[1].size = ring1_size;
2796 rq->comp_ring.size = comp_size;
2797 }
2798 }
2799
2800
2801 int
2802 vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
2803 u32 rx_ring_size, u32 rx_ring2_size,
2804 u16 txdata_desc_size, u16 rxdata_desc_size)
2805 {
2806 int err = 0, i;
2807
2808 for (i = 0; i < adapter->num_tx_queues; i++) {
2809 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2810 tq->tx_ring.size = tx_ring_size;
2811 tq->data_ring.size = tx_ring_size;
2812 tq->comp_ring.size = tx_ring_size;
2813 tq->txdata_desc_size = txdata_desc_size;
2814 tq->shared = &adapter->tqd_start[i].ctrl;
2815 tq->stopped = true;
2816 tq->adapter = adapter;
2817 tq->qid = i;
2818 err = vmxnet3_tq_create(tq, adapter);
2819 /*
2820 * Too late to change num_tx_queues. We cannot make do with
2821 * fewer queues than we asked for.
2822 */
2823 if (err)
2824 goto queue_err;
2825 }
2826
2827 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
2828 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
2829 vmxnet3_adjust_rx_ring_size(adapter);
2830
2831 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
2832 for (i = 0; i < adapter->num_rx_queues; i++) {
2833 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2834 /* qid and qid2 for rx queues will be assigned later, once the number
2835 * of rx queues is finalized after interrupts are allocated */
2836 rq->shared = &adapter->rqd_start[i].ctrl;
2837 rq->adapter = adapter;
2838 rq->data_ring.desc_size = rxdata_desc_size;
2839 err = vmxnet3_rq_create(rq, adapter);
2840 if (err) {
2841 if (i == 0) {
2842 netdev_err(adapter->netdev,
2843 "Could not allocate any rx queues. "
2844 "Aborting.\n");
2845 goto queue_err;
2846 } else {
2847 netdev_info(adapter->netdev,
2848 "Number of rx queues changed "
2849 "to : %d.\n", i);
2850 adapter->num_rx_queues = i;
2851 err = 0;
2852 break;
2853 }
2854 }
2855 }
2856
2857 if (!adapter->rxdataring_enabled)
2858 vmxnet3_rq_destroy_all_rxdataring(adapter);
2859
2860 return err;
2861 queue_err:
2862 vmxnet3_tq_destroy_all(adapter);
2863 return err;
2864 }
2865
2866 static int
2867 vmxnet3_open(struct net_device *netdev)
2868 {
2869 struct vmxnet3_adapter *adapter;
2870 int err, i;
2871
2872 adapter = netdev_priv(netdev);
2873
2874 for (i = 0; i < adapter->num_tx_queues; i++)
2875 spin_lock_init(&adapter->tx_queue[i].tx_lock);
2876
2877 if (VMXNET3_VERSION_GE_3(adapter)) {
2878 unsigned long flags;
2879 u16 txdata_desc_size;
2880
2881 spin_lock_irqsave(&adapter->cmd_lock, flags);
2882 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2883 VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
2884 txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
2885 VMXNET3_REG_CMD);
2886 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2887
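/* Added note: fall back to the default tx data descriptor size if the device
 * reports a value outside the supported range or one that is not properly
 * aligned. */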
2888 if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
2889 (txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
2890 (txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
2891 adapter->txdata_desc_size =
2892 sizeof(struct Vmxnet3_TxDataDesc);
2893 } else {
2894 adapter->txdata_desc_size = txdata_desc_size;
2895 }
2896 } else {
2897 adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
2898 }
2899
2900 err = vmxnet3_create_queues(adapter,
2901 adapter->tx_ring_size,
2902 adapter->rx_ring_size,
2903 adapter->rx_ring2_size,
2904 adapter->txdata_desc_size,
2905 adapter->rxdata_desc_size);
2906 if (err)
2907 goto queue_err;
2908
2909 err = vmxnet3_activate_dev(adapter);
2910 if (err)
2911 goto activate_err;
2912
2913 return 0;
2914
2915 activate_err:
2916 vmxnet3_rq_destroy_all(adapter);
2917 vmxnet3_tq_destroy_all(adapter);
2918 queue_err:
2919 return err;
2920 }
2921
2922
2923 static int
2924 vmxnet3_close(struct net_device *netdev)
2925 {
2926 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2927
2928 /*
2929 * Reset_work may be in the middle of resetting the device; wait for its
2930 * completion.
2931 */
2932 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2933 msleep(1);
2934
2935 vmxnet3_quiesce_dev(adapter);
2936
2937 vmxnet3_rq_destroy_all(adapter);
2938 vmxnet3_tq_destroy_all(adapter);
2939
2940 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2941
2942
2943 return 0;
2944 }
2945
2946
2947 void
2948 vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2949 {
2950 int i;
2951
2952 /*
2953 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
2954 * vmxnet3_close() will deadlock.
2955 */
2956 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
2957
2958 /* we need to enable NAPI, otherwise dev_close will deadlock */
2959 for (i = 0; i < adapter->num_rx_queues; i++)
2960 napi_enable(&adapter->rx_queue[i].napi);
2961 dev_close(adapter->netdev);
2962 }
2963
2964
2965 static int
2966 vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
2967 {
2968 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2969 int err = 0;
2970
2971 if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
2972 return -EINVAL;
2973
2974 netdev->mtu = new_mtu;
2975
2976 /*
2977 * Reset_work may be in the middle of resetting the device; wait for its
2978 * completion.
2979 */
2980 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2981 msleep(1);
2982
2983 if (netif_running(netdev)) {
2984 vmxnet3_quiesce_dev(adapter);
2985 vmxnet3_reset_dev(adapter);
2986
2987 /* we need to re-create the rx queue based on the new mtu */
2988 vmxnet3_rq_destroy_all(adapter);
2989 vmxnet3_adjust_rx_ring_size(adapter);
2990 err = vmxnet3_rq_create_all(adapter);
2991 if (err) {
2992 netdev_err(netdev,
2993 "failed to re-create rx queues, "
2994 "error %d. Closing it.\n", err);
2995 goto out;
2996 }
2997
2998 err = vmxnet3_activate_dev(adapter);
2999 if (err) {
3000 netdev_err(netdev,
3001 "failed to re-activate, error %d. "
3002 "Closing it\n", err);
3003 goto out;
3004 }
3005 }
3006
3007 out:
3008 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3009 if (err)
3010 vmxnet3_force_close(adapter);
3011
3012 return err;
3013 }
3014
3015
3016 static void
3017 vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
3018 {
3019 struct net_device *netdev = adapter->netdev;
3020
3021 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3022 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3023 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
3024 NETIF_F_LRO;
3025 if (dma64)
3026 netdev->hw_features |= NETIF_F_HIGHDMA;
3027 netdev->vlan_features = netdev->hw_features &
3028 ~(NETIF_F_HW_VLAN_CTAG_TX |
3029 NETIF_F_HW_VLAN_CTAG_RX);
3030 netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3031 }
3032
3033
3034 static void
3035 vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
3036 {
3037 u32 tmp;
3038
3039 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
3040 *(u32 *)mac = tmp;
3041
3042 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
3043 mac[4] = tmp & 0xff;
3044 mac[5] = (tmp >> 8) & 0xff;
3045 }
3046
3047 #ifdef CONFIG_PCI_MSI
3048
3049 /*
3050 * Enable MSI-X vectors.
3051 * Returns:
3052 * VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of required
3053 * vectors could be enabled;
3054 * otherwise, the number of vectors that were enabled (this number is
3055 * greater than VMXNET3_LINUX_MIN_MSIX_VECT), or a negative error code.
3056 */
3057
3058 static int
3059 vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
3060 {
3061 int ret = pci_enable_msix_range(adapter->pdev,
3062 adapter->intr.msix_entries, nvec, nvec);
3063
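/* Added note: -ENOSPC means fewer vectors are available than were requested;
 * retry once with the driver's minimum before giving up. */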
3064 if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
3065 dev_err(&adapter->netdev->dev,
3066 "Failed to enable %d MSI-X, trying %d\n",
3067 nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
3068
3069 ret = pci_enable_msix_range(adapter->pdev,
3070 adapter->intr.msix_entries,
3071 VMXNET3_LINUX_MIN_MSIX_VECT,
3072 VMXNET3_LINUX_MIN_MSIX_VECT);
3073 }
3074
3075 if (ret < 0) {
3076 dev_err(&adapter->netdev->dev,
3077 "Failed to enable MSI-X, error: %d\n", ret);
3078 }
3079
3080 return ret;
3081 }
3082
3083
3084 #endif /* CONFIG_PCI_MSI */
3085
3086 static void
3087 vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
3088 {
3089 u32 cfg;
3090 unsigned long flags;
3091
3092 /* intr settings */
3093 spin_lock_irqsave(&adapter->cmd_lock, flags);
3094 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3095 VMXNET3_CMD_GET_CONF_INTR);
3096 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3097 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3098 adapter->intr.type = cfg & 0x3;
3099 adapter->intr.mask_mode = (cfg >> 2) & 0x3;
3100
3101 if (adapter->intr.type == VMXNET3_IT_AUTO) {
3102 adapter->intr.type = VMXNET3_IT_MSIX;
3103 }
3104
3105 #ifdef CONFIG_PCI_MSI
3106 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3107 int i, nvec;
3108
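/* Added note: one vector per tx queue (or a single shared one), plus one per
 * rx queue unless rx buddy-shares with tx, plus one for link events; never
 * request fewer than the driver minimum. */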
3109 nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
3110 1 : adapter->num_tx_queues;
3111 nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
3112 0 : adapter->num_rx_queues;
3113 nvec += 1; /* for link event */
3114 nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
3115 nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
3116
3117 for (i = 0; i < nvec; i++)
3118 adapter->intr.msix_entries[i].entry = i;
3119
3120 nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
3121 if (nvec < 0)
3122 goto msix_err;
3123
3124 /* If we cannot allocate one MSIx vector per queue
3125 * then limit the number of rx queues to 1
3126 */
3127 if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
3128 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
3129 || adapter->num_rx_queues != 1) {
3130 adapter->share_intr = VMXNET3_INTR_TXSHARE;
3131 netdev_err(adapter->netdev,
3132 "Number of rx queues : 1\n");
3133 adapter->num_rx_queues = 1;
3134 }
3135 }
3136
3137 adapter->intr.num_intrs = nvec;
3138 return;
3139
3140 msix_err:
3141 /* If we cannot allocate MSIx vectors use only one rx queue */
3142 dev_info(&adapter->pdev->dev,
3143 "Failed to enable MSI-X, error %d. "
3144 "Limiting #rx queues to 1, try MSI.\n", nvec);
3145
3146 adapter->intr.type = VMXNET3_IT_MSI;
3147 }
3148
3149 if (adapter->intr.type == VMXNET3_IT_MSI) {
3150 if (!pci_enable_msi(adapter->pdev)) {
3151 adapter->num_rx_queues = 1;
3152 adapter->intr.num_intrs = 1;
3153 return;
3154 }
3155 }
3156 #endif /* CONFIG_PCI_MSI */
3157
3158 adapter->num_rx_queues = 1;
3159 dev_info(&adapter->netdev->dev,
3160 "Using INTx interrupt, #Rx queues: 1.\n");
3161 adapter->intr.type = VMXNET3_IT_INTX;
3162
3163 /* INT-X related setting */
3164 adapter->intr.num_intrs = 1;
3165 }
3166
3167
3168 static void
3169 vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
3170 {
3171 if (adapter->intr.type == VMXNET3_IT_MSIX)
3172 pci_disable_msix(adapter->pdev);
3173 else if (adapter->intr.type == VMXNET3_IT_MSI)
3174 pci_disable_msi(adapter->pdev);
3175 else
3176 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
3177 }
3178
3179
3180 static void
3181 vmxnet3_tx_timeout(struct net_device *netdev)
3182 {
3183 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3184 adapter->tx_timeout_count++;
3185
3186 netdev_err(adapter->netdev, "tx hang\n");
3187 schedule_work(&adapter->work);
3188 netif_wake_queue(adapter->netdev);
3189 }
3190
3191
3192 static void
3193 vmxnet3_reset_work(struct work_struct *data)
3194 {
3195 struct vmxnet3_adapter *adapter;
3196
3197 adapter = container_of(data, struct vmxnet3_adapter, work);
3198
3199 /* if another thread is resetting the device, no need to proceed */
3200 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3201 return;
3202
3203 /* if the device is closed, we must leave it alone */
3204 rtnl_lock();
3205 if (netif_running(adapter->netdev)) {
3206 netdev_notice(adapter->netdev, "resetting\n");
3207 vmxnet3_quiesce_dev(adapter);
3208 vmxnet3_reset_dev(adapter);
3209 vmxnet3_activate_dev(adapter);
3210 } else {
3211 netdev_info(adapter->netdev, "already closed\n");
3212 }
3213 rtnl_unlock();
3214
3215 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3216 }
3217
3218
3219 static int
3220 vmxnet3_probe_device(struct pci_dev *pdev,
3221 const struct pci_device_id *id)
3222 {
3223 static const struct net_device_ops vmxnet3_netdev_ops = {
3224 .ndo_open = vmxnet3_open,
3225 .ndo_stop = vmxnet3_close,
3226 .ndo_start_xmit = vmxnet3_xmit_frame,
3227 .ndo_set_mac_address = vmxnet3_set_mac_addr,
3228 .ndo_change_mtu = vmxnet3_change_mtu,
3229 .ndo_set_features = vmxnet3_set_features,
3230 .ndo_get_stats64 = vmxnet3_get_stats64,
3231 .ndo_tx_timeout = vmxnet3_tx_timeout,
3232 .ndo_set_rx_mode = vmxnet3_set_mc,
3233 .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
3234 .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
3235 #ifdef CONFIG_NET_POLL_CONTROLLER
3236 .ndo_poll_controller = vmxnet3_netpoll,
3237 #endif
3238 };
3239 int err;
3240 bool dma64 = false; /* stupid gcc */
3241 u32 ver;
3242 struct net_device *netdev;
3243 struct vmxnet3_adapter *adapter;
3244 u8 mac[ETH_ALEN];
3245 int size;
3246 int num_tx_queues;
3247 int num_rx_queues;
3248
3249 if (!pci_msi_enabled())
3250 enable_mq = 0;
3251
3252 #ifdef VMXNET3_RSS
3253 if (enable_mq)
3254 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3255 (int)num_online_cpus());
3256 else
3257 #endif
3258 num_rx_queues = 1;
3259 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
3260
3261 if (enable_mq)
3262 num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
3263 (int)num_online_cpus());
3264 else
3265 num_tx_queues = 1;
3266
3267 num_tx_queues = rounddown_pow_of_two(num_tx_queues);
3268 netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
3269 max(num_tx_queues, num_rx_queues));
3270 dev_info(&pdev->dev,
3271 "# of Tx queues : %d, # of Rx queues : %d\n",
3272 num_tx_queues, num_rx_queues);
3273
3274 if (!netdev)
3275 return -ENOMEM;
3276
3277 pci_set_drvdata(pdev, netdev);
3278 adapter = netdev_priv(netdev);
3279 adapter->netdev = netdev;
3280 adapter->pdev = pdev;
3281
3282 adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
3283 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
3284 adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
3285
3286 spin_lock_init(&adapter->cmd_lock);
3287 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
3288 sizeof(struct vmxnet3_adapter),
3289 PCI_DMA_TODEVICE);
3290 if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
3291 dev_err(&pdev->dev, "Failed to map dma\n");
3292 err = -EFAULT;
3293 goto err_dma_map;
3294 }
3295 adapter->shared = dma_alloc_coherent(
3296 &adapter->pdev->dev,
3297 sizeof(struct Vmxnet3_DriverShared),
3298 &adapter->shared_pa, GFP_KERNEL);
3299 if (!adapter->shared) {
3300 dev_err(&pdev->dev, "Failed to allocate memory\n");
3301 err = -ENOMEM;
3302 goto err_alloc_shared;
3303 }
3304
3305 adapter->num_rx_queues = num_rx_queues;
3306 adapter->num_tx_queues = num_tx_queues;
3307 adapter->rx_buf_per_pkt = 1;
3308
3309 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
3310 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
3311 adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
3312 &adapter->queue_desc_pa,
3313 GFP_KERNEL);
3314
3315 if (!adapter->tqd_start) {
3316 dev_err(&pdev->dev, "Failed to allocate memory\n");
3317 err = -ENOMEM;
3318 goto err_alloc_queue_desc;
3319 }
3320 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
3321 adapter->num_tx_queues);
3322
3323 adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
3324 sizeof(struct Vmxnet3_PMConf),
3325 &adapter->pm_conf_pa,
3326 GFP_KERNEL);
3327 if (adapter->pm_conf == NULL) {
3328 err = -ENOMEM;
3329 goto err_alloc_pm;
3330 }
3331
3332 #ifdef VMXNET3_RSS
3333
3334 adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
3335 sizeof(struct UPT1_RSSConf),
3336 &adapter->rss_conf_pa,
3337 GFP_KERNEL);
3338 if (adapter->rss_conf == NULL) {
3339 err = -ENOMEM;
3340 goto err_alloc_rss;
3341 }
3342 #endif /* VMXNET3_RSS */
3343
3344 err = vmxnet3_alloc_pci_resources(adapter, &dma64);
3345 if (err < 0)
3346 goto err_alloc_pci;
3347
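/* Added note: revision negotiation reads the supported-revision bitmap from
 * VRRS and selects the highest revision this driver knows about by writing
 * that bit back. */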
3348 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
3349 if (ver & (1 << VMXNET3_REV_3)) {
3350 VMXNET3_WRITE_BAR1_REG(adapter,
3351 VMXNET3_REG_VRRS,
3352 1 << VMXNET3_REV_3);
3353 adapter->version = VMXNET3_REV_3 + 1;
3354 } else if (ver & (1 << VMXNET3_REV_2)) {
3355 VMXNET3_WRITE_BAR1_REG(adapter,
3356 VMXNET3_REG_VRRS,
3357 1 << VMXNET3_REV_2);
3358 adapter->version = VMXNET3_REV_2 + 1;
3359 } else if (ver & (1 << VMXNET3_REV_1)) {
3360 VMXNET3_WRITE_BAR1_REG(adapter,
3361 VMXNET3_REG_VRRS,
3362 1 << VMXNET3_REV_1);
3363 adapter->version = VMXNET3_REV_1 + 1;
3364 } else {
3365 dev_err(&pdev->dev,
3366 "Incompatible h/w version (0x%x) for adapter\n", ver);
3367 err = -EBUSY;
3368 goto err_ver;
3369 }
3370 dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version);
3371
3372 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
3373 if (ver & 1) {
3374 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
3375 } else {
3376 dev_err(&pdev->dev,
3377 "Incompatible upt version (0x%x) for adapter\n", ver);
3378 err = -EBUSY;
3379 goto err_ver;
3380 }
3381
3382 if (VMXNET3_VERSION_GE_3(adapter)) {
3383 adapter->coal_conf =
3384 dma_alloc_coherent(&adapter->pdev->dev,
2385 sizeof(struct Vmxnet3_CoalesceScheme),
3387 &adapter->coal_conf_pa,
3388 GFP_KERNEL);
3389 if (!adapter->coal_conf) {
3390 err = -ENOMEM;
3391 goto err_ver;
3392 }
3393 memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
3394 adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
3395 adapter->default_coal_mode = true;
3396 }
3397
3398 SET_NETDEV_DEV(netdev, &pdev->dev);
3399 vmxnet3_declare_features(adapter, dma64);
3400
3401 adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
3402 VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
3403
3404 if (adapter->num_tx_queues == adapter->num_rx_queues)
3405 adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
3406 else
3407 adapter->share_intr = VMXNET3_INTR_DONTSHARE;
3408
3409 vmxnet3_alloc_intr_resources(adapter);
3410
3411 #ifdef VMXNET3_RSS
3412 if (adapter->num_rx_queues > 1 &&
3413 adapter->intr.type == VMXNET3_IT_MSIX) {
3414 adapter->rss = true;
3415 netdev->hw_features |= NETIF_F_RXHASH;
3416 netdev->features |= NETIF_F_RXHASH;
3417 dev_dbg(&pdev->dev, "RSS is enabled.\n");
3418 } else {
3419 adapter->rss = false;
3420 }
3421 #endif
3422
3423 vmxnet3_read_mac_addr(adapter, mac);
3424 memcpy(netdev->dev_addr, mac, netdev->addr_len);
3425
3426 netdev->netdev_ops = &vmxnet3_netdev_ops;
3427 vmxnet3_set_ethtool_ops(netdev);
3428 netdev->watchdog_timeo = 5 * HZ;
3429
3430 INIT_WORK(&adapter->work, vmxnet3_reset_work);
3431 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3432
3433 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3434 int i;
3435 for (i = 0; i < adapter->num_rx_queues; i++) {
3436 netif_napi_add(adapter->netdev,
3437 &adapter->rx_queue[i].napi,
3438 vmxnet3_poll_rx_only, 64);
3439 }
3440 } else {
3441 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
3442 vmxnet3_poll, 64);
3443 }
3444
3445 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
3446 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
3447
3448 netif_carrier_off(netdev);
3449 err = register_netdev(netdev);
3450
3451 if (err) {
3452 dev_err(&pdev->dev, "Failed to register adapter\n");
3453 goto err_register;
3454 }
3455
3456 vmxnet3_check_link(adapter, false);
3457 return 0;
3458
3459 err_register:
3460 if (VMXNET3_VERSION_GE_3(adapter)) {
3461 dma_free_coherent(&adapter->pdev->dev,
3462 sizeof(struct Vmxnet3_CoalesceScheme),
3463 adapter->coal_conf, adapter->coal_conf_pa);
3464 }
3465 vmxnet3_free_intr_resources(adapter);
3466 err_ver:
3467 vmxnet3_free_pci_resources(adapter);
3468 err_alloc_pci:
3469 #ifdef VMXNET3_RSS
3470 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
3471 adapter->rss_conf, adapter->rss_conf_pa);
3472 err_alloc_rss:
3473 #endif
3474 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
3475 adapter->pm_conf, adapter->pm_conf_pa);
3476 err_alloc_pm:
3477 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
3478 adapter->queue_desc_pa);
3479 err_alloc_queue_desc:
3480 dma_free_coherent(&adapter->pdev->dev,
3481 sizeof(struct Vmxnet3_DriverShared),
3482 adapter->shared, adapter->shared_pa);
3483 err_alloc_shared:
3484 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
3485 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
3486 err_dma_map:
3487 free_netdev(netdev);
3488 return err;
3489 }
3490
3491
3492 static void
3493 vmxnet3_remove_device(struct pci_dev *pdev)
3494 {
3495 struct net_device *netdev = pci_get_drvdata(pdev);
3496 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3497 int size = 0;
3498 int num_rx_queues;
3499
3500 #ifdef VMXNET3_RSS
3501 if (enable_mq)
3502 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3503 (int)num_online_cpus());
3504 else
3505 #endif
3506 num_rx_queues = 1;
3507 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
3508
3509 cancel_work_sync(&adapter->work);
3510
3511 unregister_netdev(netdev);
3512
3513 vmxnet3_free_intr_resources(adapter);
3514 vmxnet3_free_pci_resources(adapter);
3515 if (VMXNET3_VERSION_GE_3(adapter)) {
3516 dma_free_coherent(&adapter->pdev->dev,
3517 sizeof(struct Vmxnet3_CoalesceScheme),
3518 adapter->coal_conf, adapter->coal_conf_pa);
3519 }
3520 #ifdef VMXNET3_RSS
3521 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
3522 adapter->rss_conf, adapter->rss_conf_pa);
3523 #endif
3524 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
3525 adapter->pm_conf, adapter->pm_conf_pa);
3526
3527 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
3528 size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
3529 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
3530 adapter->queue_desc_pa);
3531 dma_free_coherent(&adapter->pdev->dev,
3532 sizeof(struct Vmxnet3_DriverShared),
3533 adapter->shared, adapter->shared_pa);
3534 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
3535 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
3536 free_netdev(netdev);
3537 }
3538
3539 static void vmxnet3_shutdown_device(struct pci_dev *pdev)
3540 {
3541 struct net_device *netdev = pci_get_drvdata(pdev);
3542 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3543 unsigned long flags;
3544
3545 /* Reset_work may be in the middle of resetting the device; wait for its
3546 * completion.
3547 */
3548 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3549 msleep(1);
3550
3551 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED,
3552 &adapter->state)) {
3553 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3554 return;
3555 }
3556 spin_lock_irqsave(&adapter->cmd_lock, flags);
3557 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3558 VMXNET3_CMD_QUIESCE_DEV);
3559 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3560 vmxnet3_disable_all_intrs(adapter);
3561
3562 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3563 }
3564
3565
3566 #ifdef CONFIG_PM
3567
3568 static int
3569 vmxnet3_suspend(struct device *device)
3570 {
3571 struct pci_dev *pdev = to_pci_dev(device);
3572 struct net_device *netdev = pci_get_drvdata(pdev);
3573 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3574 struct Vmxnet3_PMConf *pmConf;
3575 struct ethhdr *ehdr;
3576 struct arphdr *ahdr;
3577 u8 *arpreq;
3578 struct in_device *in_dev;
3579 struct in_ifaddr *ifa;
3580 unsigned long flags;
3581 int i = 0;
3582
3583 if (!netif_running(netdev))
3584 return 0;
3585
3586 for (i = 0; i < adapter->num_rx_queues; i++)
3587 napi_disable(&adapter->rx_queue[i].napi);
3588
3589 vmxnet3_disable_all_intrs(adapter);
3590 vmxnet3_free_irqs(adapter);
3591 vmxnet3_free_intr_resources(adapter);
3592
3593 netif_device_detach(netdev);
3594 netif_tx_stop_all_queues(netdev);
3595
3596 /* Create wake-up filters. */
3597 pmConf = adapter->pm_conf;
3598 memset(pmConf, 0, sizeof(*pmConf));
3599
3600 if (adapter->wol & WAKE_UCAST) {
3601 pmConf->filters[i].patternSize = ETH_ALEN;
3602 pmConf->filters[i].maskSize = 1;
3603 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
3604 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
3605
3606 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
3607 i++;
3608 }
3609
3610 if (adapter->wol & WAKE_ARP) {
3611 in_dev = in_dev_get(netdev);
3612 if (!in_dev)
3613 goto skip_arp;
3614
3615 ifa = (struct in_ifaddr *)in_dev->ifa_list;
3616 if (!ifa)
3617 goto skip_arp;
3618
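/* Added note: build a pattern/mask wake-up filter matching an ARP request for
 * this interface's IPv4 address; each bit of the mask covers one byte of the
 * pattern. */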
3619 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
3620 sizeof(struct arphdr) + /* ARP header */
3621 2 * ETH_ALEN + /* 2 Ethernet addresses*/
3622 2 * sizeof(u32); /*2 IPv4 addresses */
3623 pmConf->filters[i].maskSize =
3624 (pmConf->filters[i].patternSize - 1) / 8 + 1;
3625
3626 /* ETH_P_ARP in Ethernet header. */
3627 ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
3628 ehdr->h_proto = htons(ETH_P_ARP);
3629
3630 /* ARPOP_REQUEST in ARP header. */
3631 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
3632 ahdr->ar_op = htons(ARPOP_REQUEST);
3633 arpreq = (u8 *)(ahdr + 1);
3634
3635 /* The Unicast IPv4 address in 'tip' field. */
3636 arpreq += 2 * ETH_ALEN + sizeof(u32);
3637 *(u32 *)arpreq = ifa->ifa_address;
3638
3639 /* The mask for the relevant bits. */
3640 pmConf->filters[i].mask[0] = 0x00;
3641 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
3642 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
3643 pmConf->filters[i].mask[3] = 0x00;
3644 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
3645 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
3646 in_dev_put(in_dev);
3647
3648 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
3649 i++;
3650 }
3651
3652 skip_arp:
3653 if (adapter->wol & WAKE_MAGIC)
3654 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
3655
3656 pmConf->numFilters = i;
3657
3658 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
3659 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
3660 *pmConf));
3661 adapter->shared->devRead.pmConfDesc.confPA =
3662 cpu_to_le64(adapter->pm_conf_pa);
3663
3664 spin_lock_irqsave(&adapter->cmd_lock, flags);
3665 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3666 VMXNET3_CMD_UPDATE_PMCFG);
3667 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3668
3669 pci_save_state(pdev);
3670 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
3671 adapter->wol);
3672 pci_disable_device(pdev);
3673 pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
3674
3675 return 0;
3676 }
3677
3678
3679 static int
3680 vmxnet3_resume(struct device *device)
3681 {
3682 int err;
3683 unsigned long flags;
3684 struct pci_dev *pdev = to_pci_dev(device);
3685 struct net_device *netdev = pci_get_drvdata(pdev);
3686 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3687
3688 if (!netif_running(netdev))
3689 return 0;
3690
3691 pci_set_power_state(pdev, PCI_D0);
3692 pci_restore_state(pdev);
3693 err = pci_enable_device_mem(pdev);
3694 if (err != 0)
3695 return err;
3696
3697 pci_enable_wake(pdev, PCI_D0, 0);
3698
3699 vmxnet3_alloc_intr_resources(adapter);
3700
3701 /* During hibernate and suspend, the device has to be reinitialized since
3702 * its state is not guaranteed to be preserved.
3703 */
3704
3705 /* Need not check adapter state as other reset tasks cannot run during
3706 * device resume.
3707 */
3708 spin_lock_irqsave(&adapter->cmd_lock, flags);
3709 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3710 VMXNET3_CMD_QUIESCE_DEV);
3711 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3712 vmxnet3_tq_cleanup_all(adapter);
3713 vmxnet3_rq_cleanup_all(adapter);
3714
3715 vmxnet3_reset_dev(adapter);
3716 err = vmxnet3_activate_dev(adapter);
3717 if (err != 0) {
3718 netdev_err(netdev,
3719 "failed to re-activate on resume, error: %d\n", err);
3720 vmxnet3_force_close(adapter);
3721 return err;
3722 }
3723 netif_device_attach(netdev);
3724
3725 return 0;
3726 }
3727
3728 static const struct dev_pm_ops vmxnet3_pm_ops = {
3729 .suspend = vmxnet3_suspend,
3730 .resume = vmxnet3_resume,
3731 .freeze = vmxnet3_suspend,
3732 .restore = vmxnet3_resume,
3733 };
3734 #endif
3735
3736 static struct pci_driver vmxnet3_driver = {
3737 .name = vmxnet3_driver_name,
3738 .id_table = vmxnet3_pciid_table,
3739 .probe = vmxnet3_probe_device,
3740 .remove = vmxnet3_remove_device,
3741 .shutdown = vmxnet3_shutdown_device,
3742 #ifdef CONFIG_PM
3743 .driver.pm = &vmxnet3_pm_ops,
3744 #endif
3745 };
3746
3747
3748 static int __init
3749 vmxnet3_init_module(void)
3750 {
3751 pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
3752 VMXNET3_DRIVER_VERSION_REPORT);
3753 return pci_register_driver(&vmxnet3_driver);
3754 }
3755
3756 module_init(vmxnet3_init_module);
3757
3758
3759 static void
3760 vmxnet3_exit_module(void)
3761 {
3762 pci_unregister_driver(&vmxnet3_driver);
3763 }
3764
3765 module_exit(vmxnet3_exit_module);
3766
3767 MODULE_AUTHOR("VMware, Inc.");
3768 MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
3769 MODULE_LICENSE("GPL v2");
3770 MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);
3771
3772
3773
3774
3775
3776 /* LDV_COMMENT_BEGIN_MAIN */
3777 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
3778
3779 /*###########################################################################*/
3780
3781 /*############## Driver Environment Generator 0.2 output ####################*/
3782
3783 /*###########################################################################*/
3784
3785
3786
3787 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test whether all kernel resources are correctly released by the driver before it is unloaded. */
3788 void ldv_check_final_state(void);
3789
3790 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
3791 void ldv_check_return_value(int res);
3792
3793 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
3794 void ldv_check_return_value_probe(int res);
3795
3796 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
3797 void ldv_initialize(void);
3798
3799 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
3800 void ldv_handler_precall(void);
3801
3802 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
3803 int nondet_int(void);
3804
3805 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
3806 int LDV_IN_INTERRUPT;
3807
3808 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
3809 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
3810
3811
3812
3813 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
3814 /*============================= VARIABLE DECLARATION PART =============================*/
3815 /** STRUCT: struct type: net_device_ops, struct name: vmxnet3_netdev_ops **/
3816 /* content: static int vmxnet3_open(struct net_device *netdev)*/
3817 /* LDV_COMMENT_BEGIN_PREP */
3818 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
3819 #ifdef __BIG_ENDIAN_BITFIELD
3820 #endif
3821 #ifdef __BIG_ENDIAN_BITFIELD
3822 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
3823 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
3824 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
3825 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
3826 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
3827 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
3828 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
3829 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
3830 VMXNET3_TCD_GEN_SIZE)
3831 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
3832 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
3833 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
3834 (dstrcd) = (tmp); \
3835 vmxnet3_RxCompToCPU((rcd), (tmp)); \
3836 } while (0)
3837 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
3838 (dstrxd) = (tmp); \
3839 vmxnet3_RxDescToCPU((rxd), (tmp)); \
3840 } while (0)
3841 #else
3842 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
3843 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
3844 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
3845 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
3846 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
3847 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
3848 #endif
3849 #ifdef __BIG_ENDIAN_BITFIELD
3850 #endif
3851 #ifdef __BIG_ENDIAN_BITFIELD
3852 #else
3853 #endif
3854 #ifdef __BIG_ENDIAN_BITFIELD
3855 #endif
3856 #ifdef __BIG_ENDIAN_BITFIELD
3857 #endif
3858 #ifdef VMXNET3_RSS
3859 #endif
3860 #ifdef __BIG_ENDIAN_BITFIELD
3861 #endif
3862 #ifdef CONFIG_PCI_MSI
3863 #endif
3864 #ifdef CONFIG_NET_POLL_CONTROLLER
3865 #ifdef CONFIG_PCI_MSI
3866 #endif
3867 #endif
3868 #ifdef CONFIG_PCI_MSI
3869 #endif
3870 #ifdef CONFIG_PCI_MSI
3871 #endif
3872 #ifdef CONFIG_PCI_MSI
3873 #endif
3874 #ifdef VMXNET3_RSS
3875 #endif
3876 /* LDV_COMMENT_END_PREP */
3877 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_open" */
3878 struct net_device * var_group1;
3879 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "vmxnet3_open" */
3880 static int res_vmxnet3_open_73;
3881 /* LDV_COMMENT_BEGIN_PREP */
3882 #ifdef CONFIG_PCI_MSI
3883 #endif
3884 #ifdef CONFIG_PCI_MSI
3885 #endif
3886 #ifdef CONFIG_NET_POLL_CONTROLLER
3887 #endif
3888 #ifdef VMXNET3_RSS
3889 #endif
3890 #ifdef VMXNET3_RSS
3891 #endif
3892 #ifdef VMXNET3_RSS
3893 #endif
3894 #ifdef VMXNET3_RSS
3895 #endif
3896 #ifdef VMXNET3_RSS
3897 #endif
3898 #ifdef VMXNET3_RSS
3899 #endif
3900 #ifdef CONFIG_PM
3901 #endif
3902 #ifdef CONFIG_PM
3903 #endif
3904 /* LDV_COMMENT_END_PREP */
3905 /* content: static int vmxnet3_close(struct net_device *netdev)*/
3906 /* LDV_COMMENT_BEGIN_PREP */
3907 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
3908 #ifdef __BIG_ENDIAN_BITFIELD
3909 #endif
3910 #ifdef __BIG_ENDIAN_BITFIELD
3911 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
3912 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
3913 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
3914 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
3915 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
3916 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
3917 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
3918 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
3919 VMXNET3_TCD_GEN_SIZE)
3920 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
3921 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
3922 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
3923 (dstrcd) = (tmp); \
3924 vmxnet3_RxCompToCPU((rcd), (tmp)); \
3925 } while (0)
3926 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
3927 (dstrxd) = (tmp); \
3928 vmxnet3_RxDescToCPU((rxd), (tmp)); \
3929 } while (0)
3930 #else
3931 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
3932 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
3933 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
3934 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
3935 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
3936 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
3937 #endif
3938 #ifdef __BIG_ENDIAN_BITFIELD
3939 #endif
3940 #ifdef __BIG_ENDIAN_BITFIELD
3941 #else
3942 #endif
3943 #ifdef __BIG_ENDIAN_BITFIELD
3944 #endif
3945 #ifdef __BIG_ENDIAN_BITFIELD
3946 #endif
3947 #ifdef VMXNET3_RSS
3948 #endif
3949 #ifdef __BIG_ENDIAN_BITFIELD
3950 #endif
3951 #ifdef CONFIG_PCI_MSI
3952 #endif
3953 #ifdef CONFIG_NET_POLL_CONTROLLER
3954 #ifdef CONFIG_PCI_MSI
3955 #endif
3956 #endif
3957 #ifdef CONFIG_PCI_MSI
3958 #endif
3959 #ifdef CONFIG_PCI_MSI
3960 #endif
3961 #ifdef CONFIG_PCI_MSI
3962 #endif
3963 #ifdef VMXNET3_RSS
3964 #endif
3965 /* LDV_COMMENT_END_PREP */
3966 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "vmxnet3_close" */
3967 static int res_vmxnet3_close_74;
3968 /* LDV_COMMENT_BEGIN_PREP */
3969 #ifdef CONFIG_PCI_MSI
3970 #endif
3971 #ifdef CONFIG_PCI_MSI
3972 #endif
3973 #ifdef CONFIG_NET_POLL_CONTROLLER
3974 #endif
3975 #ifdef VMXNET3_RSS
3976 #endif
3977 #ifdef VMXNET3_RSS
3978 #endif
3979 #ifdef VMXNET3_RSS
3980 #endif
3981 #ifdef VMXNET3_RSS
3982 #endif
3983 #ifdef VMXNET3_RSS
3984 #endif
3985 #ifdef VMXNET3_RSS
3986 #endif
3987 #ifdef CONFIG_PM
3988 #endif
3989 #ifdef CONFIG_PM
3990 #endif
3991 /* LDV_COMMENT_END_PREP */
3992 /* content: static netdev_tx_t vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)*/
3993 /* LDV_COMMENT_BEGIN_PREP */
3994 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
3995 #ifdef __BIG_ENDIAN_BITFIELD
3996 #endif
3997 #ifdef __BIG_ENDIAN_BITFIELD
3998 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
3999 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
4000 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
4001 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
4002 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
4003 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
4004 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
4005 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
4006 VMXNET3_TCD_GEN_SIZE)
4007 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
4008 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
4009 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
4010 (dstrcd) = (tmp); \
4011 vmxnet3_RxCompToCPU((rcd), (tmp)); \
4012 } while (0)
4013 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
4014 (dstrxd) = (tmp); \
4015 vmxnet3_RxDescToCPU((rxd), (tmp)); \
4016 } while (0)
4017 #else
4018 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
4019 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
4020 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
4021 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
4022 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
4023 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
4024 #endif
4025 #ifdef __BIG_ENDIAN_BITFIELD
4026 #endif
4027 #ifdef __BIG_ENDIAN_BITFIELD
4028 #else
4029 #endif
4030 #ifdef __BIG_ENDIAN_BITFIELD
4031 #endif
4032 /* LDV_COMMENT_END_PREP */
4033 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_xmit_frame" */
4034 struct sk_buff * var_group2;
4035 /* LDV_COMMENT_BEGIN_PREP */
4036 #ifdef __BIG_ENDIAN_BITFIELD
4037 #endif
4038 #ifdef VMXNET3_RSS
4039 #endif
4040 #ifdef __BIG_ENDIAN_BITFIELD
4041 #endif
4042 #ifdef CONFIG_PCI_MSI
4043 #endif
4044 #ifdef CONFIG_NET_POLL_CONTROLLER
4045 #ifdef CONFIG_PCI_MSI
4046 #endif
4047 #endif
4048 #ifdef CONFIG_PCI_MSI
4049 #endif
4050 #ifdef CONFIG_PCI_MSI
4051 #endif
4052 #ifdef CONFIG_PCI_MSI
4053 #endif
4054 #ifdef VMXNET3_RSS
4055 #endif
4056 #ifdef CONFIG_PCI_MSI
4057 #endif
4058 #ifdef CONFIG_PCI_MSI
4059 #endif
4060 #ifdef CONFIG_NET_POLL_CONTROLLER
4061 #endif
4062 #ifdef VMXNET3_RSS
4063 #endif
4064 #ifdef VMXNET3_RSS
4065 #endif
4066 #ifdef VMXNET3_RSS
4067 #endif
4068 #ifdef VMXNET3_RSS
4069 #endif
4070 #ifdef VMXNET3_RSS
4071 #endif
4072 #ifdef VMXNET3_RSS
4073 #endif
4074 #ifdef CONFIG_PM
4075 #endif
4076 #ifdef CONFIG_PM
4077 #endif
4078 /* LDV_COMMENT_END_PREP */
4079 /* content: static int vmxnet3_set_mac_addr(struct net_device *netdev, void *p)*/
4080 /* LDV_COMMENT_BEGIN_PREP */
4081 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
4082 #ifdef __BIG_ENDIAN_BITFIELD
4083 #endif
4084 #ifdef __BIG_ENDIAN_BITFIELD
4085 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
4086 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
4087 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
4088 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
4089 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
4090 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
4091 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
4092 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
4093 VMXNET3_TCD_GEN_SIZE)
4094 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
4095 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
4096 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
4097 (dstrcd) = (tmp); \
4098 vmxnet3_RxCompToCPU((rcd), (tmp)); \
4099 } while (0)
4100 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
4101 (dstrxd) = (tmp); \
4102 vmxnet3_RxDescToCPU((rxd), (tmp)); \
4103 } while (0)
4104 #else
4105 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
4106 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
4107 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
4108 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
4109 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
4110 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
4111 #endif
4112 #ifdef __BIG_ENDIAN_BITFIELD
4113 #endif
4114 #ifdef __BIG_ENDIAN_BITFIELD
4115 #else
4116 #endif
4117 #ifdef __BIG_ENDIAN_BITFIELD
4118 #endif
4119 #ifdef __BIG_ENDIAN_BITFIELD
4120 #endif
4121 #ifdef VMXNET3_RSS
4122 #endif
4123 #ifdef __BIG_ENDIAN_BITFIELD
4124 #endif
4125 #ifdef CONFIG_PCI_MSI
4126 #endif
4127 #ifdef CONFIG_NET_POLL_CONTROLLER
4128 #ifdef CONFIG_PCI_MSI
4129 #endif
4130 #endif
4131 #ifdef CONFIG_PCI_MSI
4132 #endif
4133 #ifdef CONFIG_PCI_MSI
4134 #endif
4135 #ifdef CONFIG_PCI_MSI
4136 #endif
4137 #ifdef VMXNET3_RSS
4138 #endif
4139 /* LDV_COMMENT_END_PREP */
4140 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_set_mac_addr" */
4141 void * var_vmxnet3_set_mac_addr_68_p1;
4142 /* LDV_COMMENT_BEGIN_PREP */
4143 #ifdef CONFIG_PCI_MSI
4144 #endif
4145 #ifdef CONFIG_PCI_MSI
4146 #endif
4147 #ifdef CONFIG_NET_POLL_CONTROLLER
4148 #endif
4149 #ifdef VMXNET3_RSS
4150 #endif
4151 #ifdef VMXNET3_RSS
4152 #endif
4153 #ifdef VMXNET3_RSS
4154 #endif
4155 #ifdef VMXNET3_RSS
4156 #endif
4157 #ifdef VMXNET3_RSS
4158 #endif
4159 #ifdef VMXNET3_RSS
4160 #endif
4161 #ifdef CONFIG_PM
4162 #endif
4163 #ifdef CONFIG_PM
4164 #endif
4165 /* LDV_COMMENT_END_PREP */
4166 /* content: static int vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)*/
4167 /* LDV_COMMENT_BEGIN_PREP */
4168 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
4169 #ifdef __BIG_ENDIAN_BITFIELD
4170 #endif
4171 #ifdef __BIG_ENDIAN_BITFIELD
4172 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
4173 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
4174 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
4175 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
4176 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
4177 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
4178 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
4179 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
4180 VMXNET3_TCD_GEN_SIZE)
4181 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
4182 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
4183 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
4184 (dstrcd) = (tmp); \
4185 vmxnet3_RxCompToCPU((rcd), (tmp)); \
4186 } while (0)
4187 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
4188 (dstrxd) = (tmp); \
4189 vmxnet3_RxDescToCPU((rxd), (tmp)); \
4190 } while (0)
4191 #else
4192 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
4193 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
4194 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
4195 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
4196 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
4197 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
4198 #endif
4199 #ifdef __BIG_ENDIAN_BITFIELD
4200 #endif
4201 #ifdef __BIG_ENDIAN_BITFIELD
4202 #else
4203 #endif
4204 #ifdef __BIG_ENDIAN_BITFIELD
4205 #endif
4206 #ifdef __BIG_ENDIAN_BITFIELD
4207 #endif
4208 #ifdef VMXNET3_RSS
4209 #endif
4210 #ifdef __BIG_ENDIAN_BITFIELD
4211 #endif
4212 #ifdef CONFIG_PCI_MSI
4213 #endif
4214 #ifdef CONFIG_NET_POLL_CONTROLLER
4215 #ifdef CONFIG_PCI_MSI
4216 #endif
4217 #endif
4218 #ifdef CONFIG_PCI_MSI
4219 #endif
4220 #ifdef CONFIG_PCI_MSI
4221 #endif
4222 #ifdef CONFIG_PCI_MSI
4223 #endif
4224 #ifdef VMXNET3_RSS
4225 #endif
4226 /* LDV_COMMENT_END_PREP */
4227 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_change_mtu" */
4228 int var_vmxnet3_change_mtu_76_p1;
4229 /* LDV_COMMENT_BEGIN_PREP */
4230 #ifdef CONFIG_PCI_MSI
4231 #endif
4232 #ifdef CONFIG_PCI_MSI
4233 #endif
4234 #ifdef CONFIG_NET_POLL_CONTROLLER
4235 #endif
4236 #ifdef VMXNET3_RSS
4237 #endif
4238 #ifdef VMXNET3_RSS
4239 #endif
4240 #ifdef VMXNET3_RSS
4241 #endif
4242 #ifdef VMXNET3_RSS
4243 #endif
4244 #ifdef VMXNET3_RSS
4245 #endif
4246 #ifdef VMXNET3_RSS
4247 #endif
4248 #ifdef CONFIG_PM
4249 #endif
4250 #ifdef CONFIG_PM
4251 #endif
4252 /* LDV_COMMENT_END_PREP */
4253 /* content: static void vmxnet3_tx_timeout(struct net_device *netdev)*/
4254 /* LDV_COMMENT_BEGIN_PREP */
4255 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
4256 #ifdef __BIG_ENDIAN_BITFIELD
4257 #endif
4258 #ifdef __BIG_ENDIAN_BITFIELD
4259 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
4260 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
4261 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
4262 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
4263 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
4264 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
4265 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
4266 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
4267 VMXNET3_TCD_GEN_SIZE)
4268 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
4269 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
4270 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
4271 (dstrcd) = (tmp); \
4272 vmxnet3_RxCompToCPU((rcd), (tmp)); \
4273 } while (0)
4274 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
4275 (dstrxd) = (tmp); \
4276 vmxnet3_RxDescToCPU((rxd), (tmp)); \
4277 } while (0)
4278 #else
4279 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
4280 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
4281 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
4282 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
4283 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
4284 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
4285 #endif
4286 #ifdef __BIG_ENDIAN_BITFIELD
4287 #endif
4288 #ifdef __BIG_ENDIAN_BITFIELD
4289 #else
4290 #endif
4291 #ifdef __BIG_ENDIAN_BITFIELD
4292 #endif
4293 #ifdef __BIG_ENDIAN_BITFIELD
4294 #endif
4295 #ifdef VMXNET3_RSS
4296 #endif
4297 #ifdef __BIG_ENDIAN_BITFIELD
4298 #endif
4299 #ifdef CONFIG_PCI_MSI
4300 #endif
4301 #ifdef CONFIG_NET_POLL_CONTROLLER
4302 #ifdef CONFIG_PCI_MSI
4303 #endif
4304 #endif
4305 #ifdef CONFIG_PCI_MSI
4306 #endif
4307 #ifdef CONFIG_PCI_MSI
4308 #endif
4309 #ifdef CONFIG_PCI_MSI
4310 #endif
4311 #ifdef VMXNET3_RSS
4312 #endif
4313 #ifdef CONFIG_PCI_MSI
4314 #endif
4315 #ifdef CONFIG_PCI_MSI
4316 #endif
4317 /* LDV_COMMENT_END_PREP */
4318 /* LDV_COMMENT_BEGIN_PREP */
4319 #ifdef CONFIG_NET_POLL_CONTROLLER
4320 #endif
4321 #ifdef VMXNET3_RSS
4322 #endif
4323 #ifdef VMXNET3_RSS
4324 #endif
4325 #ifdef VMXNET3_RSS
4326 #endif
4327 #ifdef VMXNET3_RSS
4328 #endif
4329 #ifdef VMXNET3_RSS
4330 #endif
4331 #ifdef VMXNET3_RSS
4332 #endif
4333 #ifdef CONFIG_PM
4334 #endif
4335 #ifdef CONFIG_PM
4336 #endif
4337 /* LDV_COMMENT_END_PREP */
4338 /* content: static void vmxnet3_set_mc(struct net_device *netdev)*/
4339 /* LDV_COMMENT_BEGIN_PREP */
4340 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
4341 #ifdef __BIG_ENDIAN_BITFIELD
4342 #endif
4343 #ifdef __BIG_ENDIAN_BITFIELD
4344 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
4345 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
4346 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
4347 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
4348 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
4349 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
4350 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
4351 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
4352 VMXNET3_TCD_GEN_SIZE)
4353 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
4354 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
4355 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
4356 (dstrcd) = (tmp); \
4357 vmxnet3_RxCompToCPU((rcd), (tmp)); \
4358 } while (0)
4359 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
4360 (dstrxd) = (tmp); \
4361 vmxnet3_RxDescToCPU((rxd), (tmp)); \
4362 } while (0)
4363 #else
4364 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
4365 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
4366 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
4367 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
4368 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
4369 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
4370 #endif
4371 #ifdef __BIG_ENDIAN_BITFIELD
4372 #endif
4373 #ifdef __BIG_ENDIAN_BITFIELD
4374 #else
4375 #endif
4376 #ifdef __BIG_ENDIAN_BITFIELD
4377 #endif
4378 #ifdef __BIG_ENDIAN_BITFIELD
4379 #endif
4380 #ifdef VMXNET3_RSS
4381 #endif
4382 #ifdef __BIG_ENDIAN_BITFIELD
4383 #endif
4384 #ifdef CONFIG_PCI_MSI
4385 #endif
4386 #ifdef CONFIG_NET_POLL_CONTROLLER
4387 #ifdef CONFIG_PCI_MSI
4388 #endif
4389 #endif
4390 #ifdef CONFIG_PCI_MSI
4391 #endif
4392 #ifdef CONFIG_PCI_MSI
4393 #endif
4394 #ifdef CONFIG_PCI_MSI
4395 #endif
4396 /* LDV_COMMENT_END_PREP */
4397 /* LDV_COMMENT_BEGIN_PREP */
4398 #ifdef VMXNET3_RSS
4399 #endif
4400 #ifdef CONFIG_PCI_MSI
4401 #endif
4402 #ifdef CONFIG_PCI_MSI
4403 #endif
4404 #ifdef CONFIG_NET_POLL_CONTROLLER
4405 #endif
4406 #ifdef VMXNET3_RSS
4407 #endif
4408 #ifdef VMXNET3_RSS
4409 #endif
4410 #ifdef VMXNET3_RSS
4411 #endif
4412 #ifdef VMXNET3_RSS
4413 #endif
4414 #ifdef VMXNET3_RSS
4415 #endif
4416 #ifdef VMXNET3_RSS
4417 #endif
4418 #ifdef CONFIG_PM
4419 #endif
4420 #ifdef CONFIG_PM
4421 #endif
4422 /* LDV_COMMENT_END_PREP */
4423 /* content: static int vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)*/
4424 /* LDV_COMMENT_BEGIN_PREP */
4425 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
4426 #ifdef __BIG_ENDIAN_BITFIELD
4427 #endif
4428 #ifdef __BIG_ENDIAN_BITFIELD
4429 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
4430 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
4431 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
4432 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
4433 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
4434 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
4435 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
4436 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
4437 VMXNET3_TCD_GEN_SIZE)
4438 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
4439 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
4440 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
4441 (dstrcd) = (tmp); \
4442 vmxnet3_RxCompToCPU((rcd), (tmp)); \
4443 } while (0)
4444 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
4445 (dstrxd) = (tmp); \
4446 vmxnet3_RxDescToCPU((rxd), (tmp)); \
4447 } while (0)
4448 #else
4449 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
4450 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
4451 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
4452 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
4453 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
4454 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
4455 #endif
4456 #ifdef __BIG_ENDIAN_BITFIELD
4457 #endif
4458 #ifdef __BIG_ENDIAN_BITFIELD
4459 #else
4460 #endif
4461 #ifdef __BIG_ENDIAN_BITFIELD
4462 #endif
4463 #ifdef __BIG_ENDIAN_BITFIELD
4464 #endif
4465 #ifdef VMXNET3_RSS
4466 #endif
4467 #ifdef __BIG_ENDIAN_BITFIELD
4468 #endif
4469 #ifdef CONFIG_PCI_MSI
4470 #endif
4471 #ifdef CONFIG_NET_POLL_CONTROLLER
4472 #ifdef CONFIG_PCI_MSI
4473 #endif
4474 #endif
4475 #ifdef CONFIG_PCI_MSI
4476 #endif
4477 #ifdef CONFIG_PCI_MSI
4478 #endif
4479 #ifdef CONFIG_PCI_MSI
4480 #endif
4481 /* LDV_COMMENT_END_PREP */
4482 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_vlan_rx_add_vid" */
4483 __be16 var_vmxnet3_vlan_rx_add_vid_57_p1;
4484 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_vlan_rx_add_vid" */
4485 u16 var_vmxnet3_vlan_rx_add_vid_57_p2;
4486 /* LDV_COMMENT_BEGIN_PREP */
4487 #ifdef VMXNET3_RSS
4488 #endif
4489 #ifdef CONFIG_PCI_MSI
4490 #endif
4491 #ifdef CONFIG_PCI_MSI
4492 #endif
4493 #ifdef CONFIG_NET_POLL_CONTROLLER
4494 #endif
4495 #ifdef VMXNET3_RSS
4496 #endif
4497 #ifdef VMXNET3_RSS
4498 #endif
4499 #ifdef VMXNET3_RSS
4500 #endif
4501 #ifdef VMXNET3_RSS
4502 #endif
4503 #ifdef VMXNET3_RSS
4504 #endif
4505 #ifdef VMXNET3_RSS
4506 #endif
4507 #ifdef CONFIG_PM
4508 #endif
4509 #ifdef CONFIG_PM
4510 #endif
4511 /* LDV_COMMENT_END_PREP */
4512 /* content: static int vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)*/
4513 /* LDV_COMMENT_BEGIN_PREP */
4514 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
4515 #ifdef __BIG_ENDIAN_BITFIELD
4516 #endif
4517 #ifdef __BIG_ENDIAN_BITFIELD
4518 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
4519 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
4520 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
4521 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
4522 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
4523 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
4524 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
4525 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
4526 VMXNET3_TCD_GEN_SIZE)
4527 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
4528 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
4529 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
4530 (dstrcd) = (tmp); \
4531 vmxnet3_RxCompToCPU((rcd), (tmp)); \
4532 } while (0)
4533 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
4534 (dstrxd) = (tmp); \
4535 vmxnet3_RxDescToCPU((rxd), (tmp)); \
4536 } while (0)
4537 #else
4538 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
4539 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
4540 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
4541 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
4542 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
4543 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
4544 #endif
4545 #ifdef __BIG_ENDIAN_BITFIELD
4546 #endif
4547 #ifdef __BIG_ENDIAN_BITFIELD
4548 #else
4549 #endif
4550 #ifdef __BIG_ENDIAN_BITFIELD
4551 #endif
4552 #ifdef __BIG_ENDIAN_BITFIELD
4553 #endif
4554 #ifdef VMXNET3_RSS
4555 #endif
4556 #ifdef __BIG_ENDIAN_BITFIELD
4557 #endif
4558 #ifdef CONFIG_PCI_MSI
4559 #endif
4560 #ifdef CONFIG_NET_POLL_CONTROLLER
4561 #ifdef CONFIG_PCI_MSI
4562 #endif
4563 #endif
4564 #ifdef CONFIG_PCI_MSI
4565 #endif
4566 #ifdef CONFIG_PCI_MSI
4567 #endif
4568 #ifdef CONFIG_PCI_MSI
4569 #endif
4570 /* LDV_COMMENT_END_PREP */
4571 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_vlan_rx_kill_vid" */
4572 __be16 var_vmxnet3_vlan_rx_kill_vid_58_p1;
4573 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_vlan_rx_kill_vid" */
4574 u16 var_vmxnet3_vlan_rx_kill_vid_58_p2;
4575 /* LDV_COMMENT_BEGIN_PREP */
4576 #ifdef VMXNET3_RSS
4577 #endif
4578 #ifdef CONFIG_PCI_MSI
4579 #endif
4580 #ifdef CONFIG_PCI_MSI
4581 #endif
4582 #ifdef CONFIG_NET_POLL_CONTROLLER
4583 #endif
4584 #ifdef VMXNET3_RSS
4585 #endif
4586 #ifdef VMXNET3_RSS
4587 #endif
4588 #ifdef VMXNET3_RSS
4589 #endif
4590 #ifdef VMXNET3_RSS
4591 #endif
4592 #ifdef VMXNET3_RSS
4593 #endif
4594 #ifdef VMXNET3_RSS
4595 #endif
4596 #ifdef CONFIG_PM
4597 #endif
4598 #ifdef CONFIG_PM
4599 #endif
4600 /* LDV_COMMENT_END_PREP */
4601 /* content: static void vmxnet3_netpoll(struct net_device *netdev)*/
4602 /* LDV_COMMENT_BEGIN_PREP */
4603 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
4604 #ifdef __BIG_ENDIAN_BITFIELD
4605 #endif
4606 #ifdef __BIG_ENDIAN_BITFIELD
4607 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
4608 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
4609 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
4610 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
4611 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
4612 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
4613 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
4614 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
4615 VMXNET3_TCD_GEN_SIZE)
4616 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
4617 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
4618 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
4619 (dstrcd) = (tmp); \
4620 vmxnet3_RxCompToCPU((rcd), (tmp)); \
4621 } while (0)
4622 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
4623 (dstrxd) = (tmp); \
4624 vmxnet3_RxDescToCPU((rxd), (tmp)); \
4625 } while (0)
4626 #else
4627 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
4628 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
4629 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
4630 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
4631 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
4632 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
4633 #endif
4634 #ifdef __BIG_ENDIAN_BITFIELD
4635 #endif
4636 #ifdef __BIG_ENDIAN_BITFIELD
4637 #else
4638 #endif
4639 #ifdef __BIG_ENDIAN_BITFIELD
4640 #endif
4641 #ifdef __BIG_ENDIAN_BITFIELD
4642 #endif
4643 #ifdef VMXNET3_RSS
4644 #endif
4645 #ifdef __BIG_ENDIAN_BITFIELD
4646 #endif
4647 #ifdef CONFIG_PCI_MSI
4648 #endif
4649 #ifdef CONFIG_NET_POLL_CONTROLLER
4650 /* LDV_COMMENT_END_PREP */
4651 /* LDV_COMMENT_BEGIN_PREP */
4652 #endif
4653 #ifdef CONFIG_PCI_MSI
4654 #endif
4655 #ifdef CONFIG_PCI_MSI
4656 #endif
4657 #ifdef CONFIG_PCI_MSI
4658 #endif
4659 #ifdef VMXNET3_RSS
4660 #endif
4661 #ifdef CONFIG_PCI_MSI
4662 #endif
4663 #ifdef CONFIG_PCI_MSI
4664 #endif
4665 #ifdef CONFIG_NET_POLL_CONTROLLER
4666 #endif
4667 #ifdef VMXNET3_RSS
4668 #endif
4669 #ifdef VMXNET3_RSS
4670 #endif
4671 #ifdef VMXNET3_RSS
4672 #endif
4673 #ifdef VMXNET3_RSS
4674 #endif
4675 #ifdef VMXNET3_RSS
4676 #endif
4677 #ifdef VMXNET3_RSS
4678 #endif
4679 #ifdef CONFIG_PM
4680 #endif
4681 #ifdef CONFIG_PM
4682 #endif
4683 /* LDV_COMMENT_END_PREP */
4684
4685 /** STRUCT: struct type: dev_pm_ops, struct name: vmxnet3_pm_ops **/
4686 /* content: static int vmxnet3_suspend(struct device *device)*/
4687 /* LDV_COMMENT_BEGIN_PREP */
4688 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
4689 #ifdef __BIG_ENDIAN_BITFIELD
4690 #endif
4691 #ifdef __BIG_ENDIAN_BITFIELD
4692 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
4693 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
4694 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
4695 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
4696 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
4697 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
4698 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
4699 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
4700 VMXNET3_TCD_GEN_SIZE)
4701 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
4702 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
4703 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
4704 (dstrcd) = (tmp); \
4705 vmxnet3_RxCompToCPU((rcd), (tmp)); \
4706 } while (0)
4707 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
4708 (dstrxd) = (tmp); \
4709 vmxnet3_RxDescToCPU((rxd), (tmp)); \
4710 } while (0)
4711 #else
4712 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
4713 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
4714 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
4715 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
4716 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
4717 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
4718 #endif
4719 #ifdef __BIG_ENDIAN_BITFIELD
4720 #endif
4721 #ifdef __BIG_ENDIAN_BITFIELD
4722 #else
4723 #endif
4724 #ifdef __BIG_ENDIAN_BITFIELD
4725 #endif
4726 #ifdef __BIG_ENDIAN_BITFIELD
4727 #endif
4728 #ifdef VMXNET3_RSS
4729 #endif
4730 #ifdef __BIG_ENDIAN_BITFIELD
4731 #endif
4732 #ifdef CONFIG_PCI_MSI
4733 #endif
4734 #ifdef CONFIG_NET_POLL_CONTROLLER
4735 #ifdef CONFIG_PCI_MSI
4736 #endif
4737 #endif
4738 #ifdef CONFIG_PCI_MSI
4739 #endif
4740 #ifdef CONFIG_PCI_MSI
4741 #endif
4742 #ifdef CONFIG_PCI_MSI
4743 #endif
4744 #ifdef VMXNET3_RSS
4745 #endif
4746 #ifdef CONFIG_PCI_MSI
4747 #endif
4748 #ifdef CONFIG_PCI_MSI
4749 #endif
4750 #ifdef CONFIG_NET_POLL_CONTROLLER
4751 #endif
4752 #ifdef VMXNET3_RSS
4753 #endif
4754 #ifdef VMXNET3_RSS
4755 #endif
4756 #ifdef VMXNET3_RSS
4757 #endif
4758 #ifdef VMXNET3_RSS
4759 #endif
4760 #ifdef VMXNET3_RSS
4761 #endif
4762 #ifdef VMXNET3_RSS
4763 #endif
4764 #ifdef CONFIG_PM
4765 /* LDV_COMMENT_END_PREP */
4766 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_suspend" */
4767 struct device * var_group3;
4768 /* LDV_COMMENT_BEGIN_PREP */
4769 #endif
4770 #ifdef CONFIG_PM
4771 #endif
4772 /* LDV_COMMENT_END_PREP */
4773 /* content: static int vmxnet3_resume(struct device *device)*/
4774 /* LDV_COMMENT_BEGIN_PREP */
4775 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
4776 #ifdef __BIG_ENDIAN_BITFIELD
4777 #endif
4778 #ifdef __BIG_ENDIAN_BITFIELD
4779 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
4780 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
4781 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
4782 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
4783 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
4784 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
4785 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
4786 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
4787 VMXNET3_TCD_GEN_SIZE)
4788 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
4789 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
4790 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
4791 (dstrcd) = (tmp); \
4792 vmxnet3_RxCompToCPU((rcd), (tmp)); \
4793 } while (0)
4794 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
4795 (dstrxd) = (tmp); \
4796 vmxnet3_RxDescToCPU((rxd), (tmp)); \
4797 } while (0)
4798 #else
4799 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
4800 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
4801 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
4802 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
4803 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
4804 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
4805 #endif
4806 #ifdef __BIG_ENDIAN_BITFIELD
4807 #endif
4808 #ifdef __BIG_ENDIAN_BITFIELD
4809 #else
4810 #endif
4811 #ifdef __BIG_ENDIAN_BITFIELD
4812 #endif
4813 #ifdef __BIG_ENDIAN_BITFIELD
4814 #endif
4815 #ifdef VMXNET3_RSS
4816 #endif
4817 #ifdef __BIG_ENDIAN_BITFIELD
4818 #endif
4819 #ifdef CONFIG_PCI_MSI
4820 #endif
4821 #ifdef CONFIG_NET_POLL_CONTROLLER
4822 #ifdef CONFIG_PCI_MSI
4823 #endif
4824 #endif
4825 #ifdef CONFIG_PCI_MSI
4826 #endif
4827 #ifdef CONFIG_PCI_MSI
4828 #endif
4829 #ifdef CONFIG_PCI_MSI
4830 #endif
4831 #ifdef VMXNET3_RSS
4832 #endif
4833 #ifdef CONFIG_PCI_MSI
4834 #endif
4835 #ifdef CONFIG_PCI_MSI
4836 #endif
4837 #ifdef CONFIG_NET_POLL_CONTROLLER
4838 #endif
4839 #ifdef VMXNET3_RSS
4840 #endif
4841 #ifdef VMXNET3_RSS
4842 #endif
4843 #ifdef VMXNET3_RSS
4844 #endif
4845 #ifdef VMXNET3_RSS
4846 #endif
4847 #ifdef VMXNET3_RSS
4848 #endif
4849 #ifdef VMXNET3_RSS
4850 #endif
4851 #ifdef CONFIG_PM
4852 /* LDV_COMMENT_END_PREP */
4853 /* LDV_COMMENT_BEGIN_PREP */
4854 #endif
4855 #ifdef CONFIG_PM
4856 #endif
4857 /* LDV_COMMENT_END_PREP */
4858
4859 /** STRUCT: struct type: pci_driver, struct name: vmxnet3_driver **/
4860 /* content: static int vmxnet3_probe_device(struct pci_dev *pdev, const struct pci_device_id *id)*/
4861 /* LDV_COMMENT_BEGIN_PREP */
4862 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
4863 #ifdef __BIG_ENDIAN_BITFIELD
4864 #endif
4865 #ifdef __BIG_ENDIAN_BITFIELD
4866 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
4867 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
4868 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
4869 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
4870 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
4871 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
4872 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
4873 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
4874 VMXNET3_TCD_GEN_SIZE)
4875 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
4876 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
4877 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
4878 (dstrcd) = (tmp); \
4879 vmxnet3_RxCompToCPU((rcd), (tmp)); \
4880 } while (0)
4881 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
4882 (dstrxd) = (tmp); \
4883 vmxnet3_RxDescToCPU((rxd), (tmp)); \
4884 } while (0)
4885 #else
4886 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
4887 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
4888 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
4889 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
4890 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
4891 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
4892 #endif
4893 #ifdef __BIG_ENDIAN_BITFIELD
4894 #endif
4895 #ifdef __BIG_ENDIAN_BITFIELD
4896 #else
4897 #endif
4898 #ifdef __BIG_ENDIAN_BITFIELD
4899 #endif
4900 #ifdef __BIG_ENDIAN_BITFIELD
4901 #endif
4902 #ifdef VMXNET3_RSS
4903 #endif
4904 #ifdef __BIG_ENDIAN_BITFIELD
4905 #endif
4906 #ifdef CONFIG_PCI_MSI
4907 #endif
4908 #ifdef CONFIG_NET_POLL_CONTROLLER
4909 #ifdef CONFIG_PCI_MSI
4910 #endif
4911 #endif
4912 #ifdef CONFIG_PCI_MSI
4913 #endif
4914 #ifdef CONFIG_PCI_MSI
4915 #endif
4916 #ifdef CONFIG_PCI_MSI
4917 #endif
4918 #ifdef VMXNET3_RSS
4919 #endif
4920 #ifdef CONFIG_PCI_MSI
4921 #endif
4922 #ifdef CONFIG_PCI_MSI
4923 #endif
4924 /* LDV_COMMENT_END_PREP */
4925 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_probe_device" */
4926 struct pci_dev * var_group4;
4927 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_probe_device" */
4928 const struct pci_device_id * var_vmxnet3_probe_device_84_p1;
4929 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "vmxnet3_probe_device" */
4930 static int res_vmxnet3_probe_device_84;
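/* Note: res_* variables of this kind hold the return value of the corresponding callback so the harness can pass them to ldv_check_return_value() and, as in the open/close cases further below, appear to leave the scenario on a failure result. */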
4931 /* LDV_COMMENT_BEGIN_PREP */
4932 #ifdef VMXNET3_RSS
4933 #endif
4934 #ifdef VMXNET3_RSS
4935 #endif
4936 #ifdef CONFIG_PM
4937 #endif
4938 #ifdef CONFIG_PM
4939 #endif
4940 /* LDV_COMMENT_END_PREP */
4941 /* content: static void vmxnet3_remove_device(struct pci_dev *pdev)*/
4942 /* LDV_COMMENT_BEGIN_PREP */
4943 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
4944 #ifdef __BIG_ENDIAN_BITFIELD
4945 #endif
4946 #ifdef __BIG_ENDIAN_BITFIELD
4947 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
4948 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
4949 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
4950 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
4951 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
4952 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
4953 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
4954 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
4955 VMXNET3_TCD_GEN_SIZE)
4956 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
4957 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
4958 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
4959 (dstrcd) = (tmp); \
4960 vmxnet3_RxCompToCPU((rcd), (tmp)); \
4961 } while (0)
4962 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
4963 (dstrxd) = (tmp); \
4964 vmxnet3_RxDescToCPU((rxd), (tmp)); \
4965 } while (0)
4966 #else
4967 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
4968 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
4969 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
4970 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
4971 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
4972 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
4973 #endif
4974 #ifdef __BIG_ENDIAN_BITFIELD
4975 #endif
4976 #ifdef __BIG_ENDIAN_BITFIELD
4977 #else
4978 #endif
4979 #ifdef __BIG_ENDIAN_BITFIELD
4980 #endif
4981 #ifdef __BIG_ENDIAN_BITFIELD
4982 #endif
4983 #ifdef VMXNET3_RSS
4984 #endif
4985 #ifdef __BIG_ENDIAN_BITFIELD
4986 #endif
4987 #ifdef CONFIG_PCI_MSI
4988 #endif
4989 #ifdef CONFIG_NET_POLL_CONTROLLER
4990 #ifdef CONFIG_PCI_MSI
4991 #endif
4992 #endif
4993 #ifdef CONFIG_PCI_MSI
4994 #endif
4995 #ifdef CONFIG_PCI_MSI
4996 #endif
4997 #ifdef CONFIG_PCI_MSI
4998 #endif
4999 #ifdef VMXNET3_RSS
5000 #endif
5001 #ifdef CONFIG_PCI_MSI
5002 #endif
5003 #ifdef CONFIG_PCI_MSI
5004 #endif
5005 #ifdef CONFIG_NET_POLL_CONTROLLER
5006 #endif
5007 #ifdef VMXNET3_RSS
5008 #endif
5009 #ifdef VMXNET3_RSS
5010 #endif
5011 #ifdef VMXNET3_RSS
5012 #endif
5013 #ifdef VMXNET3_RSS
5014 #endif
5015 /* LDV_COMMENT_END_PREP */
5016 /* LDV_COMMENT_BEGIN_PREP */
5017 #ifdef CONFIG_PM
5018 #endif
5019 #ifdef CONFIG_PM
5020 #endif
5021 /* LDV_COMMENT_END_PREP */
5022 /* content: static void vmxnet3_shutdown_device(struct pci_dev *pdev)*/
5023 /* LDV_COMMENT_BEGIN_PREP */
5024 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
5025 #ifdef __BIG_ENDIAN_BITFIELD
5026 #endif
5027 #ifdef __BIG_ENDIAN_BITFIELD
5028 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
5029 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
5030 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
5031 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
5032 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
5033 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
5034 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
5035 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
5036 VMXNET3_TCD_GEN_SIZE)
5037 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
5038 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
5039 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
5040 (dstrcd) = (tmp); \
5041 vmxnet3_RxCompToCPU((rcd), (tmp)); \
5042 } while (0)
5043 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
5044 (dstrxd) = (tmp); \
5045 vmxnet3_RxDescToCPU((rxd), (tmp)); \
5046 } while (0)
5047 #else
5048 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
5049 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
5050 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
5051 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
5052 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
5053 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
5054 #endif
5055 #ifdef __BIG_ENDIAN_BITFIELD
5056 #endif
5057 #ifdef __BIG_ENDIAN_BITFIELD
5058 #else
5059 #endif
5060 #ifdef __BIG_ENDIAN_BITFIELD
5061 #endif
5062 #ifdef __BIG_ENDIAN_BITFIELD
5063 #endif
5064 #ifdef VMXNET3_RSS
5065 #endif
5066 #ifdef __BIG_ENDIAN_BITFIELD
5067 #endif
5068 #ifdef CONFIG_PCI_MSI
5069 #endif
5070 #ifdef CONFIG_NET_POLL_CONTROLLER
5071 #ifdef CONFIG_PCI_MSI
5072 #endif
5073 #endif
5074 #ifdef CONFIG_PCI_MSI
5075 #endif
5076 #ifdef CONFIG_PCI_MSI
5077 #endif
5078 #ifdef CONFIG_PCI_MSI
5079 #endif
5080 #ifdef VMXNET3_RSS
5081 #endif
5082 #ifdef CONFIG_PCI_MSI
5083 #endif
5084 #ifdef CONFIG_PCI_MSI
5085 #endif
5086 #ifdef CONFIG_NET_POLL_CONTROLLER
5087 #endif
5088 #ifdef VMXNET3_RSS
5089 #endif
5090 #ifdef VMXNET3_RSS
5091 #endif
5092 #ifdef VMXNET3_RSS
5093 #endif
5094 #ifdef VMXNET3_RSS
5095 #endif
5096 #ifdef VMXNET3_RSS
5097 #endif
5098 #ifdef VMXNET3_RSS
5099 #endif
5100 /* LDV_COMMENT_END_PREP */
5101 /* LDV_COMMENT_BEGIN_PREP */
5102 #ifdef CONFIG_PM
5103 #endif
5104 #ifdef CONFIG_PM
5105 #endif
5106 /* LDV_COMMENT_END_PREP */
5107
5108 /** CALLBACK SECTION request_irq **/
5109 /* content: static irqreturn_t vmxnet3_intr(int irq, void *dev_id)*/
5110 /* LDV_COMMENT_BEGIN_PREP */
5111 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
5112 #ifdef __BIG_ENDIAN_BITFIELD
5113 #endif
5114 #ifdef __BIG_ENDIAN_BITFIELD
5115 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
5116 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
5117 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
5118 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
5119 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
5120 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
5121 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
5122 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
5123 VMXNET3_TCD_GEN_SIZE)
5124 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
5125 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
5126 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
5127 (dstrcd) = (tmp); \
5128 vmxnet3_RxCompToCPU((rcd), (tmp)); \
5129 } while (0)
5130 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
5131 (dstrxd) = (tmp); \
5132 vmxnet3_RxDescToCPU((rxd), (tmp)); \
5133 } while (0)
5134 #else
5135 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
5136 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
5137 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
5138 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
5139 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
5140 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
5141 #endif
5142 #ifdef __BIG_ENDIAN_BITFIELD
5143 #endif
5144 #ifdef __BIG_ENDIAN_BITFIELD
5145 #else
5146 #endif
5147 #ifdef __BIG_ENDIAN_BITFIELD
5148 #endif
5149 #ifdef __BIG_ENDIAN_BITFIELD
5150 #endif
5151 #ifdef VMXNET3_RSS
5152 #endif
5153 #ifdef __BIG_ENDIAN_BITFIELD
5154 #endif
5155 #ifdef CONFIG_PCI_MSI
5156 #endif
5157 /* LDV_COMMENT_END_PREP */
5158 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_intr" */
5159 int var_vmxnet3_intr_52_p0;
5160 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_intr" */
5161 void * var_vmxnet3_intr_52_p1;
5162 /* LDV_COMMENT_BEGIN_PREP */
5163 #ifdef CONFIG_NET_POLL_CONTROLLER
5164 #ifdef CONFIG_PCI_MSI
5165 #endif
5166 #endif
5167 #ifdef CONFIG_PCI_MSI
5168 #endif
5169 #ifdef CONFIG_PCI_MSI
5170 #endif
5171 #ifdef CONFIG_PCI_MSI
5172 #endif
5173 #ifdef VMXNET3_RSS
5174 #endif
5175 #ifdef CONFIG_PCI_MSI
5176 #endif
5177 #ifdef CONFIG_PCI_MSI
5178 #endif
5179 #ifdef CONFIG_NET_POLL_CONTROLLER
5180 #endif
5181 #ifdef VMXNET3_RSS
5182 #endif
5183 #ifdef VMXNET3_RSS
5184 #endif
5185 #ifdef VMXNET3_RSS
5186 #endif
5187 #ifdef VMXNET3_RSS
5188 #endif
5189 #ifdef VMXNET3_RSS
5190 #endif
5191 #ifdef VMXNET3_RSS
5192 #endif
5193 #ifdef CONFIG_PM
5194 #endif
5195 #ifdef CONFIG_PM
5196 #endif
5197 /* LDV_COMMENT_END_PREP */
5198 /* content: static irqreturn_t vmxnet3_msix_event(int irq, void *data)*/
5199 /* LDV_COMMENT_BEGIN_PREP */
5200 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
5201 #ifdef __BIG_ENDIAN_BITFIELD
5202 #endif
5203 #ifdef __BIG_ENDIAN_BITFIELD
5204 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
5205 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
5206 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
5207 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
5208 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
5209 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
5210 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
5211 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
5212 VMXNET3_TCD_GEN_SIZE)
5213 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
5214 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
5215 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
5216 (dstrcd) = (tmp); \
5217 vmxnet3_RxCompToCPU((rcd), (tmp)); \
5218 } while (0)
5219 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
5220 (dstrxd) = (tmp); \
5221 vmxnet3_RxDescToCPU((rxd), (tmp)); \
5222 } while (0)
5223 #else
5224 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
5225 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
5226 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
5227 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
5228 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
5229 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
5230 #endif
5231 #ifdef __BIG_ENDIAN_BITFIELD
5232 #endif
5233 #ifdef __BIG_ENDIAN_BITFIELD
5234 #else
5235 #endif
5236 #ifdef __BIG_ENDIAN_BITFIELD
5237 #endif
5238 #ifdef __BIG_ENDIAN_BITFIELD
5239 #endif
5240 #ifdef VMXNET3_RSS
5241 #endif
5242 #ifdef __BIG_ENDIAN_BITFIELD
5243 #endif
5244 #ifdef CONFIG_PCI_MSI
5245 /* LDV_COMMENT_END_PREP */
5246 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_msix_event" */
5247 int var_vmxnet3_msix_event_51_p0;
5248 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_msix_event" */
5249 void * var_vmxnet3_msix_event_51_p1;
5250 /* LDV_COMMENT_BEGIN_PREP */
5251 #endif
5252 #ifdef CONFIG_NET_POLL_CONTROLLER
5253 #ifdef CONFIG_PCI_MSI
5254 #endif
5255 #endif
5256 #ifdef CONFIG_PCI_MSI
5257 #endif
5258 #ifdef CONFIG_PCI_MSI
5259 #endif
5260 #ifdef CONFIG_PCI_MSI
5261 #endif
5262 #ifdef VMXNET3_RSS
5263 #endif
5264 #ifdef CONFIG_PCI_MSI
5265 #endif
5266 #ifdef CONFIG_PCI_MSI
5267 #endif
5268 #ifdef CONFIG_NET_POLL_CONTROLLER
5269 #endif
5270 #ifdef VMXNET3_RSS
5271 #endif
5272 #ifdef VMXNET3_RSS
5273 #endif
5274 #ifdef VMXNET3_RSS
5275 #endif
5276 #ifdef VMXNET3_RSS
5277 #endif
5278 #ifdef VMXNET3_RSS
5279 #endif
5280 #ifdef VMXNET3_RSS
5281 #endif
5282 #ifdef CONFIG_PM
5283 #endif
5284 #ifdef CONFIG_PM
5285 #endif
5286 /* LDV_COMMENT_END_PREP */
5287 /* content: static irqreturn_t vmxnet3_msix_rx(int irq, void *data)*/
5288 /* LDV_COMMENT_BEGIN_PREP */
5289 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
5290 #ifdef __BIG_ENDIAN_BITFIELD
5291 #endif
5292 #ifdef __BIG_ENDIAN_BITFIELD
5293 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
5294 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
5295 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
5296 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
5297 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
5298 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
5299 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
5300 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
5301 VMXNET3_TCD_GEN_SIZE)
5302 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
5303 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
5304 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
5305 (dstrcd) = (tmp); \
5306 vmxnet3_RxCompToCPU((rcd), (tmp)); \
5307 } while (0)
5308 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
5309 (dstrxd) = (tmp); \
5310 vmxnet3_RxDescToCPU((rxd), (tmp)); \
5311 } while (0)
5312 #else
5313 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
5314 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
5315 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
5316 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
5317 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
5318 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
5319 #endif
5320 #ifdef __BIG_ENDIAN_BITFIELD
5321 #endif
5322 #ifdef __BIG_ENDIAN_BITFIELD
5323 #else
5324 #endif
5325 #ifdef __BIG_ENDIAN_BITFIELD
5326 #endif
5327 #ifdef __BIG_ENDIAN_BITFIELD
5328 #endif
5329 #ifdef VMXNET3_RSS
5330 #endif
5331 #ifdef __BIG_ENDIAN_BITFIELD
5332 #endif
5333 #ifdef CONFIG_PCI_MSI
5334 /* LDV_COMMENT_END_PREP */
5335 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_msix_rx" */
5336 int var_vmxnet3_msix_rx_50_p0;
5337 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_msix_rx" */
5338 void * var_vmxnet3_msix_rx_50_p1;
5339 /* LDV_COMMENT_BEGIN_PREP */
5340 #endif
5341 #ifdef CONFIG_NET_POLL_CONTROLLER
5342 #ifdef CONFIG_PCI_MSI
5343 #endif
5344 #endif
5345 #ifdef CONFIG_PCI_MSI
5346 #endif
5347 #ifdef CONFIG_PCI_MSI
5348 #endif
5349 #ifdef CONFIG_PCI_MSI
5350 #endif
5351 #ifdef VMXNET3_RSS
5352 #endif
5353 #ifdef CONFIG_PCI_MSI
5354 #endif
5355 #ifdef CONFIG_PCI_MSI
5356 #endif
5357 #ifdef CONFIG_NET_POLL_CONTROLLER
5358 #endif
5359 #ifdef VMXNET3_RSS
5360 #endif
5361 #ifdef VMXNET3_RSS
5362 #endif
5363 #ifdef VMXNET3_RSS
5364 #endif
5365 #ifdef VMXNET3_RSS
5366 #endif
5367 #ifdef VMXNET3_RSS
5368 #endif
5369 #ifdef VMXNET3_RSS
5370 #endif
5371 #ifdef CONFIG_PM
5372 #endif
5373 #ifdef CONFIG_PM
5374 #endif
5375 /* LDV_COMMENT_END_PREP */
5376 /* content: static irqreturn_t vmxnet3_msix_tx(int irq, void *data)*/
5377 /* LDV_COMMENT_BEGIN_PREP */
5378 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
5379 #ifdef __BIG_ENDIAN_BITFIELD
5380 #endif
5381 #ifdef __BIG_ENDIAN_BITFIELD
5382 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
5383 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
5384 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
5385 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
5386 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
5387 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
5388 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
5389 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
5390 VMXNET3_TCD_GEN_SIZE)
5391 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
5392 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
5393 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
5394 (dstrcd) = (tmp); \
5395 vmxnet3_RxCompToCPU((rcd), (tmp)); \
5396 } while (0)
5397 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
5398 (dstrxd) = (tmp); \
5399 vmxnet3_RxDescToCPU((rxd), (tmp)); \
5400 } while (0)
5401 #else
5402 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
5403 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
5404 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
5405 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
5406 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
5407 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
5408 #endif
5409 #ifdef __BIG_ENDIAN_BITFIELD
5410 #endif
5411 #ifdef __BIG_ENDIAN_BITFIELD
5412 #else
5413 #endif
5414 #ifdef __BIG_ENDIAN_BITFIELD
5415 #endif
5416 #ifdef __BIG_ENDIAN_BITFIELD
5417 #endif
5418 #ifdef VMXNET3_RSS
5419 #endif
5420 #ifdef __BIG_ENDIAN_BITFIELD
5421 #endif
5422 #ifdef CONFIG_PCI_MSI
5423 /* LDV_COMMENT_END_PREP */
5424 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_msix_tx" */
5425 int var_vmxnet3_msix_tx_49_p0;
5426 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_msix_tx" */
5427 void * var_vmxnet3_msix_tx_49_p1;
5428 /* LDV_COMMENT_BEGIN_PREP */
5429 #endif
5430 #ifdef CONFIG_NET_POLL_CONTROLLER
5431 #ifdef CONFIG_PCI_MSI
5432 #endif
5433 #endif
5434 #ifdef CONFIG_PCI_MSI
5435 #endif
5436 #ifdef CONFIG_PCI_MSI
5437 #endif
5438 #ifdef CONFIG_PCI_MSI
5439 #endif
5440 #ifdef VMXNET3_RSS
5441 #endif
5442 #ifdef CONFIG_PCI_MSI
5443 #endif
5444 #ifdef CONFIG_PCI_MSI
5445 #endif
5446 #ifdef CONFIG_NET_POLL_CONTROLLER
5447 #endif
5448 #ifdef VMXNET3_RSS
5449 #endif
5450 #ifdef VMXNET3_RSS
5451 #endif
5452 #ifdef VMXNET3_RSS
5453 #endif
5454 #ifdef VMXNET3_RSS
5455 #endif
5456 #ifdef VMXNET3_RSS
5457 #endif
5458 #ifdef VMXNET3_RSS
5459 #endif
5460 #ifdef CONFIG_PM
5461 #endif
5462 #ifdef CONFIG_PM
5463 #endif
5464 /* LDV_COMMENT_END_PREP */
5465
5466
5467
5468
5469 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
5470 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
5471 /*============================= VARIABLE INITIALIZING PART =============================*/
5472 LDV_IN_INTERRUPT=1;
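/* Note: LDV_IN_INTERRUPT is the environment-model flag for execution context; setting it to 1 here presumably records the initial, non-interrupt state before any handlers run. */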
5473
5474
5475
5476
5477 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
5478 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
5479 /*============================= FUNCTION CALL SECTION =============================*/
5480 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
5481 ldv_initialize();
5482
5483 /** INIT: init_type: ST_MODULE_INIT **/
5484 /* content: static int __init vmxnet3_init_module(void)*/
5485 /* LDV_COMMENT_BEGIN_PREP */
5486 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
5487 #ifdef __BIG_ENDIAN_BITFIELD
5488 #endif
5489 #ifdef __BIG_ENDIAN_BITFIELD
5490 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
5491 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
5492 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
5493 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
5494 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
5495 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
5496 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
5497 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
5498 VMXNET3_TCD_GEN_SIZE)
5499 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
5500 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
5501 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
5502 (dstrcd) = (tmp); \
5503 vmxnet3_RxCompToCPU((rcd), (tmp)); \
5504 } while (0)
5505 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
5506 (dstrxd) = (tmp); \
5507 vmxnet3_RxDescToCPU((rxd), (tmp)); \
5508 } while (0)
5509 #else
5510 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
5511 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
5512 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
5513 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
5514 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
5515 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
5516 #endif
5517 #ifdef __BIG_ENDIAN_BITFIELD
5518 #endif
5519 #ifdef __BIG_ENDIAN_BITFIELD
5520 #else
5521 #endif
5522 #ifdef __BIG_ENDIAN_BITFIELD
5523 #endif
5524 #ifdef __BIG_ENDIAN_BITFIELD
5525 #endif
5526 #ifdef VMXNET3_RSS
5527 #endif
5528 #ifdef __BIG_ENDIAN_BITFIELD
5529 #endif
5530 #ifdef CONFIG_PCI_MSI
5531 #endif
5532 #ifdef CONFIG_NET_POLL_CONTROLLER
5533 #ifdef CONFIG_PCI_MSI
5534 #endif
5535 #endif
5536 #ifdef CONFIG_PCI_MSI
5537 #endif
5538 #ifdef CONFIG_PCI_MSI
5539 #endif
5540 #ifdef CONFIG_PCI_MSI
5541 #endif
5542 #ifdef VMXNET3_RSS
5543 #endif
5544 #ifdef CONFIG_PCI_MSI
5545 #endif
5546 #ifdef CONFIG_PCI_MSI
5547 #endif
5548 #ifdef CONFIG_NET_POLL_CONTROLLER
5549 #endif
5550 #ifdef VMXNET3_RSS
5551 #endif
5552 #ifdef VMXNET3_RSS
5553 #endif
5554 #ifdef VMXNET3_RSS
5555 #endif
5556 #ifdef VMXNET3_RSS
5557 #endif
5558 #ifdef VMXNET3_RSS
5559 #endif
5560 #ifdef VMXNET3_RSS
5561 #endif
5562 #ifdef CONFIG_PM
5563 #endif
5564 #ifdef CONFIG_PM
5565 #endif
5566 /* LDV_COMMENT_END_PREP */
5567 /* LDV_COMMENT_FUNCTION_CALL The kernel calls the driver init function after the driver is loaded into the kernel. This function is declared as "MODULE_INIT(function name)". */
5568 ldv_handler_precall();
5569 if(vmxnet3_init_module())
5570 goto ldv_final;
5571 int ldv_s_vmxnet3_netdev_ops_net_device_ops = 0;
5572
5573
5574
5575
5576 int ldv_s_vmxnet3_driver_pci_driver = 0;
5577
5578
5579
5580
5581
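/* Note: the loop below models the driver's environment: each iteration nondeterministically picks one callback of vmxnet3_netdev_ops or vmxnet3_driver to invoke, while the ldv_s_* counters appear to enforce the required ordering of paired handlers (e.g. ndo_open before ndo_stop). */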
5582 while( nondet_int()
5583 || !(ldv_s_vmxnet3_netdev_ops_net_device_ops == 0)
5584 || !(ldv_s_vmxnet3_driver_pci_driver == 0)
5585 ) {
5586
5587 switch(nondet_int()) {
5588
5589 case 0: {
5590
5591 /** STRUCT: struct type: net_device_ops, struct name: vmxnet3_netdev_ops **/
5592 if(ldv_s_vmxnet3_netdev_ops_net_device_ops==0) {
5593
5594 /* content: static int vmxnet3_open(struct net_device *netdev)*/
5595 /* LDV_COMMENT_BEGIN_PREP */
5596 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
5597 #ifdef __BIG_ENDIAN_BITFIELD
5598 #endif
5599 #ifdef __BIG_ENDIAN_BITFIELD
5600 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
5601 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
5602 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
5603 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
5604 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
5605 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
5606 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
5607 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
5608 VMXNET3_TCD_GEN_SIZE)
5609 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
5610 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
5611 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
5612 (dstrcd) = (tmp); \
5613 vmxnet3_RxCompToCPU((rcd), (tmp)); \
5614 } while (0)
5615 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
5616 (dstrxd) = (tmp); \
5617 vmxnet3_RxDescToCPU((rxd), (tmp)); \
5618 } while (0)
5619 #else
5620 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
5621 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
5622 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
5623 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
5624 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
5625 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
5626 #endif
5627 #ifdef __BIG_ENDIAN_BITFIELD
5628 #endif
5629 #ifdef __BIG_ENDIAN_BITFIELD
5630 #else
5631 #endif
5632 #ifdef __BIG_ENDIAN_BITFIELD
5633 #endif
5634 #ifdef __BIG_ENDIAN_BITFIELD
5635 #endif
5636 #ifdef VMXNET3_RSS
5637 #endif
5638 #ifdef __BIG_ENDIAN_BITFIELD
5639 #endif
5640 #ifdef CONFIG_PCI_MSI
5641 #endif
5642 #ifdef CONFIG_NET_POLL_CONTROLLER
5643 #ifdef CONFIG_PCI_MSI
5644 #endif
5645 #endif
5646 #ifdef CONFIG_PCI_MSI
5647 #endif
5648 #ifdef CONFIG_PCI_MSI
5649 #endif
5650 #ifdef CONFIG_PCI_MSI
5651 #endif
5652 #ifdef VMXNET3_RSS
5653 #endif
5654 /* LDV_COMMENT_END_PREP */
5655 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_open" from driver structure with callbacks "vmxnet3_netdev_ops". Standard function test for correct return result. */
5656 ldv_handler_precall();
5657 res_vmxnet3_open_73 = vmxnet3_open( var_group1);
5658 ldv_check_return_value(res_vmxnet3_open_73);
5659 if(res_vmxnet3_open_73 < 0)
5660 goto ldv_module_exit;
5661 /* LDV_COMMENT_BEGIN_PREP */
5662 #ifdef CONFIG_PCI_MSI
5663 #endif
5664 #ifdef CONFIG_PCI_MSI
5665 #endif
5666 #ifdef CONFIG_NET_POLL_CONTROLLER
5667 #endif
5668 #ifdef VMXNET3_RSS
5669 #endif
5670 #ifdef VMXNET3_RSS
5671 #endif
5672 #ifdef VMXNET3_RSS
5673 #endif
5674 #ifdef VMXNET3_RSS
5675 #endif
5676 #ifdef VMXNET3_RSS
5677 #endif
5678 #ifdef VMXNET3_RSS
5679 #endif
5680 #ifdef CONFIG_PM
5681 #endif
5682 #ifdef CONFIG_PM
5683 #endif
5684 /* LDV_COMMENT_END_PREP */
5685 ldv_s_vmxnet3_netdev_ops_net_device_ops++;
5686
5687 }
5688
5689 }
5690
5691 break;
5692 case 1: {
5693
5694 /** STRUCT: struct type: net_device_ops, struct name: vmxnet3_netdev_ops **/
5695 if(ldv_s_vmxnet3_netdev_ops_net_device_ops==1) {
5696
5697 /* content: static int vmxnet3_close(struct net_device *netdev)*/
5698 /* LDV_COMMENT_BEGIN_PREP */
5699 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
5700 #ifdef __BIG_ENDIAN_BITFIELD
5701 #endif
5702 #ifdef __BIG_ENDIAN_BITFIELD
5703 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
5704 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
5705 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
5706 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
5707 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
5708 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
5709 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
5710 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
5711 VMXNET3_TCD_GEN_SIZE)
5712 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
5713 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
5714 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
5715 (dstrcd) = (tmp); \
5716 vmxnet3_RxCompToCPU((rcd), (tmp)); \
5717 } while (0)
5718 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
5719 (dstrxd) = (tmp); \
5720 vmxnet3_RxDescToCPU((rxd), (tmp)); \
5721 } while (0)
5722 #else
5723 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
5724 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
5725 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
5726 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
5727 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
5728 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
5729 #endif
5730 #ifdef __BIG_ENDIAN_BITFIELD
5731 #endif
5732 #ifdef __BIG_ENDIAN_BITFIELD
5733 #else
5734 #endif
5735 #ifdef __BIG_ENDIAN_BITFIELD
5736 #endif
5737 #ifdef __BIG_ENDIAN_BITFIELD
5738 #endif
5739 #ifdef VMXNET3_RSS
5740 #endif
5741 #ifdef __BIG_ENDIAN_BITFIELD
5742 #endif
5743 #ifdef CONFIG_PCI_MSI
5744 #endif
5745 #ifdef CONFIG_NET_POLL_CONTROLLER
5746 #ifdef CONFIG_PCI_MSI
5747 #endif
5748 #endif
5749 #ifdef CONFIG_PCI_MSI
5750 #endif
5751 #ifdef CONFIG_PCI_MSI
5752 #endif
5753 #ifdef CONFIG_PCI_MSI
5754 #endif
5755 #ifdef VMXNET3_RSS
5756 #endif
5757 /* LDV_COMMENT_END_PREP */
5758 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_stop" from driver structure with callbacks "vmxnet3_netdev_ops". Standard function test for correct return result. */
5759 ldv_handler_precall();
5760 res_vmxnet3_close_74 = vmxnet3_close( var_group1);
5761 ldv_check_return_value(res_vmxnet3_close_74);
5762 if(res_vmxnet3_close_74)
5763 goto ldv_module_exit;
5764 /* LDV_COMMENT_BEGIN_PREP */
5765 #ifdef CONFIG_PCI_MSI
5766 #endif
5767 #ifdef CONFIG_PCI_MSI
5768 #endif
5769 #ifdef CONFIG_NET_POLL_CONTROLLER
5770 #endif
5771 #ifdef VMXNET3_RSS
5772 #endif
5773 #ifdef VMXNET3_RSS
5774 #endif
5775 #ifdef VMXNET3_RSS
5776 #endif
5777 #ifdef VMXNET3_RSS
5778 #endif
5779 #ifdef VMXNET3_RSS
5780 #endif
5781 #ifdef VMXNET3_RSS
5782 #endif
5783 #ifdef CONFIG_PM
5784 #endif
5785 #ifdef CONFIG_PM
5786 #endif
5787 /* LDV_COMMENT_END_PREP */
5788 ldv_s_vmxnet3_netdev_ops_net_device_ops=0;
5789
5790 }
5791
5792 }
5793
5794 break;
5795 case 2: {
5796
5797 /** STRUCT: struct type: net_device_ops, struct name: vmxnet3_netdev_ops **/
5798
5799
5800 /* content: static netdev_tx_t vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)*/
5801 /* LDV_COMMENT_BEGIN_PREP */
5802 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
5803 #ifdef __BIG_ENDIAN_BITFIELD
5804 #endif
5805 #ifdef __BIG_ENDIAN_BITFIELD
5806 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
5807 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
5808 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
5809 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
5810 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
5811 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
5812 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
5813 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
5814 VMXNET3_TCD_GEN_SIZE)
5815 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
5816 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
5817 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
5818 (dstrcd) = (tmp); \
5819 vmxnet3_RxCompToCPU((rcd), (tmp)); \
5820 } while (0)
5821 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
5822 (dstrxd) = (tmp); \
5823 vmxnet3_RxDescToCPU((rxd), (tmp)); \
5824 } while (0)
5825 #else
5826 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
5827 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
5828 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
5829 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
5830 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
5831 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
5832 #endif
5833 #ifdef __BIG_ENDIAN_BITFIELD
5834 #endif
5835 #ifdef __BIG_ENDIAN_BITFIELD
5836 #else
5837 #endif
5838 #ifdef __BIG_ENDIAN_BITFIELD
5839 #endif
5840 /* LDV_COMMENT_END_PREP */
5841 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_start_xmit" from driver structure with callbacks "vmxnet3_netdev_ops" */
5842 ldv_handler_precall();
5843 vmxnet3_xmit_frame( var_group2, var_group1);
5844 /* LDV_COMMENT_BEGIN_PREP */
5845 #ifdef __BIG_ENDIAN_BITFIELD
5846 #endif
5847 #ifdef VMXNET3_RSS
5848 #endif
5849 #ifdef __BIG_ENDIAN_BITFIELD
5850 #endif
5851 #ifdef CONFIG_PCI_MSI
5852 #endif
5853 #ifdef CONFIG_NET_POLL_CONTROLLER
5854 #ifdef CONFIG_PCI_MSI
5855 #endif
5856 #endif
5857 #ifdef CONFIG_PCI_MSI
5858 #endif
5859 #ifdef CONFIG_PCI_MSI
5860 #endif
5861 #ifdef CONFIG_PCI_MSI
5862 #endif
5863 #ifdef VMXNET3_RSS
5864 #endif
5865 #ifdef CONFIG_PCI_MSI
5866 #endif
5867 #ifdef CONFIG_PCI_MSI
5868 #endif
5869 #ifdef CONFIG_NET_POLL_CONTROLLER
5870 #endif
5871 #ifdef VMXNET3_RSS
5872 #endif
5873 #ifdef VMXNET3_RSS
5874 #endif
5875 #ifdef VMXNET3_RSS
5876 #endif
5877 #ifdef VMXNET3_RSS
5878 #endif
5879 #ifdef VMXNET3_RSS
5880 #endif
5881 #ifdef VMXNET3_RSS
5882 #endif
5883 #ifdef CONFIG_PM
5884 #endif
5885 #ifdef CONFIG_PM
5886 #endif
5887 /* LDV_COMMENT_END_PREP */
5888
5889
5890
5891
5892 }
5893
5894 break;
5895 case 3: {
5896
5897 /** STRUCT: struct type: net_device_ops, struct name: vmxnet3_netdev_ops **/
5898
5899
5900 /* content: static int vmxnet3_set_mac_addr(struct net_device *netdev, void *p)*/
5901 /* LDV_COMMENT_BEGIN_PREP */
5902 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
5903 #ifdef __BIG_ENDIAN_BITFIELD
5904 #endif
5905 #ifdef __BIG_ENDIAN_BITFIELD
5906 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
5907 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
5908 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
5909 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
5910 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
5911 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
5912 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
5913 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
5914 VMXNET3_TCD_GEN_SIZE)
5915 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
5916 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
5917 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
5918 (dstrcd) = (tmp); \
5919 vmxnet3_RxCompToCPU((rcd), (tmp)); \
5920 } while (0)
5921 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
5922 (dstrxd) = (tmp); \
5923 vmxnet3_RxDescToCPU((rxd), (tmp)); \
5924 } while (0)
5925 #else
5926 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
5927 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
5928 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
5929 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
5930 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
5931 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
5932 #endif
5933 #ifdef __BIG_ENDIAN_BITFIELD
5934 #endif
5935 #ifdef __BIG_ENDIAN_BITFIELD
5936 #else
5937 #endif
5938 #ifdef __BIG_ENDIAN_BITFIELD
5939 #endif
5940 #ifdef __BIG_ENDIAN_BITFIELD
5941 #endif
5942 #ifdef VMXNET3_RSS
5943 #endif
5944 #ifdef __BIG_ENDIAN_BITFIELD
5945 #endif
5946 #ifdef CONFIG_PCI_MSI
5947 #endif
5948 #ifdef CONFIG_NET_POLL_CONTROLLER
5949 #ifdef CONFIG_PCI_MSI
5950 #endif
5951 #endif
5952 #ifdef CONFIG_PCI_MSI
5953 #endif
5954 #ifdef CONFIG_PCI_MSI
5955 #endif
5956 #ifdef CONFIG_PCI_MSI
5957 #endif
5958 #ifdef VMXNET3_RSS
5959 #endif
5960 /* LDV_COMMENT_END_PREP */
5961 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_set_mac_address" from driver structure with callbacks "vmxnet3_netdev_ops" */
5962 ldv_handler_precall();
5963 vmxnet3_set_mac_addr( var_group1, var_vmxnet3_set_mac_addr_68_p1);
5964 /* LDV_COMMENT_BEGIN_PREP */
5965 #ifdef CONFIG_PCI_MSI
5966 #endif
5967 #ifdef CONFIG_PCI_MSI
5968 #endif
5969 #ifdef CONFIG_NET_POLL_CONTROLLER
5970 #endif
5971 #ifdef VMXNET3_RSS
5972 #endif
5973 #ifdef VMXNET3_RSS
5974 #endif
5975 #ifdef VMXNET3_RSS
5976 #endif
5977 #ifdef VMXNET3_RSS
5978 #endif
5979 #ifdef VMXNET3_RSS
5980 #endif
5981 #ifdef VMXNET3_RSS
5982 #endif
5983 #ifdef CONFIG_PM
5984 #endif
5985 #ifdef CONFIG_PM
5986 #endif
5987 /* LDV_COMMENT_END_PREP */
5988
5989
5990
5991
5992 }
5993
5994 break;
5995 case 4: {
5996
5997 /** STRUCT: struct type: net_device_ops, struct name: vmxnet3_netdev_ops **/
5998
5999
6000 /* content: static int vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)*/
6001 /* LDV_COMMENT_BEGIN_PREP */
6002 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
6003 #ifdef __BIG_ENDIAN_BITFIELD
6004 #endif
6005 #ifdef __BIG_ENDIAN_BITFIELD
6006 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
6007 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
6008 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
6009 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
6010 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
6011 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
6012 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
6013 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
6014 VMXNET3_TCD_GEN_SIZE)
6015 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
6016 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
6017 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
6018 (dstrcd) = (tmp); \
6019 vmxnet3_RxCompToCPU((rcd), (tmp)); \
6020 } while (0)
6021 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
6022 (dstrxd) = (tmp); \
6023 vmxnet3_RxDescToCPU((rxd), (tmp)); \
6024 } while (0)
6025 #else
6026 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
6027 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
6028 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
6029 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
6030 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
6031 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
6032 #endif
6033 #ifdef __BIG_ENDIAN_BITFIELD
6034 #endif
6035 #ifdef __BIG_ENDIAN_BITFIELD
6036 #else
6037 #endif
6038 #ifdef __BIG_ENDIAN_BITFIELD
6039 #endif
6040 #ifdef __BIG_ENDIAN_BITFIELD
6041 #endif
6042 #ifdef VMXNET3_RSS
6043 #endif
6044 #ifdef __BIG_ENDIAN_BITFIELD
6045 #endif
6046 #ifdef CONFIG_PCI_MSI
6047 #endif
6048 #ifdef CONFIG_NET_POLL_CONTROLLER
6049 #ifdef CONFIG_PCI_MSI
6050 #endif
6051 #endif
6052 #ifdef CONFIG_PCI_MSI
6053 #endif
6054 #ifdef CONFIG_PCI_MSI
6055 #endif
6056 #ifdef CONFIG_PCI_MSI
6057 #endif
6058 #ifdef VMXNET3_RSS
6059 #endif
6060 /* LDV_COMMENT_END_PREP */
6061 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_change_mtu" from driver structure with callbacks "vmxnet3_netdev_ops" */
6062 ldv_handler_precall();
6063 vmxnet3_change_mtu( var_group1, var_vmxnet3_change_mtu_76_p1);
6064 /* LDV_COMMENT_BEGIN_PREP */
6065 #ifdef CONFIG_PCI_MSI
6066 #endif
6067 #ifdef CONFIG_PCI_MSI
6068 #endif
6069 #ifdef CONFIG_NET_POLL_CONTROLLER
6070 #endif
6071 #ifdef VMXNET3_RSS
6072 #endif
6073 #ifdef VMXNET3_RSS
6074 #endif
6075 #ifdef VMXNET3_RSS
6076 #endif
6077 #ifdef VMXNET3_RSS
6078 #endif
6079 #ifdef VMXNET3_RSS
6080 #endif
6081 #ifdef VMXNET3_RSS
6082 #endif
6083 #ifdef CONFIG_PM
6084 #endif
6085 #ifdef CONFIG_PM
6086 #endif
6087 /* LDV_COMMENT_END_PREP */
6088
6089
6090
6091
6092 }
6093
6094 break;
6095 case 5: {
6096
6097 /** STRUCT: struct type: net_device_ops, struct name: vmxnet3_netdev_ops **/
6098
6099
6100 /* content: static void vmxnet3_tx_timeout(struct net_device *netdev)*/
6101 /* LDV_COMMENT_BEGIN_PREP */
6102 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
6103 #ifdef __BIG_ENDIAN_BITFIELD
6104 #endif
6105 #ifdef __BIG_ENDIAN_BITFIELD
6106 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
6107 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
6108 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
6109 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
6110 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
6111 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
6112 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
6113 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
6114 VMXNET3_TCD_GEN_SIZE)
6115 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
6116 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
6117 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
6118 (dstrcd) = (tmp); \
6119 vmxnet3_RxCompToCPU((rcd), (tmp)); \
6120 } while (0)
6121 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
6122 (dstrxd) = (tmp); \
6123 vmxnet3_RxDescToCPU((rxd), (tmp)); \
6124 } while (0)
6125 #else
6126 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
6127 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
6128 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
6129 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
6130 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
6131 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
6132 #endif
6133 #ifdef __BIG_ENDIAN_BITFIELD
6134 #endif
6135 #ifdef __BIG_ENDIAN_BITFIELD
6136 #else
6137 #endif
6138 #ifdef __BIG_ENDIAN_BITFIELD
6139 #endif
6140 #ifdef __BIG_ENDIAN_BITFIELD
6141 #endif
6142 #ifdef VMXNET3_RSS
6143 #endif
6144 #ifdef __BIG_ENDIAN_BITFIELD
6145 #endif
6146 #ifdef CONFIG_PCI_MSI
6147 #endif
6148 #ifdef CONFIG_NET_POLL_CONTROLLER
6149 #ifdef CONFIG_PCI_MSI
6150 #endif
6151 #endif
6152 #ifdef CONFIG_PCI_MSI
6153 #endif
6154 #ifdef CONFIG_PCI_MSI
6155 #endif
6156 #ifdef CONFIG_PCI_MSI
6157 #endif
6158 #ifdef VMXNET3_RSS
6159 #endif
6160 #ifdef CONFIG_PCI_MSI
6161 #endif
6162 #ifdef CONFIG_PCI_MSI
6163 #endif
6164 /* LDV_COMMENT_END_PREP */
6165 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_tx_timeout" from driver structure with callbacks "vmxnet3_netdev_ops" */
6166 ldv_handler_precall();
6167 vmxnet3_tx_timeout( var_group1);
6168 /* LDV_COMMENT_BEGIN_PREP */
6169 #ifdef CONFIG_NET_POLL_CONTROLLER
6170 #endif
6171 #ifdef VMXNET3_RSS
6172 #endif
6173 #ifdef VMXNET3_RSS
6174 #endif
6175 #ifdef VMXNET3_RSS
6176 #endif
6177 #ifdef VMXNET3_RSS
6178 #endif
6179 #ifdef VMXNET3_RSS
6180 #endif
6181 #ifdef VMXNET3_RSS
6182 #endif
6183 #ifdef CONFIG_PM
6184 #endif
6185 #ifdef CONFIG_PM
6186 #endif
6187 /* LDV_COMMENT_END_PREP */
6188
6189
6190
6191
6192 }
6193
6194 break;
6195 case 6: {
6196
6197 /** STRUCT: struct type: net_device_ops, struct name: vmxnet3_netdev_ops **/
6198
6199
6200 /* content: static void vmxnet3_set_mc(struct net_device *netdev)*/
6201 /* LDV_COMMENT_BEGIN_PREP */
6202 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
6203 #ifdef __BIG_ENDIAN_BITFIELD
6204 #endif
6205 #ifdef __BIG_ENDIAN_BITFIELD
6206 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
6207 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
6208 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
6209 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
6210 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
6211 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
6212 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
6213 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
6214 VMXNET3_TCD_GEN_SIZE)
6215 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
6216 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
6217 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
6218 (dstrcd) = (tmp); \
6219 vmxnet3_RxCompToCPU((rcd), (tmp)); \
6220 } while (0)
6221 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
6222 (dstrxd) = (tmp); \
6223 vmxnet3_RxDescToCPU((rxd), (tmp)); \
6224 } while (0)
6225 #else
6226 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
6227 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
6228 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
6229 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
6230 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
6231 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
6232 #endif
6233 #ifdef __BIG_ENDIAN_BITFIELD
6234 #endif
6235 #ifdef __BIG_ENDIAN_BITFIELD
6236 #else
6237 #endif
6238 #ifdef __BIG_ENDIAN_BITFIELD
6239 #endif
6240 #ifdef __BIG_ENDIAN_BITFIELD
6241 #endif
6242 #ifdef VMXNET3_RSS
6243 #endif
6244 #ifdef __BIG_ENDIAN_BITFIELD
6245 #endif
6246 #ifdef CONFIG_PCI_MSI
6247 #endif
6248 #ifdef CONFIG_NET_POLL_CONTROLLER
6249 #ifdef CONFIG_PCI_MSI
6250 #endif
6251 #endif
6252 #ifdef CONFIG_PCI_MSI
6253 #endif
6254 #ifdef CONFIG_PCI_MSI
6255 #endif
6256 #ifdef CONFIG_PCI_MSI
6257 #endif
6258 /* LDV_COMMENT_END_PREP */
6259 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_set_rx_mode" from driver structure with callbacks "vmxnet3_netdev_ops" */
6260 ldv_handler_precall();
6261 vmxnet3_set_mc( var_group1);
6262 /* LDV_COMMENT_BEGIN_PREP */
6263 #ifdef VMXNET3_RSS
6264 #endif
6265 #ifdef CONFIG_PCI_MSI
6266 #endif
6267 #ifdef CONFIG_PCI_MSI
6268 #endif
6269 #ifdef CONFIG_NET_POLL_CONTROLLER
6270 #endif
6271 #ifdef VMXNET3_RSS
6272 #endif
6273 #ifdef VMXNET3_RSS
6274 #endif
6275 #ifdef VMXNET3_RSS
6276 #endif
6277 #ifdef VMXNET3_RSS
6278 #endif
6279 #ifdef VMXNET3_RSS
6280 #endif
6281 #ifdef VMXNET3_RSS
6282 #endif
6283 #ifdef CONFIG_PM
6284 #endif
6285 #ifdef CONFIG_PM
6286 #endif
6287 /* LDV_COMMENT_END_PREP */
6288
6289
6290
6291
6292 }
6293
6294 break;
6295 case 7: {
6296
6297 /** STRUCT: struct type: net_device_ops, struct name: vmxnet3_netdev_ops **/
6298
6299
6300 /* content: static int vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)*/
6301 /* LDV_COMMENT_BEGIN_PREP */
6302 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
6303 #ifdef __BIG_ENDIAN_BITFIELD
6304 #endif
6305 #ifdef __BIG_ENDIAN_BITFIELD
6306 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
6307 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
6308 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
6309 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
6310 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
6311 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
6312 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
6313 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
6314 VMXNET3_TCD_GEN_SIZE)
6315 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
6316 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
6317 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
6318 (dstrcd) = (tmp); \
6319 vmxnet3_RxCompToCPU((rcd), (tmp)); \
6320 } while (0)
6321 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
6322 (dstrxd) = (tmp); \
6323 vmxnet3_RxDescToCPU((rxd), (tmp)); \
6324 } while (0)
6325 #else
6326 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
6327 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
6328 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
6329 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
6330 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
6331 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
6332 #endif
6333 #ifdef __BIG_ENDIAN_BITFIELD
6334 #endif
6335 #ifdef __BIG_ENDIAN_BITFIELD
6336 #else
6337 #endif
6338 #ifdef __BIG_ENDIAN_BITFIELD
6339 #endif
6340 #ifdef __BIG_ENDIAN_BITFIELD
6341 #endif
6342 #ifdef VMXNET3_RSS
6343 #endif
6344 #ifdef __BIG_ENDIAN_BITFIELD
6345 #endif
6346 #ifdef CONFIG_PCI_MSI
6347 #endif
6348 #ifdef CONFIG_NET_POLL_CONTROLLER
6349 #ifdef CONFIG_PCI_MSI
6350 #endif
6351 #endif
6352 #ifdef CONFIG_PCI_MSI
6353 #endif
6354 #ifdef CONFIG_PCI_MSI
6355 #endif
6356 #ifdef CONFIG_PCI_MSI
6357 #endif
6358 /* LDV_COMMENT_END_PREP */
6359 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_vlan_rx_add_vid" from driver structure with callbacks "vmxnet3_netdev_ops" */
6360 ldv_handler_precall();
6361 vmxnet3_vlan_rx_add_vid( var_group1, var_vmxnet3_vlan_rx_add_vid_57_p1, var_vmxnet3_vlan_rx_add_vid_57_p2);
6362 /* LDV_COMMENT_BEGIN_PREP */
6363 #ifdef VMXNET3_RSS
6364 #endif
6365 #ifdef CONFIG_PCI_MSI
6366 #endif
6367 #ifdef CONFIG_PCI_MSI
6368 #endif
6369 #ifdef CONFIG_NET_POLL_CONTROLLER
6370 #endif
6371 #ifdef VMXNET3_RSS
6372 #endif
6373 #ifdef VMXNET3_RSS
6374 #endif
6375 #ifdef VMXNET3_RSS
6376 #endif
6377 #ifdef VMXNET3_RSS
6378 #endif
6379 #ifdef VMXNET3_RSS
6380 #endif
6381 #ifdef VMXNET3_RSS
6382 #endif
6383 #ifdef CONFIG_PM
6384 #endif
6385 #ifdef CONFIG_PM
6386 #endif
6387 /* LDV_COMMENT_END_PREP */
6388
6389
6390
6391
6392 }
6393
6394 break;
6395 case 8: {
6396
6397 /** STRUCT: struct type: net_device_ops, struct name: vmxnet3_netdev_ops **/
6398
6399
6400 /* content: static int vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)*/
6401 /* LDV_COMMENT_BEGIN_PREP */
6402 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
6403 #ifdef __BIG_ENDIAN_BITFIELD
6404 #endif
6405 #ifdef __BIG_ENDIAN_BITFIELD
6406 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
6407 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
6408 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
6409 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
6410 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
6411 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
6412 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
6413 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
6414 VMXNET3_TCD_GEN_SIZE)
6415 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
6416 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
6417 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
6418 (dstrcd) = (tmp); \
6419 vmxnet3_RxCompToCPU((rcd), (tmp)); \
6420 } while (0)
6421 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
6422 (dstrxd) = (tmp); \
6423 vmxnet3_RxDescToCPU((rxd), (tmp)); \
6424 } while (0)
6425 #else
6426 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
6427 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
6428 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
6429 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
6430 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
6431 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
6432 #endif
6433 #ifdef __BIG_ENDIAN_BITFIELD
6434 #endif
6435 #ifdef __BIG_ENDIAN_BITFIELD
6436 #else
6437 #endif
6438 #ifdef __BIG_ENDIAN_BITFIELD
6439 #endif
6440 #ifdef __BIG_ENDIAN_BITFIELD
6441 #endif
6442 #ifdef VMXNET3_RSS
6443 #endif
6444 #ifdef __BIG_ENDIAN_BITFIELD
6445 #endif
6446 #ifdef CONFIG_PCI_MSI
6447 #endif
6448 #ifdef CONFIG_NET_POLL_CONTROLLER
6449 #ifdef CONFIG_PCI_MSI
6450 #endif
6451 #endif
6452 #ifdef CONFIG_PCI_MSI
6453 #endif
6454 #ifdef CONFIG_PCI_MSI
6455 #endif
6456 #ifdef CONFIG_PCI_MSI
6457 #endif
6458 /* LDV_COMMENT_END_PREP */
6459 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_vlan_rx_kill_vid" from driver structure with callbacks "vmxnet3_netdev_ops" */
6460 ldv_handler_precall();
6461 vmxnet3_vlan_rx_kill_vid( var_group1, var_vmxnet3_vlan_rx_kill_vid_58_p1, var_vmxnet3_vlan_rx_kill_vid_58_p2);
6462 /* LDV_COMMENT_BEGIN_PREP */
6463 #ifdef VMXNET3_RSS
6464 #endif
6465 #ifdef CONFIG_PCI_MSI
6466 #endif
6467 #ifdef CONFIG_PCI_MSI
6468 #endif
6469 #ifdef CONFIG_NET_POLL_CONTROLLER
6470 #endif
6471 #ifdef VMXNET3_RSS
6472 #endif
6473 #ifdef VMXNET3_RSS
6474 #endif
6475 #ifdef VMXNET3_RSS
6476 #endif
6477 #ifdef VMXNET3_RSS
6478 #endif
6479 #ifdef VMXNET3_RSS
6480 #endif
6481 #ifdef VMXNET3_RSS
6482 #endif
6483 #ifdef CONFIG_PM
6484 #endif
6485 #ifdef CONFIG_PM
6486 #endif
6487 /* LDV_COMMENT_END_PREP */
6488
6489
6490
6491
6492 }
6493
6494 break;
6495 case 9: {
6496
6497 /** STRUCT: struct type: net_device_ops, struct name: vmxnet3_netdev_ops **/
6498
6499
6500 /* content: static void vmxnet3_netpoll(struct net_device *netdev)*/
6501 /* LDV_COMMENT_BEGIN_PREP */
6502 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
6503 #ifdef __BIG_ENDIAN_BITFIELD
6504 #endif
6505 #ifdef __BIG_ENDIAN_BITFIELD
6506 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
6507 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
6508 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
6509 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
6510 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
6511 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
6512 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
6513 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
6514 VMXNET3_TCD_GEN_SIZE)
6515 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
6516 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
6517 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
6518 (dstrcd) = (tmp); \
6519 vmxnet3_RxCompToCPU((rcd), (tmp)); \
6520 } while (0)
6521 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
6522 (dstrxd) = (tmp); \
6523 vmxnet3_RxDescToCPU((rxd), (tmp)); \
6524 } while (0)
6525 #else
6526 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
6527 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
6528 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
6529 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
6530 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
6531 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
6532 #endif
6533 #ifdef __BIG_ENDIAN_BITFIELD
6534 #endif
6535 #ifdef __BIG_ENDIAN_BITFIELD
6536 #else
6537 #endif
6538 #ifdef __BIG_ENDIAN_BITFIELD
6539 #endif
6540 #ifdef __BIG_ENDIAN_BITFIELD
6541 #endif
6542 #ifdef VMXNET3_RSS
6543 #endif
6544 #ifdef __BIG_ENDIAN_BITFIELD
6545 #endif
6546 #ifdef CONFIG_PCI_MSI
6547 #endif
6548 #ifdef CONFIG_NET_POLL_CONTROLLER
6549 /* LDV_COMMENT_END_PREP */
6550 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_poll_controller" from driver structure with callbacks "vmxnet3_netdev_ops" */
6551 ldv_handler_precall();
6552 vmxnet3_netpoll( var_group1);
6553 /* LDV_COMMENT_BEGIN_PREP */
6554 #endif
6555 #ifdef CONFIG_PCI_MSI
6556 #endif
6557 #ifdef CONFIG_PCI_MSI
6558 #endif
6559 #ifdef CONFIG_PCI_MSI
6560 #endif
6561 #ifdef VMXNET3_RSS
6562 #endif
6563 #ifdef CONFIG_PCI_MSI
6564 #endif
6565 #ifdef CONFIG_PCI_MSI
6566 #endif
6567 #ifdef CONFIG_NET_POLL_CONTROLLER
6568 #endif
6569 #ifdef VMXNET3_RSS
6570 #endif
6571 #ifdef VMXNET3_RSS
6572 #endif
6573 #ifdef VMXNET3_RSS
6574 #endif
6575 #ifdef VMXNET3_RSS
6576 #endif
6577 #ifdef VMXNET3_RSS
6578 #endif
6579 #ifdef VMXNET3_RSS
6580 #endif
6581 #ifdef CONFIG_PM
6582 #endif
6583 #ifdef CONFIG_PM
6584 #endif
6585 /* LDV_COMMENT_END_PREP */
6586
6587
6588
6589
6590 }
6591
6592 break;
6593 case 10: {
6594
6595 /** STRUCT: struct type: dev_pm_ops, struct name: vmxnet3_pm_ops **/
6596
6597
6598 /* content: static int vmxnet3_suspend(struct device *device)*/
6599 /* LDV_COMMENT_BEGIN_PREP */
6600 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
6601 #ifdef __BIG_ENDIAN_BITFIELD
6602 #endif
6603 #ifdef __BIG_ENDIAN_BITFIELD
6604 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
6605 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
6606 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
6607 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
6608 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
6609 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
6610 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
6611 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
6612 VMXNET3_TCD_GEN_SIZE)
6613 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
6614 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
6615 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
6616 (dstrcd) = (tmp); \
6617 vmxnet3_RxCompToCPU((rcd), (tmp)); \
6618 } while (0)
6619 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
6620 (dstrxd) = (tmp); \
6621 vmxnet3_RxDescToCPU((rxd), (tmp)); \
6622 } while (0)
6623 #else
6624 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
6625 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
6626 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
6627 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
6628 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
6629 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
6630 #endif
6631 #ifdef __BIG_ENDIAN_BITFIELD
6632 #endif
6633 #ifdef __BIG_ENDIAN_BITFIELD
6634 #else
6635 #endif
6636 #ifdef __BIG_ENDIAN_BITFIELD
6637 #endif
6638 #ifdef __BIG_ENDIAN_BITFIELD
6639 #endif
6640 #ifdef VMXNET3_RSS
6641 #endif
6642 #ifdef __BIG_ENDIAN_BITFIELD
6643 #endif
6644 #ifdef CONFIG_PCI_MSI
6645 #endif
6646 #ifdef CONFIG_NET_POLL_CONTROLLER
6647 #ifdef CONFIG_PCI_MSI
6648 #endif
6649 #endif
6650 #ifdef CONFIG_PCI_MSI
6651 #endif
6652 #ifdef CONFIG_PCI_MSI
6653 #endif
6654 #ifdef CONFIG_PCI_MSI
6655 #endif
6656 #ifdef VMXNET3_RSS
6657 #endif
6658 #ifdef CONFIG_PCI_MSI
6659 #endif
6660 #ifdef CONFIG_PCI_MSI
6661 #endif
6662 #ifdef CONFIG_NET_POLL_CONTROLLER
6663 #endif
6664 #ifdef VMXNET3_RSS
6665 #endif
6666 #ifdef VMXNET3_RSS
6667 #endif
6668 #ifdef VMXNET3_RSS
6669 #endif
6670 #ifdef VMXNET3_RSS
6671 #endif
6672 #ifdef VMXNET3_RSS
6673 #endif
6674 #ifdef VMXNET3_RSS
6675 #endif
6676 #ifdef CONFIG_PM
6677 /* LDV_COMMENT_END_PREP */
6678 /* LDV_COMMENT_FUNCTION_CALL Function from field "suspend" from driver structure with callbacks "vmxnet3_pm_ops" */
6679 ldv_handler_precall();
6680 vmxnet3_suspend( var_group3);
6681 /* LDV_COMMENT_BEGIN_PREP */
6682 #endif
6683 #ifdef CONFIG_PM
6684 #endif
6685 /* LDV_COMMENT_END_PREP */
6686
6687
6688
6689
6690 }
6691
6692 break;
6693 case 11: {
6694
6695 /** STRUCT: struct type: dev_pm_ops, struct name: vmxnet3_pm_ops **/
6696
6697
6698 /* content: static int vmxnet3_resume(struct device *device)*/
6699 /* LDV_COMMENT_BEGIN_PREP */
6700 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
6701 #ifdef __BIG_ENDIAN_BITFIELD
6702 #endif
6703 #ifdef __BIG_ENDIAN_BITFIELD
6704 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
6705 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
6706 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
6707 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
6708 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
6709 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
6710 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
6711 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
6712 VMXNET3_TCD_GEN_SIZE)
6713 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
6714 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
6715 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
6716 (dstrcd) = (tmp); \
6717 vmxnet3_RxCompToCPU((rcd), (tmp)); \
6718 } while (0)
6719 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
6720 (dstrxd) = (tmp); \
6721 vmxnet3_RxDescToCPU((rxd), (tmp)); \
6722 } while (0)
6723 #else
6724 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
6725 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
6726 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
6727 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
6728 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
6729 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
6730 #endif
6731 #ifdef __BIG_ENDIAN_BITFIELD
6732 #endif
6733 #ifdef __BIG_ENDIAN_BITFIELD
6734 #else
6735 #endif
6736 #ifdef __BIG_ENDIAN_BITFIELD
6737 #endif
6738 #ifdef __BIG_ENDIAN_BITFIELD
6739 #endif
6740 #ifdef VMXNET3_RSS
6741 #endif
6742 #ifdef __BIG_ENDIAN_BITFIELD
6743 #endif
6744 #ifdef CONFIG_PCI_MSI
6745 #endif
6746 #ifdef CONFIG_NET_POLL_CONTROLLER
6747 #ifdef CONFIG_PCI_MSI
6748 #endif
6749 #endif
6750 #ifdef CONFIG_PCI_MSI
6751 #endif
6752 #ifdef CONFIG_PCI_MSI
6753 #endif
6754 #ifdef CONFIG_PCI_MSI
6755 #endif
6756 #ifdef VMXNET3_RSS
6757 #endif
6758 #ifdef CONFIG_PCI_MSI
6759 #endif
6760 #ifdef CONFIG_PCI_MSI
6761 #endif
6762 #ifdef CONFIG_NET_POLL_CONTROLLER
6763 #endif
6764 #ifdef VMXNET3_RSS
6765 #endif
6766 #ifdef VMXNET3_RSS
6767 #endif
6768 #ifdef VMXNET3_RSS
6769 #endif
6770 #ifdef VMXNET3_RSS
6771 #endif
6772 #ifdef VMXNET3_RSS
6773 #endif
6774 #ifdef VMXNET3_RSS
6775 #endif
6776 #ifdef CONFIG_PM
6777 /* LDV_COMMENT_END_PREP */
6778 /* LDV_COMMENT_FUNCTION_CALL Function from field "resume" from driver structure with callbacks "vmxnet3_pm_ops" */
6779 ldv_handler_precall();
6780 vmxnet3_resume( var_group3);
6781 /* LDV_COMMENT_BEGIN_PREP */
6782 #endif
6783 #ifdef CONFIG_PM
6784 #endif
6785 /* LDV_COMMENT_END_PREP */
6786
6787
6788
6789
6790 }
6791
6792 break;
6793 case 12: {
6794
6795 /** STRUCT: struct type: pci_driver, struct name: vmxnet3_driver **/
6796 if(ldv_s_vmxnet3_driver_pci_driver==0) {
6797
6798 /* content: static int vmxnet3_probe_device(struct pci_dev *pdev, const struct pci_device_id *id)*/
6799 /* LDV_COMMENT_BEGIN_PREP */
6800 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
6801 #ifdef __BIG_ENDIAN_BITFIELD
6802 #endif
6803 #ifdef __BIG_ENDIAN_BITFIELD
6804 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
6805 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
6806 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
6807 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
6808 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
6809 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
6810 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
6811 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
6812 VMXNET3_TCD_GEN_SIZE)
6813 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
6814 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
6815 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
6816 (dstrcd) = (tmp); \
6817 vmxnet3_RxCompToCPU((rcd), (tmp)); \
6818 } while (0)
6819 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
6820 (dstrxd) = (tmp); \
6821 vmxnet3_RxDescToCPU((rxd), (tmp)); \
6822 } while (0)
6823 #else
6824 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
6825 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
6826 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
6827 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
6828 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
6829 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
6830 #endif
6831 #ifdef __BIG_ENDIAN_BITFIELD
6832 #endif
6833 #ifdef __BIG_ENDIAN_BITFIELD
6834 #else
6835 #endif
6836 #ifdef __BIG_ENDIAN_BITFIELD
6837 #endif
6838 #ifdef __BIG_ENDIAN_BITFIELD
6839 #endif
6840 #ifdef VMXNET3_RSS
6841 #endif
6842 #ifdef __BIG_ENDIAN_BITFIELD
6843 #endif
6844 #ifdef CONFIG_PCI_MSI
6845 #endif
6846 #ifdef CONFIG_NET_POLL_CONTROLLER
6847 #ifdef CONFIG_PCI_MSI
6848 #endif
6849 #endif
6850 #ifdef CONFIG_PCI_MSI
6851 #endif
6852 #ifdef CONFIG_PCI_MSI
6853 #endif
6854 #ifdef CONFIG_PCI_MSI
6855 #endif
6856 #ifdef VMXNET3_RSS
6857 #endif
6858 #ifdef CONFIG_PCI_MSI
6859 #endif
6860 #ifdef CONFIG_PCI_MSI
6861 #endif
6862 /* LDV_COMMENT_END_PREP */
6863 /* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "vmxnet3_driver". Standart function test for correct return result. */
6864 res_vmxnet3_probe_device_84 = vmxnet3_probe_device( var_group4, var_vmxnet3_probe_device_84_p1);
6865 ldv_check_return_value(res_vmxnet3_probe_device_84);
6866 ldv_check_return_value_probe(res_vmxnet3_probe_device_84);
6867 if(res_vmxnet3_probe_device_84)
6868 goto ldv_module_exit;
6869 /* LDV_COMMENT_BEGIN_PREP */
6870 #ifdef VMXNET3_RSS
6871 #endif
6872 #ifdef VMXNET3_RSS
6873 #endif
6874 #ifdef CONFIG_PM
6875 #endif
6876 #ifdef CONFIG_PM
6877 #endif
6878 /* LDV_COMMENT_END_PREP */
6879 ldv_s_vmxnet3_driver_pci_driver++;
6880
6881 }
6882
6883 }
6884
6885 break;
6886 case 13: {
6887
6888 /** STRUCT: struct type: pci_driver, struct name: vmxnet3_driver **/
6889 if(ldv_s_vmxnet3_driver_pci_driver==1) {
6890
6891 /* content: static void vmxnet3_remove_device(struct pci_dev *pdev)*/
6892 /* LDV_COMMENT_BEGIN_PREP */
6893 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
6894 #ifdef __BIG_ENDIAN_BITFIELD
6895 #endif
6896 #ifdef __BIG_ENDIAN_BITFIELD
6897 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
6898 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
6899 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
6900 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
6901 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
6902 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
6903 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
6904 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
6905 VMXNET3_TCD_GEN_SIZE)
6906 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
6907 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
6908 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
6909 (dstrcd) = (tmp); \
6910 vmxnet3_RxCompToCPU((rcd), (tmp)); \
6911 } while (0)
6912 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
6913 (dstrxd) = (tmp); \
6914 vmxnet3_RxDescToCPU((rxd), (tmp)); \
6915 } while (0)
6916 #else
6917 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
6918 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
6919 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
6920 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
6921 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
6922 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
6923 #endif
6924 #ifdef __BIG_ENDIAN_BITFIELD
6925 #endif
6926 #ifdef __BIG_ENDIAN_BITFIELD
6927 #else
6928 #endif
6929 #ifdef __BIG_ENDIAN_BITFIELD
6930 #endif
6931 #ifdef __BIG_ENDIAN_BITFIELD
6932 #endif
6933 #ifdef VMXNET3_RSS
6934 #endif
6935 #ifdef __BIG_ENDIAN_BITFIELD
6936 #endif
6937 #ifdef CONFIG_PCI_MSI
6938 #endif
6939 #ifdef CONFIG_NET_POLL_CONTROLLER
6940 #ifdef CONFIG_PCI_MSI
6941 #endif
6942 #endif
6943 #ifdef CONFIG_PCI_MSI
6944 #endif
6945 #ifdef CONFIG_PCI_MSI
6946 #endif
6947 #ifdef CONFIG_PCI_MSI
6948 #endif
6949 #ifdef VMXNET3_RSS
6950 #endif
6951 #ifdef CONFIG_PCI_MSI
6952 #endif
6953 #ifdef CONFIG_PCI_MSI
6954 #endif
6955 #ifdef CONFIG_NET_POLL_CONTROLLER
6956 #endif
6957 #ifdef VMXNET3_RSS
6958 #endif
6959 #ifdef VMXNET3_RSS
6960 #endif
6961 #ifdef VMXNET3_RSS
6962 #endif
6963 #ifdef VMXNET3_RSS
6964 #endif
6965 /* LDV_COMMENT_END_PREP */
6966 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "vmxnet3_driver" */
6967 ldv_handler_precall();
6968 vmxnet3_remove_device( var_group4);
6969 /* LDV_COMMENT_BEGIN_PREP */
6970 #ifdef CONFIG_PM
6971 #endif
6972 #ifdef CONFIG_PM
6973 #endif
6974 /* LDV_COMMENT_END_PREP */
6975 ldv_s_vmxnet3_driver_pci_driver=0;
6976
6977 }
6978
6979 }
6980
6981 break;
6982 case 14: {
6983
6984 /** STRUCT: struct type: pci_driver, struct name: vmxnet3_driver **/
6985
6986
6987 /* content: static void vmxnet3_shutdown_device(struct pci_dev *pdev)*/
6988 /* LDV_COMMENT_BEGIN_PREP */
6989 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
6990 #ifdef __BIG_ENDIAN_BITFIELD
6991 #endif
6992 #ifdef __BIG_ENDIAN_BITFIELD
6993 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
6994 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
6995 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
6996 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
6997 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
6998 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
6999 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
7000 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
7001 VMXNET3_TCD_GEN_SIZE)
7002 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
7003 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
7004 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
7005 (dstrcd) = (tmp); \
7006 vmxnet3_RxCompToCPU((rcd), (tmp)); \
7007 } while (0)
7008 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
7009 (dstrxd) = (tmp); \
7010 vmxnet3_RxDescToCPU((rxd), (tmp)); \
7011 } while (0)
7012 #else
7013 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
7014 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
7015 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
7016 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
7017 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
7018 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
7019 #endif
7020 #ifdef __BIG_ENDIAN_BITFIELD
7021 #endif
7022 #ifdef __BIG_ENDIAN_BITFIELD
7023 #else
7024 #endif
7025 #ifdef __BIG_ENDIAN_BITFIELD
7026 #endif
7027 #ifdef __BIG_ENDIAN_BITFIELD
7028 #endif
7029 #ifdef VMXNET3_RSS
7030 #endif
7031 #ifdef __BIG_ENDIAN_BITFIELD
7032 #endif
7033 #ifdef CONFIG_PCI_MSI
7034 #endif
7035 #ifdef CONFIG_NET_POLL_CONTROLLER
7036 #ifdef CONFIG_PCI_MSI
7037 #endif
7038 #endif
7039 #ifdef CONFIG_PCI_MSI
7040 #endif
7041 #ifdef CONFIG_PCI_MSI
7042 #endif
7043 #ifdef CONFIG_PCI_MSI
7044 #endif
7045 #ifdef VMXNET3_RSS
7046 #endif
7047 #ifdef CONFIG_PCI_MSI
7048 #endif
7049 #ifdef CONFIG_PCI_MSI
7050 #endif
7051 #ifdef CONFIG_NET_POLL_CONTROLLER
7052 #endif
7053 #ifdef VMXNET3_RSS
7054 #endif
7055 #ifdef VMXNET3_RSS
7056 #endif
7057 #ifdef VMXNET3_RSS
7058 #endif
7059 #ifdef VMXNET3_RSS
7060 #endif
7061 #ifdef VMXNET3_RSS
7062 #endif
7063 #ifdef VMXNET3_RSS
7064 #endif
7065 /* LDV_COMMENT_END_PREP */
7066 /* LDV_COMMENT_FUNCTION_CALL Function from field "shutdown" from driver structure with callbacks "vmxnet3_driver" */
7067 ldv_handler_precall();
7068 vmxnet3_shutdown_device( var_group4);
7069 /* LDV_COMMENT_BEGIN_PREP */
7070 #ifdef CONFIG_PM
7071 #endif
7072 #ifdef CONFIG_PM
7073 #endif
7074 /* LDV_COMMENT_END_PREP */
7075
7076
7077
7078
7079 }
7080
7081 break;
7082 case 15: {
7083
7084 /** CALLBACK SECTION request_irq **/
7085 LDV_IN_INTERRUPT=2;
7086
7087 /* content: static irqreturn_t vmxnet3_intr(int irq, void *dev_id)*/
7088 /* LDV_COMMENT_BEGIN_PREP */
7089 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
7090 #ifdef __BIG_ENDIAN_BITFIELD
7091 #endif
7092 #ifdef __BIG_ENDIAN_BITFIELD
7093 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
7094 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
7095 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
7096 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
7097 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
7098 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
7099 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
7100 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
7101 VMXNET3_TCD_GEN_SIZE)
7102 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
7103 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
7104 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
7105 (dstrcd) = (tmp); \
7106 vmxnet3_RxCompToCPU((rcd), (tmp)); \
7107 } while (0)
7108 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
7109 (dstrxd) = (tmp); \
7110 vmxnet3_RxDescToCPU((rxd), (tmp)); \
7111 } while (0)
7112 #else
7113 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
7114 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
7115 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
7116 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
7117 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
7118 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
7119 #endif
7120 #ifdef __BIG_ENDIAN_BITFIELD
7121 #endif
7122 #ifdef __BIG_ENDIAN_BITFIELD
7123 #else
7124 #endif
7125 #ifdef __BIG_ENDIAN_BITFIELD
7126 #endif
7127 #ifdef __BIG_ENDIAN_BITFIELD
7128 #endif
7129 #ifdef VMXNET3_RSS
7130 #endif
7131 #ifdef __BIG_ENDIAN_BITFIELD
7132 #endif
7133 #ifdef CONFIG_PCI_MSI
7134 #endif
7135 /* LDV_COMMENT_END_PREP */
7136 /* LDV_COMMENT_FUNCTION_CALL */
7137 ldv_handler_precall();
7138 vmxnet3_intr( var_vmxnet3_intr_52_p0, var_vmxnet3_intr_52_p1);
7139 /* LDV_COMMENT_BEGIN_PREP */
7140 #ifdef CONFIG_NET_POLL_CONTROLLER
7141 #ifdef CONFIG_PCI_MSI
7142 #endif
7143 #endif
7144 #ifdef CONFIG_PCI_MSI
7145 #endif
7146 #ifdef CONFIG_PCI_MSI
7147 #endif
7148 #ifdef CONFIG_PCI_MSI
7149 #endif
7150 #ifdef VMXNET3_RSS
7151 #endif
7152 #ifdef CONFIG_PCI_MSI
7153 #endif
7154 #ifdef CONFIG_PCI_MSI
7155 #endif
7156 #ifdef CONFIG_NET_POLL_CONTROLLER
7157 #endif
7158 #ifdef VMXNET3_RSS
7159 #endif
7160 #ifdef VMXNET3_RSS
7161 #endif
7162 #ifdef VMXNET3_RSS
7163 #endif
7164 #ifdef VMXNET3_RSS
7165 #endif
7166 #ifdef VMXNET3_RSS
7167 #endif
7168 #ifdef VMXNET3_RSS
7169 #endif
7170 #ifdef CONFIG_PM
7171 #endif
7172 #ifdef CONFIG_PM
7173 #endif
7174 /* LDV_COMMENT_END_PREP */
7175 LDV_IN_INTERRUPT=1;
7176
7177
7178
7179 }
7180
7181 break;
7182 case 16: {
7183
7184 /** CALLBACK SECTION request_irq **/
7185 LDV_IN_INTERRUPT=2;
7186
7187 /* content: static irqreturn_t vmxnet3_msix_event(int irq, void *data)*/
7188 /* LDV_COMMENT_BEGIN_PREP */
7189 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
7190 #ifdef __BIG_ENDIAN_BITFIELD
7191 #endif
7192 #ifdef __BIG_ENDIAN_BITFIELD
7193 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
7194 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
7195 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
7196 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
7197 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
7198 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
7199 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
7200 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
7201 VMXNET3_TCD_GEN_SIZE)
7202 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
7203 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
7204 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
7205 (dstrcd) = (tmp); \
7206 vmxnet3_RxCompToCPU((rcd), (tmp)); \
7207 } while (0)
7208 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
7209 (dstrxd) = (tmp); \
7210 vmxnet3_RxDescToCPU((rxd), (tmp)); \
7211 } while (0)
7212 #else
7213 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
7214 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
7215 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
7216 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
7217 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
7218 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
7219 #endif
7220 #ifdef __BIG_ENDIAN_BITFIELD
7221 #endif
7222 #ifdef __BIG_ENDIAN_BITFIELD
7223 #else
7224 #endif
7225 #ifdef __BIG_ENDIAN_BITFIELD
7226 #endif
7227 #ifdef __BIG_ENDIAN_BITFIELD
7228 #endif
7229 #ifdef VMXNET3_RSS
7230 #endif
7231 #ifdef __BIG_ENDIAN_BITFIELD
7232 #endif
7233 #ifdef CONFIG_PCI_MSI
7234 /* LDV_COMMENT_END_PREP */
7235 /* LDV_COMMENT_FUNCTION_CALL */
7236 ldv_handler_precall();
7237 vmxnet3_msix_event( var_vmxnet3_msix_event_51_p0, var_vmxnet3_msix_event_51_p1);
7238 /* LDV_COMMENT_BEGIN_PREP */
7239 #endif
7240 #ifdef CONFIG_NET_POLL_CONTROLLER
7241 #ifdef CONFIG_PCI_MSI
7242 #endif
7243 #endif
7244 #ifdef CONFIG_PCI_MSI
7245 #endif
7246 #ifdef CONFIG_PCI_MSI
7247 #endif
7248 #ifdef CONFIG_PCI_MSI
7249 #endif
7250 #ifdef VMXNET3_RSS
7251 #endif
7252 #ifdef CONFIG_PCI_MSI
7253 #endif
7254 #ifdef CONFIG_PCI_MSI
7255 #endif
7256 #ifdef CONFIG_NET_POLL_CONTROLLER
7257 #endif
7258 #ifdef VMXNET3_RSS
7259 #endif
7260 #ifdef VMXNET3_RSS
7261 #endif
7262 #ifdef VMXNET3_RSS
7263 #endif
7264 #ifdef VMXNET3_RSS
7265 #endif
7266 #ifdef VMXNET3_RSS
7267 #endif
7268 #ifdef VMXNET3_RSS
7269 #endif
7270 #ifdef CONFIG_PM
7271 #endif
7272 #ifdef CONFIG_PM
7273 #endif
7274 /* LDV_COMMENT_END_PREP */
7275 LDV_IN_INTERRUPT=1;
7276
7277
7278
7279 }
7280
7281 break;
7282 case 17: {
7283
7284 /** CALLBACK SECTION request_irq **/
7285 LDV_IN_INTERRUPT=2;
7286
7287 /* content: static irqreturn_t vmxnet3_msix_rx(int irq, void *data)*/
7288 /* LDV_COMMENT_BEGIN_PREP */
7289 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
7290 #ifdef __BIG_ENDIAN_BITFIELD
7291 #endif
7292 #ifdef __BIG_ENDIAN_BITFIELD
7293 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
7294 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
7295 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
7296 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
7297 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
7298 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
7299 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
7300 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
7301 VMXNET3_TCD_GEN_SIZE)
7302 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
7303 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
7304 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
7305 (dstrcd) = (tmp); \
7306 vmxnet3_RxCompToCPU((rcd), (tmp)); \
7307 } while (0)
7308 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
7309 (dstrxd) = (tmp); \
7310 vmxnet3_RxDescToCPU((rxd), (tmp)); \
7311 } while (0)
7312 #else
7313 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
7314 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
7315 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
7316 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
7317 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
7318 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
7319 #endif
7320 #ifdef __BIG_ENDIAN_BITFIELD
7321 #endif
7322 #ifdef __BIG_ENDIAN_BITFIELD
7323 #else
7324 #endif
7325 #ifdef __BIG_ENDIAN_BITFIELD
7326 #endif
7327 #ifdef __BIG_ENDIAN_BITFIELD
7328 #endif
7329 #ifdef VMXNET3_RSS
7330 #endif
7331 #ifdef __BIG_ENDIAN_BITFIELD
7332 #endif
7333 #ifdef CONFIG_PCI_MSI
7334 /* LDV_COMMENT_END_PREP */
7335 /* LDV_COMMENT_FUNCTION_CALL */
7336 ldv_handler_precall();
7337 vmxnet3_msix_rx( var_vmxnet3_msix_rx_50_p0, var_vmxnet3_msix_rx_50_p1);
7338 /* LDV_COMMENT_BEGIN_PREP */
7339 #endif
7340 #ifdef CONFIG_NET_POLL_CONTROLLER
7341 #ifdef CONFIG_PCI_MSI
7342 #endif
7343 #endif
7344 #ifdef CONFIG_PCI_MSI
7345 #endif
7346 #ifdef CONFIG_PCI_MSI
7347 #endif
7348 #ifdef CONFIG_PCI_MSI
7349 #endif
7350 #ifdef VMXNET3_RSS
7351 #endif
7352 #ifdef CONFIG_PCI_MSI
7353 #endif
7354 #ifdef CONFIG_PCI_MSI
7355 #endif
7356 #ifdef CONFIG_NET_POLL_CONTROLLER
7357 #endif
7358 #ifdef VMXNET3_RSS
7359 #endif
7360 #ifdef VMXNET3_RSS
7361 #endif
7362 #ifdef VMXNET3_RSS
7363 #endif
7364 #ifdef VMXNET3_RSS
7365 #endif
7366 #ifdef VMXNET3_RSS
7367 #endif
7368 #ifdef VMXNET3_RSS
7369 #endif
7370 #ifdef CONFIG_PM
7371 #endif
7372 #ifdef CONFIG_PM
7373 #endif
7374 /* LDV_COMMENT_END_PREP */
7375 LDV_IN_INTERRUPT=1;
7376
7377
7378
7379 }
7380
7381 break;
7382 case 18: {
7383
7384 /** CALLBACK SECTION request_irq **/
7385 LDV_IN_INTERRUPT=2;
7386
7387 /* content: static irqreturn_t vmxnet3_msix_tx(int irq, void *data)*/
7388 /* LDV_COMMENT_BEGIN_PREP */
7389 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
7390 #ifdef __BIG_ENDIAN_BITFIELD
7391 #endif
7392 #ifdef __BIG_ENDIAN_BITFIELD
7393 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
7394 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
7395 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
7396 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
7397 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
7398 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
7399 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
7400 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
7401 VMXNET3_TCD_GEN_SIZE)
7402 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
7403 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
7404 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
7405 (dstrcd) = (tmp); \
7406 vmxnet3_RxCompToCPU((rcd), (tmp)); \
7407 } while (0)
7408 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
7409 (dstrxd) = (tmp); \
7410 vmxnet3_RxDescToCPU((rxd), (tmp)); \
7411 } while (0)
7412 #else
7413 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
7414 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
7415 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
7416 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
7417 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
7418 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
7419 #endif
7420 #ifdef __BIG_ENDIAN_BITFIELD
7421 #endif
7422 #ifdef __BIG_ENDIAN_BITFIELD
7423 #else
7424 #endif
7425 #ifdef __BIG_ENDIAN_BITFIELD
7426 #endif
7427 #ifdef __BIG_ENDIAN_BITFIELD
7428 #endif
7429 #ifdef VMXNET3_RSS
7430 #endif
7431 #ifdef __BIG_ENDIAN_BITFIELD
7432 #endif
7433 #ifdef CONFIG_PCI_MSI
7434 /* LDV_COMMENT_END_PREP */
7435 /* LDV_COMMENT_FUNCTION_CALL */
7436 ldv_handler_precall();
7437 vmxnet3_msix_tx( var_vmxnet3_msix_tx_49_p0, var_vmxnet3_msix_tx_49_p1);
7438 /* LDV_COMMENT_BEGIN_PREP */
7439 #endif
7440 #ifdef CONFIG_NET_POLL_CONTROLLER
7441 #ifdef CONFIG_PCI_MSI
7442 #endif
7443 #endif
7444 #ifdef CONFIG_PCI_MSI
7445 #endif
7446 #ifdef CONFIG_PCI_MSI
7447 #endif
7448 #ifdef CONFIG_PCI_MSI
7449 #endif
7450 #ifdef VMXNET3_RSS
7451 #endif
7452 #ifdef CONFIG_PCI_MSI
7453 #endif
7454 #ifdef CONFIG_PCI_MSI
7455 #endif
7456 #ifdef CONFIG_NET_POLL_CONTROLLER
7457 #endif
7458 #ifdef VMXNET3_RSS
7459 #endif
7460 #ifdef VMXNET3_RSS
7461 #endif
7462 #ifdef VMXNET3_RSS
7463 #endif
7464 #ifdef VMXNET3_RSS
7465 #endif
7466 #ifdef VMXNET3_RSS
7467 #endif
7468 #ifdef VMXNET3_RSS
7469 #endif
7470 #ifdef CONFIG_PM
7471 #endif
7472 #ifdef CONFIG_PM
7473 #endif
7474 /* LDV_COMMENT_END_PREP */
7475 LDV_IN_INTERRUPT=1;
7476
7477
7478
7479 }
7480
7481 break;
7482 default: break;
7483
7484 }
7485
7486 }
7487
7488 ldv_module_exit:
7489
7490 /** INIT: init_type: ST_MODULE_EXIT **/
7491 /* content: static void vmxnet3_exit_module(void)*/
7492 /* LDV_COMMENT_BEGIN_PREP */
7493 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
7494 #ifdef __BIG_ENDIAN_BITFIELD
7495 #endif
7496 #ifdef __BIG_ENDIAN_BITFIELD
7497 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
7498 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
7499 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
7500 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
7501 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
7502 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
7503 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
7504 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
7505 VMXNET3_TCD_GEN_SIZE)
7506 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
7507 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
7508 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
7509 (dstrcd) = (tmp); \
7510 vmxnet3_RxCompToCPU((rcd), (tmp)); \
7511 } while (0)
7512 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
7513 (dstrxd) = (tmp); \
7514 vmxnet3_RxDescToCPU((rxd), (tmp)); \
7515 } while (0)
7516 #else
7517 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
7518 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
7519 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
7520 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
7521 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
7522 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
7523 #endif
7524 #ifdef __BIG_ENDIAN_BITFIELD
7525 #endif
7526 #ifdef __BIG_ENDIAN_BITFIELD
7527 #else
7528 #endif
7529 #ifdef __BIG_ENDIAN_BITFIELD
7530 #endif
7531 #ifdef __BIG_ENDIAN_BITFIELD
7532 #endif
7533 #ifdef VMXNET3_RSS
7534 #endif
7535 #ifdef __BIG_ENDIAN_BITFIELD
7536 #endif
7537 #ifdef CONFIG_PCI_MSI
7538 #endif
7539 #ifdef CONFIG_NET_POLL_CONTROLLER
7540 #ifdef CONFIG_PCI_MSI
7541 #endif
7542 #endif
7543 #ifdef CONFIG_PCI_MSI
7544 #endif
7545 #ifdef CONFIG_PCI_MSI
7546 #endif
7547 #ifdef CONFIG_PCI_MSI
7548 #endif
7549 #ifdef VMXNET3_RSS
7550 #endif
7551 #ifdef CONFIG_PCI_MSI
7552 #endif
7553 #ifdef CONFIG_PCI_MSI
7554 #endif
7555 #ifdef CONFIG_NET_POLL_CONTROLLER
7556 #endif
7557 #ifdef VMXNET3_RSS
7558 #endif
7559 #ifdef VMXNET3_RSS
7560 #endif
7561 #ifdef VMXNET3_RSS
7562 #endif
7563 #ifdef VMXNET3_RSS
7564 #endif
7565 #ifdef VMXNET3_RSS
7566 #endif
7567 #ifdef VMXNET3_RSS
7568 #endif
7569 #ifdef CONFIG_PM
7570 #endif
7571 #ifdef CONFIG_PM
7572 #endif
7573 /* LDV_COMMENT_END_PREP */
7574 /* LDV_COMMENT_FUNCTION_CALL The kernel calls the driver release function before the driver is unloaded from the kernel. This function is declared as "MODULE_EXIT(function name)". */
7575 ldv_handler_precall();
7576 vmxnet3_exit_module();
7577
7578 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver is unloaded. */
7579 ldv_final: ldv_check_final_state();
7580
7581 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
7582 return;
7583
7584 }
7585 #endif
7586
7587 /* LDV_COMMENT_END_MAIN */
1
2 #include <linux/kernel.h>
3 bool ldv_is_err(const void *ptr);
4 bool ldv_is_err_or_null(const void *ptr);
5 void* ldv_err_ptr(long error);
6 long ldv_ptr_err(const void *ptr);
7
8 extern void ldv_dma_map_page(void);
9 extern void ldv_dma_mapping_error(void);
10 #line 1 "/home/ldvuser/ldv/ref_launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/11688/dscv_tempdir/dscv/ri/331_1a/drivers/net/vmxnet3/vmxnet3_drv.c"
11
12 /*
13 * Linux driver for VMware's vmxnet3 ethernet NIC.
14 *
15 * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
16 *
17 * This program is free software; you can redistribute it and/or modify it
18 * under the terms of the GNU General Public License as published by the
19 * Free Software Foundation; version 2 of the License and no later version.
20 *
21 * This program is distributed in the hope that it will be useful, but
22 * WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
24 * NON INFRINGEMENT. See the GNU General Public License for more
25 * details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
30 *
31 * The full GNU General Public License is included in this distribution in
32 * the file called "COPYING".
33 *
34 * Maintained by: pv-drivers@vmware.com
35 *
36 */
37
38 #include <linux/module.h>
39 #include <net/ip6_checksum.h>
40
41 #include "vmxnet3_int.h"
42
43 char vmxnet3_driver_name[] = "vmxnet3";
44 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
45
46 /*
47 * PCI Device ID Table
48 * Last entry must be all 0s
49 */
50 static const struct pci_device_id vmxnet3_pciid_table[] = {
51 {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
52 {0}
53 };
54
55 MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
56
57 static int enable_mq = 1;
58
59 static void
60 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
61
62 /*
63 * Enable/Disable the given intr
64 */
65 static void
66 vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
67 {
68 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
69 }
70
71
72 static void
73 vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
74 {
75 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
76 }
77
78
79 /*
80 * Enable/Disable all intrs used by the device
81 */
82 static void
83 vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
84 {
85 int i;
86
87 for (i = 0; i < adapter->intr.num_intrs; i++)
88 vmxnet3_enable_intr(adapter, i);
89 adapter->shared->devRead.intrConf.intrCtrl &=
90 cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
91 }
92
93
94 static void
95 vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
96 {
97 int i;
98
99 adapter->shared->devRead.intrConf.intrCtrl |=
100 cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
101 for (i = 0; i < adapter->intr.num_intrs; i++)
102 vmxnet3_disable_intr(adapter, i);
103 }
104
105
106 static void
107 vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
108 {
109 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
110 }
111
112
113 static bool
114 vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
115 {
116 return tq->stopped;
117 }
118
119
120 static void
121 vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
122 {
123 tq->stopped = false;
124 netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
125 }
126
127
128 static void
129 vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
130 {
131 tq->stopped = false;
132 netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
133 }
134
135
136 static void
137 vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
138 {
139 tq->stopped = true;
140 tq->num_stop++;
141 netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
142 }
143
144
145 /*
146 * Check the link state. This may start or stop the tx queue.
147 */
148 static void
149 vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
150 {
151 u32 ret;
152 int i;
153 unsigned long flags;
154
155 spin_lock_irqsave(&adapter->cmd_lock, flags);
156 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
157 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
158 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
159
160 adapter->link_speed = ret >> 16;
161 if (ret & 1) { /* Link is up. */
162 netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
163 adapter->link_speed);
164 netif_carrier_on(adapter->netdev);
165
166 if (affectTxQueue) {
167 for (i = 0; i < adapter->num_tx_queues; i++)
168 vmxnet3_tq_start(&adapter->tx_queue[i],
169 adapter);
170 }
171 } else {
172 netdev_info(adapter->netdev, "NIC Link is Down\n");
173 netif_carrier_off(adapter->netdev);
174
175 if (affectTxQueue) {
176 for (i = 0; i < adapter->num_tx_queues; i++)
177 vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
178 }
179 }
180 }
181
182 static void
183 vmxnet3_process_events(struct vmxnet3_adapter *adapter)
184 {
185 int i;
186 unsigned long flags;
187 u32 events = le32_to_cpu(adapter->shared->ecr);
188 if (!events)
189 return;
190
191 vmxnet3_ack_events(adapter, events);
192
193 /* Check if link state has changed */
194 if (events & VMXNET3_ECR_LINK)
195 vmxnet3_check_link(adapter, true);
196
197 /* Check if there is an error on xmit/recv queues */
198 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
199 spin_lock_irqsave(&adapter->cmd_lock, flags);
200 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
201 VMXNET3_CMD_GET_QUEUE_STATUS);
202 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
203
204 for (i = 0; i < adapter->num_tx_queues; i++)
205 if (adapter->tqd_start[i].status.stopped)
206 dev_err(&adapter->netdev->dev,
207 "%s: tq[%d] error 0x%x\n",
208 adapter->netdev->name, i, le32_to_cpu(
209 adapter->tqd_start[i].status.error));
210 for (i = 0; i < adapter->num_rx_queues; i++)
211 if (adapter->rqd_start[i].status.stopped)
212 dev_err(&adapter->netdev->dev,
213 "%s: rq[%d] error 0x%x\n",
214 adapter->netdev->name, i,
215 adapter->rqd_start[i].status.error);
216
217 schedule_work(&adapter->work);
218 }
219 }
220
221 #ifdef __BIG_ENDIAN_BITFIELD
222 /*
223 * The device expects the bitfields in shared structures to be written in
224 * little endian. When CPU is big endian, the following routines are used to
225 * correctly read and write into ABI.
226 * The general technique used here is : double word bitfields are defined in
227 * opposite order for big endian architecture. Then before reading them in
228 * driver the complete double word is translated using le32_to_cpu. Similarly
229 * After the driver writes into bitfields, cpu_to_le32 is used to translate the
230 * double words into required format.
231 * In order to avoid touching bits in shared structure more than once, temporary
232 * descriptors are used. These are passed as srcDesc to following functions.
233 */
234 static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
235 struct Vmxnet3_RxDesc *dstDesc)
236 {
237 u32 *src = (u32 *)srcDesc + 2;
238 u32 *dst = (u32 *)dstDesc + 2;
239 dstDesc->addr = le64_to_cpu(srcDesc->addr);
240 *dst = le32_to_cpu(*src);
241 dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
242 }
243
244 static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
245 struct Vmxnet3_TxDesc *dstDesc)
246 {
247 int i;
248 u32 *src = (u32 *)(srcDesc + 1);
249 u32 *dst = (u32 *)(dstDesc + 1);
250
251 /* Working backwards so that the gen bit is set at the end. */
252 for (i = 2; i > 0; i--) {
253 src--;
254 dst--;
255 *dst = cpu_to_le32(*src);
256 }
257 }
258
259
260 static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
261 struct Vmxnet3_RxCompDesc *dstDesc)
262 {
263 int i = 0;
264 u32 *src = (u32 *)srcDesc;
265 u32 *dst = (u32 *)dstDesc;
266 for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
267 *dst = le32_to_cpu(*src);
268 src++;
269 dst++;
270 }
271 }
272
273
274 /* Used to read bitfield values from double words. */
275 static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
276 {
277 u32 temp = le32_to_cpu(*bitfield);
278 u32 mask = ((1 << size) - 1) << pos;
279 temp &= mask;
280 temp >>= pos;
281 return temp;
282 }
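
/*
 * A minimal usage sketch (added for illustration, not part of the original
 * driver source): pulling a single-bit field out of a little-endian
 * descriptor word with get_bitfield32() on a big-endian CPU. The literal
 * word value and the pos/size arguments below are made-up stand-ins for the
 * real VMXNET3_TXD_* shift/size constants defined in the driver headers.
 */
static inline u32 example_get_gen_bit(void)
{
	__le32 word = cpu_to_le32(0x80000000U);	/* assume the gen bit lives in bit 31 */

	/* mask = ((1 << 1) - 1) << 31, then shift back down: yields 1 here */
	return get_bitfield32(&word, 31, 1);
}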
283
284
285
286 #endif /* __BIG_ENDIAN_BITFIELD */
287
288 #ifdef __BIG_ENDIAN_BITFIELD
289
290 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
291 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
292 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
293 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
294 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
295 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
296 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
297 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
298 VMXNET3_TCD_GEN_SIZE)
299 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
300 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
301 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
302 (dstrcd) = (tmp); \
303 vmxnet3_RxCompToCPU((rcd), (tmp)); \
304 } while (0)
305 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
306 (dstrxd) = (tmp); \
307 vmxnet3_RxDescToCPU((rxd), (tmp)); \
308 } while (0)
309
310 #else
311
312 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
313 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
314 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
315 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
316 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
317 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
318
319 #endif /* __BIG_ENDIAN_BITFIELD */
320
321
322 static void
323 vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
324 struct pci_dev *pdev)
325 {
326 if (tbi->map_type == VMXNET3_MAP_SINGLE)
327 dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
328 PCI_DMA_TODEVICE);
329 else if (tbi->map_type == VMXNET3_MAP_PAGE)
330 dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
331 PCI_DMA_TODEVICE);
332 else
333 BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
334
335 tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
336 }
337
338
339 static int
340 vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
341 struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
342 {
343 struct sk_buff *skb;
344 int entries = 0;
345
346 /* no out of order completion */
347 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
348 BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
349
350 skb = tq->buf_info[eop_idx].skb;
351 BUG_ON(skb == NULL);
352 tq->buf_info[eop_idx].skb = NULL;
353
354 VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
355
356 while (tq->tx_ring.next2comp != eop_idx) {
357 vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
358 pdev);
359
360 /* update next2comp w/o tx_lock. Since we are marking more,
361 * instead of less, tx ring entries avail, the worst case is
362 * that the tx routine incorrectly re-queues a pkt due to
363 * insufficient tx ring entries.
364 */
365 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
366 entries++;
367 }
368
369 dev_kfree_skb_any(skb);
370 return entries;
371 }
372
373
374 static int
375 vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
376 struct vmxnet3_adapter *adapter)
377 {
378 int completed = 0;
379 union Vmxnet3_GenericDesc *gdesc;
380
381 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
382 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
383 completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
384 &gdesc->tcd), tq, adapter->pdev,
385 adapter);
386
387 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
388 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
389 }
390
391 if (completed) {
392 spin_lock(&tq->tx_lock);
393 if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
394 vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
395 VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
396 netif_carrier_ok(adapter->netdev))) {
397 vmxnet3_tq_wake(tq, adapter);
398 }
399 spin_unlock(&tq->tx_lock);
400 }
401 return completed;
402 }
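
The completion loop above keys entirely off the generation bit: the device writes each Vmxnet3_TxCompDesc with its current generation value, and the driver only consumes entries whose gen matches tq->comp_ring.gen, flipping its own copy on every wrap (presumably inside vmxnet3_comp_ring_adv_next2proc(), which is not part of this trace). A minimal host-side model of the idea, with made-up names and a toy 4-entry ring:

	#include <stdio.h>

	#define RING_SIZE 4

	struct comp_desc { unsigned int data; unsigned int gen; };

	static void post(struct comp_desc *ring, unsigned int *fill,
			 unsigned int *gen, unsigned int data)
	{
		ring[*fill].data = data;
		ring[*fill].gen = *gen;		/* producer stamps its current gen */
		if (++(*fill) == RING_SIZE) {
			*fill = 0;
			*gen ^= 1;		/* producer flips its gen on every wrap */
		}
	}

	int main(void)
	{
		struct comp_desc ring[RING_SIZE] = { { 0 } };
		unsigned int dev_fill = 0, dev_gen = 1;	/* producer (device) state */
		unsigned int drv_proc = 0, drv_gen = 1;	/* consumer (driver) state */
		unsigned int i, consumed = 0;

		for (i = 0; i < 3; i++)			/* device posts 3 completions */
			post(ring, &dev_fill, &dev_gen, i);
		while (ring[drv_proc].gen == drv_gen) {	/* driver drains them */
			consumed++;
			if (++drv_proc == RING_SIZE) {
				drv_proc = 0;
				drv_gen ^= 1;	/* consumer flips its gen on every wrap */
			}
		}

		for (i = 3; i < 5; i++)			/* 2 more; the 2nd wraps with flipped gen */
			post(ring, &dev_fill, &dev_gen, i);
		while (ring[drv_proc].gen == drv_gen) {	/* stale entry 1 no longer matches, so the loop stops */
			consumed++;
			if (++drv_proc == RING_SIZE) {
				drv_proc = 0;
				drv_gen ^= 1;
			}
		}

		printf("%u\n", consumed);	/* prints 5 */
		return 0;
	}

The stale third entry from the first pass still carries the old generation value, so the consumer stops there without any explicit count of outstanding completions; that is the property the driver relies on.
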
403
404
405 static void
406 vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
407 struct vmxnet3_adapter *adapter)
408 {
409 int i;
410
411 while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
412 struct vmxnet3_tx_buf_info *tbi;
413
414 tbi = tq->buf_info + tq->tx_ring.next2comp;
415
416 vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
417 if (tbi->skb) {
418 dev_kfree_skb_any(tbi->skb);
419 tbi->skb = NULL;
420 }
421 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
422 }
423
424 /* sanity check, verify all buffers are indeed unmapped and freed */
425 for (i = 0; i < tq->tx_ring.size; i++) {
426 BUG_ON(tq->buf_info[i].skb != NULL ||
427 tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
428 }
429
430 tq->tx_ring.gen = VMXNET3_INIT_GEN;
431 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
432
433 tq->comp_ring.gen = VMXNET3_INIT_GEN;
434 tq->comp_ring.next2proc = 0;
435 }
436
437
438 static void
439 vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
440 struct vmxnet3_adapter *adapter)
441 {
442 if (tq->tx_ring.base) {
443 dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
444 sizeof(struct Vmxnet3_TxDesc),
445 tq->tx_ring.base, tq->tx_ring.basePA);
446 tq->tx_ring.base = NULL;
447 }
448 if (tq->data_ring.base) {
449 dma_free_coherent(&adapter->pdev->dev,
450 tq->data_ring.size * tq->txdata_desc_size,
451 tq->data_ring.base, tq->data_ring.basePA);
452 tq->data_ring.base = NULL;
453 }
454 if (tq->comp_ring.base) {
455 dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
456 sizeof(struct Vmxnet3_TxCompDesc),
457 tq->comp_ring.base, tq->comp_ring.basePA);
458 tq->comp_ring.base = NULL;
459 }
460 if (tq->buf_info) {
461 dma_free_coherent(&adapter->pdev->dev,
462 tq->tx_ring.size * sizeof(tq->buf_info[0]),
463 tq->buf_info, tq->buf_info_pa);
464 tq->buf_info = NULL;
465 }
466 }
467
468
469 /* Destroy all tx queues */
470 void
471 vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
472 {
473 int i;
474
475 for (i = 0; i < adapter->num_tx_queues; i++)
476 vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
477 }
478
479
480 static void
481 vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
482 struct vmxnet3_adapter *adapter)
483 {
484 int i;
485
486 /* reset the tx ring contents to 0 and reset the tx ring states */
487 memset(tq->tx_ring.base, 0, tq->tx_ring.size *
488 sizeof(struct Vmxnet3_TxDesc));
489 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
490 tq->tx_ring.gen = VMXNET3_INIT_GEN;
491
492 memset(tq->data_ring.base, 0,
493 tq->data_ring.size * tq->txdata_desc_size);
494
495 /* reset the tx comp ring contents to 0 and reset comp ring states */
496 memset(tq->comp_ring.base, 0, tq->comp_ring.size *
497 sizeof(struct Vmxnet3_TxCompDesc));
498 tq->comp_ring.next2proc = 0;
499 tq->comp_ring.gen = VMXNET3_INIT_GEN;
500
501 /* reset the bookkeeping data */
502 memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
503 for (i = 0; i < tq->tx_ring.size; i++)
504 tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
505
506 /* stats are not reset */
507 }
508
509
510 static int
511 vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
512 struct vmxnet3_adapter *adapter)
513 {
514 size_t sz;
515
516 BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
517 tq->comp_ring.base || tq->buf_info);
518
519 tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
520 tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
521 &tq->tx_ring.basePA, GFP_KERNEL);
522 if (!tq->tx_ring.base) {
523 netdev_err(adapter->netdev, "failed to allocate tx ring\n");
524 goto err;
525 }
526
527 tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
528 tq->data_ring.size * tq->txdata_desc_size,
529 &tq->data_ring.basePA, GFP_KERNEL);
530 if (!tq->data_ring.base) {
531 netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
532 goto err;
533 }
534
535 tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
536 tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
537 &tq->comp_ring.basePA, GFP_KERNEL);
538 if (!tq->comp_ring.base) {
539 netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
540 goto err;
541 }
542
543 sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
544 tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz,
545 &tq->buf_info_pa, GFP_KERNEL);
546 if (!tq->buf_info)
547 goto err;
548
549 return 0;
550
551 err:
552 vmxnet3_tq_destroy(tq, adapter);
553 return -ENOMEM;
554 }
555
556 static void
557 vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
558 {
559 int i;
560
561 for (i = 0; i < adapter->num_tx_queues; i++)
562 vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
563 }
564
565 /*
566 * starting from ring->next2fill, allocate rx buffers for the given ring
567 * of the rx queue and update the rx desc. stop after @num_to_alloc buffers
568 * are allocated or allocation fails
569 */
570
571 static int
572 vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
573 int num_to_alloc, struct vmxnet3_adapter *adapter)
574 {
575 int num_allocated = 0;
576 struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
577 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
578 u32 val;
579
580 while (num_allocated <= num_to_alloc) {
581 struct vmxnet3_rx_buf_info *rbi;
582 union Vmxnet3_GenericDesc *gd;
583
584 rbi = rbi_base + ring->next2fill;
585 gd = ring->base + ring->next2fill;
586
587 if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
588 if (rbi->skb == NULL) {
589 rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
590 rbi->len,
591 GFP_KERNEL);
592 if (unlikely(rbi->skb == NULL)) {
593 rq->stats.rx_buf_alloc_failure++;
594 break;
595 }
596
597 rbi->dma_addr = dma_map_single(
598 &adapter->pdev->dev,
599 rbi->skb->data, rbi->len,
600 PCI_DMA_FROMDEVICE);
601 if (dma_mapping_error(&adapter->pdev->dev,
602 rbi->dma_addr)) {
603 dev_kfree_skb_any(rbi->skb);
604 rq->stats.rx_buf_alloc_failure++;
605 break;
606 }
607 } else {
608 /* rx buffer skipped by the device */
609 }
610 val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
611 } else {
612 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
613 rbi->len != PAGE_SIZE);
614
615 if (rbi->page == NULL) {
616 rbi->page = alloc_page(GFP_ATOMIC);
617 if (unlikely(rbi->page == NULL)) {
618 rq->stats.rx_buf_alloc_failure++;
619 break;
620 }
621 rbi->dma_addr = dma_map_page(
622 &adapter->pdev->dev,
623 rbi->page, 0, PAGE_SIZE,
624 PCI_DMA_FROMDEVICE);
625 if (dma_mapping_error(&adapter->pdev->dev,
626 rbi->dma_addr)) {
627 put_page(rbi->page);
628 rq->stats.rx_buf_alloc_failure++;
629 break;
630 }
631 } else {
632 /* rx buffers skipped by the device */
633 }
634 val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
635 }
636
637 gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
638 gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
639 | val | rbi->len);
640
641 /* Fill the last buffer but don't mark it ready, or else the
642 * device will think that the queue is full */
643 if (num_allocated == num_to_alloc)
644 break;
645
646 gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
647 num_allocated++;
648 vmxnet3_cmd_ring_adv_next2fill(ring);
649 }
650
651 netdev_dbg(adapter->netdev,
652 "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
653 num_allocated, ring->next2fill, ring->next2comp);
654
655 /* so that the device can distinguish a full ring and an empty ring */
656 BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
657
658 return num_allocated;
659 }
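
The dword[2] written above packs the buffer length, the buffer type (val) and the generation bit into a single 32-bit little-endian word. A hedged packing sketch; the shift values and the BTYPE_BODY encoding below are assumptions for illustration only, the real constants live in vmxnet3_defs.h, which is not part of this trace:

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed layout of the rx descriptor's dword[2]:
	 * bits 0-13 len, bit 14 btype, bit 31 gen. */
	#define RXD_BTYPE_SHIFT	14
	#define RXD_GEN_SHIFT	31

	int main(void)
	{
		uint32_t gen = 1, btype_body = 1, len = 4096;	/* a PAGE_SIZE body buffer */
		uint32_t dword2 = (gen << RXD_GEN_SHIFT) |
				  (btype_body << RXD_BTYPE_SHIFT) | len;

		printf("0x%08x\n", dword2);	/* prints 0x80005000 */
		return 0;
	}
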
660
661
662 static void
663 vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
664 struct vmxnet3_rx_buf_info *rbi)
665 {
666 struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
667 skb_shinfo(skb)->nr_frags;
668
669 BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
670
671 __skb_frag_set_page(frag, rbi->page);
672 frag->page_offset = 0;
673 skb_frag_size_set(frag, rcd->len);
674 skb->data_len += rcd->len;
675 skb->truesize += PAGE_SIZE;
676 skb_shinfo(skb)->nr_frags++;
677 }
678
679
680 static int
681 vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
682 struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
683 struct vmxnet3_adapter *adapter)
684 {
685 u32 dw2, len;
686 unsigned long buf_offset;
687 int i;
688 union Vmxnet3_GenericDesc *gdesc;
689 struct vmxnet3_tx_buf_info *tbi = NULL;
690
691 BUG_ON(ctx->copy_size > skb_headlen(skb));
692
693 /* use the previous gen bit for the SOP desc */
694 dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
695
696 ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
697 gdesc = ctx->sop_txd; /* both loops below can be skipped */
698
699 /* no need to map the buffer if headers are copied */
700 if (ctx->copy_size) {
701 ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
702 tq->tx_ring.next2fill *
703 tq->txdata_desc_size);
704 ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
705 ctx->sop_txd->dword[3] = 0;
706
707 tbi = tq->buf_info + tq->tx_ring.next2fill;
708 tbi->map_type = VMXNET3_MAP_NONE;
709
710 netdev_dbg(adapter->netdev,
711 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
712 tq->tx_ring.next2fill,
713 le64_to_cpu(ctx->sop_txd->txd.addr),
714 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
715 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
716
717 /* use the right gen for non-SOP desc */
718 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
719 }
720
721 /* linear part can use multiple tx desc if it's big */
722 len = skb_headlen(skb) - ctx->copy_size;
723 buf_offset = ctx->copy_size;
724 while (len) {
725 u32 buf_size;
726
727 if (len < VMXNET3_MAX_TX_BUF_SIZE) {
728 buf_size = len;
729 dw2 |= len;
730 } else {
731 buf_size = VMXNET3_MAX_TX_BUF_SIZE;
732 /* spec says that for TxDesc.len, 0 == 2^14 */
733 }
734
735 tbi = tq->buf_info + tq->tx_ring.next2fill;
736 tbi->map_type = VMXNET3_MAP_SINGLE;
737 tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
738 skb->data + buf_offset, buf_size,
739 PCI_DMA_TODEVICE);
740 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
741 return -EFAULT;
742
743 tbi->len = buf_size;
744
745 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
746 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
747
748 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
749 gdesc->dword[2] = cpu_to_le32(dw2);
750 gdesc->dword[3] = 0;
751
752 netdev_dbg(adapter->netdev,
753 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
754 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
755 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
756 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
757 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
758
759 len -= buf_size;
760 buf_offset += buf_size;
761 }
762
763 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
764 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
765 u32 buf_size;
766
767 buf_offset = 0;
768 len = skb_frag_size(frag);
769 while (len) {
770 tbi = tq->buf_info + tq->tx_ring.next2fill;
771 if (len < VMXNET3_MAX_TX_BUF_SIZE) {
772 buf_size = len;
773 dw2 |= len;
774 } else {
775 buf_size = VMXNET3_MAX_TX_BUF_SIZE;
776 /* spec says that for TxDesc.len, 0 == 2^14 */
777 }
778 tbi->map_type = VMXNET3_MAP_PAGE;
779 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
780 buf_offset, buf_size,
781 DMA_TO_DEVICE);
782 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
783 return -EFAULT;
784
785 tbi->len = buf_size;
786
787 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
788 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
789
790 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
791 gdesc->dword[2] = cpu_to_le32(dw2);
792 gdesc->dword[3] = 0;
793
794 netdev_dbg(adapter->netdev,
795 "txd[%u]: 0x%llx %u %u\n",
796 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
797 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
798 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
799 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
800
801 len -= buf_size;
802 buf_offset += buf_size;
803 }
804 }
805
806 ctx->eop_txd = gdesc;
807
808 /* set the last buf_info for the pkt */
809 tbi->skb = skb;
810 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
811
812 return 0;
813 }
814
815
816 /* Init all tx queues */
817 static void
818 vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
819 {
820 int i;
821
822 for (i = 0; i < adapter->num_tx_queues; i++)
823 vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
824 }
825
826
827 /*
828 * parse relevant protocol headers:
829 * For a tso pkt, relevant headers are L2/3/4 including options
830 * For a pkt requesting csum offloading, they are L2/3 and may include L4
831 * if it's a TCP/UDP pkt
832 *
833 * Returns:
834 * -1: error happens during parsing
835 * 0: protocol headers parsed, but too big to be copied
836 * 1: protocol headers parsed and copied
837 *
838 * Other effects:
839 * 1. related *ctx fields are updated.
840 * 2. ctx->copy_size is # of bytes copied
841 * 3. the portion to be copied is guaranteed to be in the linear part
842 *
843 */
844 static int
845 vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
846 struct vmxnet3_tx_ctx *ctx,
847 struct vmxnet3_adapter *adapter)
848 {
849 u8 protocol = 0;
850
851 if (ctx->mss) { /* TSO */
852 ctx->eth_ip_hdr_size = skb_transport_offset(skb);
853 ctx->l4_hdr_size = tcp_hdrlen(skb);
854 ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
855 } else {
856 if (skb->ip_summed == CHECKSUM_PARTIAL) {
857 ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
858
859 if (ctx->ipv4) {
860 const struct iphdr *iph = ip_hdr(skb);
861
862 protocol = iph->protocol;
863 } else if (ctx->ipv6) {
864 const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
865
866 protocol = ipv6h->nexthdr;
867 }
868
869 switch (protocol) {
870 case IPPROTO_TCP:
871 ctx->l4_hdr_size = tcp_hdrlen(skb);
872 break;
873 case IPPROTO_UDP:
874 ctx->l4_hdr_size = sizeof(struct udphdr);
875 break;
876 default:
877 ctx->l4_hdr_size = 0;
878 break;
879 }
880
881 ctx->copy_size = min(ctx->eth_ip_hdr_size +
882 ctx->l4_hdr_size, skb->len);
883 } else {
884 ctx->eth_ip_hdr_size = 0;
885 ctx->l4_hdr_size = 0;
886 /* copy as much as allowed */
887 ctx->copy_size = min_t(unsigned int,
888 tq->txdata_desc_size,
889 skb_headlen(skb));
890 }
891
892 if (skb->len <= VMXNET3_HDR_COPY_SIZE)
893 ctx->copy_size = skb->len;
894
895 /* make sure headers are accessible directly */
896 if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
897 goto err;
898 }
899
900 if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
901 tq->stats.oversized_hdr++;
902 ctx->copy_size = 0;
903 return 0;
904 }
905
906 return 1;
907 err:
908 return -1;
909 }
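
A worked instance of the non-TSO, CHECKSUM_PARTIAL branch above (standalone arithmetic only; skb and ctx are replaced by plain integers, and the full-copy shortcut for frames shorter than VMXNET3_HDR_COPY_SIZE is left out):

	#include <stdio.h>

	#define ETH_HLEN	14	/* Ethernet header, no VLAN tag */

	static unsigned int min_u(unsigned int a, unsigned int b)
	{
		return a < b ? a : b;
	}

	int main(void)
	{
		unsigned int ip_hlen = 20, tcp_hlen = 20, skb_len = 1514;
		unsigned int eth_ip_hdr_size = ETH_HLEN + ip_hlen;	/* 34 */
		unsigned int l4_hdr_size = tcp_hlen;			/* 20 */
		unsigned int copy_size = min_u(eth_ip_hdr_size + l4_hdr_size, skb_len);

		printf("%u\n", copy_size);	/* prints 54: only the headers are copied */
		return 0;
	}
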
910
911 /*
912 * copy relevant protocol headers to the transmit ring:
913 * For a tso pkt, relevant headers are L2/3/4 including options
914 * For a pkt requesting csum offloading, they are L2/3 and may include L4
915 * if it's a TCP/UDP pkt
916 *
917 *
918 * Note that this requires vmxnet3_parse_hdr to be called first to set the
919 * appropriate fields in ctx
920 */
921 static void
922 vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
923 struct vmxnet3_tx_ctx *ctx,
924 struct vmxnet3_adapter *adapter)
925 {
926 struct Vmxnet3_TxDataDesc *tdd;
927
928 tdd = tq->data_ring.base + tq->tx_ring.next2fill;
929
930 memcpy(tdd->data, skb->data, ctx->copy_size);
931 netdev_dbg(adapter->netdev,
932 "copy %u bytes to dataRing[%u]\n",
933 ctx->copy_size, tq->tx_ring.next2fill);
934 }
935
936
937 static void
938 vmxnet3_prepare_tso(struct sk_buff *skb,
939 struct vmxnet3_tx_ctx *ctx)
940 {
941 struct tcphdr *tcph = tcp_hdr(skb);
942
943 if (ctx->ipv4) {
944 struct iphdr *iph = ip_hdr(skb);
945
946 iph->check = 0;
947 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
948 IPPROTO_TCP, 0);
949 } else if (ctx->ipv6) {
950 struct ipv6hdr *iph = ipv6_hdr(skb);
951
952 tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
953 IPPROTO_TCP, 0);
954 }
955 }
956
957 static int txd_estimate(const struct sk_buff *skb)
958 {
959 int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
960 int i;
961
962 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
963 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
964
965 count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
966 }
967 return count;
968 }
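
txd_estimate() charges one descriptor per VMXNET3_MAX_TX_BUF_SIZE chunk of the linear area (plus one extra, per the "+ 1") and per page fragment. A hedged sketch of that arithmetic, assuming VMXNET3_TXD_NEEDED() is a ceiling division by VMXNET3_MAX_TX_BUF_SIZE, which the "spec says that for TxDesc.len, 0 == 2^14" comment below puts at 16 KiB:

	#include <stdio.h>

	#define MAX_TX_BUF_SIZE	(1u << 14)	/* 16384; assumed from the TxDesc.len comment */
	#define TXD_NEEDED(size)	(((size) + MAX_TX_BUF_SIZE - 1) / MAX_TX_BUF_SIZE)

	int main(void)
	{
		unsigned int headlen = 200;		 /* linear part of the skb */
		unsigned int frags[2] = { 32768, 4096 }; /* two page fragments */
		unsigned int count = TXD_NEEDED(headlen) + 1, i;

		for (i = 0; i < 2; i++)
			count += TXD_NEEDED(frags[i]);

		printf("%u\n", count);	/* 1 + 1 + 2 + 1 = 5 descriptors */
		return 0;
	}
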
969
970 /*
971 * Transmits a pkt through a given tq
972 * Returns:
973 * NETDEV_TX_OK: descriptors are set up successfully
974 * NETDEV_TX_OK: an error occurred, the pkt is dropped
975 * NETDEV_TX_BUSY: tx ring is full, queue is stopped
976 *
977 * Side-effects:
978 * 1. tx ring may be changed
979 * 2. tq stats may be updated accordingly
980 * 3. shared->txNumDeferred may be updated
981 */
982
983 static int
984 vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
985 struct vmxnet3_adapter *adapter, struct net_device *netdev)
986 {
987 int ret;
988 u32 count;
989 unsigned long flags;
990 struct vmxnet3_tx_ctx ctx;
991 union Vmxnet3_GenericDesc *gdesc;
992 #ifdef __BIG_ENDIAN_BITFIELD
993 /* Use temporary descriptor to avoid touching bits multiple times */
994 union Vmxnet3_GenericDesc tempTxDesc;
995 #endif
996
997 count = txd_estimate(skb);
998
999 ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
1000 ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));
1001
1002 ctx.mss = skb_shinfo(skb)->gso_size;
1003 if (ctx.mss) {
1004 if (skb_header_cloned(skb)) {
1005 if (unlikely(pskb_expand_head(skb, 0, 0,
1006 GFP_ATOMIC) != 0)) {
1007 tq->stats.drop_tso++;
1008 goto drop_pkt;
1009 }
1010 tq->stats.copy_skb_header++;
1011 }
1012 vmxnet3_prepare_tso(skb, &ctx);
1013 } else {
1014 if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
1015
1016 /* non-tso pkts must not use more than
1017 * VMXNET3_MAX_TXD_PER_PKT entries
1018 */
1019 if (skb_linearize(skb) != 0) {
1020 tq->stats.drop_too_many_frags++;
1021 goto drop_pkt;
1022 }
1023 tq->stats.linearized++;
1024
1025 /* recalculate the # of descriptors to use */
1026 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1027 }
1028 }
1029
1030 ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
1031 if (ret >= 0) {
1032 BUG_ON(ret <= 0 && ctx.copy_size != 0);
1033 /* hdrs parsed, check against other limits */
1034 if (ctx.mss) {
1035 if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
1036 VMXNET3_MAX_TX_BUF_SIZE)) {
1037 tq->stats.drop_oversized_hdr++;
1038 goto drop_pkt;
1039 }
1040 } else {
1041 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1042 if (unlikely(ctx.eth_ip_hdr_size +
1043 skb->csum_offset >
1044 VMXNET3_MAX_CSUM_OFFSET)) {
1045 tq->stats.drop_oversized_hdr++;
1046 goto drop_pkt;
1047 }
1048 }
1049 }
1050 } else {
1051 tq->stats.drop_hdr_inspect_err++;
1052 goto drop_pkt;
1053 }
1054
1055 spin_lock_irqsave(&tq->tx_lock, flags);
1056
1057 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
1058 tq->stats.tx_ring_full++;
1059 netdev_dbg(adapter->netdev,
1060 "tx queue stopped on %s, next2comp %u"
1061 " next2fill %u\n", adapter->netdev->name,
1062 tq->tx_ring.next2comp, tq->tx_ring.next2fill);
1063
1064 vmxnet3_tq_stop(tq, adapter);
1065 spin_unlock_irqrestore(&tq->tx_lock, flags);
1066 return NETDEV_TX_BUSY;
1067 }
1068
1069
1070 vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
1071
1072 /* fill tx descs related to addr & len */
1073 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
1074 goto unlock_drop_pkt;
1075
1076 /* setup the EOP desc */
1077 ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
1078
1079 /* setup the SOP desc */
1080 #ifdef __BIG_ENDIAN_BITFIELD
1081 gdesc = &tempTxDesc;
1082 gdesc->dword[2] = ctx.sop_txd->dword[2];
1083 gdesc->dword[3] = ctx.sop_txd->dword[3];
1084 #else
1085 gdesc = ctx.sop_txd;
1086 #endif
1087 if (ctx.mss) {
1088 gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
1089 gdesc->txd.om = VMXNET3_OM_TSO;
1090 gdesc->txd.msscof = ctx.mss;
1091 le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
1092 gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
1093 } else {
1094 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1095 gdesc->txd.hlen = ctx.eth_ip_hdr_size;
1096 gdesc->txd.om = VMXNET3_OM_CSUM;
1097 gdesc->txd.msscof = ctx.eth_ip_hdr_size +
1098 skb->csum_offset;
1099 } else {
1100 gdesc->txd.om = 0;
1101 gdesc->txd.msscof = 0;
1102 }
1103 le32_add_cpu(&tq->shared->txNumDeferred, 1);
1104 }
1105
1106 if (skb_vlan_tag_present(skb)) {
1107 gdesc->txd.ti = 1;
1108 gdesc->txd.tci = skb_vlan_tag_get(skb);
1109 }
1110
1111 /* finally flips the GEN bit of the SOP desc. */
1112 gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
1113 VMXNET3_TXD_GEN);
1114 #ifdef __BIG_ENDIAN_BITFIELD
1115 /* Finished updating the bitfields of the Tx Desc, so write them back in
1116 * the original place.
1117 */
1118 vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
1119 (struct Vmxnet3_TxDesc *)ctx.sop_txd);
1120 gdesc = ctx.sop_txd;
1121 #endif
1122 netdev_dbg(adapter->netdev,
1123 "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
1124 (u32)(ctx.sop_txd -
1125 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
1126 le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
1127
1128 spin_unlock_irqrestore(&tq->tx_lock, flags);
1129
1130 if (le32_to_cpu(tq->shared->txNumDeferred) >=
1131 le32_to_cpu(tq->shared->txThreshold)) {
1132 tq->shared->txNumDeferred = 0;
1133 VMXNET3_WRITE_BAR0_REG(adapter,
1134 VMXNET3_REG_TXPROD + tq->qid * 8,
1135 tq->tx_ring.next2fill);
1136 }
1137
1138 return NETDEV_TX_OK;
1139
1140 unlock_drop_pkt:
1141 spin_unlock_irqrestore(&tq->tx_lock, flags);
1142 drop_pkt:
1143 tq->stats.drop_total++;
1144 dev_kfree_skb_any(skb);
1145 return NETDEV_TX_OK;
1146 }
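
Two pieces of arithmetic in the function above are easy to check by hand. For a TSO packet, txNumDeferred advances by the number of segments the device will emit, ceil((skb->len - hlen) / mss), and the TXPROD doorbell is only written once the deferred count reaches txThreshold, so small packets get batched. A standalone sketch of the segment count, with made-up sizes:

	#include <stdio.h>

	int main(void)
	{
		unsigned int skb_len = 6014;	/* total bytes, headers included */
		unsigned int hlen = 54;		/* eth_ip_hdr_size + l4_hdr_size */
		unsigned int mss = 1460;

		/* same expression as the le32_add_cpu() argument in vmxnet3_tq_xmit() */
		unsigned int segs = (skb_len - hlen + mss - 1) / mss;

		printf("%u\n", segs);	/* 5960 payload bytes -> prints 5 segments */
		return 0;
	}
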
1147
1148
1149 static netdev_tx_t
1150 vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1151 {
1152 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1153
1154 BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
1155 return vmxnet3_tq_xmit(skb,
1156 &adapter->tx_queue[skb->queue_mapping],
1157 adapter, netdev);
1158 }
1159
1160
1161 static void
1162 vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1163 struct sk_buff *skb,
1164 union Vmxnet3_GenericDesc *gdesc)
1165 {
1166 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
1167 if (gdesc->rcd.v4 &&
1168 (le32_to_cpu(gdesc->dword[3]) &
1169 VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
1170 skb->ip_summed = CHECKSUM_UNNECESSARY;
1171 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
1172 BUG_ON(gdesc->rcd.frg);
1173 } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
1174 (1 << VMXNET3_RCD_TUC_SHIFT))) {
1175 skb->ip_summed = CHECKSUM_UNNECESSARY;
1176 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
1177 BUG_ON(gdesc->rcd.frg);
1178 } else {
1179 if (gdesc->rcd.csum) {
1180 skb->csum = htons(gdesc->rcd.csum);
1181 skb->ip_summed = CHECKSUM_PARTIAL;
1182 } else {
1183 skb_checksum_none_assert(skb);
1184 }
1185 }
1186 } else {
1187 skb_checksum_none_assert(skb);
1188 }
1189 }
1190
1191
1192 static void
1193 vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
1194 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
1195 {
1196 rq->stats.drop_err++;
1197 if (!rcd->fcs)
1198 rq->stats.drop_fcs++;
1199
1200 rq->stats.drop_total++;
1201
1202 /*
1203 * We do not unmap and chain the rx buffer to the skb.
1204 * We basically pretend this buffer is not used and will be recycled
1205 * by vmxnet3_rq_alloc_rx_buf()
1206 */
1207
1208 /*
1209 * ctx->skb may be NULL if this is the first and the only one
1210 * desc for the pkt
1211 */
1212 if (ctx->skb)
1213 dev_kfree_skb_irq(ctx->skb);
1214
1215 ctx->skb = NULL;
1216 }
1217
1218
1219 static u32
1220 vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
1221 union Vmxnet3_GenericDesc *gdesc)
1222 {
1223 u32 hlen, maplen;
1224 union {
1225 void *ptr;
1226 struct ethhdr *eth;
1227 struct iphdr *ipv4;
1228 struct ipv6hdr *ipv6;
1229 struct tcphdr *tcp;
1230 } hdr;
1231 BUG_ON(gdesc->rcd.tcp == 0);
1232
1233 maplen = skb_headlen(skb);
1234 if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
1235 return 0;
1236
1237 hdr.eth = eth_hdr(skb);
1238 if (gdesc->rcd.v4) {
1239 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP));
1240 hdr.ptr += sizeof(struct ethhdr);
1241 BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
1242 hlen = hdr.ipv4->ihl << 2;
1243 hdr.ptr += hdr.ipv4->ihl << 2;
1244 } else if (gdesc->rcd.v6) {
1245 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6));
1246 hdr.ptr += sizeof(struct ethhdr);
1247 /* Use an estimated value, since we also need to handle
1248 * the TSO case.
1249 */
1250 if (hdr.ipv6->nexthdr != IPPROTO_TCP)
1251 return sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1252 hlen = sizeof(struct ipv6hdr);
1253 hdr.ptr += sizeof(struct ipv6hdr);
1254 } else {
1255 /* Non-IP pkt, don't estimate header length */
1256 return 0;
1257 }
1258
1259 if (hlen + sizeof(struct tcphdr) > maplen)
1260 return 0;
1261
1262 return (hlen + (hdr.tcp->doff << 2));
1263 }
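
The header length returned above is pure shift arithmetic: the IPv4 ihl field and the TCP doff field both count 32-bit words, so each is multiplied by four. A small worked example with ihl = 5 (no IP options) and doff = 8 (TCP options present):

	#include <stdio.h>

	int main(void)
	{
		unsigned int ihl = 5, doff = 8;		/* both in 32-bit words */
		unsigned int hlen = (ihl << 2) + (doff << 2);

		printf("%u\n", hlen);	/* 20 + 32 = 52 bytes of IPv4 + TCP headers */
		return 0;
	}
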
1264
1265 static int
1266 vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1267 struct vmxnet3_adapter *adapter, int quota)
1268 {
1269 static const u32 rxprod_reg[2] = {
1270 VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
1271 };
1272 u32 num_pkts = 0;
1273 bool skip_page_frags = false;
1274 struct Vmxnet3_RxCompDesc *rcd;
1275 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
1276 u16 segCnt = 0, mss = 0;
1277 #ifdef __BIG_ENDIAN_BITFIELD
1278 struct Vmxnet3_RxDesc rxCmdDesc;
1279 struct Vmxnet3_RxCompDesc rxComp;
1280 #endif
1281 vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
1282 &rxComp);
1283 while (rcd->gen == rq->comp_ring.gen) {
1284 struct vmxnet3_rx_buf_info *rbi;
1285 struct sk_buff *skb, *new_skb = NULL;
1286 struct page *new_page = NULL;
1287 dma_addr_t new_dma_addr;
1288 int num_to_alloc;
1289 struct Vmxnet3_RxDesc *rxd;
1290 u32 idx, ring_idx;
1291 struct vmxnet3_cmd_ring *ring = NULL;
1292 if (num_pkts >= quota) {
1293 /* we may stop even before we see the EOP desc of
1294 * the current pkt
1295 */
1296 break;
1297 }
1298 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
1299 rcd->rqID != rq->dataRingQid);
1300 idx = rcd->rxdIdx;
1301 ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID);
1302 ring = rq->rx_ring + ring_idx;
1303 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
1304 &rxCmdDesc);
1305 rbi = rq->buf_info[ring_idx] + idx;
1306
1307 BUG_ON(rxd->addr != rbi->dma_addr ||
1308 rxd->len != rbi->len);
1309
1310 if (unlikely(rcd->eop && rcd->err)) {
1311 vmxnet3_rx_error(rq, rcd, ctx, adapter);
1312 goto rcd_done;
1313 }
1314
1315 if (rcd->sop) { /* first buf of the pkt */
1316 bool rxDataRingUsed;
1317 u16 len;
1318
1319 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
1320 (rcd->rqID != rq->qid &&
1321 rcd->rqID != rq->dataRingQid));
1322
1323 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
1324 BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
1325
1326 if (unlikely(rcd->len == 0)) {
1327 /* Pretend the rx buffer is skipped. */
1328 BUG_ON(!(rcd->sop && rcd->eop));
1329 netdev_dbg(adapter->netdev,
1330 "rxRing[%u][%u] 0 length\n",
1331 ring_idx, idx);
1332 goto rcd_done;
1333 }
1334
1335 skip_page_frags = false;
1336 ctx->skb = rbi->skb;
1337
1338 rxDataRingUsed =
1339 VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
1340 len = rxDataRingUsed ? rcd->len : rbi->len;
1341 new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
1342 len);
1343 if (new_skb == NULL) {
1344 /* Skb allocation failed, do not hand over this
1345 * skb to the stack. Reuse it. Drop the existing pkt.
1346 */
1347 rq->stats.rx_buf_alloc_failure++;
1348 ctx->skb = NULL;
1349 rq->stats.drop_total++;
1350 skip_page_frags = true;
1351 goto rcd_done;
1352 }
1353
1354 if (rxDataRingUsed) {
1355 size_t sz;
1356
1357 BUG_ON(rcd->len > rq->data_ring.desc_size);
1358
1359 ctx->skb = new_skb;
1360 sz = rcd->rxdIdx * rq->data_ring.desc_size;
1361 memcpy(new_skb->data,
1362 &rq->data_ring.base[sz], rcd->len);
1363 } else {
1364 ctx->skb = rbi->skb;
1365
1366 new_dma_addr =
1367 dma_map_single(&adapter->pdev->dev,
1368 new_skb->data, rbi->len,
1369 PCI_DMA_FROMDEVICE);
1370 if (dma_mapping_error(&adapter->pdev->dev,
1371 new_dma_addr)) {
1372 dev_kfree_skb(new_skb);
1373 /* Skb allocation failed, do not
1374 * hand over this skb to the stack. Reuse
1375 * it. Drop the existing pkt.
1376 */
1377 rq->stats.rx_buf_alloc_failure++;
1378 ctx->skb = NULL;
1379 rq->stats.drop_total++;
1380 skip_page_frags = true;
1381 goto rcd_done;
1382 }
1383
1384 dma_unmap_single(&adapter->pdev->dev,
1385 rbi->dma_addr,
1386 rbi->len,
1387 PCI_DMA_FROMDEVICE);
1388
1389 /* Immediate refill */
1390 rbi->skb = new_skb;
1391 rbi->dma_addr = new_dma_addr;
1392 rxd->addr = cpu_to_le64(rbi->dma_addr);
1393 rxd->len = rbi->len;
1394 }
1395
1396 #ifdef VMXNET3_RSS
1397 if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
1398 (adapter->netdev->features & NETIF_F_RXHASH))
1399 skb_set_hash(ctx->skb,
1400 le32_to_cpu(rcd->rssHash),
1401 PKT_HASH_TYPE_L3);
1402 #endif
1403 skb_put(ctx->skb, rcd->len);
1404
1405 if (VMXNET3_VERSION_GE_2(adapter) &&
1406 rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
1407 struct Vmxnet3_RxCompDescExt *rcdlro;
1408 rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
1409
1410 segCnt = rcdlro->segCnt;
1411 WARN_ON_ONCE(segCnt == 0);
1412 mss = rcdlro->mss;
1413 if (unlikely(segCnt <= 1))
1414 segCnt = 0;
1415 } else {
1416 segCnt = 0;
1417 }
1418 } else {
1419 BUG_ON(ctx->skb == NULL && !skip_page_frags);
1420
1421 /* non-SOP buffer must be type 1 in most cases */
1422 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
1423 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
1424
1425 /* If an SOP buffer was dropped, skip all
1426 * following non-SOP fragments. They will be reused.
1427 */
1428 if (skip_page_frags)
1429 goto rcd_done;
1430
1431 if (rcd->len) {
1432 new_page = alloc_page(GFP_ATOMIC);
1433 /* Replacement page frag could not be allocated.
1434 * Reuse this page. Drop the pkt and free the
1435 * skb which contained this page as a frag. Skip
1436 * processing all the following non-sop frags.
1437 */
1438 if (unlikely(!new_page)) {
1439 rq->stats.rx_buf_alloc_failure++;
1440 dev_kfree_skb(ctx->skb);
1441 ctx->skb = NULL;
1442 skip_page_frags = true;
1443 goto rcd_done;
1444 }
1445 new_dma_addr = dma_map_page(&adapter->pdev->dev,
1446 new_page,
1447 0, PAGE_SIZE,
1448 PCI_DMA_FROMDEVICE);
1449 if (dma_mapping_error(&adapter->pdev->dev,
1450 new_dma_addr)) {
1451 put_page(new_page);
1452 rq->stats.rx_buf_alloc_failure++;
1453 dev_kfree_skb(ctx->skb);
1454 ctx->skb = NULL;
1455 skip_page_frags = true;
1456 goto rcd_done;
1457 }
1458
1459 dma_unmap_page(&adapter->pdev->dev,
1460 rbi->dma_addr, rbi->len,
1461 PCI_DMA_FROMDEVICE);
1462
1463 vmxnet3_append_frag(ctx->skb, rcd, rbi);
1464
1465 /* Immediate refill */
1466 rbi->page = new_page;
1467 rbi->dma_addr = new_dma_addr;
1468 rxd->addr = cpu_to_le64(rbi->dma_addr);
1469 rxd->len = rbi->len;
1470 }
1471 }
1472
1473
1474 skb = ctx->skb;
1475 if (rcd->eop) {
1476 u32 mtu = adapter->netdev->mtu;
1477 skb->len += skb->data_len;
1478
1479 vmxnet3_rx_csum(adapter, skb,
1480 (union Vmxnet3_GenericDesc *)rcd);
1481 skb->protocol = eth_type_trans(skb, adapter->netdev);
1482 if (!rcd->tcp || !adapter->lro)
1483 goto not_lro;
1484
1485 if (segCnt != 0 && mss != 0) {
1486 skb_shinfo(skb)->gso_type = rcd->v4 ?
1487 SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
1488 skb_shinfo(skb)->gso_size = mss;
1489 skb_shinfo(skb)->gso_segs = segCnt;
1490 } else if (segCnt != 0 || skb->len > mtu) {
1491 u32 hlen;
1492
1493 hlen = vmxnet3_get_hdr_len(adapter, skb,
1494 (union Vmxnet3_GenericDesc *)rcd);
1495 if (hlen == 0)
1496 goto not_lro;
1497
1498 skb_shinfo(skb)->gso_type =
1499 rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
1500 if (segCnt != 0) {
1501 skb_shinfo(skb)->gso_segs = segCnt;
1502 skb_shinfo(skb)->gso_size =
1503 DIV_ROUND_UP(skb->len -
1504 hlen, segCnt);
1505 } else {
1506 skb_shinfo(skb)->gso_size = mtu - hlen;
1507 }
1508 }
1509 not_lro:
1510 if (unlikely(rcd->ts))
1511 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
1512
1513 if (adapter->netdev->features & NETIF_F_LRO)
1514 netif_receive_skb(skb);
1515 else
1516 napi_gro_receive(&rq->napi, skb);
1517
1518 ctx->skb = NULL;
1519 num_pkts++;
1520 }
1521
1522 rcd_done:
1523 /* device may have skipped some rx descs */
1524 ring->next2comp = idx;
1525 num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
1526 ring = rq->rx_ring + ring_idx;
1527 while (num_to_alloc) {
1528 vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
1529 &rxCmdDesc);
1530 BUG_ON(!rxd->addr);
1531
1532 /* Recv desc is ready to be used by the device */
1533 rxd->gen = ring->gen;
1534 vmxnet3_cmd_ring_adv_next2fill(ring);
1535 num_to_alloc--;
1536 }
1537
1538 /* if needed, update the register */
1539 if (unlikely(rq->shared->updateRxProd)) {
1540 VMXNET3_WRITE_BAR0_REG(adapter,
1541 rxprod_reg[ring_idx] + rq->qid * 8,
1542 ring->next2fill);
1543 }
1544
1545 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
1546 vmxnet3_getRxComp(rcd,
1547 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
1548 }
1549
1550 return num_pkts;
1551 }
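
When the completion reports an aggregated (LRO) packet, the loop above reconstructs gso_size either from the reported segment count or, failing that, from the MTU. A worked instance of the DIV_ROUND_UP branch, with made-up sizes:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int skb_len = 21954;	/* aggregated LRO frame, headers included */
		unsigned int hlen = 54;		/* from vmxnet3_get_hdr_len() */
		unsigned int seg_cnt = 15;

		printf("%u\n", DIV_ROUND_UP(skb_len - hlen, seg_cnt));	/* prints 1460 */
		return 0;
	}
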
1552
1553
1554 static void
1555 vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1556 struct vmxnet3_adapter *adapter)
1557 {
1558 u32 i, ring_idx;
1559 struct Vmxnet3_RxDesc *rxd;
1560
1561 for (ring_idx = 0; ring_idx < 2; ring_idx++) {
1562 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
1563 #ifdef __BIG_ENDIAN_BITFIELD
1564 struct Vmxnet3_RxDesc rxDesc;
1565 #endif
1566 vmxnet3_getRxDesc(rxd,
1567 &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
1568
1569 if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
1570 rq->buf_info[ring_idx][i].skb) {
1571 dma_unmap_single(&adapter->pdev->dev, rxd->addr,
1572 rxd->len, PCI_DMA_FROMDEVICE);
1573 dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
1574 rq->buf_info[ring_idx][i].skb = NULL;
1575 } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
1576 rq->buf_info[ring_idx][i].page) {
1577 dma_unmap_page(&adapter->pdev->dev, rxd->addr,
1578 rxd->len, PCI_DMA_FROMDEVICE);
1579 put_page(rq->buf_info[ring_idx][i].page);
1580 rq->buf_info[ring_idx][i].page = NULL;
1581 }
1582 }
1583
1584 rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
1585 rq->rx_ring[ring_idx].next2fill =
1586 rq->rx_ring[ring_idx].next2comp = 0;
1587 }
1588
1589 rq->comp_ring.gen = VMXNET3_INIT_GEN;
1590 rq->comp_ring.next2proc = 0;
1591 }
1592
1593
1594 static void
1595 vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
1596 {
1597 int i;
1598
1599 for (i = 0; i < adapter->num_rx_queues; i++)
1600 vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
1601 }
1602
1603
1604 static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1605 struct vmxnet3_adapter *adapter)
1606 {
1607 int i;
1608 int j;
1609
1610 /* all rx buffers must have already been freed */
1611 for (i = 0; i < 2; i++) {
1612 if (rq->buf_info[i]) {
1613 for (j = 0; j < rq->rx_ring[i].size; j++)
1614 BUG_ON(rq->buf_info[i][j].page != NULL);
1615 }
1616 }
1617
1618
1619 for (i = 0; i < 2; i++) {
1620 if (rq->rx_ring[i].base) {
1621 dma_free_coherent(&adapter->pdev->dev,
1622 rq->rx_ring[i].size
1623 * sizeof(struct Vmxnet3_RxDesc),
1624 rq->rx_ring[i].base,
1625 rq->rx_ring[i].basePA);
1626 rq->rx_ring[i].base = NULL;
1627 }
1628 rq->buf_info[i] = NULL;
1629 }
1630
1631 if (rq->data_ring.base) {
1632 dma_free_coherent(&adapter->pdev->dev,
1633 rq->rx_ring[0].size * rq->data_ring.desc_size,
1634 rq->data_ring.base, rq->data_ring.basePA);
1635 rq->data_ring.base = NULL;
1636 }
1637
1638 if (rq->comp_ring.base) {
1639 dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
1640 * sizeof(struct Vmxnet3_RxCompDesc),
1641 rq->comp_ring.base, rq->comp_ring.basePA);
1642 rq->comp_ring.base = NULL;
1643 }
1644
1645 if (rq->buf_info[0]) {
1646 size_t sz = sizeof(struct vmxnet3_rx_buf_info) *
1647 (rq->rx_ring[0].size + rq->rx_ring[1].size);
1648 dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
1649 rq->buf_info_pa);
1650 }
1651 }
1652
1653 void
1654 vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
1655 {
1656 int i;
1657
1658 for (i = 0; i < adapter->num_rx_queues; i++) {
1659 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
1660
1661 if (rq->data_ring.base) {
1662 dma_free_coherent(&adapter->pdev->dev,
1663 (rq->rx_ring[0].size *
1664 rq->data_ring.desc_size),
1665 rq->data_ring.base,
1666 rq->data_ring.basePA);
1667 rq->data_ring.base = NULL;
1668 rq->data_ring.desc_size = 0;
1669 }
1670 }
1671 }
1672
1673 static int
1674 vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
1675 struct vmxnet3_adapter *adapter)
1676 {
1677 int i;
1678
1679 /* initialize buf_info */
1680 for (i = 0; i < rq->rx_ring[0].size; i++) {
1681
1682 /* 1st buf for a pkt is skbuff */
1683 if (i % adapter->rx_buf_per_pkt == 0) {
1684 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
1685 rq->buf_info[0][i].len = adapter->skb_buf_size;
1686 } else { /* subsequent bufs for a pkt are frags */
1687 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
1688 rq->buf_info[0][i].len = PAGE_SIZE;
1689 }
1690 }
1691 for (i = 0; i < rq->rx_ring[1].size; i++) {
1692 rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
1693 rq->buf_info[1][i].len = PAGE_SIZE;
1694 }
1695
1696 /* reset internal state and allocate buffers for both rings */
1697 for (i = 0; i < 2; i++) {
1698 rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
1699
1700 memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
1701 sizeof(struct Vmxnet3_RxDesc));
1702 rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
1703 }
1704 if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
1705 adapter) == 0) {
1706 /* the 1st ring must have at least 1 rx buffer */
1707 return -ENOMEM;
1708 }
1709 vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
1710
1711 /* reset the comp ring */
1712 rq->comp_ring.next2proc = 0;
1713 memset(rq->comp_ring.base, 0, rq->comp_ring.size *
1714 sizeof(struct Vmxnet3_RxCompDesc));
1715 rq->comp_ring.gen = VMXNET3_INIT_GEN;
1716
1717 /* reset rxctx */
1718 rq->rx_ctx.skb = NULL;
1719
1720 /* stats are not reset */
1721 return 0;
1722 }
1723
1724
1725 static int
1726 vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
1727 {
1728 int i, err = 0;
1729
1730 for (i = 0; i < adapter->num_rx_queues; i++) {
1731 err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
1732 if (unlikely(err)) {
1733 dev_err(&adapter->netdev->dev, "%s: failed to "
1734 "initialize rx queue%i\n",
1735 adapter->netdev->name, i);
1736 break;
1737 }
1738 }
1739 return err;
1740
1741 }
1742
1743
1744 static int
1745 vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1746 {
1747 int i;
1748 size_t sz;
1749 struct vmxnet3_rx_buf_info *bi;
1750
1751 for (i = 0; i < 2; i++) {
1752
1753 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
1754 rq->rx_ring[i].base = dma_alloc_coherent(
1755 &adapter->pdev->dev, sz,
1756 &rq->rx_ring[i].basePA,
1757 GFP_KERNEL);
1758 if (!rq->rx_ring[i].base) {
1759 netdev_err(adapter->netdev,
1760 "failed to allocate rx ring %d\n", i);
1761 goto err;
1762 }
1763 }
1764
1765 if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
1766 sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
1767 rq->data_ring.base =
1768 dma_alloc_coherent(&adapter->pdev->dev, sz,
1769 &rq->data_ring.basePA,
1770 GFP_KERNEL);
1771 if (!rq->data_ring.base) {
1772 netdev_err(adapter->netdev,
1773 "rx data ring will be disabled\n");
1774 adapter->rxdataring_enabled = false;
1775 }
1776 } else {
1777 rq->data_ring.base = NULL;
1778 rq->data_ring.desc_size = 0;
1779 }
1780
1781 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
1782 rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
1783 &rq->comp_ring.basePA,
1784 GFP_KERNEL);
1785 if (!rq->comp_ring.base) {
1786 netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
1787 goto err;
1788 }
1789
1790 sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
1791 rq->rx_ring[1].size);
1792 bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
1793 GFP_KERNEL);
1794 if (!bi)
1795 goto err;
1796
1797 rq->buf_info[0] = bi;
1798 rq->buf_info[1] = bi + rq->rx_ring[0].size;
1799
1800 return 0;
1801
1802 err:
1803 vmxnet3_rq_destroy(rq, adapter);
1804 return -ENOMEM;
1805 }
1806
1807
1808 static int
1809 vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
1810 {
1811 int i, err = 0;
1812
1813 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
1814
1815 for (i = 0; i < adapter->num_rx_queues; i++) {
1816 err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
1817 if (unlikely(err)) {
1818 dev_err(&adapter->netdev->dev,
1819 "%s: failed to create rx queue%i\n",
1820 adapter->netdev->name, i);
1821 goto err_out;
1822 }
1823 }
1824
1825 if (!adapter->rxdataring_enabled)
1826 vmxnet3_rq_destroy_all_rxdataring(adapter);
1827
1828 return err;
1829 err_out:
1830 vmxnet3_rq_destroy_all(adapter);
1831 return err;
1832
1833 }
1834
1835 /* Multiple queue aware polling function for tx and rx */
1836
1837 static int
1838 vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
1839 {
1840 int rcd_done = 0, i;
1841 if (unlikely(adapter->shared->ecr))
1842 vmxnet3_process_events(adapter);
1843 for (i = 0; i < adapter->num_tx_queues; i++)
1844 vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
1845
1846 for (i = 0; i < adapter->num_rx_queues; i++)
1847 rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
1848 adapter, budget);
1849 return rcd_done;
1850 }
1851
1852
1853 static int
1854 vmxnet3_poll(struct napi_struct *napi, int budget)
1855 {
1856 struct vmxnet3_rx_queue *rx_queue = container_of(napi,
1857 struct vmxnet3_rx_queue, napi);
1858 int rxd_done;
1859
1860 rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
1861
1862 if (rxd_done < budget) {
1863 napi_complete(napi);
1864 vmxnet3_enable_all_intrs(rx_queue->adapter);
1865 }
1866 return rxd_done;
1867 }
1868
1869 /*
1870 * NAPI polling function for MSI-X mode with multiple Rx queues
1871 * Returns the # of NAPI credits consumed (# of rx descriptors processed)
1872 */
1873
1874 static int
1875 vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
1876 {
1877 struct vmxnet3_rx_queue *rq = container_of(napi,
1878 struct vmxnet3_rx_queue, napi);
1879 struct vmxnet3_adapter *adapter = rq->adapter;
1880 int rxd_done;
1881
1882 /* When sharing interrupt with corresponding tx queue, process
1883 * tx completions in that queue as well
1884 */
1885 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
1886 struct vmxnet3_tx_queue *tq =
1887 &adapter->tx_queue[rq - adapter->rx_queue];
1888 vmxnet3_tq_tx_complete(tq, adapter);
1889 }
1890
1891 rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
1892
1893 if (rxd_done < budget) {
1894 napi_complete(napi);
1895 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
1896 }
1897 return rxd_done;
1898 }
1899
1900
1901 #ifdef CONFIG_PCI_MSI
1902
1903 /*
1904 * Handle completion interrupts on tx queues
1905 * Returns whether or not the intr is handled
1906 */
1907
1908 static irqreturn_t
1909 vmxnet3_msix_tx(int irq, void *data)
1910 {
1911 struct vmxnet3_tx_queue *tq = data;
1912 struct vmxnet3_adapter *adapter = tq->adapter;
1913
1914 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1915 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
1916
1917 /* Handle the case where only one irq is allocated for all tx queues */
1918 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
1919 int i;
1920 for (i = 0; i < adapter->num_tx_queues; i++) {
1921 struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
1922 vmxnet3_tq_tx_complete(txq, adapter);
1923 }
1924 } else {
1925 vmxnet3_tq_tx_complete(tq, adapter);
1926 }
1927 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
1928
1929 return IRQ_HANDLED;
1930 }
1931
1932
1933 /*
1934 * Handle completion interrupts on rx queues. Returns whether or not the
1935 * intr is handled
1936 */
1937
1938 static irqreturn_t
1939 vmxnet3_msix_rx(int irq, void *data)
1940 {
1941 struct vmxnet3_rx_queue *rq = data;
1942 struct vmxnet3_adapter *adapter = rq->adapter;
1943
1944 /* disable intr if needed */
1945 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1946 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
1947 napi_schedule(&rq->napi);
1948
1949 return IRQ_HANDLED;
1950 }
1951
1952 /*
1953 *----------------------------------------------------------------------------
1954 *
1955 * vmxnet3_msix_event --
1956 *
1957 * vmxnet3 msix event intr handler
1958 *
1959 * Result:
1960 * whether or not the intr is handled
1961 *
1962 *----------------------------------------------------------------------------
1963 */
1964
1965 static irqreturn_t
1966 vmxnet3_msix_event(int irq, void *data)
1967 {
1968 struct net_device *dev = data;
1969 struct vmxnet3_adapter *adapter = netdev_priv(dev);
1970
1971 /* disable intr if needed */
1972 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1973 vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
1974
1975 if (adapter->shared->ecr)
1976 vmxnet3_process_events(adapter);
1977
1978 vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
1979
1980 return IRQ_HANDLED;
1981 }
1982
1983 #endif /* CONFIG_PCI_MSI */
1984
1985
1986 /* Interrupt handler for vmxnet3 */
1987 static irqreturn_t
1988 vmxnet3_intr(int irq, void *dev_id)
1989 {
1990 struct net_device *dev = dev_id;
1991 struct vmxnet3_adapter *adapter = netdev_priv(dev);
1992
1993 if (adapter->intr.type == VMXNET3_IT_INTX) {
1994 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
1995 if (unlikely(icr == 0))
1996 /* not ours */
1997 return IRQ_NONE;
1998 }
1999
2000
2001 /* disable intr if needed */
2002 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2003 vmxnet3_disable_all_intrs(adapter);
2004
2005 napi_schedule(&adapter->rx_queue[0].napi);
2006
2007 return IRQ_HANDLED;
2008 }
2009
2010 #ifdef CONFIG_NET_POLL_CONTROLLER
2011
2012 /* netpoll callback. */
2013 static void
2014 vmxnet3_netpoll(struct net_device *netdev)
2015 {
2016 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2017
2018 switch (adapter->intr.type) {
2019 #ifdef CONFIG_PCI_MSI
2020 case VMXNET3_IT_MSIX: {
2021 int i;
2022 for (i = 0; i < adapter->num_rx_queues; i++)
2023 vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
2024 break;
2025 }
2026 #endif
2027 case VMXNET3_IT_MSI:
2028 default:
2029 vmxnet3_intr(0, adapter->netdev);
2030 break;
2031 }
2032
2033 }
2034 #endif /* CONFIG_NET_POLL_CONTROLLER */
2035
2036 static int
2037 vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
2038 {
2039 struct vmxnet3_intr *intr = &adapter->intr;
2040 int err = 0, i;
2041 int vector = 0;
2042
2043 #ifdef CONFIG_PCI_MSI
2044 if (adapter->intr.type == VMXNET3_IT_MSIX) {
2045 for (i = 0; i < adapter->num_tx_queues; i++) {
2046 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2047 sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
2048 adapter->netdev->name, vector);
2049 err = request_irq(
2050 intr->msix_entries[vector].vector,
2051 vmxnet3_msix_tx, 0,
2052 adapter->tx_queue[i].name,
2053 &adapter->tx_queue[i]);
2054 } else {
2055 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
2056 adapter->netdev->name, vector);
2057 }
2058 if (err) {
2059 dev_err(&adapter->netdev->dev,
2060 "Failed to request irq for MSIX, %s, "
2061 "error %d\n",
2062 adapter->tx_queue[i].name, err);
2063 return err;
2064 }
2065
2066 /* Handle the case where only 1 MSI-X vector was allocated for
2067 * all tx queues */
2068 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
2069 for (; i < adapter->num_tx_queues; i++)
2070 adapter->tx_queue[i].comp_ring.intr_idx
2071 = vector;
2072 vector++;
2073 break;
2074 } else {
2075 adapter->tx_queue[i].comp_ring.intr_idx
2076 = vector++;
2077 }
2078 }
2079 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
2080 vector = 0;
2081
2082 for (i = 0; i < adapter->num_rx_queues; i++) {
2083 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
2084 sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
2085 adapter->netdev->name, vector);
2086 else
2087 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
2088 adapter->netdev->name, vector);
2089 err = request_irq(intr->msix_entries[vector].vector,
2090 vmxnet3_msix_rx, 0,
2091 adapter->rx_queue[i].name,
2092 &(adapter->rx_queue[i]));
2093 if (err) {
2094 netdev_err(adapter->netdev,
2095 "Failed to request irq for MSIX, "
2096 "%s, error %d\n",
2097 adapter->rx_queue[i].name, err);
2098 return err;
2099 }
2100
2101 adapter->rx_queue[i].comp_ring.intr_idx = vector++;
2102 }
2103
2104 sprintf(intr->event_msi_vector_name, "%s-event-%d",
2105 adapter->netdev->name, vector);
2106 err = request_irq(intr->msix_entries[vector].vector,
2107 vmxnet3_msix_event, 0,
2108 intr->event_msi_vector_name, adapter->netdev);
2109 intr->event_intr_idx = vector;
2110
2111 } else if (intr->type == VMXNET3_IT_MSI) {
2112 adapter->num_rx_queues = 1;
2113 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
2114 adapter->netdev->name, adapter->netdev);
2115 } else {
2116 #endif
2117 adapter->num_rx_queues = 1;
2118 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
2119 IRQF_SHARED, adapter->netdev->name,
2120 adapter->netdev);
2121 #ifdef CONFIG_PCI_MSI
2122 }
2123 #endif
2124 intr->num_intrs = vector + 1;
2125 if (err) {
2126 netdev_err(adapter->netdev,
2127 "Failed to request irq (intr type:%d), error %d\n",
2128 intr->type, err);
2129 } else {
2130 /* Number of rx queues will not change after this */
2131 for (i = 0; i < adapter->num_rx_queues; i++) {
2132 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2133 rq->qid = i;
2134 rq->qid2 = i + adapter->num_rx_queues;
2135 rq->dataRingQid = i + 2 * adapter->num_rx_queues;
2136 }
2137
2138 /* init our intr settings */
2139 for (i = 0; i < intr->num_intrs; i++)
2140 intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
2141 if (adapter->intr.type != VMXNET3_IT_MSIX) {
2142 adapter->intr.event_intr_idx = 0;
2143 for (i = 0; i < adapter->num_tx_queues; i++)
2144 adapter->tx_queue[i].comp_ring.intr_idx = 0;
2145 adapter->rx_queue[0].comp_ring.intr_idx = 0;
2146 }
2147
2148 netdev_info(adapter->netdev,
2149 "intr type %u, mode %u, %u vectors allocated\n",
2150 intr->type, intr->mask_mode, intr->num_intrs);
2151 }
2152
2153 return err;
2154 }
2155
2156
2157 static void
2158 vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
2159 {
2160 struct vmxnet3_intr *intr = &adapter->intr;
2161 BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
2162
2163 switch (intr->type) {
2164 #ifdef CONFIG_PCI_MSI
2165 case VMXNET3_IT_MSIX:
2166 {
2167 int i, vector = 0;
2168
2169 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2170 for (i = 0; i < adapter->num_tx_queues; i++) {
2171 free_irq(intr->msix_entries[vector++].vector,
2172 &(adapter->tx_queue[i]));
2173 if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
2174 break;
2175 }
2176 }
2177
2178 for (i = 0; i < adapter->num_rx_queues; i++) {
2179 free_irq(intr->msix_entries[vector++].vector,
2180 &(adapter->rx_queue[i]));
2181 }
2182
2183 free_irq(intr->msix_entries[vector].vector,
2184 adapter->netdev);
2185 BUG_ON(vector >= intr->num_intrs);
2186 break;
2187 }
2188 #endif
2189 case VMXNET3_IT_MSI:
2190 free_irq(adapter->pdev->irq, adapter->netdev);
2191 break;
2192 case VMXNET3_IT_INTX:
2193 free_irq(adapter->pdev->irq, adapter->netdev);
2194 break;
2195 default:
2196 BUG();
2197 }
2198 }
2199
2200
2201 static void
2202 vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
2203 {
2204 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2205 u16 vid;
2206
2207 /* allow untagged pkts */
2208 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
2209
2210 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2211 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2212 }
2213
2214
2215 static int
2216 vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
2217 {
2218 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2219
2220 if (!(netdev->flags & IFF_PROMISC)) {
2221 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2222 unsigned long flags;
2223
2224 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2225 spin_lock_irqsave(&adapter->cmd_lock, flags);
2226 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2227 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2228 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2229 }
2230
2231 set_bit(vid, adapter->active_vlans);
2232
2233 return 0;
2234 }
2235
2236
2237 static int
2238 vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
2239 {
2240 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2241
2242 if (!(netdev->flags & IFF_PROMISC)) {
2243 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2244 unsigned long flags;
2245
2246 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
2247 spin_lock_irqsave(&adapter->cmd_lock, flags);
2248 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2249 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2250 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2251 }
2252
2253 clear_bit(vid, adapter->active_vlans);
2254
2255 return 0;
2256 }
2257
2258
2259 static u8 *
2260 vmxnet3_copy_mc(struct net_device *netdev)
2261 {
2262 u8 *buf = NULL;
2263 u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
2264
2265 /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
2266 if (sz <= 0xffff) {
2267 /* We may be called with BH disabled */
2268 buf = kmalloc(sz, GFP_ATOMIC);
2269 if (buf) {
2270 struct netdev_hw_addr *ha;
2271 int i = 0;
2272
2273 netdev_for_each_mc_addr(ha, netdev)
2274 memcpy(buf + i++ * ETH_ALEN, ha->addr,
2275 ETH_ALEN);
2276 }
2277 }
2278 return buf;
2279 }
2280
2281
2282 static void
2283 vmxnet3_set_mc(struct net_device *netdev)
2284 {
2285 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2286 unsigned long flags;
2287 struct Vmxnet3_RxFilterConf *rxConf =
2288 &adapter->shared->devRead.rxFilterConf;
2289 u8 *new_table = NULL;
2290 dma_addr_t new_table_pa = 0;
2291 u32 new_mode = VMXNET3_RXM_UCAST;
2292
2293 if (netdev->flags & IFF_PROMISC) {
2294 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2295 memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
2296
2297 new_mode |= VMXNET3_RXM_PROMISC;
2298 } else {
2299 vmxnet3_restore_vlan(adapter);
2300 }
2301
2302 if (netdev->flags & IFF_BROADCAST)
2303 new_mode |= VMXNET3_RXM_BCAST;
2304
2305 if (netdev->flags & IFF_ALLMULTI)
2306 new_mode |= VMXNET3_RXM_ALL_MULTI;
2307 else
2308 if (!netdev_mc_empty(netdev)) {
2309 new_table = vmxnet3_copy_mc(netdev);
2310 if (new_table) {
2311 size_t sz = netdev_mc_count(netdev) * ETH_ALEN;
2312
2313 rxConf->mfTableLen = cpu_to_le16(sz);
2314 new_table_pa = dma_map_single(
2315 &adapter->pdev->dev,
2316 new_table,
2317 sz,
2318 PCI_DMA_TODEVICE);
2319 }
2320
2321 if (!dma_mapping_error(&adapter->pdev->dev,
2322 new_table_pa)) {
2323 new_mode |= VMXNET3_RXM_MCAST;
2324 rxConf->mfTablePA = cpu_to_le64(new_table_pa);
2325 } else {
2326 netdev_info(netdev,
2327 "failed to copy mcast list, setting ALL_MULTI\n");
2328 new_mode |= VMXNET3_RXM_ALL_MULTI;
2329 }
2330 }
2331
2332 if (!(new_mode & VMXNET3_RXM_MCAST)) {
2333 rxConf->mfTableLen = 0;
2334 rxConf->mfTablePA = 0;
2335 }
2336
2337 spin_lock_irqsave(&adapter->cmd_lock, flags);
2338 if (new_mode != rxConf->rxMode) {
2339 rxConf->rxMode = cpu_to_le32(new_mode);
2340 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2341 VMXNET3_CMD_UPDATE_RX_MODE);
2342 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2343 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2344 }
2345
2346 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2347 VMXNET3_CMD_UPDATE_MAC_FILTERS);
2348 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2349
2350 if (new_table_pa)
2351 dma_unmap_single(&adapter->pdev->dev, new_table_pa,
2352 rxConf->mfTableLen, PCI_DMA_TODEVICE);
2353 kfree(new_table);
2354 }
2355
2356 void
2357 vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
2358 {
2359 int i;
2360
2361 for (i = 0; i < adapter->num_rx_queues; i++)
2362 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
2363 }
2364
2365
2366 /*
2367 * Set up driver_shared based on settings in adapter.
2368 */
2369
2370 static void
2371 vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2372 {
2373 struct Vmxnet3_DriverShared *shared = adapter->shared;
2374 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
2375 struct Vmxnet3_TxQueueConf *tqc;
2376 struct Vmxnet3_RxQueueConf *rqc;
2377 int i;
2378
2379 memset(shared, 0, sizeof(*shared));
2380
2381 /* driver settings */
2382 shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
2383 devRead->misc.driverInfo.version = cpu_to_le32(
2384 VMXNET3_DRIVER_VERSION_NUM);
2385 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
2386 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
2387 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
2388 *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
2389 *((u32 *)&devRead->misc.driverInfo.gos));
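/*
 * Editorial note: driverInfo.gos is a packed 32-bit descriptor (guest OS
 * bits, type and related fields); the in-place cpu_to_le32() above converts
 * the whole word to the device's little-endian layout at once instead of
 * converting each bitfield member individually.
 */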
2390 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2391 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
2392
2393 devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
2394 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
2395
2396 /* set up feature flags */
2397 if (adapter->netdev->features & NETIF_F_RXCSUM)
2398 devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
2399
2400 if (adapter->netdev->features & NETIF_F_LRO) {
2401 devRead->misc.uptFeatures |= UPT1_F_LRO;
2402 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2403 }
2404 if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2405 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2406
2407 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2408 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2409 devRead->misc.queueDescLen = cpu_to_le32(
2410 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
2411 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
2412
2413 /* tx queue settings */
2414 devRead->misc.numTxQueues = adapter->num_tx_queues;
2415 for (i = 0; i < adapter->num_tx_queues; i++) {
2416 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2417 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
2418 tqc = &adapter->tqd_start[i].conf;
2419 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
2420 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
2421 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
2422 tqc->ddPA = cpu_to_le64(tq->buf_info_pa);
2423 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
2424 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
2425 tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
2426 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
2427 tqc->ddLen = cpu_to_le32(
2428 sizeof(struct vmxnet3_tx_buf_info) *
2429 tqc->txRingSize);
2430 tqc->intrIdx = tq->comp_ring.intr_idx;
2431 }
2432
2433 /* rx queue settings */
2434 devRead->misc.numRxQueues = adapter->num_rx_queues;
2435 for (i = 0; i < adapter->num_rx_queues; i++) {
2436 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2437 rqc = &adapter->rqd_start[i].conf;
2438 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
2439 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
2440 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
2441 rqc->ddPA = cpu_to_le64(rq->buf_info_pa);
2442 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
2443 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
2444 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
2445 rqc->ddLen = cpu_to_le32(
2446 sizeof(struct vmxnet3_rx_buf_info) *
2447 (rqc->rxRingSize[0] +
2448 rqc->rxRingSize[1]));
2449 rqc->intrIdx = rq->comp_ring.intr_idx;
2450 if (VMXNET3_VERSION_GE_3(adapter)) {
2451 rqc->rxDataRingBasePA =
2452 cpu_to_le64(rq->data_ring.basePA);
2453 rqc->rxDataRingDescSize =
2454 cpu_to_le16(rq->data_ring.desc_size);
2455 }
2456 }
2457
2458 #ifdef VMXNET3_RSS
2459 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
2460
2461 if (adapter->rss) {
2462 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
2463
2464 devRead->misc.uptFeatures |= UPT1_F_RSS;
2465 devRead->misc.numRxQueues = adapter->num_rx_queues;
2466 rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
2467 UPT1_RSS_HASH_TYPE_IPV4 |
2468 UPT1_RSS_HASH_TYPE_TCP_IPV6 |
2469 UPT1_RSS_HASH_TYPE_IPV6;
2470 rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
2471 rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
2472 rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
2473 netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));
2474
2475 for (i = 0; i < rssConf->indTableSize; i++)
2476 rssConf->indTable[i] = ethtool_rxfh_indir_default(
2477 i, adapter->num_rx_queues);
2478
2479 devRead->rssConfDesc.confVer = 1;
2480 devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
2481 devRead->rssConfDesc.confPA =
2482 cpu_to_le64(adapter->rss_conf_pa);
2483 }
2484
2485 #endif /* VMXNET3_RSS */
2486
2487 /* intr settings */
2488 devRead->intrConf.autoMask = adapter->intr.mask_mode ==
2489 VMXNET3_IMM_AUTO;
2490 devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2491 for (i = 0; i < adapter->intr.num_intrs; i++)
2492 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
2493
2494 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
2495 devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2496
2497 /* rx filter settings */
2498 devRead->rxFilterConf.rxMode = 0;
2499 vmxnet3_restore_vlan(adapter);
2500 vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
2501
2502 /* the rest are already zeroed */
2503 }
2504
2505 static void
2506 vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
2507 {
2508 struct Vmxnet3_DriverShared *shared = adapter->shared;
2509 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2510 unsigned long flags;
2511
2512 if (!VMXNET3_VERSION_GE_3(adapter))
2513 return;
2514
2515 spin_lock_irqsave(&adapter->cmd_lock, flags);
2516 cmdInfo->varConf.confVer = 1;
2517 cmdInfo->varConf.confLen =
2518 cpu_to_le32(sizeof(*adapter->coal_conf));
2519 cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);
2520
2521 if (adapter->default_coal_mode) {
2522 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2523 VMXNET3_CMD_GET_COALESCE);
2524 } else {
2525 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2526 VMXNET3_CMD_SET_COALESCE);
2527 }
2528
2529 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2530 }
2531
2532 int
2533 vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2534 {
2535 int err, i;
2536 u32 ret;
2537 unsigned long flags;
2538
2539 netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
2540 " ring sizes %u %u %u\n", adapter->netdev->name,
2541 adapter->skb_buf_size, adapter->rx_buf_per_pkt,
2542 adapter->tx_queue[0].tx_ring.size,
2543 adapter->rx_queue[0].rx_ring[0].size,
2544 adapter->rx_queue[0].rx_ring[1].size);
2545
2546 vmxnet3_tq_init_all(adapter);
2547 err = vmxnet3_rq_init_all(adapter);
2548 if (err) {
2549 netdev_err(adapter->netdev,
2550 "Failed to init rx queue error %d\n", err);
2551 goto rq_err;
2552 }
2553
2554 err = vmxnet3_request_irqs(adapter);
2555 if (err) {
2556 netdev_err(adapter->netdev,
2557 "Failed to set up irqs, error %d\n", err);
2558 goto irq_err;
2559 }
2560
2561 vmxnet3_setup_driver_shared(adapter);
2562
2563 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
2564 adapter->shared_pa));
2565 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
2566 adapter->shared_pa));
2567 spin_lock_irqsave(&adapter->cmd_lock, flags);
2568 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2569 VMXNET3_CMD_ACTIVATE_DEV);
2570 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2571 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2572
2573 if (ret != 0) {
2574 netdev_err(adapter->netdev,
2575 "Failed to activate dev: error %u\n", ret);
2576 err = -EINVAL;
2577 goto activate_err;
2578 }
2579
2580 vmxnet3_init_coalesce(adapter);
2581
2582 for (i = 0; i < adapter->num_rx_queues; i++) {
2583 VMXNET3_WRITE_BAR0_REG(adapter,
2584 VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
2585 adapter->rx_queue[i].rx_ring[0].next2fill);
2586 VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
2587 (i * VMXNET3_REG_ALIGN)),
2588 adapter->rx_queue[i].rx_ring[1].next2fill);
2589 }
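/*
 * Editorial note: each rx queue i has its own pair of producer registers in
 * BAR0 at VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN and
 * VMXNET3_REG_RXPROD2 + i * VMXNET3_REG_ALIGN; the loop above publishes the
 * initial fill position of both rx rings for every queue.
 */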
2590
2591 /* Apply the rx filter settings last. */
2592 vmxnet3_set_mc(adapter->netdev);
2593
2594 /*
2595 * Check link state when first activating device. It will start the
2596 * tx queue if the link is up.
2597 */
2598 vmxnet3_check_link(adapter, true);
2599 for (i = 0; i < adapter->num_rx_queues; i++)
2600 napi_enable(&adapter->rx_queue[i].napi);
2601 vmxnet3_enable_all_intrs(adapter);
2602 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2603 return 0;
2604
2605 activate_err:
2606 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
2607 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
2608 vmxnet3_free_irqs(adapter);
2609 irq_err:
2610 rq_err:
2611 /* free up buffers we allocated */
2612 vmxnet3_rq_cleanup_all(adapter);
2613 return err;
2614 }
2615
2616
2617 void
2618 vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
2619 {
2620 unsigned long flags;
2621 spin_lock_irqsave(&adapter->cmd_lock, flags);
2622 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
2623 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2624 }
2625
2626
2627 int
2628 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
2629 {
2630 int i;
2631 unsigned long flags;
2632 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
2633 return 0;
2634
2635
2636 spin_lock_irqsave(&adapter->cmd_lock, flags);
2637 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2638 VMXNET3_CMD_QUIESCE_DEV);
2639 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2640 vmxnet3_disable_all_intrs(adapter);
2641
2642 for (i = 0; i < adapter->num_rx_queues; i++)
2643 napi_disable(&adapter->rx_queue[i].napi);
2644 netif_tx_disable(adapter->netdev);
2645 adapter->link_speed = 0;
2646 netif_carrier_off(adapter->netdev);
2647
2648 vmxnet3_tq_cleanup_all(adapter);
2649 vmxnet3_rq_cleanup_all(adapter);
2650 vmxnet3_free_irqs(adapter);
2651 return 0;
2652 }
2653
2654
2655 static void
2656 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2657 {
2658 u32 tmp;
2659
2660 tmp = *(u32 *)mac;
2661 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
2662
2663 tmp = (mac[5] << 8) | mac[4];
2664 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
2665 }
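/*
 * Editorial note (illustrative): the station address is split across two
 * BAR1 registers.  For a hypothetical MAC of 00:0c:29:aa:bb:cc, the
 * *(u32 *)mac load above puts bytes mac[0..3] (00 0c 29 aa) into
 * VMXNET3_REG_MACL and (mac[5] << 8) | mac[4] puts bytes cc bb into the
 * low 16 bits of VMXNET3_REG_MACH; vmxnet3_read_mac_addr() below performs
 * the inverse.
 */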
2666
2667
2668 static int
2669 vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
2670 {
2671 struct sockaddr *addr = p;
2672 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2673
2674 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2675 vmxnet3_write_mac_addr(adapter, addr->sa_data);
2676
2677 return 0;
2678 }
2679
2680
2681 /* ==================== initialization and cleanup routines ============ */
2682
2683 static int
2684 vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
2685 {
2686 int err;
2687 unsigned long mmio_start, mmio_len;
2688 struct pci_dev *pdev = adapter->pdev;
2689
2690 err = pci_enable_device(pdev);
2691 if (err) {
2692 dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
2693 return err;
2694 }
2695
2696 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
2697 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
2698 dev_err(&pdev->dev,
2699 "pci_set_consistent_dma_mask failed\n");
2700 err = -EIO;
2701 goto err_set_mask;
2702 }
2703 *dma64 = true;
2704 } else {
2705 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
2706 dev_err(&pdev->dev,
2707 "pci_set_dma_mask failed\n");
2708 err = -EIO;
2709 goto err_set_mask;
2710 }
2711 *dma64 = false;
2712 }
2713
2714 err = pci_request_selected_regions(pdev, (1 << 2) - 1,
2715 vmxnet3_driver_name);
2716 if (err) {
2717 dev_err(&pdev->dev,
2718 "Failed to request region for adapter: error %d\n", err);
2719 goto err_set_mask;
2720 }
2721
2722 pci_set_master(pdev);
2723
2724 mmio_start = pci_resource_start(pdev, 0);
2725 mmio_len = pci_resource_len(pdev, 0);
2726 adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
2727 if (!adapter->hw_addr0) {
2728 dev_err(&pdev->dev, "Failed to map bar0\n");
2729 err = -EIO;
2730 goto err_ioremap;
2731 }
2732
2733 mmio_start = pci_resource_start(pdev, 1);
2734 mmio_len = pci_resource_len(pdev, 1);
2735 adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
2736 if (!adapter->hw_addr1) {
2737 dev_err(&pdev->dev, "Failed to map bar1\n");
2738 err = -EIO;
2739 goto err_bar1;
2740 }
2741 return 0;
2742
2743 err_bar1:
2744 iounmap(adapter->hw_addr0);
2745 err_ioremap:
2746 pci_release_selected_regions(pdev, (1 << 2) - 1);
2747 err_set_mask:
2748 pci_disable_device(pdev);
2749 return err;
2750 }
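/*
 * Editorial sketch (not part of the driver): on newer kernels the
 * 64-bit-then-32-bit DMA mask selection above is usually collapsed into a
 * single call per mask width, for example:
 *
 *	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 *	if (err)
 *		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 *
 * which sets the streaming and coherent masks together.  The code in this
 * trace uses the legacy pci_set_dma_mask()/pci_set_consistent_dma_mask()
 * pair and records in *dma64 which mask was accepted.
 */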
2751
2752
2753 static void
2754 vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
2755 {
2756 BUG_ON(!adapter->pdev);
2757
2758 iounmap(adapter->hw_addr0);
2759 iounmap(adapter->hw_addr1);
2760 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
2761 pci_disable_device(adapter->pdev);
2762 }
2763
2764
2765 static void
2766 vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2767 {
2768 size_t sz, i, ring0_size, ring1_size, comp_size;
2769 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0];
2770
2771
2772 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
2773 VMXNET3_MAX_ETH_HDR_SIZE) {
2774 adapter->skb_buf_size = adapter->netdev->mtu +
2775 VMXNET3_MAX_ETH_HDR_SIZE;
2776 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
2777 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
2778
2779 adapter->rx_buf_per_pkt = 1;
2780 } else {
2781 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
2782 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
2783 VMXNET3_MAX_ETH_HDR_SIZE;
2784 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
2785 }
2786
2787 /*
2788 * for simplicity, force the ring0 size to be a multiple of
2789 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
2790 */
2791 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
2792 ring0_size = adapter->rx_queue[0].rx_ring[0].size;
2793 ring0_size = (ring0_size + sz - 1) / sz * sz;
2794 ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
2795 sz * sz);
2796 ring1_size = adapter->rx_queue[0].rx_ring[1].size;
2797 ring1_size = (ring1_size + sz - 1) / sz * sz;
2798 ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
2799 sz * sz);
2800 comp_size = ring0_size + ring1_size;
2801
2802 for (i = 0; i < adapter->num_rx_queues; i++) {
2803 rq = &adapter->rx_queue[i];
2804 rq->rx_ring[0].size = ring0_size;
2805 rq->rx_ring[1].size = ring1_size;
2806 rq->comp_ring.size = comp_size;
2807 }
2808 }
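/*
 * Editorial worked example (values assumed for illustration): with
 * VMXNET3_RING_SIZE_ALIGN = 32 and rx_buf_per_pkt = 3 (a jumbo-MTU case),
 * sz = 96.  A requested ring0 size of 256 is rounded up to
 * (256 + 95) / 96 * 96 = 288 and then clamped to the largest multiple of 96
 * not exceeding VMXNET3_RX_RING_MAX_SIZE.  The completion ring is sized to
 * ring0_size + ring1_size so every rx descriptor has a completion slot.
 */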
2809
2810
2811 int
2812 vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
2813 u32 rx_ring_size, u32 rx_ring2_size,
2814 u16 txdata_desc_size, u16 rxdata_desc_size)
2815 {
2816 int err = 0, i;
2817
2818 for (i = 0; i < adapter->num_tx_queues; i++) {
2819 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2820 tq->tx_ring.size = tx_ring_size;
2821 tq->data_ring.size = tx_ring_size;
2822 tq->comp_ring.size = tx_ring_size;
2823 tq->txdata_desc_size = txdata_desc_size;
2824 tq->shared = &adapter->tqd_start[i].ctrl;
2825 tq->stopped = true;
2826 tq->adapter = adapter;
2827 tq->qid = i;
2828 err = vmxnet3_tq_create(tq, adapter);
2829 /*
2830 * Too late to change num_tx_queues. We cannot make do with
2831 * fewer queues than we asked for.
2832 */
2833 if (err)
2834 goto queue_err;
2835 }
2836
2837 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
2838 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
2839 vmxnet3_adjust_rx_ring_size(adapter);
2840
2841 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
2842 for (i = 0; i < adapter->num_rx_queues; i++) {
2843 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2844 /* qid and qid2 for rx queues will be assigned later, when the
2845 * number of rx queues is finalized after allocating interrupts */
2846 rq->shared = &adapter->rqd_start[i].ctrl;
2847 rq->adapter = adapter;
2848 rq->data_ring.desc_size = rxdata_desc_size;
2849 err = vmxnet3_rq_create(rq, adapter);
2850 if (err) {
2851 if (i == 0) {
2852 netdev_err(adapter->netdev,
2853 "Could not allocate any rx queues. "
2854 "Aborting.\n");
2855 goto queue_err;
2856 } else {
2857 netdev_info(adapter->netdev,
2858 "Number of rx queues changed "
2859 "to : %d.\n", i);
2860 adapter->num_rx_queues = i;
2861 err = 0;
2862 break;
2863 }
2864 }
2865 }
2866
2867 if (!adapter->rxdataring_enabled)
2868 vmxnet3_rq_destroy_all_rxdataring(adapter);
2869
2870 return err;
2871 queue_err:
2872 vmxnet3_tq_destroy_all(adapter);
2873 return err;
2874 }
2875
2876 static int
2877 vmxnet3_open(struct net_device *netdev)
2878 {
2879 struct vmxnet3_adapter *adapter;
2880 int err, i;
2881
2882 adapter = netdev_priv(netdev);
2883
2884 for (i = 0; i < adapter->num_tx_queues; i++)
2885 spin_lock_init(&adapter->tx_queue[i].tx_lock);
2886
2887 if (VMXNET3_VERSION_GE_3(adapter)) {
2888 unsigned long flags;
2889 u16 txdata_desc_size;
2890
2891 spin_lock_irqsave(&adapter->cmd_lock, flags);
2892 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2893 VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
2894 txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
2895 VMXNET3_REG_CMD);
2896 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2897
2898 if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
2899 (txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
2900 (txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
2901 adapter->txdata_desc_size =
2902 sizeof(struct Vmxnet3_TxDataDesc);
2903 } else {
2904 adapter->txdata_desc_size = txdata_desc_size;
2905 }
2906 } else {
2907 adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
2908 }
2909
2910 err = vmxnet3_create_queues(adapter,
2911 adapter->tx_ring_size,
2912 adapter->rx_ring_size,
2913 adapter->rx_ring2_size,
2914 adapter->txdata_desc_size,
2915 adapter->rxdata_desc_size);
2916 if (err)
2917 goto queue_err;
2918
2919 err = vmxnet3_activate_dev(adapter);
2920 if (err)
2921 goto activate_err;
2922
2923 return 0;
2924
2925 activate_err:
2926 vmxnet3_rq_destroy_all(adapter);
2927 vmxnet3_tq_destroy_all(adapter);
2928 queue_err:
2929 return err;
2930 }
2931
2932
2933 static int
2934 vmxnet3_close(struct net_device *netdev)
2935 {
2936 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2937
2938 /*
2939 * Reset_work may be in the middle of resetting the device, wait for its
2940 * completion.
2941 */
2942 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2943 msleep(1);
2944
2945 vmxnet3_quiesce_dev(adapter);
2946
2947 vmxnet3_rq_destroy_all(adapter);
2948 vmxnet3_tq_destroy_all(adapter);
2949
2950 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2951
2952
2953 return 0;
2954 }
2955
2956
2957 void
2958 vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2959 {
2960 int i;
2961
2962 /*
2963 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
2964 * vmxnet3_close() will deadlock.
2965 */
2966 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
2967
2968 /* we need to enable NAPI, otherwise dev_close will deadlock */
2969 for (i = 0; i < adapter->num_rx_queues; i++)
2970 napi_enable(&adapter->rx_queue[i].napi);
2971 dev_close(adapter->netdev);
2972 }
2973
2974
2975 static int
2976 vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
2977 {
2978 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2979 int err = 0;
2980
2981 if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
2982 return -EINVAL;
2983
2984 netdev->mtu = new_mtu;
2985
2986 /*
2987 * Reset_work may be in the middle of resetting the device, wait for its
2988 * completion.
2989 */
2990 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2991 msleep(1);
2992
2993 if (netif_running(netdev)) {
2994 vmxnet3_quiesce_dev(adapter);
2995 vmxnet3_reset_dev(adapter);
2996
2997 /* we need to re-create the rx queue based on the new mtu */
2998 vmxnet3_rq_destroy_all(adapter);
2999 vmxnet3_adjust_rx_ring_size(adapter);
3000 err = vmxnet3_rq_create_all(adapter);
3001 if (err) {
3002 netdev_err(netdev,
3003 "failed to re-create rx queues, "
3004 " error %d. Closing it.\n", err);
3005 goto out;
3006 }
3007
3008 err = vmxnet3_activate_dev(adapter);
3009 if (err) {
3010 netdev_err(netdev,
3011 "failed to re-activate, error %d. "
3012 "Closing it\n", err);
3013 goto out;
3014 }
3015 }
3016
3017 out:
3018 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3019 if (err)
3020 vmxnet3_force_close(adapter);
3021
3022 return err;
3023 }
3024
3025
3026 static void
3027 vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
3028 {
3029 struct net_device *netdev = adapter->netdev;
3030
3031 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3032 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3033 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
3034 NETIF_F_LRO;
3035 if (dma64)
3036 netdev->hw_features |= NETIF_F_HIGHDMA;
3037 netdev->vlan_features = netdev->hw_features &
3038 ~(NETIF_F_HW_VLAN_CTAG_TX |
3039 NETIF_F_HW_VLAN_CTAG_RX);
3040 netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3041 }
3042
3043
3044 static void
3045 vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
3046 {
3047 u32 tmp;
3048
3049 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
3050 *(u32 *)mac = tmp;
3051
3052 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
3053 mac[4] = tmp & 0xff;
3054 mac[5] = (tmp >> 8) & 0xff;
3055 }
3056
3057 #ifdef CONFIG_PCI_MSI
3058
3059 /*
3060 * Enable MSI-X vectors.
3061 * Returns:
3062 * VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of required
3063 * vectors could be enabled;
3064 * otherwise, the number of vectors enabled (greater than
3065 * VMXNET3_LINUX_MIN_MSIX_VECT), or a negative errno if enabling failed.
3066 */
3067
3068 static int
3069 vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
3070 {
3071 int ret = pci_enable_msix_range(adapter->pdev,
3072 adapter->intr.msix_entries, nvec, nvec);
3073
3074 if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
3075 dev_err(&adapter->netdev->dev,
3076 "Failed to enable %d MSI-X, trying %d\n",
3077 nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
3078
3079 ret = pci_enable_msix_range(adapter->pdev,
3080 adapter->intr.msix_entries,
3081 VMXNET3_LINUX_MIN_MSIX_VECT,
3082 VMXNET3_LINUX_MIN_MSIX_VECT);
3083 }
3084
3085 if (ret < 0) {
3086 dev_err(&adapter->netdev->dev,
3087 "Failed to enable MSI-X, error: %d\n", ret);
3088 }
3089
3090 return ret;
3091 }
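/*
 * Editorial note: pci_enable_msix_range() is called here with
 * minvec == maxvec == nvec, so it either enables exactly nvec vectors
 * (returning nvec) or fails; -ENOSPC in particular means fewer than nvec
 * vectors are available, which is why the retry above asks for exactly
 * VMXNET3_LINUX_MIN_MSIX_VECT instead.
 */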
3092
3093
3094 #endif /* CONFIG_PCI_MSI */
3095
3096 static void
3097 vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
3098 {
3099 u32 cfg;
3100 unsigned long flags;
3101
3102 /* intr settings */
3103 spin_lock_irqsave(&adapter->cmd_lock, flags);
3104 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3105 VMXNET3_CMD_GET_CONF_INTR);
3106 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3107 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3108 adapter->intr.type = cfg & 0x3;
3109 adapter->intr.mask_mode = (cfg >> 2) & 0x3;
3110
3111 if (adapter->intr.type == VMXNET3_IT_AUTO) {
3112 adapter->intr.type = VMXNET3_IT_MSIX;
3113 }
3114
3115 #ifdef CONFIG_PCI_MSI
3116 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3117 int i, nvec;
3118
3119 nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
3120 1 : adapter->num_tx_queues;
3121 nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
3122 0 : adapter->num_rx_queues;
3123 nvec += 1; /* for link event */
3124 nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
3125 nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
3126
3127 for (i = 0; i < nvec; i++)
3128 adapter->intr.msix_entries[i].entry = i;
3129
3130 nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
3131 if (nvec < 0)
3132 goto msix_err;
3133
3134 /* If we cannot allocate one MSIx vector per queue
3135 * then limit the number of rx queues to 1
3136 */
3137 if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
3138 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
3139 || adapter->num_rx_queues != 1) {
3140 adapter->share_intr = VMXNET3_INTR_TXSHARE;
3141 netdev_err(adapter->netdev,
3142 "Number of rx queues : 1\n");
3143 adapter->num_rx_queues = 1;
3144 }
3145 }
3146
3147 adapter->intr.num_intrs = nvec;
3148 return;
3149
3150 msix_err:
3151 /* If we cannot allocate MSIx vectors use only one rx queue */
3152 dev_info(&adapter->pdev->dev,
3153 "Failed to enable MSI-X, error %d. "
3154 "Limiting #rx queues to 1, try MSI.\n", nvec);
3155
3156 adapter->intr.type = VMXNET3_IT_MSI;
3157 }
3158
3159 if (adapter->intr.type == VMXNET3_IT_MSI) {
3160 if (!pci_enable_msi(adapter->pdev)) {
3161 adapter->num_rx_queues = 1;
3162 adapter->intr.num_intrs = 1;
3163 return;
3164 }
3165 }
3166 #endif /* CONFIG_PCI_MSI */
3167
3168 adapter->num_rx_queues = 1;
3169 dev_info(&adapter->netdev->dev,
3170 "Using INTx interrupt, #Rx queues: 1.\n");
3171 adapter->intr.type = VMXNET3_IT_INTX;
3172
3173 /* INT-X related setting */
3174 adapter->intr.num_intrs = 1;
3175 }
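/*
 * Editorial worked example (hypothetical configuration): with 4 tx queues,
 * 4 rx queues and share_intr == VMXNET3_INTR_DONTSHARE, the MSI-X path
 * above requests nvec = 4 (tx) + 4 (rx) + 1 (link event) = 9 vectors.  If
 * only VMXNET3_LINUX_MIN_MSIX_VECT vectors can be enabled, the driver falls
 * back to a single rx queue with tx completions sharing one vector; if
 * MSI-X cannot be enabled at all, it tries MSI and finally INTx, both of
 * which also force num_rx_queues to 1.
 */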
3176
3177
3178 static void
3179 vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
3180 {
3181 if (adapter->intr.type == VMXNET3_IT_MSIX)
3182 pci_disable_msix(adapter->pdev);
3183 else if (adapter->intr.type == VMXNET3_IT_MSI)
3184 pci_disable_msi(adapter->pdev);
3185 else
3186 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
3187 }
3188
3189
3190 static void
3191 vmxnet3_tx_timeout(struct net_device *netdev)
3192 {
3193 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3194 adapter->tx_timeout_count++;
3195
3196 netdev_err(adapter->netdev, "tx hang\n");
3197 schedule_work(&adapter->work);
3198 netif_wake_queue(adapter->netdev);
3199 }
3200
3201
3202 static void
3203 vmxnet3_reset_work(struct work_struct *data)
3204 {
3205 struct vmxnet3_adapter *adapter;
3206
3207 adapter = container_of(data, struct vmxnet3_adapter, work);
3208
3209 /* if another thread is resetting the device, no need to proceed */
3210 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3211 return;
3212
3213 /* if the device is closed, we must leave it alone */
3214 rtnl_lock();
3215 if (netif_running(adapter->netdev)) {
3216 netdev_notice(adapter->netdev, "resetting\n");
3217 vmxnet3_quiesce_dev(adapter);
3218 vmxnet3_reset_dev(adapter);
3219 vmxnet3_activate_dev(adapter);
3220 } else {
3221 netdev_info(adapter->netdev, "already closed\n");
3222 }
3223 rtnl_unlock();
3224
3225 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3226 }
3227
3228
3229 static int
3230 vmxnet3_probe_device(struct pci_dev *pdev,
3231 const struct pci_device_id *id)
3232 {
3233 static const struct net_device_ops vmxnet3_netdev_ops = {
3234 .ndo_open = vmxnet3_open,
3235 .ndo_stop = vmxnet3_close,
3236 .ndo_start_xmit = vmxnet3_xmit_frame,
3237 .ndo_set_mac_address = vmxnet3_set_mac_addr,
3238 .ndo_change_mtu = vmxnet3_change_mtu,
3239 .ndo_set_features = vmxnet3_set_features,
3240 .ndo_get_stats64 = vmxnet3_get_stats64,
3241 .ndo_tx_timeout = vmxnet3_tx_timeout,
3242 .ndo_set_rx_mode = vmxnet3_set_mc,
3243 .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
3244 .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
3245 #ifdef CONFIG_NET_POLL_CONTROLLER
3246 .ndo_poll_controller = vmxnet3_netpoll,
3247 #endif
3248 };
3249 int err;
3250 bool dma64 = false; /* silence a may-be-used-uninitialized warning */
3251 u32 ver;
3252 struct net_device *netdev;
3253 struct vmxnet3_adapter *adapter;
3254 u8 mac[ETH_ALEN];
3255 int size;
3256 int num_tx_queues;
3257 int num_rx_queues;
3258
3259 if (!pci_msi_enabled())
3260 enable_mq = 0;
3261
3262 #ifdef VMXNET3_RSS
3263 if (enable_mq)
3264 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3265 (int)num_online_cpus());
3266 else
3267 #endif
3268 num_rx_queues = 1;
3269 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
3270
3271 if (enable_mq)
3272 num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
3273 (int)num_online_cpus());
3274 else
3275 num_tx_queues = 1;
3276
3277 num_tx_queues = rounddown_pow_of_two(num_tx_queues);
3278 netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
3279 max(num_tx_queues, num_rx_queues));
3280 dev_info(&pdev->dev,
3281 "# of Tx queues : %d, # of Rx queues : %d\n",
3282 num_tx_queues, num_rx_queues);
3283
3284 if (!netdev)
3285 return -ENOMEM;
3286
3287 pci_set_drvdata(pdev, netdev);
3288 adapter = netdev_priv(netdev);
3289 adapter->netdev = netdev;
3290 adapter->pdev = pdev;
3291
3292 adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
3293 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
3294 adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
3295
3296 spin_lock_init(&adapter->cmd_lock);
3297 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
3298 sizeof(struct vmxnet3_adapter),
3299 PCI_DMA_TODEVICE);
3300 if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
3301 dev_err(&pdev->dev, "Failed to map dma\n");
3302 err = -EFAULT;
3303 goto err_dma_map;
3304 }
3305 adapter->shared = dma_alloc_coherent(
3306 &adapter->pdev->dev,
3307 sizeof(struct Vmxnet3_DriverShared),
3308 &adapter->shared_pa, GFP_KERNEL);
3309 if (!adapter->shared) {
3310 dev_err(&pdev->dev, "Failed to allocate memory\n");
3311 err = -ENOMEM;
3312 goto err_alloc_shared;
3313 }
3314
3315 adapter->num_rx_queues = num_rx_queues;
3316 adapter->num_tx_queues = num_tx_queues;
3317 adapter->rx_buf_per_pkt = 1;
3318
3319 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
3320 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
3321 adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
3322 &adapter->queue_desc_pa,
3323 GFP_KERNEL);
3324
3325 if (!adapter->tqd_start) {
3326 dev_err(&pdev->dev, "Failed to allocate memory\n");
3327 err = -ENOMEM;
3328 goto err_alloc_queue_desc;
3329 }
3330 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
3331 adapter->num_tx_queues);
3332
3333 adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
3334 sizeof(struct Vmxnet3_PMConf),
3335 &adapter->pm_conf_pa,
3336 GFP_KERNEL);
3337 if (adapter->pm_conf == NULL) {
3338 err = -ENOMEM;
3339 goto err_alloc_pm;
3340 }
3341
3342 #ifdef VMXNET3_RSS
3343
3344 adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
3345 sizeof(struct UPT1_RSSConf),
3346 &adapter->rss_conf_pa,
3347 GFP_KERNEL);
3348 if (adapter->rss_conf == NULL) {
3349 err = -ENOMEM;
3350 goto err_alloc_rss;
3351 }
3352 #endif /* VMXNET3_RSS */
3353
3354 err = vmxnet3_alloc_pci_resources(adapter, &dma64);
3355 if (err < 0)
3356 goto err_alloc_pci;
3357
3358 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
3359 if (ver & (1 << VMXNET3_REV_3)) {
3360 VMXNET3_WRITE_BAR1_REG(adapter,
3361 VMXNET3_REG_VRRS,
3362 1 << VMXNET3_REV_3);
3363 adapter->version = VMXNET3_REV_3 + 1;
3364 } else if (ver & (1 << VMXNET3_REV_2)) {
3365 VMXNET3_WRITE_BAR1_REG(adapter,
3366 VMXNET3_REG_VRRS,
3367 1 << VMXNET3_REV_2);
3368 adapter->version = VMXNET3_REV_2 + 1;
3369 } else if (ver & (1 << VMXNET3_REV_1)) {
3370 VMXNET3_WRITE_BAR1_REG(adapter,
3371 VMXNET3_REG_VRRS,
3372 1 << VMXNET3_REV_1);
3373 adapter->version = VMXNET3_REV_1 + 1;
3374 } else {
3375 dev_err(&pdev->dev,
3376 "Incompatible h/w version (0x%x) for adapter\n", ver);
3377 err = -EBUSY;
3378 goto err_ver;
3379 }
3380 dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version);
3381
3382 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
3383 if (ver & 1) {
3384 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
3385 } else {
3386 dev_err(&pdev->dev,
3387 "Incompatible upt version (0x%x) for adapter\n", ver);
3388 err = -EBUSY;
3389 goto err_ver;
3390 }
3391
3392 if (VMXNET3_VERSION_GE_3(adapter)) {
3393 adapter->coal_conf =
3394 dma_alloc_coherent(&adapter->pdev->dev,
3395 sizeof(struct Vmxnet3_CoalesceScheme),
3397 &adapter->coal_conf_pa,
3398 GFP_KERNEL);
3399 if (!adapter->coal_conf) {
3400 err = -ENOMEM;
3401 goto err_ver;
3402 }
3403 memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
3404 adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
3405 adapter->default_coal_mode = true;
3406 }
3407
3408 SET_NETDEV_DEV(netdev, &pdev->dev);
3409 vmxnet3_declare_features(adapter, dma64);
3410
3411 adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
3412 VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
3413
3414 if (adapter->num_tx_queues == adapter->num_rx_queues)
3415 adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
3416 else
3417 adapter->share_intr = VMXNET3_INTR_DONTSHARE;
3418
3419 vmxnet3_alloc_intr_resources(adapter);
3420
3421 #ifdef VMXNET3_RSS
3422 if (adapter->num_rx_queues > 1 &&
3423 adapter->intr.type == VMXNET3_IT_MSIX) {
3424 adapter->rss = true;
3425 netdev->hw_features |= NETIF_F_RXHASH;
3426 netdev->features |= NETIF_F_RXHASH;
3427 dev_dbg(&pdev->dev, "RSS is enabled.\n");
3428 } else {
3429 adapter->rss = false;
3430 }
3431 #endif
3432
3433 vmxnet3_read_mac_addr(adapter, mac);
3434 memcpy(netdev->dev_addr, mac, netdev->addr_len);
3435
3436 netdev->netdev_ops = &vmxnet3_netdev_ops;
3437 vmxnet3_set_ethtool_ops(netdev);
3438 netdev->watchdog_timeo = 5 * HZ;
3439
3440 INIT_WORK(&adapter->work, vmxnet3_reset_work);
3441 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3442
3443 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3444 int i;
3445 for (i = 0; i < adapter->num_rx_queues; i++) {
3446 netif_napi_add(adapter->netdev,
3447 &adapter->rx_queue[i].napi,
3448 vmxnet3_poll_rx_only, 64);
3449 }
3450 } else {
3451 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
3452 vmxnet3_poll, 64);
3453 }
3454
3455 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
3456 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
3457
3458 netif_carrier_off(netdev);
3459 err = register_netdev(netdev);
3460
3461 if (err) {
3462 dev_err(&pdev->dev, "Failed to register adapter\n");
3463 goto err_register;
3464 }
3465
3466 vmxnet3_check_link(adapter, false);
3467 return 0;
3468
3469 err_register:
3470 if (VMXNET3_VERSION_GE_3(adapter)) {
3471 dma_free_coherent(&adapter->pdev->dev,
3472 sizeof(struct Vmxnet3_CoalesceScheme),
3473 adapter->coal_conf, adapter->coal_conf_pa);
3474 }
3475 vmxnet3_free_intr_resources(adapter);
3476 err_ver:
3477 vmxnet3_free_pci_resources(adapter);
3478 err_alloc_pci:
3479 #ifdef VMXNET3_RSS
3480 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
3481 adapter->rss_conf, adapter->rss_conf_pa);
3482 err_alloc_rss:
3483 #endif
3484 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
3485 adapter->pm_conf, adapter->pm_conf_pa);
3486 err_alloc_pm:
3487 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
3488 adapter->queue_desc_pa);
3489 err_alloc_queue_desc:
3490 dma_free_coherent(&adapter->pdev->dev,
3491 sizeof(struct Vmxnet3_DriverShared),
3492 adapter->shared, adapter->shared_pa);
3493 err_alloc_shared:
3494 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
3495 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
3496 err_dma_map:
3497 free_netdev(netdev);
3498 return err;
3499 }
3500
3501
3502 static void
3503 vmxnet3_remove_device(struct pci_dev *pdev)
3504 {
3505 struct net_device *netdev = pci_get_drvdata(pdev);
3506 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3507 int size = 0;
3508 int num_rx_queues;
3509
3510 #ifdef VMXNET3_RSS
3511 if (enable_mq)
3512 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3513 (int)num_online_cpus());
3514 else
3515 #endif
3516 num_rx_queues = 1;
3517 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
3518
3519 cancel_work_sync(&adapter->work);
3520
3521 unregister_netdev(netdev);
3522
3523 vmxnet3_free_intr_resources(adapter);
3524 vmxnet3_free_pci_resources(adapter);
3525 if (VMXNET3_VERSION_GE_3(adapter)) {
3526 dma_free_coherent(&adapter->pdev->dev,
3527 sizeof(struct Vmxnet3_CoalesceScheme),
3528 adapter->coal_conf, adapter->coal_conf_pa);
3529 }
3530 #ifdef VMXNET3_RSS
3531 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
3532 adapter->rss_conf, adapter->rss_conf_pa);
3533 #endif
3534 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
3535 adapter->pm_conf, adapter->pm_conf_pa);
3536
3537 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
3538 size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
3539 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
3540 adapter->queue_desc_pa);
3541 dma_free_coherent(&adapter->pdev->dev,
3542 sizeof(struct Vmxnet3_DriverShared),
3543 adapter->shared, adapter->shared_pa);
3544 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
3545 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
3546 free_netdev(netdev);
3547 }
3548
3549 static void vmxnet3_shutdown_device(struct pci_dev *pdev)
3550 {
3551 struct net_device *netdev = pci_get_drvdata(pdev);
3552 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3553 unsigned long flags;
3554
3555 /* Reset_work may be in the middle of resetting the device, wait for its
3556 * completion.
3557 */
3558 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3559 msleep(1);
3560
3561 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED,
3562 &adapter->state)) {
3563 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3564 return;
3565 }
3566 spin_lock_irqsave(&adapter->cmd_lock, flags);
3567 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3568 VMXNET3_CMD_QUIESCE_DEV);
3569 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3570 vmxnet3_disable_all_intrs(adapter);
3571
3572 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3573 }
3574
3575
3576 #ifdef CONFIG_PM
3577
3578 static int
3579 vmxnet3_suspend(struct device *device)
3580 {
3581 struct pci_dev *pdev = to_pci_dev(device);
3582 struct net_device *netdev = pci_get_drvdata(pdev);
3583 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3584 struct Vmxnet3_PMConf *pmConf;
3585 struct ethhdr *ehdr;
3586 struct arphdr *ahdr;
3587 u8 *arpreq;
3588 struct in_device *in_dev;
3589 struct in_ifaddr *ifa;
3590 unsigned long flags;
3591 int i = 0;
3592
3593 if (!netif_running(netdev))
3594 return 0;
3595
3596 for (i = 0; i < adapter->num_rx_queues; i++)
3597 napi_disable(&adapter->rx_queue[i].napi);
3598
3599 vmxnet3_disable_all_intrs(adapter);
3600 vmxnet3_free_irqs(adapter);
3601 vmxnet3_free_intr_resources(adapter);
3602
3603 netif_device_detach(netdev);
3604 netif_tx_stop_all_queues(netdev);
3605
3606 /* Create wake-up filters. */
3607 pmConf = adapter->pm_conf;
3608 memset(pmConf, 0, sizeof(*pmConf));
3609
3610 if (adapter->wol & WAKE_UCAST) {
3611 pmConf->filters[i].patternSize = ETH_ALEN;
3612 pmConf->filters[i].maskSize = 1;
3613 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
3614 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
3615
3616 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
3617 i++;
3618 }
3619
3620 if (adapter->wol & WAKE_ARP) {
3621 in_dev = in_dev_get(netdev);
3622 if (!in_dev)
3623 goto skip_arp;
3624
3625 ifa = (struct in_ifaddr *)in_dev->ifa_list;
3626 if (!ifa)
3627 goto skip_arp;
3628
3629 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header */
3630 sizeof(struct arphdr) + /* ARP header */
3631 2 * ETH_ALEN + /* 2 Ethernet addresses */
3632 2 * sizeof(u32); /* 2 IPv4 addresses */
3633 pmConf->filters[i].maskSize =
3634 (pmConf->filters[i].patternSize - 1) / 8 + 1;
3635
3636 /* ETH_P_ARP in Ethernet header. */
3637 ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
3638 ehdr->h_proto = htons(ETH_P_ARP);
3639
3640 /* ARPOP_REQUEST in ARP header. */
3641 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
3642 ahdr->ar_op = htons(ARPOP_REQUEST);
3643 arpreq = (u8 *)(ahdr + 1);
3644
3645 /* The Unicast IPv4 address in 'tip' field. */
3646 arpreq += 2 * ETH_ALEN + sizeof(u32);
3647 *(u32 *)arpreq = ifa->ifa_address;
3648
3649 /* The mask for the relevant bits. */
3650 pmConf->filters[i].mask[0] = 0x00;
3651 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
3652 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
3653 pmConf->filters[i].mask[3] = 0x00;
3654 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
3655 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
3656 in_dev_put(in_dev);
3657
3658 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
3659 i++;
3660 }
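/*
 * Editorial worked example: the ARP wake pattern built above is
 * ETH_HLEN (14) + sizeof(struct arphdr) (8) + 2 * ETH_ALEN (12) +
 * 2 * sizeof(u32) (8) = 42 bytes, so maskSize = (42 - 1) / 8 + 1 = 6.
 * Assuming bit j of mask[k] selects pattern byte 8 * k + j (consistent with
 * the byte offsets annotated above), mask[1] = 0x30 matches bytes 12-13
 * (the ETH_P_ARP ethertype), mask[2] = 0x30 matches bytes 20-21
 * (ar_op == ARPOP_REQUEST), and mask[4] = 0xC0 with mask[5] = 0x03 match
 * bytes 38-41, the target IPv4 address written into the pattern.
 */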
3661
3662 skip_arp:
3663 if (adapter->wol & WAKE_MAGIC)
3664 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
3665
3666 pmConf->numFilters = i;
3667
3668 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
3669 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
3670 *pmConf));
3671 adapter->shared->devRead.pmConfDesc.confPA =
3672 cpu_to_le64(adapter->pm_conf_pa);
3673
3674 spin_lock_irqsave(&adapter->cmd_lock, flags);
3675 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3676 VMXNET3_CMD_UPDATE_PMCFG);
3677 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3678
3679 pci_save_state(pdev);
3680 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
3681 adapter->wol);
3682 pci_disable_device(pdev);
3683 pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
3684
3685 return 0;
3686 }
3687
3688
3689 static int
3690 vmxnet3_resume(struct device *device)
3691 {
3692 int err;
3693 unsigned long flags;
3694 struct pci_dev *pdev = to_pci_dev(device);
3695 struct net_device *netdev = pci_get_drvdata(pdev);
3696 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3697
3698 if (!netif_running(netdev))
3699 return 0;
3700
3701 pci_set_power_state(pdev, PCI_D0);
3702 pci_restore_state(pdev);
3703 err = pci_enable_device_mem(pdev);
3704 if (err != 0)
3705 return err;
3706
3707 pci_enable_wake(pdev, PCI_D0, 0);
3708
3709 vmxnet3_alloc_intr_resources(adapter);
3710
3711 /* During hibernate and suspend, the device has to be reinitialized
3712 * because its state may not have been preserved.
3713 */
3714
3715 /* No need to check the adapter state, as other reset tasks cannot run
3716 * during device resume.
3717 */
3718 spin_lock_irqsave(&adapter->cmd_lock, flags);
3719 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3720 VMXNET3_CMD_QUIESCE_DEV);
3721 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3722 vmxnet3_tq_cleanup_all(adapter);
3723 vmxnet3_rq_cleanup_all(adapter);
3724
3725 vmxnet3_reset_dev(adapter);
3726 err = vmxnet3_activate_dev(adapter);
3727 if (err != 0) {
3728 netdev_err(netdev,
3729 "failed to re-activate on resume, error: %d", err);
3730 vmxnet3_force_close(adapter);
3731 return err;
3732 }
3733 netif_device_attach(netdev);
3734
3735 return 0;
3736 }
3737
3738 static const struct dev_pm_ops vmxnet3_pm_ops = {
3739 .suspend = vmxnet3_suspend,
3740 .resume = vmxnet3_resume,
3741 .freeze = vmxnet3_suspend,
3742 .restore = vmxnet3_resume,
3743 };
3744 #endif
3745
3746 static struct pci_driver vmxnet3_driver = {
3747 .name = vmxnet3_driver_name,
3748 .id_table = vmxnet3_pciid_table,
3749 .probe = vmxnet3_probe_device,
3750 .remove = vmxnet3_remove_device,
3751 .shutdown = vmxnet3_shutdown_device,
3752 #ifdef CONFIG_PM
3753 .driver.pm = &vmxnet3_pm_ops,
3754 #endif
3755 };
3756
3757
3758 static int __init
3759 vmxnet3_init_module(void)
3760 {
3761 pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
3762 VMXNET3_DRIVER_VERSION_REPORT);
3763 return pci_register_driver(&vmxnet3_driver);
3764 }
3765
3766 module_init(vmxnet3_init_module);
3767
3768
3769 static void
3770 vmxnet3_exit_module(void)
3771 {
3772 pci_unregister_driver(&vmxnet3_driver);
3773 }
3774
3775 module_exit(vmxnet3_exit_module);
3776
3777 MODULE_AUTHOR("VMware, Inc.");
3778 MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
3779 MODULE_LICENSE("GPL v2");
3780 MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);
3781
3782
3783
3784
3785
3786 /* LDV_COMMENT_BEGIN_MAIN */
3787 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
3788
3789 /*###########################################################################*/
3790
3791 /*############## Driver Environment Generator 0.2 output ####################*/
3792
3793 /*###########################################################################*/
3794
3795
3796
3797 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
3798 void ldv_check_final_state(void);
3799
3800 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
3801 void ldv_check_return_value(int res);
3802
3803 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
3804 void ldv_check_return_value_probe(int res);
3805
3806 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
3807 void ldv_initialize(void);
3808
3809 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
3810 void ldv_handler_precall(void);
3811
3812 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary integer value. */
3813 int nondet_int(void);
3814
3815 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
3816 int LDV_IN_INTERRUPT;
3817
3818 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
3819 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
3820
3821
3822
3823 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
3824 /*============================= VARIABLE DECLARATION PART =============================*/
3825 /** STRUCT: struct type: net_device_ops, struct name: vmxnet3_netdev_ops **/
3826 /* content: static int vmxnet3_open(struct net_device *netdev)*/
3827 /* LDV_COMMENT_BEGIN_PREP */
3828 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
3829 #ifdef __BIG_ENDIAN_BITFIELD
3830 #endif
3831 #ifdef __BIG_ENDIAN_BITFIELD
3832 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
3833 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
3834 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
3835 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
3836 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
3837 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
3838 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
3839 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
3840 VMXNET3_TCD_GEN_SIZE)
3841 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
3842 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
3843 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
3844 (dstrcd) = (tmp); \
3845 vmxnet3_RxCompToCPU((rcd), (tmp)); \
3846 } while (0)
3847 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
3848 (dstrxd) = (tmp); \
3849 vmxnet3_RxDescToCPU((rxd), (tmp)); \
3850 } while (0)
3851 #else
3852 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
3853 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
3854 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
3855 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
3856 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
3857 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
3858 #endif
3859 #ifdef __BIG_ENDIAN_BITFIELD
3860 #endif
3861 #ifdef __BIG_ENDIAN_BITFIELD
3862 #else
3863 #endif
3864 #ifdef __BIG_ENDIAN_BITFIELD
3865 #endif
3866 #ifdef __BIG_ENDIAN_BITFIELD
3867 #endif
3868 #ifdef VMXNET3_RSS
3869 #endif
3870 #ifdef __BIG_ENDIAN_BITFIELD
3871 #endif
3872 #ifdef CONFIG_PCI_MSI
3873 #endif
3874 #ifdef CONFIG_NET_POLL_CONTROLLER
3875 #ifdef CONFIG_PCI_MSI
3876 #endif
3877 #endif
3878 #ifdef CONFIG_PCI_MSI
3879 #endif
3880 #ifdef CONFIG_PCI_MSI
3881 #endif
3882 #ifdef CONFIG_PCI_MSI
3883 #endif
3884 #ifdef VMXNET3_RSS
3885 #endif
3886 /* LDV_COMMENT_END_PREP */
3887 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_open" */
3888 struct net_device * var_group1;
3889 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "vmxnet3_open" */
3890 static int res_vmxnet3_open_73;
3891 /* LDV_COMMENT_BEGIN_PREP */
3892 #ifdef CONFIG_PCI_MSI
3893 #endif
3894 #ifdef CONFIG_PCI_MSI
3895 #endif
3896 #ifdef CONFIG_NET_POLL_CONTROLLER
3897 #endif
3898 #ifdef VMXNET3_RSS
3899 #endif
3900 #ifdef VMXNET3_RSS
3901 #endif
3902 #ifdef VMXNET3_RSS
3903 #endif
3904 #ifdef VMXNET3_RSS
3905 #endif
3906 #ifdef VMXNET3_RSS
3907 #endif
3908 #ifdef VMXNET3_RSS
3909 #endif
3910 #ifdef CONFIG_PM
3911 #endif
3912 #ifdef CONFIG_PM
3913 #endif
3914 /* LDV_COMMENT_END_PREP */
3915 /* content: static int vmxnet3_close(struct net_device *netdev)*/
3916 /* LDV_COMMENT_BEGIN_PREP */
3917 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
3918 #ifdef __BIG_ENDIAN_BITFIELD
3919 #endif
3920 #ifdef __BIG_ENDIAN_BITFIELD
3921 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
3922 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
3923 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
3924 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
3925 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
3926 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
3927 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
3928 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
3929 VMXNET3_TCD_GEN_SIZE)
3930 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
3931 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
3932 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
3933 (dstrcd) = (tmp); \
3934 vmxnet3_RxCompToCPU((rcd), (tmp)); \
3935 } while (0)
3936 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
3937 (dstrxd) = (tmp); \
3938 vmxnet3_RxDescToCPU((rxd), (tmp)); \
3939 } while (0)
3940 #else
3941 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
3942 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
3943 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
3944 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
3945 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
3946 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
3947 #endif
3948 #ifdef __BIG_ENDIAN_BITFIELD
3949 #endif
3950 #ifdef __BIG_ENDIAN_BITFIELD
3951 #else
3952 #endif
3953 #ifdef __BIG_ENDIAN_BITFIELD
3954 #endif
3955 #ifdef __BIG_ENDIAN_BITFIELD
3956 #endif
3957 #ifdef VMXNET3_RSS
3958 #endif
3959 #ifdef __BIG_ENDIAN_BITFIELD
3960 #endif
3961 #ifdef CONFIG_PCI_MSI
3962 #endif
3963 #ifdef CONFIG_NET_POLL_CONTROLLER
3964 #ifdef CONFIG_PCI_MSI
3965 #endif
3966 #endif
3967 #ifdef CONFIG_PCI_MSI
3968 #endif
3969 #ifdef CONFIG_PCI_MSI
3970 #endif
3971 #ifdef CONFIG_PCI_MSI
3972 #endif
3973 #ifdef VMXNET3_RSS
3974 #endif
3975 /* LDV_COMMENT_END_PREP */
3976 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "vmxnet3_close" */
3977 static int res_vmxnet3_close_74;
3978 /* LDV_COMMENT_BEGIN_PREP */
3979 #ifdef CONFIG_PCI_MSI
3980 #endif
3981 #ifdef CONFIG_PCI_MSI
3982 #endif
3983 #ifdef CONFIG_NET_POLL_CONTROLLER
3984 #endif
3985 #ifdef VMXNET3_RSS
3986 #endif
3987 #ifdef VMXNET3_RSS
3988 #endif
3989 #ifdef VMXNET3_RSS
3990 #endif
3991 #ifdef VMXNET3_RSS
3992 #endif
3993 #ifdef VMXNET3_RSS
3994 #endif
3995 #ifdef VMXNET3_RSS
3996 #endif
3997 #ifdef CONFIG_PM
3998 #endif
3999 #ifdef CONFIG_PM
4000 #endif
4001 /* LDV_COMMENT_END_PREP */
4002 /* content: static netdev_tx_t vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)*/
4003 /* LDV_COMMENT_BEGIN_PREP */
4004 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
4005 #ifdef __BIG_ENDIAN_BITFIELD
4006 #endif
4007 #ifdef __BIG_ENDIAN_BITFIELD
4008 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
4009 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
4010 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
4011 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
4012 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
4013 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
4014 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
4015 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
4016 VMXNET3_TCD_GEN_SIZE)
4017 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
4018 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
4019 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
4020 (dstrcd) = (tmp); \
4021 vmxnet3_RxCompToCPU((rcd), (tmp)); \
4022 } while (0)
4023 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
4024 (dstrxd) = (tmp); \
4025 vmxnet3_RxDescToCPU((rxd), (tmp)); \
4026 } while (0)
4027 #else
4028 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
4029 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
4030 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
4031 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
4032 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
4033 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
4034 #endif
4035 #ifdef __BIG_ENDIAN_BITFIELD
4036 #endif
4037 #ifdef __BIG_ENDIAN_BITFIELD
4038 #else
4039 #endif
4040 #ifdef __BIG_ENDIAN_BITFIELD
4041 #endif
4042 /* LDV_COMMENT_END_PREP */
4043 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_xmit_frame" */
4044 struct sk_buff * var_group2;
4045 /* LDV_COMMENT_BEGIN_PREP */
4046 #ifdef __BIG_ENDIAN_BITFIELD
4047 #endif
4048 #ifdef VMXNET3_RSS
4049 #endif
4050 #ifdef __BIG_ENDIAN_BITFIELD
4051 #endif
4052 #ifdef CONFIG_PCI_MSI
4053 #endif
4054 #ifdef CONFIG_NET_POLL_CONTROLLER
4055 #ifdef CONFIG_PCI_MSI
4056 #endif
4057 #endif
4058 #ifdef CONFIG_PCI_MSI
4059 #endif
4060 #ifdef CONFIG_PCI_MSI
4061 #endif
4062 #ifdef CONFIG_PCI_MSI
4063 #endif
4064 #ifdef VMXNET3_RSS
4065 #endif
4066 #ifdef CONFIG_PCI_MSI
4067 #endif
4068 #ifdef CONFIG_PCI_MSI
4069 #endif
4070 #ifdef CONFIG_NET_POLL_CONTROLLER
4071 #endif
4072 #ifdef VMXNET3_RSS
4073 #endif
4074 #ifdef VMXNET3_RSS
4075 #endif
4076 #ifdef VMXNET3_RSS
4077 #endif
4078 #ifdef VMXNET3_RSS
4079 #endif
4080 #ifdef VMXNET3_RSS
4081 #endif
4082 #ifdef VMXNET3_RSS
4083 #endif
4084 #ifdef CONFIG_PM
4085 #endif
4086 #ifdef CONFIG_PM
4087 #endif
4088 /* LDV_COMMENT_END_PREP */
4089 /* content: static int vmxnet3_set_mac_addr(struct net_device *netdev, void *p)*/
4090 /* LDV_COMMENT_BEGIN_PREP */
4091 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
4092 #ifdef __BIG_ENDIAN_BITFIELD
4093 #endif
4094 #ifdef __BIG_ENDIAN_BITFIELD
4095 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
4096 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
4097 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
4098 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
4099 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
4100 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
4101 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
4102 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
4103 VMXNET3_TCD_GEN_SIZE)
4104 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
4105 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
4106 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
4107 (dstrcd) = (tmp); \
4108 vmxnet3_RxCompToCPU((rcd), (tmp)); \
4109 } while (0)
4110 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
4111 (dstrxd) = (tmp); \
4112 vmxnet3_RxDescToCPU((rxd), (tmp)); \
4113 } while (0)
4114 #else
4115 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
4116 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
4117 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
4118 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
4119 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
4120 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
4121 #endif
4122 #ifdef __BIG_ENDIAN_BITFIELD
4123 #endif
4124 #ifdef __BIG_ENDIAN_BITFIELD
4125 #else
4126 #endif
4127 #ifdef __BIG_ENDIAN_BITFIELD
4128 #endif
4129 #ifdef __BIG_ENDIAN_BITFIELD
4130 #endif
4131 #ifdef VMXNET3_RSS
4132 #endif
4133 #ifdef __BIG_ENDIAN_BITFIELD
4134 #endif
4135 #ifdef CONFIG_PCI_MSI
4136 #endif
4137 #ifdef CONFIG_NET_POLL_CONTROLLER
4138 #ifdef CONFIG_PCI_MSI
4139 #endif
4140 #endif
4141 #ifdef CONFIG_PCI_MSI
4142 #endif
4143 #ifdef CONFIG_PCI_MSI
4144 #endif
4145 #ifdef CONFIG_PCI_MSI
4146 #endif
4147 #ifdef VMXNET3_RSS
4148 #endif
4149 /* LDV_COMMENT_END_PREP */
4150 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_set_mac_addr" */
4151 void * var_vmxnet3_set_mac_addr_68_p1;
4152 /* LDV_COMMENT_BEGIN_PREP */
4153 #ifdef CONFIG_PCI_MSI
4154 #endif
4155 #ifdef CONFIG_PCI_MSI
4156 #endif
4157 #ifdef CONFIG_NET_POLL_CONTROLLER
4158 #endif
4159 #ifdef VMXNET3_RSS
4160 #endif
4161 #ifdef VMXNET3_RSS
4162 #endif
4163 #ifdef VMXNET3_RSS
4164 #endif
4165 #ifdef VMXNET3_RSS
4166 #endif
4167 #ifdef VMXNET3_RSS
4168 #endif
4169 #ifdef VMXNET3_RSS
4170 #endif
4171 #ifdef CONFIG_PM
4172 #endif
4173 #ifdef CONFIG_PM
4174 #endif
4175 /* LDV_COMMENT_END_PREP */
4176 /* content: static int vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)*/
4177 /* LDV_COMMENT_BEGIN_PREP */
4178 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
4179 #ifdef __BIG_ENDIAN_BITFIELD
4180 #endif
4181 #ifdef __BIG_ENDIAN_BITFIELD
4182 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
4183 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
4184 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
4185 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
4186 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
4187 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
4188 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
4189 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
4190 VMXNET3_TCD_GEN_SIZE)
4191 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
4192 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
4193 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
4194 (dstrcd) = (tmp); \
4195 vmxnet3_RxCompToCPU((rcd), (tmp)); \
4196 } while (0)
4197 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
4198 (dstrxd) = (tmp); \
4199 vmxnet3_RxDescToCPU((rxd), (tmp)); \
4200 } while (0)
4201 #else
4202 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
4203 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
4204 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
4205 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
4206 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
4207 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
4208 #endif
4209 #ifdef __BIG_ENDIAN_BITFIELD
4210 #endif
4211 #ifdef __BIG_ENDIAN_BITFIELD
4212 #else
4213 #endif
4214 #ifdef __BIG_ENDIAN_BITFIELD
4215 #endif
4216 #ifdef __BIG_ENDIAN_BITFIELD
4217 #endif
4218 #ifdef VMXNET3_RSS
4219 #endif
4220 #ifdef __BIG_ENDIAN_BITFIELD
4221 #endif
4222 #ifdef CONFIG_PCI_MSI
4223 #endif
4224 #ifdef CONFIG_NET_POLL_CONTROLLER
4225 #ifdef CONFIG_PCI_MSI
4226 #endif
4227 #endif
4228 #ifdef CONFIG_PCI_MSI
4229 #endif
4230 #ifdef CONFIG_PCI_MSI
4231 #endif
4232 #ifdef CONFIG_PCI_MSI
4233 #endif
4234 #ifdef VMXNET3_RSS
4235 #endif
4236 /* LDV_COMMENT_END_PREP */
4237 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_change_mtu" */
4238 int var_vmxnet3_change_mtu_76_p1;
4239 /* LDV_COMMENT_BEGIN_PREP */
4240 #ifdef CONFIG_PCI_MSI
4241 #endif
4242 #ifdef CONFIG_PCI_MSI
4243 #endif
4244 #ifdef CONFIG_NET_POLL_CONTROLLER
4245 #endif
4246 #ifdef VMXNET3_RSS
4247 #endif
4248 #ifdef VMXNET3_RSS
4249 #endif
4250 #ifdef VMXNET3_RSS
4251 #endif
4252 #ifdef VMXNET3_RSS
4253 #endif
4254 #ifdef VMXNET3_RSS
4255 #endif
4256 #ifdef VMXNET3_RSS
4257 #endif
4258 #ifdef CONFIG_PM
4259 #endif
4260 #ifdef CONFIG_PM
4261 #endif
4262 /* LDV_COMMENT_END_PREP */
4263 /* content: static void vmxnet3_tx_timeout(struct net_device *netdev)*/
4264 /* LDV_COMMENT_BEGIN_PREP */
4265 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
4266 #ifdef __BIG_ENDIAN_BITFIELD
4267 #endif
4268 #ifdef __BIG_ENDIAN_BITFIELD
4269 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
4270 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
4271 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
4272 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
4273 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
4274 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
4275 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
4276 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
4277 VMXNET3_TCD_GEN_SIZE)
4278 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
4279 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
4280 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
4281 (dstrcd) = (tmp); \
4282 vmxnet3_RxCompToCPU((rcd), (tmp)); \
4283 } while (0)
4284 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
4285 (dstrxd) = (tmp); \
4286 vmxnet3_RxDescToCPU((rxd), (tmp)); \
4287 } while (0)
4288 #else
4289 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
4290 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
4291 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
4292 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
4293 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
4294 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
4295 #endif
4296 #ifdef __BIG_ENDIAN_BITFIELD
4297 #endif
4298 #ifdef __BIG_ENDIAN_BITFIELD
4299 #else
4300 #endif
4301 #ifdef __BIG_ENDIAN_BITFIELD
4302 #endif
4303 #ifdef __BIG_ENDIAN_BITFIELD
4304 #endif
4305 #ifdef VMXNET3_RSS
4306 #endif
4307 #ifdef __BIG_ENDIAN_BITFIELD
4308 #endif
4309 #ifdef CONFIG_PCI_MSI
4310 #endif
4311 #ifdef CONFIG_NET_POLL_CONTROLLER
4312 #ifdef CONFIG_PCI_MSI
4313 #endif
4314 #endif
4315 #ifdef CONFIG_PCI_MSI
4316 #endif
4317 #ifdef CONFIG_PCI_MSI
4318 #endif
4319 #ifdef CONFIG_PCI_MSI
4320 #endif
4321 #ifdef VMXNET3_RSS
4322 #endif
4323 #ifdef CONFIG_PCI_MSI
4324 #endif
4325 #ifdef CONFIG_PCI_MSI
4326 #endif
4327 /* LDV_COMMENT_END_PREP */
4328 /* LDV_COMMENT_BEGIN_PREP */
4329 #ifdef CONFIG_NET_POLL_CONTROLLER
4330 #endif
4331 #ifdef VMXNET3_RSS
4332 #endif
4333 #ifdef VMXNET3_RSS
4334 #endif
4335 #ifdef VMXNET3_RSS
4336 #endif
4337 #ifdef VMXNET3_RSS
4338 #endif
4339 #ifdef VMXNET3_RSS
4340 #endif
4341 #ifdef VMXNET3_RSS
4342 #endif
4343 #ifdef CONFIG_PM
4344 #endif
4345 #ifdef CONFIG_PM
4346 #endif
4347 /* LDV_COMMENT_END_PREP */
4348 /* content: static void vmxnet3_set_mc(struct net_device *netdev)*/
4349 /* LDV_COMMENT_BEGIN_PREP */
4350 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
4351 #ifdef __BIG_ENDIAN_BITFIELD
4352 #endif
4353 #ifdef __BIG_ENDIAN_BITFIELD
4354 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
4355 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
4356 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
4357 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
4358 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
4359 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
4360 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
4361 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
4362 VMXNET3_TCD_GEN_SIZE)
4363 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
4364 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
4365 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
4366 (dstrcd) = (tmp); \
4367 vmxnet3_RxCompToCPU((rcd), (tmp)); \
4368 } while (0)
4369 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
4370 (dstrxd) = (tmp); \
4371 vmxnet3_RxDescToCPU((rxd), (tmp)); \
4372 } while (0)
4373 #else
4374 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
4375 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
4376 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
4377 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
4378 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
4379 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
4380 #endif
4381 #ifdef __BIG_ENDIAN_BITFIELD
4382 #endif
4383 #ifdef __BIG_ENDIAN_BITFIELD
4384 #else
4385 #endif
4386 #ifdef __BIG_ENDIAN_BITFIELD
4387 #endif
4388 #ifdef __BIG_ENDIAN_BITFIELD
4389 #endif
4390 #ifdef VMXNET3_RSS
4391 #endif
4392 #ifdef __BIG_ENDIAN_BITFIELD
4393 #endif
4394 #ifdef CONFIG_PCI_MSI
4395 #endif
4396 #ifdef CONFIG_NET_POLL_CONTROLLER
4397 #ifdef CONFIG_PCI_MSI
4398 #endif
4399 #endif
4400 #ifdef CONFIG_PCI_MSI
4401 #endif
4402 #ifdef CONFIG_PCI_MSI
4403 #endif
4404 #ifdef CONFIG_PCI_MSI
4405 #endif
4406 /* LDV_COMMENT_END_PREP */
4407 /* LDV_COMMENT_BEGIN_PREP */
4408 #ifdef VMXNET3_RSS
4409 #endif
4410 #ifdef CONFIG_PCI_MSI
4411 #endif
4412 #ifdef CONFIG_PCI_MSI
4413 #endif
4414 #ifdef CONFIG_NET_POLL_CONTROLLER
4415 #endif
4416 #ifdef VMXNET3_RSS
4417 #endif
4418 #ifdef VMXNET3_RSS
4419 #endif
4420 #ifdef VMXNET3_RSS
4421 #endif
4422 #ifdef VMXNET3_RSS
4423 #endif
4424 #ifdef VMXNET3_RSS
4425 #endif
4426 #ifdef VMXNET3_RSS
4427 #endif
4428 #ifdef CONFIG_PM
4429 #endif
4430 #ifdef CONFIG_PM
4431 #endif
4432 /* LDV_COMMENT_END_PREP */
4433 /* content: static int vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)*/
4434 /* LDV_COMMENT_BEGIN_PREP */
4435 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
4436 #ifdef __BIG_ENDIAN_BITFIELD
4437 #endif
4438 #ifdef __BIG_ENDIAN_BITFIELD
4439 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
4440 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
4441 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
4442 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
4443 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
4444 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
4445 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
4446 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
4447 VMXNET3_TCD_GEN_SIZE)
4448 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
4449 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
4450 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
4451 (dstrcd) = (tmp); \
4452 vmxnet3_RxCompToCPU((rcd), (tmp)); \
4453 } while (0)
4454 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
4455 (dstrxd) = (tmp); \
4456 vmxnet3_RxDescToCPU((rxd), (tmp)); \
4457 } while (0)
4458 #else
4459 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
4460 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
4461 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
4462 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
4463 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
4464 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
4465 #endif
4466 #ifdef __BIG_ENDIAN_BITFIELD
4467 #endif
4468 #ifdef __BIG_ENDIAN_BITFIELD
4469 #else
4470 #endif
4471 #ifdef __BIG_ENDIAN_BITFIELD
4472 #endif
4473 #ifdef __BIG_ENDIAN_BITFIELD
4474 #endif
4475 #ifdef VMXNET3_RSS
4476 #endif
4477 #ifdef __BIG_ENDIAN_BITFIELD
4478 #endif
4479 #ifdef CONFIG_PCI_MSI
4480 #endif
4481 #ifdef CONFIG_NET_POLL_CONTROLLER
4482 #ifdef CONFIG_PCI_MSI
4483 #endif
4484 #endif
4485 #ifdef CONFIG_PCI_MSI
4486 #endif
4487 #ifdef CONFIG_PCI_MSI
4488 #endif
4489 #ifdef CONFIG_PCI_MSI
4490 #endif
4491 /* LDV_COMMENT_END_PREP */
4492 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_vlan_rx_add_vid" */
4493 __be16 var_vmxnet3_vlan_rx_add_vid_57_p1;
4494 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_vlan_rx_add_vid" */
4495 u16 var_vmxnet3_vlan_rx_add_vid_57_p2;
4496 /* LDV_COMMENT_BEGIN_PREP */
4497 #ifdef VMXNET3_RSS
4498 #endif
4499 #ifdef CONFIG_PCI_MSI
4500 #endif
4501 #ifdef CONFIG_PCI_MSI
4502 #endif
4503 #ifdef CONFIG_NET_POLL_CONTROLLER
4504 #endif
4505 #ifdef VMXNET3_RSS
4506 #endif
4507 #ifdef VMXNET3_RSS
4508 #endif
4509 #ifdef VMXNET3_RSS
4510 #endif
4511 #ifdef VMXNET3_RSS
4512 #endif
4513 #ifdef VMXNET3_RSS
4514 #endif
4515 #ifdef VMXNET3_RSS
4516 #endif
4517 #ifdef CONFIG_PM
4518 #endif
4519 #ifdef CONFIG_PM
4520 #endif
4521 /* LDV_COMMENT_END_PREP */
4522 /* content: static int vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)*/
4523 /* LDV_COMMENT_BEGIN_PREP */
4524 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
4525 #ifdef __BIG_ENDIAN_BITFIELD
4526 #endif
4527 #ifdef __BIG_ENDIAN_BITFIELD
4528 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
4529 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
4530 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
4531 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
4532 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
4533 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
4534 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
4535 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
4536 VMXNET3_TCD_GEN_SIZE)
4537 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
4538 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
4539 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
4540 (dstrcd) = (tmp); \
4541 vmxnet3_RxCompToCPU((rcd), (tmp)); \
4542 } while (0)
4543 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
4544 (dstrxd) = (tmp); \
4545 vmxnet3_RxDescToCPU((rxd), (tmp)); \
4546 } while (0)
4547 #else
4548 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
4549 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
4550 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
4551 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
4552 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
4553 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
4554 #endif
4555 #ifdef __BIG_ENDIAN_BITFIELD
4556 #endif
4557 #ifdef __BIG_ENDIAN_BITFIELD
4558 #else
4559 #endif
4560 #ifdef __BIG_ENDIAN_BITFIELD
4561 #endif
4562 #ifdef __BIG_ENDIAN_BITFIELD
4563 #endif
4564 #ifdef VMXNET3_RSS
4565 #endif
4566 #ifdef __BIG_ENDIAN_BITFIELD
4567 #endif
4568 #ifdef CONFIG_PCI_MSI
4569 #endif
4570 #ifdef CONFIG_NET_POLL_CONTROLLER
4571 #ifdef CONFIG_PCI_MSI
4572 #endif
4573 #endif
4574 #ifdef CONFIG_PCI_MSI
4575 #endif
4576 #ifdef CONFIG_PCI_MSI
4577 #endif
4578 #ifdef CONFIG_PCI_MSI
4579 #endif
4580 /* LDV_COMMENT_END_PREP */
4581 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_vlan_rx_kill_vid" */
4582 __be16 var_vmxnet3_vlan_rx_kill_vid_58_p1;
4583 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_vlan_rx_kill_vid" */
4584 u16 var_vmxnet3_vlan_rx_kill_vid_58_p2;
4585 /* LDV_COMMENT_BEGIN_PREP */
4586 #ifdef VMXNET3_RSS
4587 #endif
4588 #ifdef CONFIG_PCI_MSI
4589 #endif
4590 #ifdef CONFIG_PCI_MSI
4591 #endif
4592 #ifdef CONFIG_NET_POLL_CONTROLLER
4593 #endif
4594 #ifdef VMXNET3_RSS
4595 #endif
4596 #ifdef VMXNET3_RSS
4597 #endif
4598 #ifdef VMXNET3_RSS
4599 #endif
4600 #ifdef VMXNET3_RSS
4601 #endif
4602 #ifdef VMXNET3_RSS
4603 #endif
4604 #ifdef VMXNET3_RSS
4605 #endif
4606 #ifdef CONFIG_PM
4607 #endif
4608 #ifdef CONFIG_PM
4609 #endif
4610 /* LDV_COMMENT_END_PREP */
4611 /* content: static void vmxnet3_netpoll(struct net_device *netdev)*/
4612 /* LDV_COMMENT_BEGIN_PREP */
4613 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
4614 #ifdef __BIG_ENDIAN_BITFIELD
4615 #endif
4616 #ifdef __BIG_ENDIAN_BITFIELD
4617 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
4618 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
4619 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
4620 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
4621 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
4622 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
4623 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
4624 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
4625 VMXNET3_TCD_GEN_SIZE)
4626 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
4627 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
4628 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
4629 (dstrcd) = (tmp); \
4630 vmxnet3_RxCompToCPU((rcd), (tmp)); \
4631 } while (0)
4632 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
4633 (dstrxd) = (tmp); \
4634 vmxnet3_RxDescToCPU((rxd), (tmp)); \
4635 } while (0)
4636 #else
4637 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
4638 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
4639 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
4640 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
4641 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
4642 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
4643 #endif
4644 #ifdef __BIG_ENDIAN_BITFIELD
4645 #endif
4646 #ifdef __BIG_ENDIAN_BITFIELD
4647 #else
4648 #endif
4649 #ifdef __BIG_ENDIAN_BITFIELD
4650 #endif
4651 #ifdef __BIG_ENDIAN_BITFIELD
4652 #endif
4653 #ifdef VMXNET3_RSS
4654 #endif
4655 #ifdef __BIG_ENDIAN_BITFIELD
4656 #endif
4657 #ifdef CONFIG_PCI_MSI
4658 #endif
4659 #ifdef CONFIG_NET_POLL_CONTROLLER
4660 /* LDV_COMMENT_END_PREP */
4661 /* LDV_COMMENT_BEGIN_PREP */
4662 #endif
4663 #ifdef CONFIG_PCI_MSI
4664 #endif
4665 #ifdef CONFIG_PCI_MSI
4666 #endif
4667 #ifdef CONFIG_PCI_MSI
4668 #endif
4669 #ifdef VMXNET3_RSS
4670 #endif
4671 #ifdef CONFIG_PCI_MSI
4672 #endif
4673 #ifdef CONFIG_PCI_MSI
4674 #endif
4675 #ifdef CONFIG_NET_POLL_CONTROLLER
4676 #endif
4677 #ifdef VMXNET3_RSS
4678 #endif
4679 #ifdef VMXNET3_RSS
4680 #endif
4681 #ifdef VMXNET3_RSS
4682 #endif
4683 #ifdef VMXNET3_RSS
4684 #endif
4685 #ifdef VMXNET3_RSS
4686 #endif
4687 #ifdef VMXNET3_RSS
4688 #endif
4689 #ifdef CONFIG_PM
4690 #endif
4691 #ifdef CONFIG_PM
4692 #endif
4693 /* LDV_COMMENT_END_PREP */
4694
4695 /** STRUCT: struct type: dev_pm_ops, struct name: vmxnet3_pm_ops **/
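/* The suspend/resume callbacks below take a struct device *; the harness passes the shared stand-in pointer var_group3 declared further down in this section. */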
4696 /* content: static int vmxnet3_suspend(struct device *device)*/
4697 /* LDV_COMMENT_BEGIN_PREP */
4698 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
4699 #ifdef __BIG_ENDIAN_BITFIELD
4700 #endif
4701 #ifdef __BIG_ENDIAN_BITFIELD
4702 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
4703 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
4704 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
4705 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
4706 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
4707 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
4708 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
4709 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
4710 VMXNET3_TCD_GEN_SIZE)
4711 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
4712 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
4713 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
4714 (dstrcd) = (tmp); \
4715 vmxnet3_RxCompToCPU((rcd), (tmp)); \
4716 } while (0)
4717 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
4718 (dstrxd) = (tmp); \
4719 vmxnet3_RxDescToCPU((rxd), (tmp)); \
4720 } while (0)
4721 #else
4722 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
4723 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
4724 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
4725 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
4726 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
4727 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
4728 #endif
4729 #ifdef __BIG_ENDIAN_BITFIELD
4730 #endif
4731 #ifdef __BIG_ENDIAN_BITFIELD
4732 #else
4733 #endif
4734 #ifdef __BIG_ENDIAN_BITFIELD
4735 #endif
4736 #ifdef __BIG_ENDIAN_BITFIELD
4737 #endif
4738 #ifdef VMXNET3_RSS
4739 #endif
4740 #ifdef __BIG_ENDIAN_BITFIELD
4741 #endif
4742 #ifdef CONFIG_PCI_MSI
4743 #endif
4744 #ifdef CONFIG_NET_POLL_CONTROLLER
4745 #ifdef CONFIG_PCI_MSI
4746 #endif
4747 #endif
4748 #ifdef CONFIG_PCI_MSI
4749 #endif
4750 #ifdef CONFIG_PCI_MSI
4751 #endif
4752 #ifdef CONFIG_PCI_MSI
4753 #endif
4754 #ifdef VMXNET3_RSS
4755 #endif
4756 #ifdef CONFIG_PCI_MSI
4757 #endif
4758 #ifdef CONFIG_PCI_MSI
4759 #endif
4760 #ifdef CONFIG_NET_POLL_CONTROLLER
4761 #endif
4762 #ifdef VMXNET3_RSS
4763 #endif
4764 #ifdef VMXNET3_RSS
4765 #endif
4766 #ifdef VMXNET3_RSS
4767 #endif
4768 #ifdef VMXNET3_RSS
4769 #endif
4770 #ifdef VMXNET3_RSS
4771 #endif
4772 #ifdef VMXNET3_RSS
4773 #endif
4774 #ifdef CONFIG_PM
4775 /* LDV_COMMENT_END_PREP */
4776 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_suspend" */
4777 struct device * var_group3;
4778 /* LDV_COMMENT_BEGIN_PREP */
4779 #endif
4780 #ifdef CONFIG_PM
4781 #endif
4782 /* LDV_COMMENT_END_PREP */
4783 /* content: static int vmxnet3_resume(struct device *device)*/
4784 /* LDV_COMMENT_BEGIN_PREP */
4785 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
4786 #ifdef __BIG_ENDIAN_BITFIELD
4787 #endif
4788 #ifdef __BIG_ENDIAN_BITFIELD
4789 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
4790 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
4791 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
4792 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
4793 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
4794 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
4795 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
4796 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
4797 VMXNET3_TCD_GEN_SIZE)
4798 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
4799 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
4800 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
4801 (dstrcd) = (tmp); \
4802 vmxnet3_RxCompToCPU((rcd), (tmp)); \
4803 } while (0)
4804 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
4805 (dstrxd) = (tmp); \
4806 vmxnet3_RxDescToCPU((rxd), (tmp)); \
4807 } while (0)
4808 #else
4809 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
4810 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
4811 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
4812 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
4813 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
4814 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
4815 #endif
4816 #ifdef __BIG_ENDIAN_BITFIELD
4817 #endif
4818 #ifdef __BIG_ENDIAN_BITFIELD
4819 #else
4820 #endif
4821 #ifdef __BIG_ENDIAN_BITFIELD
4822 #endif
4823 #ifdef __BIG_ENDIAN_BITFIELD
4824 #endif
4825 #ifdef VMXNET3_RSS
4826 #endif
4827 #ifdef __BIG_ENDIAN_BITFIELD
4828 #endif
4829 #ifdef CONFIG_PCI_MSI
4830 #endif
4831 #ifdef CONFIG_NET_POLL_CONTROLLER
4832 #ifdef CONFIG_PCI_MSI
4833 #endif
4834 #endif
4835 #ifdef CONFIG_PCI_MSI
4836 #endif
4837 #ifdef CONFIG_PCI_MSI
4838 #endif
4839 #ifdef CONFIG_PCI_MSI
4840 #endif
4841 #ifdef VMXNET3_RSS
4842 #endif
4843 #ifdef CONFIG_PCI_MSI
4844 #endif
4845 #ifdef CONFIG_PCI_MSI
4846 #endif
4847 #ifdef CONFIG_NET_POLL_CONTROLLER
4848 #endif
4849 #ifdef VMXNET3_RSS
4850 #endif
4851 #ifdef VMXNET3_RSS
4852 #endif
4853 #ifdef VMXNET3_RSS
4854 #endif
4855 #ifdef VMXNET3_RSS
4856 #endif
4857 #ifdef VMXNET3_RSS
4858 #endif
4859 #ifdef VMXNET3_RSS
4860 #endif
4861 #ifdef CONFIG_PM
4862 /* LDV_COMMENT_END_PREP */
4863 /* LDV_COMMENT_BEGIN_PREP */
4864 #endif
4865 #ifdef CONFIG_PM
4866 #endif
4867 /* LDV_COMMENT_END_PREP */
4868
4869 /** STRUCT: struct type: pci_driver, struct name: vmxnet3_driver **/
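/* The pci_driver callbacks (probe/remove/shutdown) are exercised with the stand-in arguments var_group4 and var_vmxnet3_probe_device_84_p1; the probe return value is stored in res_vmxnet3_probe_device_84, presumably so that later scenarios only run against a successfully probed device. */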
4870 /* content: static int vmxnet3_probe_device(struct pci_dev *pdev, const struct pci_device_id *id)*/
4871 /* LDV_COMMENT_BEGIN_PREP */
4872 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
4873 #ifdef __BIG_ENDIAN_BITFIELD
4874 #endif
4875 #ifdef __BIG_ENDIAN_BITFIELD
4876 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
4877 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
4878 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
4879 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
4880 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
4881 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
4882 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
4883 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
4884 VMXNET3_TCD_GEN_SIZE)
4885 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
4886 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
4887 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
4888 (dstrcd) = (tmp); \
4889 vmxnet3_RxCompToCPU((rcd), (tmp)); \
4890 } while (0)
4891 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
4892 (dstrxd) = (tmp); \
4893 vmxnet3_RxDescToCPU((rxd), (tmp)); \
4894 } while (0)
4895 #else
4896 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
4897 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
4898 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
4899 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
4900 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
4901 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
4902 #endif
4903 #ifdef __BIG_ENDIAN_BITFIELD
4904 #endif
4905 #ifdef __BIG_ENDIAN_BITFIELD
4906 #else
4907 #endif
4908 #ifdef __BIG_ENDIAN_BITFIELD
4909 #endif
4910 #ifdef __BIG_ENDIAN_BITFIELD
4911 #endif
4912 #ifdef VMXNET3_RSS
4913 #endif
4914 #ifdef __BIG_ENDIAN_BITFIELD
4915 #endif
4916 #ifdef CONFIG_PCI_MSI
4917 #endif
4918 #ifdef CONFIG_NET_POLL_CONTROLLER
4919 #ifdef CONFIG_PCI_MSI
4920 #endif
4921 #endif
4922 #ifdef CONFIG_PCI_MSI
4923 #endif
4924 #ifdef CONFIG_PCI_MSI
4925 #endif
4926 #ifdef CONFIG_PCI_MSI
4927 #endif
4928 #ifdef VMXNET3_RSS
4929 #endif
4930 #ifdef CONFIG_PCI_MSI
4931 #endif
4932 #ifdef CONFIG_PCI_MSI
4933 #endif
4934 /* LDV_COMMENT_END_PREP */
4935 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_probe_device" */
4936 struct pci_dev * var_group4;
4937 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_probe_device" */
4938 const struct pci_device_id * var_vmxnet3_probe_device_84_p1;
4939 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "vmxnet3_probe_device" */
4940 static int res_vmxnet3_probe_device_84;
4941 /* LDV_COMMENT_BEGIN_PREP */
4942 #ifdef VMXNET3_RSS
4943 #endif
4944 #ifdef VMXNET3_RSS
4945 #endif
4946 #ifdef CONFIG_PM
4947 #endif
4948 #ifdef CONFIG_PM
4949 #endif
4950 /* LDV_COMMENT_END_PREP */
4951 /* content: static void vmxnet3_remove_device(struct pci_dev *pdev)*/
4952 /* LDV_COMMENT_BEGIN_PREP */
4953 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
4954 #ifdef __BIG_ENDIAN_BITFIELD
4955 #endif
4956 #ifdef __BIG_ENDIAN_BITFIELD
4957 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
4958 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
4959 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
4960 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
4961 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
4962 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
4963 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
4964 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
4965 VMXNET3_TCD_GEN_SIZE)
4966 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
4967 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
4968 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
4969 (dstrcd) = (tmp); \
4970 vmxnet3_RxCompToCPU((rcd), (tmp)); \
4971 } while (0)
4972 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
4973 (dstrxd) = (tmp); \
4974 vmxnet3_RxDescToCPU((rxd), (tmp)); \
4975 } while (0)
4976 #else
4977 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
4978 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
4979 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
4980 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
4981 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
4982 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
4983 #endif
4984 #ifdef __BIG_ENDIAN_BITFIELD
4985 #endif
4986 #ifdef __BIG_ENDIAN_BITFIELD
4987 #else
4988 #endif
4989 #ifdef __BIG_ENDIAN_BITFIELD
4990 #endif
4991 #ifdef __BIG_ENDIAN_BITFIELD
4992 #endif
4993 #ifdef VMXNET3_RSS
4994 #endif
4995 #ifdef __BIG_ENDIAN_BITFIELD
4996 #endif
4997 #ifdef CONFIG_PCI_MSI
4998 #endif
4999 #ifdef CONFIG_NET_POLL_CONTROLLER
5000 #ifdef CONFIG_PCI_MSI
5001 #endif
5002 #endif
5003 #ifdef CONFIG_PCI_MSI
5004 #endif
5005 #ifdef CONFIG_PCI_MSI
5006 #endif
5007 #ifdef CONFIG_PCI_MSI
5008 #endif
5009 #ifdef VMXNET3_RSS
5010 #endif
5011 #ifdef CONFIG_PCI_MSI
5012 #endif
5013 #ifdef CONFIG_PCI_MSI
5014 #endif
5015 #ifdef CONFIG_NET_POLL_CONTROLLER
5016 #endif
5017 #ifdef VMXNET3_RSS
5018 #endif
5019 #ifdef VMXNET3_RSS
5020 #endif
5021 #ifdef VMXNET3_RSS
5022 #endif
5023 #ifdef VMXNET3_RSS
5024 #endif
5025 /* LDV_COMMENT_END_PREP */
5026 /* LDV_COMMENT_BEGIN_PREP */
5027 #ifdef CONFIG_PM
5028 #endif
5029 #ifdef CONFIG_PM
5030 #endif
5031 /* LDV_COMMENT_END_PREP */
5032 /* content: static void vmxnet3_shutdown_device(struct pci_dev *pdev)*/
5033 /* LDV_COMMENT_BEGIN_PREP */
5034 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
5035 #ifdef __BIG_ENDIAN_BITFIELD
5036 #endif
5037 #ifdef __BIG_ENDIAN_BITFIELD
5038 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
5039 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
5040 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
5041 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
5042 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
5043 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
5044 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
5045 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
5046 VMXNET3_TCD_GEN_SIZE)
5047 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
5048 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
5049 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
5050 (dstrcd) = (tmp); \
5051 vmxnet3_RxCompToCPU((rcd), (tmp)); \
5052 } while (0)
5053 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
5054 (dstrxd) = (tmp); \
5055 vmxnet3_RxDescToCPU((rxd), (tmp)); \
5056 } while (0)
5057 #else
5058 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
5059 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
5060 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
5061 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
5062 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
5063 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
5064 #endif
5065 #ifdef __BIG_ENDIAN_BITFIELD
5066 #endif
5067 #ifdef __BIG_ENDIAN_BITFIELD
5068 #else
5069 #endif
5070 #ifdef __BIG_ENDIAN_BITFIELD
5071 #endif
5072 #ifdef __BIG_ENDIAN_BITFIELD
5073 #endif
5074 #ifdef VMXNET3_RSS
5075 #endif
5076 #ifdef __BIG_ENDIAN_BITFIELD
5077 #endif
5078 #ifdef CONFIG_PCI_MSI
5079 #endif
5080 #ifdef CONFIG_NET_POLL_CONTROLLER
5081 #ifdef CONFIG_PCI_MSI
5082 #endif
5083 #endif
5084 #ifdef CONFIG_PCI_MSI
5085 #endif
5086 #ifdef CONFIG_PCI_MSI
5087 #endif
5088 #ifdef CONFIG_PCI_MSI
5089 #endif
5090 #ifdef VMXNET3_RSS
5091 #endif
5092 #ifdef CONFIG_PCI_MSI
5093 #endif
5094 #ifdef CONFIG_PCI_MSI
5095 #endif
5096 #ifdef CONFIG_NET_POLL_CONTROLLER
5097 #endif
5098 #ifdef VMXNET3_RSS
5099 #endif
5100 #ifdef VMXNET3_RSS
5101 #endif
5102 #ifdef VMXNET3_RSS
5103 #endif
5104 #ifdef VMXNET3_RSS
5105 #endif
5106 #ifdef VMXNET3_RSS
5107 #endif
5108 #ifdef VMXNET3_RSS
5109 #endif
5110 /* LDV_COMMENT_END_PREP */
5111 /* LDV_COMMENT_BEGIN_PREP */
5112 #ifdef CONFIG_PM
5113 #endif
5114 #ifdef CONFIG_PM
5115 #endif
5116 /* LDV_COMMENT_END_PREP */
5117
5118 /** CALLBACK SECTION request_irq **/
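/* Parameters for the interrupt handlers that the driver registers via request_irq(): the shared INTx handler vmxnet3_intr and the MSI-X event/rx/tx handlers declared below. */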
5119 /* content: static irqreturn_t vmxnet3_intr(int irq, void *dev_id)*/
5120 /* LDV_COMMENT_BEGIN_PREP */
5121 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
5122 #ifdef __BIG_ENDIAN_BITFIELD
5123 #endif
5124 #ifdef __BIG_ENDIAN_BITFIELD
5125 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
5126 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
5127 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
5128 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
5129 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
5130 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
5131 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
5132 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
5133 VMXNET3_TCD_GEN_SIZE)
5134 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
5135 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
5136 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
5137 (dstrcd) = (tmp); \
5138 vmxnet3_RxCompToCPU((rcd), (tmp)); \
5139 } while (0)
5140 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
5141 (dstrxd) = (tmp); \
5142 vmxnet3_RxDescToCPU((rxd), (tmp)); \
5143 } while (0)
5144 #else
5145 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
5146 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
5147 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
5148 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
5149 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
5150 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
5151 #endif
5152 #ifdef __BIG_ENDIAN_BITFIELD
5153 #endif
5154 #ifdef __BIG_ENDIAN_BITFIELD
5155 #else
5156 #endif
5157 #ifdef __BIG_ENDIAN_BITFIELD
5158 #endif
5159 #ifdef __BIG_ENDIAN_BITFIELD
5160 #endif
5161 #ifdef VMXNET3_RSS
5162 #endif
5163 #ifdef __BIG_ENDIAN_BITFIELD
5164 #endif
5165 #ifdef CONFIG_PCI_MSI
5166 #endif
5167 /* LDV_COMMENT_END_PREP */
5168 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_intr" */
5169 int var_vmxnet3_intr_52_p0;
5170 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_intr" */
5171 void * var_vmxnet3_intr_52_p1;
5172 /* LDV_COMMENT_BEGIN_PREP */
5173 #ifdef CONFIG_NET_POLL_CONTROLLER
5174 #ifdef CONFIG_PCI_MSI
5175 #endif
5176 #endif
5177 #ifdef CONFIG_PCI_MSI
5178 #endif
5179 #ifdef CONFIG_PCI_MSI
5180 #endif
5181 #ifdef CONFIG_PCI_MSI
5182 #endif
5183 #ifdef VMXNET3_RSS
5184 #endif
5185 #ifdef CONFIG_PCI_MSI
5186 #endif
5187 #ifdef CONFIG_PCI_MSI
5188 #endif
5189 #ifdef CONFIG_NET_POLL_CONTROLLER
5190 #endif
5191 #ifdef VMXNET3_RSS
5192 #endif
5193 #ifdef VMXNET3_RSS
5194 #endif
5195 #ifdef VMXNET3_RSS
5196 #endif
5197 #ifdef VMXNET3_RSS
5198 #endif
5199 #ifdef VMXNET3_RSS
5200 #endif
5201 #ifdef VMXNET3_RSS
5202 #endif
5203 #ifdef CONFIG_PM
5204 #endif
5205 #ifdef CONFIG_PM
5206 #endif
5207 /* LDV_COMMENT_END_PREP */
5208 /* content: static irqreturn_t vmxnet3_msix_event(int irq, void *data)*/
5209 /* LDV_COMMENT_BEGIN_PREP */
5210 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
5211 #ifdef __BIG_ENDIAN_BITFIELD
5212 #endif
5213 #ifdef __BIG_ENDIAN_BITFIELD
5214 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
5215 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
5216 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
5217 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
5218 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
5219 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
5220 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
5221 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
5222 VMXNET3_TCD_GEN_SIZE)
5223 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
5224 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
5225 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
5226 (dstrcd) = (tmp); \
5227 vmxnet3_RxCompToCPU((rcd), (tmp)); \
5228 } while (0)
5229 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
5230 (dstrxd) = (tmp); \
5231 vmxnet3_RxDescToCPU((rxd), (tmp)); \
5232 } while (0)
5233 #else
5234 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
5235 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
5236 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
5237 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
5238 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
5239 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
5240 #endif
5241 #ifdef __BIG_ENDIAN_BITFIELD
5242 #endif
5243 #ifdef __BIG_ENDIAN_BITFIELD
5244 #else
5245 #endif
5246 #ifdef __BIG_ENDIAN_BITFIELD
5247 #endif
5248 #ifdef __BIG_ENDIAN_BITFIELD
5249 #endif
5250 #ifdef VMXNET3_RSS
5251 #endif
5252 #ifdef __BIG_ENDIAN_BITFIELD
5253 #endif
5254 #ifdef CONFIG_PCI_MSI
5255 /* LDV_COMMENT_END_PREP */
5256 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_msix_event" */
5257 int var_vmxnet3_msix_event_51_p0;
5258 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_msix_event" */
5259 void * var_vmxnet3_msix_event_51_p1;
5260 /* LDV_COMMENT_BEGIN_PREP */
5261 #endif
5262 #ifdef CONFIG_NET_POLL_CONTROLLER
5263 #ifdef CONFIG_PCI_MSI
5264 #endif
5265 #endif
5266 #ifdef CONFIG_PCI_MSI
5267 #endif
5268 #ifdef CONFIG_PCI_MSI
5269 #endif
5270 #ifdef CONFIG_PCI_MSI
5271 #endif
5272 #ifdef VMXNET3_RSS
5273 #endif
5274 #ifdef CONFIG_PCI_MSI
5275 #endif
5276 #ifdef CONFIG_PCI_MSI
5277 #endif
5278 #ifdef CONFIG_NET_POLL_CONTROLLER
5279 #endif
5280 #ifdef VMXNET3_RSS
5281 #endif
5282 #ifdef VMXNET3_RSS
5283 #endif
5284 #ifdef VMXNET3_RSS
5285 #endif
5286 #ifdef VMXNET3_RSS
5287 #endif
5288 #ifdef VMXNET3_RSS
5289 #endif
5290 #ifdef VMXNET3_RSS
5291 #endif
5292 #ifdef CONFIG_PM
5293 #endif
5294 #ifdef CONFIG_PM
5295 #endif
5296 /* LDV_COMMENT_END_PREP */
5297 /* content: static irqreturn_t vmxnet3_msix_rx(int irq, void *data)*/
5298 /* LDV_COMMENT_BEGIN_PREP */
5299 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
5300 #ifdef __BIG_ENDIAN_BITFIELD
5301 #endif
5302 #ifdef __BIG_ENDIAN_BITFIELD
5303 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
5304 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
5305 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
5306 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
5307 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
5308 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
5309 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
5310 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
5311 VMXNET3_TCD_GEN_SIZE)
5312 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
5313 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
5314 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
5315 (dstrcd) = (tmp); \
5316 vmxnet3_RxCompToCPU((rcd), (tmp)); \
5317 } while (0)
5318 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
5319 (dstrxd) = (tmp); \
5320 vmxnet3_RxDescToCPU((rxd), (tmp)); \
5321 } while (0)
5322 #else
5323 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
5324 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
5325 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
5326 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
5327 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
5328 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
5329 #endif
5330 #ifdef __BIG_ENDIAN_BITFIELD
5331 #endif
5332 #ifdef __BIG_ENDIAN_BITFIELD
5333 #else
5334 #endif
5335 #ifdef __BIG_ENDIAN_BITFIELD
5336 #endif
5337 #ifdef __BIG_ENDIAN_BITFIELD
5338 #endif
5339 #ifdef VMXNET3_RSS
5340 #endif
5341 #ifdef __BIG_ENDIAN_BITFIELD
5342 #endif
5343 #ifdef CONFIG_PCI_MSI
5344 /* LDV_COMMENT_END_PREP */
5345 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_msix_rx" */
5346 int var_vmxnet3_msix_rx_50_p0;
5347 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_msix_rx" */
5348 void * var_vmxnet3_msix_rx_50_p1;
5349 /* LDV_COMMENT_BEGIN_PREP */
5350 #endif
5351 #ifdef CONFIG_NET_POLL_CONTROLLER
5352 #ifdef CONFIG_PCI_MSI
5353 #endif
5354 #endif
5355 #ifdef CONFIG_PCI_MSI
5356 #endif
5357 #ifdef CONFIG_PCI_MSI
5358 #endif
5359 #ifdef CONFIG_PCI_MSI
5360 #endif
5361 #ifdef VMXNET3_RSS
5362 #endif
5363 #ifdef CONFIG_PCI_MSI
5364 #endif
5365 #ifdef CONFIG_PCI_MSI
5366 #endif
5367 #ifdef CONFIG_NET_POLL_CONTROLLER
5368 #endif
5369 #ifdef VMXNET3_RSS
5370 #endif
5371 #ifdef VMXNET3_RSS
5372 #endif
5373 #ifdef VMXNET3_RSS
5374 #endif
5375 #ifdef VMXNET3_RSS
5376 #endif
5377 #ifdef VMXNET3_RSS
5378 #endif
5379 #ifdef VMXNET3_RSS
5380 #endif
5381 #ifdef CONFIG_PM
5382 #endif
5383 #ifdef CONFIG_PM
5384 #endif
5385 /* LDV_COMMENT_END_PREP */
5386 /* content: static irqreturn_t vmxnet3_msix_tx(int irq, void *data)*/
5387 /* LDV_COMMENT_BEGIN_PREP */
5388 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
5389 #ifdef __BIG_ENDIAN_BITFIELD
5390 #endif
5391 #ifdef __BIG_ENDIAN_BITFIELD
5392 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
5393 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
5394 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
5395 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
5396 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
5397 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
5398 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
5399 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
5400 VMXNET3_TCD_GEN_SIZE)
5401 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
5402 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
5403 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
5404 (dstrcd) = (tmp); \
5405 vmxnet3_RxCompToCPU((rcd), (tmp)); \
5406 } while (0)
5407 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
5408 (dstrxd) = (tmp); \
5409 vmxnet3_RxDescToCPU((rxd), (tmp)); \
5410 } while (0)
5411 #else
5412 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
5413 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
5414 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
5415 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
5416 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
5417 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
5418 #endif
5419 #ifdef __BIG_ENDIAN_BITFIELD
5420 #endif
5421 #ifdef __BIG_ENDIAN_BITFIELD
5422 #else
5423 #endif
5424 #ifdef __BIG_ENDIAN_BITFIELD
5425 #endif
5426 #ifdef __BIG_ENDIAN_BITFIELD
5427 #endif
5428 #ifdef VMXNET3_RSS
5429 #endif
5430 #ifdef __BIG_ENDIAN_BITFIELD
5431 #endif
5432 #ifdef CONFIG_PCI_MSI
5433 /* LDV_COMMENT_END_PREP */
5434 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_msix_tx" */
5435 int var_vmxnet3_msix_tx_49_p0;
5436 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vmxnet3_msix_tx" */
5437 void * var_vmxnet3_msix_tx_49_p1;
5438 /* LDV_COMMENT_BEGIN_PREP */
5439 #endif
5440 #ifdef CONFIG_NET_POLL_CONTROLLER
5441 #ifdef CONFIG_PCI_MSI
5442 #endif
5443 #endif
5444 #ifdef CONFIG_PCI_MSI
5445 #endif
5446 #ifdef CONFIG_PCI_MSI
5447 #endif
5448 #ifdef CONFIG_PCI_MSI
5449 #endif
5450 #ifdef VMXNET3_RSS
5451 #endif
5452 #ifdef CONFIG_PCI_MSI
5453 #endif
5454 #ifdef CONFIG_PCI_MSI
5455 #endif
5456 #ifdef CONFIG_NET_POLL_CONTROLLER
5457 #endif
5458 #ifdef VMXNET3_RSS
5459 #endif
5460 #ifdef VMXNET3_RSS
5461 #endif
5462 #ifdef VMXNET3_RSS
5463 #endif
5464 #ifdef VMXNET3_RSS
5465 #endif
5466 #ifdef VMXNET3_RSS
5467 #endif
5468 #ifdef VMXNET3_RSS
5469 #endif
5470 #ifdef CONFIG_PM
5471 #endif
5472 #ifdef CONFIG_PM
5473 #endif
5474 /* LDV_COMMENT_END_PREP */
5475
5476
5477
5478
5479 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
5480 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
5481 /*============================= VARIABLE INITIALIZING PART =============================*/
5482 LDV_IN_INTERRUPT=1;
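/* In typical LDV environment models LDV_IN_INTERRUPT == 1 appears to mark ordinary process context; interrupt-handler wrappers raise it while a handler runs. */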
5483
5484
5485
5486
5487 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
5488 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
5489 /*============================= FUNCTION CALL SECTION =============================*/
5490 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
5491 ldv_initialize();
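/* ldv_initialize() presumably resets the verifier's model state (locks, reference counters, etc.) before any driver code is executed. */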
5492
5493 /** INIT: init_type: ST_MODULE_INIT **/
5494 /* content: static int __init vmxnet3_init_module(void)*/
5495 /* LDV_COMMENT_BEGIN_PREP */
5496 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
5497 #ifdef __BIG_ENDIAN_BITFIELD
5498 #endif
5499 #ifdef __BIG_ENDIAN_BITFIELD
5500 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
5501 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
5502 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
5503 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
5504 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
5505 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
5506 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
5507 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
5508 VMXNET3_TCD_GEN_SIZE)
5509 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
5510 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
5511 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
5512 (dstrcd) = (tmp); \
5513 vmxnet3_RxCompToCPU((rcd), (tmp)); \
5514 } while (0)
5515 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
5516 (dstrxd) = (tmp); \
5517 vmxnet3_RxDescToCPU((rxd), (tmp)); \
5518 } while (0)
5519 #else
5520 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
5521 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
5522 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
5523 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
5524 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
5525 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
5526 #endif
5527 #ifdef __BIG_ENDIAN_BITFIELD
5528 #endif
5529 #ifdef __BIG_ENDIAN_BITFIELD
5530 #else
5531 #endif
5532 #ifdef __BIG_ENDIAN_BITFIELD
5533 #endif
5534 #ifdef __BIG_ENDIAN_BITFIELD
5535 #endif
5536 #ifdef VMXNET3_RSS
5537 #endif
5538 #ifdef __BIG_ENDIAN_BITFIELD
5539 #endif
5540 #ifdef CONFIG_PCI_MSI
5541 #endif
5542 #ifdef CONFIG_NET_POLL_CONTROLLER
5543 #ifdef CONFIG_PCI_MSI
5544 #endif
5545 #endif
5546 #ifdef CONFIG_PCI_MSI
5547 #endif
5548 #ifdef CONFIG_PCI_MSI
5549 #endif
5550 #ifdef CONFIG_PCI_MSI
5551 #endif
5552 #ifdef VMXNET3_RSS
5553 #endif
5554 #ifdef CONFIG_PCI_MSI
5555 #endif
5556 #ifdef CONFIG_PCI_MSI
5557 #endif
5558 #ifdef CONFIG_NET_POLL_CONTROLLER
5559 #endif
5560 #ifdef VMXNET3_RSS
5561 #endif
5562 #ifdef VMXNET3_RSS
5563 #endif
5564 #ifdef VMXNET3_RSS
5565 #endif
5566 #ifdef VMXNET3_RSS
5567 #endif
5568 #ifdef VMXNET3_RSS
5569 #endif
5570 #ifdef VMXNET3_RSS
5571 #endif
5572 #ifdef CONFIG_PM
5573 #endif
5574 #ifdef CONFIG_PM
5575 #endif
5576 /* LDV_COMMENT_END_PREP */
5577 /* LDV_COMMENT_FUNCTION_CALL The kernel calls the driver init function after the module is loaded into the kernel. This function is declared with "module_init(function name)". */
5578 ldv_handler_precall();
5579 if(vmxnet3_init_module())
5580 goto ldv_final;
5581 int ldv_s_vmxnet3_netdev_ops_net_device_ops = 0;
5582
5583
5584
5585
5586 int ldv_s_vmxnet3_driver_pci_driver = 0;
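/* Both ldv_s_* counters start at 0 and record whether a paired callback is still pending: the netdev counter tracks open/close, and the pci_driver counter presumably tracks probe/remove in the same way. */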
5587
5588
5589
5590
5591
5592 while( nondet_int()
5593 || !(ldv_s_vmxnet3_netdev_ops_net_device_ops == 0)
5594 || !(ldv_s_vmxnet3_driver_pci_driver == 0)
5595 ) {
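/* Driver-scenario loop: nondet_int() lets the verifier pick an arbitrary sequence of callback invocations; the loop terminates only once it also chooses to stop and both ldv_s_* counters are back to 0, i.e. every open/probe has been matched by its close/remove. */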
5596
5597 switch(nondet_int()) {
5598
5599 case 0: {
5600
5601 /** STRUCT: struct type: net_device_ops, struct name: vmxnet3_netdev_ops **/
5602 if(ldv_s_vmxnet3_netdev_ops_net_device_ops==0) {
5603
5604 /* content: static int vmxnet3_open(struct net_device *netdev)*/
5605 /* LDV_COMMENT_BEGIN_PREP */
5606 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
5607 #ifdef __BIG_ENDIAN_BITFIELD
5608 #endif
5609 #ifdef __BIG_ENDIAN_BITFIELD
5610 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
5611 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
5612 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
5613 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
5614 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
5615 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
5616 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
5617 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
5618 VMXNET3_TCD_GEN_SIZE)
5619 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
5620 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
5621 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
5622 (dstrcd) = (tmp); \
5623 vmxnet3_RxCompToCPU((rcd), (tmp)); \
5624 } while (0)
5625 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
5626 (dstrxd) = (tmp); \
5627 vmxnet3_RxDescToCPU((rxd), (tmp)); \
5628 } while (0)
5629 #else
5630 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
5631 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
5632 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
5633 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
5634 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
5635 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
5636 #endif
5637 #ifdef __BIG_ENDIAN_BITFIELD
5638 #endif
5639 #ifdef __BIG_ENDIAN_BITFIELD
5640 #else
5641 #endif
5642 #ifdef __BIG_ENDIAN_BITFIELD
5643 #endif
5644 #ifdef __BIG_ENDIAN_BITFIELD
5645 #endif
5646 #ifdef VMXNET3_RSS
5647 #endif
5648 #ifdef __BIG_ENDIAN_BITFIELD
5649 #endif
5650 #ifdef CONFIG_PCI_MSI
5651 #endif
5652 #ifdef CONFIG_NET_POLL_CONTROLLER
5653 #ifdef CONFIG_PCI_MSI
5654 #endif
5655 #endif
5656 #ifdef CONFIG_PCI_MSI
5657 #endif
5658 #ifdef CONFIG_PCI_MSI
5659 #endif
5660 #ifdef CONFIG_PCI_MSI
5661 #endif
5662 #ifdef VMXNET3_RSS
5663 #endif
5664 /* LDV_COMMENT_END_PREP */
5665 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_open" from driver structure with callbacks "vmxnet3_netdev_ops". Standard function test for correct return result. */
5666 ldv_handler_precall();
5667 res_vmxnet3_open_73 = vmxnet3_open( var_group1);
5668 ldv_check_return_value(res_vmxnet3_open_73);
5669 if(res_vmxnet3_open_73 < 0)
5670 goto ldv_module_exit;
5671 /* LDV_COMMENT_BEGIN_PREP */
5672 #ifdef CONFIG_PCI_MSI
5673 #endif
5674 #ifdef CONFIG_PCI_MSI
5675 #endif
5676 #ifdef CONFIG_NET_POLL_CONTROLLER
5677 #endif
5678 #ifdef VMXNET3_RSS
5679 #endif
5680 #ifdef VMXNET3_RSS
5681 #endif
5682 #ifdef VMXNET3_RSS
5683 #endif
5684 #ifdef VMXNET3_RSS
5685 #endif
5686 #ifdef VMXNET3_RSS
5687 #endif
5688 #ifdef VMXNET3_RSS
5689 #endif
5690 #ifdef CONFIG_PM
5691 #endif
5692 #ifdef CONFIG_PM
5693 #endif
5694 /* LDV_COMMENT_END_PREP */
5695 ldv_s_vmxnet3_netdev_ops_net_device_ops++;
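/* open succeeded: advance the scenario counter so that ndo_stop (case 1) becomes reachable on a later iteration. */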
5696
5697 }
5698
5699 }
5700
5701 break;
5702 case 1: {
5703
5704 /** STRUCT: struct type: net_device_ops, struct name: vmxnet3_netdev_ops **/
5705 if(ldv_s_vmxnet3_netdev_ops_net_device_ops==1) {
5706
5707 /* content: static int vmxnet3_close(struct net_device *netdev)*/
5708 /* LDV_COMMENT_BEGIN_PREP */
5709 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
5710 #ifdef __BIG_ENDIAN_BITFIELD
5711 #endif
5712 #ifdef __BIG_ENDIAN_BITFIELD
5713 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
5714 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
5715 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
5716 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
5717 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
5718 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
5719 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
5720 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
5721 VMXNET3_TCD_GEN_SIZE)
5722 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
5723 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
5724 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
5725 (dstrcd) = (tmp); \
5726 vmxnet3_RxCompToCPU((rcd), (tmp)); \
5727 } while (0)
5728 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
5729 (dstrxd) = (tmp); \
5730 vmxnet3_RxDescToCPU((rxd), (tmp)); \
5731 } while (0)
5732 #else
5733 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
5734 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
5735 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
5736 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
5737 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
5738 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
5739 #endif
5740 #ifdef __BIG_ENDIAN_BITFIELD
5741 #endif
5742 #ifdef __BIG_ENDIAN_BITFIELD
5743 #else
5744 #endif
5745 #ifdef __BIG_ENDIAN_BITFIELD
5746 #endif
5747 #ifdef __BIG_ENDIAN_BITFIELD
5748 #endif
5749 #ifdef VMXNET3_RSS
5750 #endif
5751 #ifdef __BIG_ENDIAN_BITFIELD
5752 #endif
5753 #ifdef CONFIG_PCI_MSI
5754 #endif
5755 #ifdef CONFIG_NET_POLL_CONTROLLER
5756 #ifdef CONFIG_PCI_MSI
5757 #endif
5758 #endif
5759 #ifdef CONFIG_PCI_MSI
5760 #endif
5761 #ifdef CONFIG_PCI_MSI
5762 #endif
5763 #ifdef CONFIG_PCI_MSI
5764 #endif
5765 #ifdef VMXNET3_RSS
5766 #endif
5767 /* LDV_COMMENT_END_PREP */
5768 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_stop" from driver structure with callbacks "vmxnet3_netdev_ops". Standard function test for correct return result. */
5769 ldv_handler_precall();
5770 res_vmxnet3_close_74 = vmxnet3_close( var_group1);
5771 ldv_check_return_value(res_vmxnet3_close_74);
5772 if(res_vmxnet3_close_74)
5773 goto ldv_module_exit;
5774 /* LDV_COMMENT_BEGIN_PREP */
5775 #ifdef CONFIG_PCI_MSI
5776 #endif
5777 #ifdef CONFIG_PCI_MSI
5778 #endif
5779 #ifdef CONFIG_NET_POLL_CONTROLLER
5780 #endif
5781 #ifdef VMXNET3_RSS
5782 #endif
5783 #ifdef VMXNET3_RSS
5784 #endif
5785 #ifdef VMXNET3_RSS
5786 #endif
5787 #ifdef VMXNET3_RSS
5788 #endif
5789 #ifdef VMXNET3_RSS
5790 #endif
5791 #ifdef VMXNET3_RSS
5792 #endif
5793 #ifdef CONFIG_PM
5794 #endif
5795 #ifdef CONFIG_PM
5796 #endif
5797 /* LDV_COMMENT_END_PREP */
5798 ldv_s_vmxnet3_netdev_ops_net_device_ops=0;
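/* close succeeded: reset the counter so another open/close pair may follow. */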
5799
5800 }
5801
5802 }
5803
5804 break;
5805 case 2: {
5806
5807 /** STRUCT: struct type: net_device_ops, struct name: vmxnet3_netdev_ops **/
5808
5809
5810 /* content: static netdev_tx_t vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)*/
5811 /* LDV_COMMENT_BEGIN_PREP */
5812 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
5813 #ifdef __BIG_ENDIAN_BITFIELD
5814 #endif
5815 #ifdef __BIG_ENDIAN_BITFIELD
5816 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
5817 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
5818 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
5819 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
5820 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
5821 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
5822 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
5823 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
5824 VMXNET3_TCD_GEN_SIZE)
5825 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
5826 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
5827 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
5828 (dstrcd) = (tmp); \
5829 vmxnet3_RxCompToCPU((rcd), (tmp)); \
5830 } while (0)
5831 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
5832 (dstrxd) = (tmp); \
5833 vmxnet3_RxDescToCPU((rxd), (tmp)); \
5834 } while (0)
5835 #else
5836 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
5837 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
5838 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
5839 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
5840 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
5841 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
5842 #endif
5843 #ifdef __BIG_ENDIAN_BITFIELD
5844 #endif
5845 #ifdef __BIG_ENDIAN_BITFIELD
5846 #else
5847 #endif
5848 #ifdef __BIG_ENDIAN_BITFIELD
5849 #endif
5850 /* LDV_COMMENT_END_PREP */
5851 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_start_xmit" from driver structure with callbacks "vmxnet3_netdev_ops" */
5852 ldv_handler_precall();
5853 vmxnet3_xmit_frame( var_group2, var_group1);
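/* ndo_start_xmit returns netdev_tx_t; the harness does not check the return value here, unlike the open/close cases above. */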
5854 /* LDV_COMMENT_BEGIN_PREP */
5855 #ifdef __BIG_ENDIAN_BITFIELD
5856 #endif
5857 #ifdef VMXNET3_RSS
5858 #endif
5859 #ifdef __BIG_ENDIAN_BITFIELD
5860 #endif
5861 #ifdef CONFIG_PCI_MSI
5862 #endif
5863 #ifdef CONFIG_NET_POLL_CONTROLLER
5864 #ifdef CONFIG_PCI_MSI
5865 #endif
5866 #endif
5867 #ifdef CONFIG_PCI_MSI
5868 #endif
5869 #ifdef CONFIG_PCI_MSI
5870 #endif
5871 #ifdef CONFIG_PCI_MSI
5872 #endif
5873 #ifdef VMXNET3_RSS
5874 #endif
5875 #ifdef CONFIG_PCI_MSI
5876 #endif
5877 #ifdef CONFIG_PCI_MSI
5878 #endif
5879 #ifdef CONFIG_NET_POLL_CONTROLLER
5880 #endif
5881 #ifdef VMXNET3_RSS
5882 #endif
5883 #ifdef VMXNET3_RSS
5884 #endif
5885 #ifdef VMXNET3_RSS
5886 #endif
5887 #ifdef VMXNET3_RSS
5888 #endif
5889 #ifdef VMXNET3_RSS
5890 #endif
5891 #ifdef VMXNET3_RSS
5892 #endif
5893 #ifdef CONFIG_PM
5894 #endif
5895 #ifdef CONFIG_PM
5896 #endif
5897 /* LDV_COMMENT_END_PREP */
5898
5899
5900
5901
5902 }
5903
5904 break;
5905 case 3: {
5906
5907 /** STRUCT: struct type: net_device_ops, struct name: vmxnet3_netdev_ops **/
5908
5909
5910 /* content: static int vmxnet3_set_mac_addr(struct net_device *netdev, void *p)*/
5911 /* LDV_COMMENT_BEGIN_PREP */
5912 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
5913 #ifdef __BIG_ENDIAN_BITFIELD
5914 #endif
5915 #ifdef __BIG_ENDIAN_BITFIELD
5916 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
5917 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
5918 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
5919 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
5920 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
5921 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
5922 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
5923 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
5924 VMXNET3_TCD_GEN_SIZE)
5925 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
5926 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
5927 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
5928 (dstrcd) = (tmp); \
5929 vmxnet3_RxCompToCPU((rcd), (tmp)); \
5930 } while (0)
5931 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
5932 (dstrxd) = (tmp); \
5933 vmxnet3_RxDescToCPU((rxd), (tmp)); \
5934 } while (0)
5935 #else
5936 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
5937 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
5938 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
5939 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
5940 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
5941 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
5942 #endif
5943 #ifdef __BIG_ENDIAN_BITFIELD
5944 #endif
5945 #ifdef __BIG_ENDIAN_BITFIELD
5946 #else
5947 #endif
5948 #ifdef __BIG_ENDIAN_BITFIELD
5949 #endif
5950 #ifdef __BIG_ENDIAN_BITFIELD
5951 #endif
5952 #ifdef VMXNET3_RSS
5953 #endif
5954 #ifdef __BIG_ENDIAN_BITFIELD
5955 #endif
5956 #ifdef CONFIG_PCI_MSI
5957 #endif
5958 #ifdef CONFIG_NET_POLL_CONTROLLER
5959 #ifdef CONFIG_PCI_MSI
5960 #endif
5961 #endif
5962 #ifdef CONFIG_PCI_MSI
5963 #endif
5964 #ifdef CONFIG_PCI_MSI
5965 #endif
5966 #ifdef CONFIG_PCI_MSI
5967 #endif
5968 #ifdef VMXNET3_RSS
5969 #endif
5970 /* LDV_COMMENT_END_PREP */
5971 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_set_mac_address" from driver structure with callbacks "vmxnet3_netdev_ops" */
5972 ldv_handler_precall();
5973 vmxnet3_set_mac_addr( var_group1, var_vmxnet3_set_mac_addr_68_p1);
5974 /* LDV_COMMENT_BEGIN_PREP */
5975 #ifdef CONFIG_PCI_MSI
5976 #endif
5977 #ifdef CONFIG_PCI_MSI
5978 #endif
5979 #ifdef CONFIG_NET_POLL_CONTROLLER
5980 #endif
5981 #ifdef VMXNET3_RSS
5982 #endif
5983 #ifdef VMXNET3_RSS
5984 #endif
5985 #ifdef VMXNET3_RSS
5986 #endif
5987 #ifdef VMXNET3_RSS
5988 #endif
5989 #ifdef VMXNET3_RSS
5990 #endif
5991 #ifdef VMXNET3_RSS
5992 #endif
5993 #ifdef CONFIG_PM
5994 #endif
5995 #ifdef CONFIG_PM
5996 #endif
5997 /* LDV_COMMENT_END_PREP */
5998
5999
6000
6001
6002 }
6003
6004 break;
6005 case 4: {
6006
6007 /** STRUCT: struct type: net_device_ops, struct name: vmxnet3_netdev_ops **/
6008
6009
6010 /* content: static int vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)*/
6011 /* LDV_COMMENT_BEGIN_PREP */
6012 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
6013 #ifdef __BIG_ENDIAN_BITFIELD
6014 #endif
6015 #ifdef __BIG_ENDIAN_BITFIELD
6016 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
6017 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
6018 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
6019 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
6020 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
6021 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
6022 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
6023 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
6024 VMXNET3_TCD_GEN_SIZE)
6025 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
6026 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
6027 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
6028 (dstrcd) = (tmp); \
6029 vmxnet3_RxCompToCPU((rcd), (tmp)); \
6030 } while (0)
6031 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
6032 (dstrxd) = (tmp); \
6033 vmxnet3_RxDescToCPU((rxd), (tmp)); \
6034 } while (0)
6035 #else
6036 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
6037 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
6038 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
6039 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
6040 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
6041 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
6042 #endif
6043 #ifdef __BIG_ENDIAN_BITFIELD
6044 #endif
6045 #ifdef __BIG_ENDIAN_BITFIELD
6046 #else
6047 #endif
6048 #ifdef __BIG_ENDIAN_BITFIELD
6049 #endif
6050 #ifdef __BIG_ENDIAN_BITFIELD
6051 #endif
6052 #ifdef VMXNET3_RSS
6053 #endif
6054 #ifdef __BIG_ENDIAN_BITFIELD
6055 #endif
6056 #ifdef CONFIG_PCI_MSI
6057 #endif
6058 #ifdef CONFIG_NET_POLL_CONTROLLER
6059 #ifdef CONFIG_PCI_MSI
6060 #endif
6061 #endif
6062 #ifdef CONFIG_PCI_MSI
6063 #endif
6064 #ifdef CONFIG_PCI_MSI
6065 #endif
6066 #ifdef CONFIG_PCI_MSI
6067 #endif
6068 #ifdef VMXNET3_RSS
6069 #endif
6070 /* LDV_COMMENT_END_PREP */
6071 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_change_mtu" from driver structure with callbacks "vmxnet3_netdev_ops" */
6072 ldv_handler_precall();
6073 vmxnet3_change_mtu( var_group1, var_vmxnet3_change_mtu_76_p1);
6074 /* LDV_COMMENT_BEGIN_PREP */
6075 #ifdef CONFIG_PCI_MSI
6076 #endif
6077 #ifdef CONFIG_PCI_MSI
6078 #endif
6079 #ifdef CONFIG_NET_POLL_CONTROLLER
6080 #endif
6081 #ifdef VMXNET3_RSS
6082 #endif
6083 #ifdef VMXNET3_RSS
6084 #endif
6085 #ifdef VMXNET3_RSS
6086 #endif
6087 #ifdef VMXNET3_RSS
6088 #endif
6089 #ifdef VMXNET3_RSS
6090 #endif
6091 #ifdef VMXNET3_RSS
6092 #endif
6093 #ifdef CONFIG_PM
6094 #endif
6095 #ifdef CONFIG_PM
6096 #endif
6097 /* LDV_COMMENT_END_PREP */
6098
6099
6100
6101
6102 }
6103
6104 break;
6105 case 5: {
6106
6107 /** STRUCT: struct type: net_device_ops, struct name: vmxnet3_netdev_ops **/
6108
6109
6110 /* content: static void vmxnet3_tx_timeout(struct net_device *netdev)*/
6111 /* LDV_COMMENT_BEGIN_PREP */
6112 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
6113 #ifdef __BIG_ENDIAN_BITFIELD
6114 #endif
6115 #ifdef __BIG_ENDIAN_BITFIELD
6116 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
6117 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
6118 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
6119 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
6120 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
6121 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
6122 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
6123 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
6124 VMXNET3_TCD_GEN_SIZE)
6125 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
6126 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
6127 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
6128 (dstrcd) = (tmp); \
6129 vmxnet3_RxCompToCPU((rcd), (tmp)); \
6130 } while (0)
6131 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
6132 (dstrxd) = (tmp); \
6133 vmxnet3_RxDescToCPU((rxd), (tmp)); \
6134 } while (0)
6135 #else
6136 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
6137 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
6138 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
6139 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
6140 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
6141 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
6142 #endif
6143 #ifdef __BIG_ENDIAN_BITFIELD
6144 #endif
6145 #ifdef __BIG_ENDIAN_BITFIELD
6146 #else
6147 #endif
6148 #ifdef __BIG_ENDIAN_BITFIELD
6149 #endif
6150 #ifdef __BIG_ENDIAN_BITFIELD
6151 #endif
6152 #ifdef VMXNET3_RSS
6153 #endif
6154 #ifdef __BIG_ENDIAN_BITFIELD
6155 #endif
6156 #ifdef CONFIG_PCI_MSI
6157 #endif
6158 #ifdef CONFIG_NET_POLL_CONTROLLER
6159 #ifdef CONFIG_PCI_MSI
6160 #endif
6161 #endif
6162 #ifdef CONFIG_PCI_MSI
6163 #endif
6164 #ifdef CONFIG_PCI_MSI
6165 #endif
6166 #ifdef CONFIG_PCI_MSI
6167 #endif
6168 #ifdef VMXNET3_RSS
6169 #endif
6170 #ifdef CONFIG_PCI_MSI
6171 #endif
6172 #ifdef CONFIG_PCI_MSI
6173 #endif
6174 /* LDV_COMMENT_END_PREP */
6175 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_tx_timeout" from driver structure with callbacks "vmxnet3_netdev_ops" */
6176 ldv_handler_precall();
6177 vmxnet3_tx_timeout( var_group1);
6178 /* LDV_COMMENT_BEGIN_PREP */
6179 #ifdef CONFIG_NET_POLL_CONTROLLER
6180 #endif
6181 #ifdef VMXNET3_RSS
6182 #endif
6183 #ifdef VMXNET3_RSS
6184 #endif
6185 #ifdef VMXNET3_RSS
6186 #endif
6187 #ifdef VMXNET3_RSS
6188 #endif
6189 #ifdef VMXNET3_RSS
6190 #endif
6191 #ifdef VMXNET3_RSS
6192 #endif
6193 #ifdef CONFIG_PM
6194 #endif
6195 #ifdef CONFIG_PM
6196 #endif
6197 /* LDV_COMMENT_END_PREP */
6198
6199
6200
6201
6202 }
6203
6204 break;
6205 case 6: {
6206
6207 /** STRUCT: struct type: net_device_ops, struct name: vmxnet3_netdev_ops **/
6208
6209
6210 /* content: static void vmxnet3_set_mc(struct net_device *netdev)*/
6211 /* LDV_COMMENT_BEGIN_PREP */
6212 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
6213 #ifdef __BIG_ENDIAN_BITFIELD
6214 #endif
6215 #ifdef __BIG_ENDIAN_BITFIELD
6216 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
6217 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
6218 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
6219 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
6220 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
6221 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
6222 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
6223 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
6224 VMXNET3_TCD_GEN_SIZE)
6225 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
6226 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
6227 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
6228 (dstrcd) = (tmp); \
6229 vmxnet3_RxCompToCPU((rcd), (tmp)); \
6230 } while (0)
6231 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
6232 (dstrxd) = (tmp); \
6233 vmxnet3_RxDescToCPU((rxd), (tmp)); \
6234 } while (0)
6235 #else
6236 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
6237 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
6238 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
6239 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
6240 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
6241 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
6242 #endif
6243 #ifdef __BIG_ENDIAN_BITFIELD
6244 #endif
6245 #ifdef __BIG_ENDIAN_BITFIELD
6246 #else
6247 #endif
6248 #ifdef __BIG_ENDIAN_BITFIELD
6249 #endif
6250 #ifdef __BIG_ENDIAN_BITFIELD
6251 #endif
6252 #ifdef VMXNET3_RSS
6253 #endif
6254 #ifdef __BIG_ENDIAN_BITFIELD
6255 #endif
6256 #ifdef CONFIG_PCI_MSI
6257 #endif
6258 #ifdef CONFIG_NET_POLL_CONTROLLER
6259 #ifdef CONFIG_PCI_MSI
6260 #endif
6261 #endif
6262 #ifdef CONFIG_PCI_MSI
6263 #endif
6264 #ifdef CONFIG_PCI_MSI
6265 #endif
6266 #ifdef CONFIG_PCI_MSI
6267 #endif
6268 /* LDV_COMMENT_END_PREP */
6269 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_set_rx_mode" from driver structure with callbacks "vmxnet3_netdev_ops" */
6270 ldv_handler_precall();
6271 vmxnet3_set_mc( var_group1);
6272 /* LDV_COMMENT_BEGIN_PREP */
6273 #ifdef VMXNET3_RSS
6274 #endif
6275 #ifdef CONFIG_PCI_MSI
6276 #endif
6277 #ifdef CONFIG_PCI_MSI
6278 #endif
6279 #ifdef CONFIG_NET_POLL_CONTROLLER
6280 #endif
6281 #ifdef VMXNET3_RSS
6282 #endif
6283 #ifdef VMXNET3_RSS
6284 #endif
6285 #ifdef VMXNET3_RSS
6286 #endif
6287 #ifdef VMXNET3_RSS
6288 #endif
6289 #ifdef VMXNET3_RSS
6290 #endif
6291 #ifdef VMXNET3_RSS
6292 #endif
6293 #ifdef CONFIG_PM
6294 #endif
6295 #ifdef CONFIG_PM
6296 #endif
6297 /* LDV_COMMENT_END_PREP */
6298
6299
6300
6301
6302 }
6303
6304 break;
6305 case 7: {
6306
6307 /** STRUCT: struct type: net_device_ops, struct name: vmxnet3_netdev_ops **/
6308
6309
6310 /* content: static int vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)*/
6311 /* LDV_COMMENT_BEGIN_PREP */
6312 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
6313 #ifdef __BIG_ENDIAN_BITFIELD
6314 #endif
6315 #ifdef __BIG_ENDIAN_BITFIELD
6316 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
6317 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
6318 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
6319 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
6320 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
6321 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
6322 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
6323 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
6324 VMXNET3_TCD_GEN_SIZE)
6325 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
6326 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
6327 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
6328 (dstrcd) = (tmp); \
6329 vmxnet3_RxCompToCPU((rcd), (tmp)); \
6330 } while (0)
6331 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
6332 (dstrxd) = (tmp); \
6333 vmxnet3_RxDescToCPU((rxd), (tmp)); \
6334 } while (0)
6335 #else
6336 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
6337 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
6338 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
6339 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
6340 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
6341 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
6342 #endif
6343 #ifdef __BIG_ENDIAN_BITFIELD
6344 #endif
6345 #ifdef __BIG_ENDIAN_BITFIELD
6346 #else
6347 #endif
6348 #ifdef __BIG_ENDIAN_BITFIELD
6349 #endif
6350 #ifdef __BIG_ENDIAN_BITFIELD
6351 #endif
6352 #ifdef VMXNET3_RSS
6353 #endif
6354 #ifdef __BIG_ENDIAN_BITFIELD
6355 #endif
6356 #ifdef CONFIG_PCI_MSI
6357 #endif
6358 #ifdef CONFIG_NET_POLL_CONTROLLER
6359 #ifdef CONFIG_PCI_MSI
6360 #endif
6361 #endif
6362 #ifdef CONFIG_PCI_MSI
6363 #endif
6364 #ifdef CONFIG_PCI_MSI
6365 #endif
6366 #ifdef CONFIG_PCI_MSI
6367 #endif
6368 /* LDV_COMMENT_END_PREP */
6369 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_vlan_rx_add_vid" from driver structure with callbacks "vmxnet3_netdev_ops" */
6370 ldv_handler_precall();
6371 vmxnet3_vlan_rx_add_vid( var_group1, var_vmxnet3_vlan_rx_add_vid_57_p1, var_vmxnet3_vlan_rx_add_vid_57_p2);
6372 /* LDV_COMMENT_BEGIN_PREP */
6373 #ifdef VMXNET3_RSS
6374 #endif
6375 #ifdef CONFIG_PCI_MSI
6376 #endif
6377 #ifdef CONFIG_PCI_MSI
6378 #endif
6379 #ifdef CONFIG_NET_POLL_CONTROLLER
6380 #endif
6381 #ifdef VMXNET3_RSS
6382 #endif
6383 #ifdef VMXNET3_RSS
6384 #endif
6385 #ifdef VMXNET3_RSS
6386 #endif
6387 #ifdef VMXNET3_RSS
6388 #endif
6389 #ifdef VMXNET3_RSS
6390 #endif
6391 #ifdef VMXNET3_RSS
6392 #endif
6393 #ifdef CONFIG_PM
6394 #endif
6395 #ifdef CONFIG_PM
6396 #endif
6397 /* LDV_COMMENT_END_PREP */
6398
6399
6400
6401
6402 }
6403
6404 break;
6405 case 8: {
6406
6407 /** STRUCT: struct type: net_device_ops, struct name: vmxnet3_netdev_ops **/
6408
6409
6410 /* content: static int vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)*/
6411 /* LDV_COMMENT_BEGIN_PREP */
6412 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
6413 #ifdef __BIG_ENDIAN_BITFIELD
6414 #endif
6415 #ifdef __BIG_ENDIAN_BITFIELD
6416 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
6417 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
6418 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
6419 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
6420 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
6421 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
6422 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
6423 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
6424 VMXNET3_TCD_GEN_SIZE)
6425 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
6426 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
6427 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
6428 (dstrcd) = (tmp); \
6429 vmxnet3_RxCompToCPU((rcd), (tmp)); \
6430 } while (0)
6431 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
6432 (dstrxd) = (tmp); \
6433 vmxnet3_RxDescToCPU((rxd), (tmp)); \
6434 } while (0)
6435 #else
6436 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
6437 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
6438 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
6439 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
6440 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
6441 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
6442 #endif
6443 #ifdef __BIG_ENDIAN_BITFIELD
6444 #endif
6445 #ifdef __BIG_ENDIAN_BITFIELD
6446 #else
6447 #endif
6448 #ifdef __BIG_ENDIAN_BITFIELD
6449 #endif
6450 #ifdef __BIG_ENDIAN_BITFIELD
6451 #endif
6452 #ifdef VMXNET3_RSS
6453 #endif
6454 #ifdef __BIG_ENDIAN_BITFIELD
6455 #endif
6456 #ifdef CONFIG_PCI_MSI
6457 #endif
6458 #ifdef CONFIG_NET_POLL_CONTROLLER
6459 #ifdef CONFIG_PCI_MSI
6460 #endif
6461 #endif
6462 #ifdef CONFIG_PCI_MSI
6463 #endif
6464 #ifdef CONFIG_PCI_MSI
6465 #endif
6466 #ifdef CONFIG_PCI_MSI
6467 #endif
6468 /* LDV_COMMENT_END_PREP */
6469 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_vlan_rx_kill_vid" from driver structure with callbacks "vmxnet3_netdev_ops" */
6470 ldv_handler_precall();
6471 vmxnet3_vlan_rx_kill_vid( var_group1, var_vmxnet3_vlan_rx_kill_vid_58_p1, var_vmxnet3_vlan_rx_kill_vid_58_p2);
6472 /* LDV_COMMENT_BEGIN_PREP */
6473 #ifdef VMXNET3_RSS
6474 #endif
6475 #ifdef CONFIG_PCI_MSI
6476 #endif
6477 #ifdef CONFIG_PCI_MSI
6478 #endif
6479 #ifdef CONFIG_NET_POLL_CONTROLLER
6480 #endif
6481 #ifdef VMXNET3_RSS
6482 #endif
6483 #ifdef VMXNET3_RSS
6484 #endif
6485 #ifdef VMXNET3_RSS
6486 #endif
6487 #ifdef VMXNET3_RSS
6488 #endif
6489 #ifdef VMXNET3_RSS
6490 #endif
6491 #ifdef VMXNET3_RSS
6492 #endif
6493 #ifdef CONFIG_PM
6494 #endif
6495 #ifdef CONFIG_PM
6496 #endif
6497 /* LDV_COMMENT_END_PREP */
6498
6499
6500
6501
6502 }
6503
6504 break;
6505 case 9: {
6506
6507 /** STRUCT: struct type: net_device_ops, struct name: vmxnet3_netdev_ops **/
6508
6509
6510 /* content: static void vmxnet3_netpoll(struct net_device *netdev)*/
6511 /* LDV_COMMENT_BEGIN_PREP */
6512 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
6513 #ifdef __BIG_ENDIAN_BITFIELD
6514 #endif
6515 #ifdef __BIG_ENDIAN_BITFIELD
6516 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
6517 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
6518 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
6519 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
6520 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
6521 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
6522 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
6523 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
6524 VMXNET3_TCD_GEN_SIZE)
6525 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
6526 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
6527 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
6528 (dstrcd) = (tmp); \
6529 vmxnet3_RxCompToCPU((rcd), (tmp)); \
6530 } while (0)
6531 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
6532 (dstrxd) = (tmp); \
6533 vmxnet3_RxDescToCPU((rxd), (tmp)); \
6534 } while (0)
6535 #else
6536 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
6537 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
6538 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
6539 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
6540 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
6541 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
6542 #endif
6543 #ifdef __BIG_ENDIAN_BITFIELD
6544 #endif
6545 #ifdef __BIG_ENDIAN_BITFIELD
6546 #else
6547 #endif
6548 #ifdef __BIG_ENDIAN_BITFIELD
6549 #endif
6550 #ifdef __BIG_ENDIAN_BITFIELD
6551 #endif
6552 #ifdef VMXNET3_RSS
6553 #endif
6554 #ifdef __BIG_ENDIAN_BITFIELD
6555 #endif
6556 #ifdef CONFIG_PCI_MSI
6557 #endif
6558 #ifdef CONFIG_NET_POLL_CONTROLLER
6559 /* LDV_COMMENT_END_PREP */
6560 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_poll_controller" from driver structure with callbacks "vmxnet3_netdev_ops" */
6561 ldv_handler_precall();
6562 vmxnet3_netpoll( var_group1);
6563 /* LDV_COMMENT_BEGIN_PREP */
6564 #endif
6565 #ifdef CONFIG_PCI_MSI
6566 #endif
6567 #ifdef CONFIG_PCI_MSI
6568 #endif
6569 #ifdef CONFIG_PCI_MSI
6570 #endif
6571 #ifdef VMXNET3_RSS
6572 #endif
6573 #ifdef CONFIG_PCI_MSI
6574 #endif
6575 #ifdef CONFIG_PCI_MSI
6576 #endif
6577 #ifdef CONFIG_NET_POLL_CONTROLLER
6578 #endif
6579 #ifdef VMXNET3_RSS
6580 #endif
6581 #ifdef VMXNET3_RSS
6582 #endif
6583 #ifdef VMXNET3_RSS
6584 #endif
6585 #ifdef VMXNET3_RSS
6586 #endif
6587 #ifdef VMXNET3_RSS
6588 #endif
6589 #ifdef VMXNET3_RSS
6590 #endif
6591 #ifdef CONFIG_PM
6592 #endif
6593 #ifdef CONFIG_PM
6594 #endif
6595 /* LDV_COMMENT_END_PREP */
6596
6597
6598
6599
6600 }
6601
6602 break;
6603 case 10: {
6604
6605 /** STRUCT: struct type: dev_pm_ops, struct name: vmxnet3_pm_ops **/
6606
6607
6608 /* content: static int vmxnet3_suspend(struct device *device)*/
6609 /* LDV_COMMENT_BEGIN_PREP */
6610 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
6611 #ifdef __BIG_ENDIAN_BITFIELD
6612 #endif
6613 #ifdef __BIG_ENDIAN_BITFIELD
6614 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
6615 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
6616 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
6617 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
6618 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
6619 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
6620 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
6621 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
6622 VMXNET3_TCD_GEN_SIZE)
6623 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
6624 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
6625 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
6626 (dstrcd) = (tmp); \
6627 vmxnet3_RxCompToCPU((rcd), (tmp)); \
6628 } while (0)
6629 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
6630 (dstrxd) = (tmp); \
6631 vmxnet3_RxDescToCPU((rxd), (tmp)); \
6632 } while (0)
6633 #else
6634 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
6635 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
6636 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
6637 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
6638 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
6639 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
6640 #endif
6641 #ifdef __BIG_ENDIAN_BITFIELD
6642 #endif
6643 #ifdef __BIG_ENDIAN_BITFIELD
6644 #else
6645 #endif
6646 #ifdef __BIG_ENDIAN_BITFIELD
6647 #endif
6648 #ifdef __BIG_ENDIAN_BITFIELD
6649 #endif
6650 #ifdef VMXNET3_RSS
6651 #endif
6652 #ifdef __BIG_ENDIAN_BITFIELD
6653 #endif
6654 #ifdef CONFIG_PCI_MSI
6655 #endif
6656 #ifdef CONFIG_NET_POLL_CONTROLLER
6657 #ifdef CONFIG_PCI_MSI
6658 #endif
6659 #endif
6660 #ifdef CONFIG_PCI_MSI
6661 #endif
6662 #ifdef CONFIG_PCI_MSI
6663 #endif
6664 #ifdef CONFIG_PCI_MSI
6665 #endif
6666 #ifdef VMXNET3_RSS
6667 #endif
6668 #ifdef CONFIG_PCI_MSI
6669 #endif
6670 #ifdef CONFIG_PCI_MSI
6671 #endif
6672 #ifdef CONFIG_NET_POLL_CONTROLLER
6673 #endif
6674 #ifdef VMXNET3_RSS
6675 #endif
6676 #ifdef VMXNET3_RSS
6677 #endif
6678 #ifdef VMXNET3_RSS
6679 #endif
6680 #ifdef VMXNET3_RSS
6681 #endif
6682 #ifdef VMXNET3_RSS
6683 #endif
6684 #ifdef VMXNET3_RSS
6685 #endif
6686 #ifdef CONFIG_PM
6687 /* LDV_COMMENT_END_PREP */
6688 /* LDV_COMMENT_FUNCTION_CALL Function from field "suspend" from driver structure with callbacks "vmxnet3_pm_ops" */
6689 ldv_handler_precall();
6690 vmxnet3_suspend( var_group3);
6691 /* LDV_COMMENT_BEGIN_PREP */
6692 #endif
6693 #ifdef CONFIG_PM
6694 #endif
6695 /* LDV_COMMENT_END_PREP */
6696
6697
6698
6699
6700 }
6701
6702 break;
6703 case 11: {
6704
6705 /** STRUCT: struct type: dev_pm_ops, struct name: vmxnet3_pm_ops **/
6706
6707
6708 /* content: static int vmxnet3_resume(struct device *device)*/
6709 /* LDV_COMMENT_BEGIN_PREP */
6710 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
6711 #ifdef __BIG_ENDIAN_BITFIELD
6712 #endif
6713 #ifdef __BIG_ENDIAN_BITFIELD
6714 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
6715 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
6716 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
6717 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
6718 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
6719 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
6720 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
6721 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
6722 VMXNET3_TCD_GEN_SIZE)
6723 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
6724 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
6725 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
6726 (dstrcd) = (tmp); \
6727 vmxnet3_RxCompToCPU((rcd), (tmp)); \
6728 } while (0)
6729 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
6730 (dstrxd) = (tmp); \
6731 vmxnet3_RxDescToCPU((rxd), (tmp)); \
6732 } while (0)
6733 #else
6734 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
6735 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
6736 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
6737 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
6738 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
6739 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
6740 #endif
6741 #ifdef __BIG_ENDIAN_BITFIELD
6742 #endif
6743 #ifdef __BIG_ENDIAN_BITFIELD
6744 #else
6745 #endif
6746 #ifdef __BIG_ENDIAN_BITFIELD
6747 #endif
6748 #ifdef __BIG_ENDIAN_BITFIELD
6749 #endif
6750 #ifdef VMXNET3_RSS
6751 #endif
6752 #ifdef __BIG_ENDIAN_BITFIELD
6753 #endif
6754 #ifdef CONFIG_PCI_MSI
6755 #endif
6756 #ifdef CONFIG_NET_POLL_CONTROLLER
6757 #ifdef CONFIG_PCI_MSI
6758 #endif
6759 #endif
6760 #ifdef CONFIG_PCI_MSI
6761 #endif
6762 #ifdef CONFIG_PCI_MSI
6763 #endif
6764 #ifdef CONFIG_PCI_MSI
6765 #endif
6766 #ifdef VMXNET3_RSS
6767 #endif
6768 #ifdef CONFIG_PCI_MSI
6769 #endif
6770 #ifdef CONFIG_PCI_MSI
6771 #endif
6772 #ifdef CONFIG_NET_POLL_CONTROLLER
6773 #endif
6774 #ifdef VMXNET3_RSS
6775 #endif
6776 #ifdef VMXNET3_RSS
6777 #endif
6778 #ifdef VMXNET3_RSS
6779 #endif
6780 #ifdef VMXNET3_RSS
6781 #endif
6782 #ifdef VMXNET3_RSS
6783 #endif
6784 #ifdef VMXNET3_RSS
6785 #endif
6786 #ifdef CONFIG_PM
6787 /* LDV_COMMENT_END_PREP */
6788 /* LDV_COMMENT_FUNCTION_CALL Function from field "resume" from driver structure with callbacks "vmxnet3_pm_ops" */
6789 ldv_handler_precall();
6790 vmxnet3_resume( var_group3);
6791 /* LDV_COMMENT_BEGIN_PREP */
6792 #endif
6793 #ifdef CONFIG_PM
6794 #endif
6795 /* LDV_COMMENT_END_PREP */
6796
6797
6798
6799
6800 }
6801
6802 break;
6803 case 12: {
6804
6805 /** STRUCT: struct type: pci_driver, struct name: vmxnet3_driver **/
6806 if(ldv_s_vmxnet3_driver_pci_driver==0) {
6807
6808 /* content: static int vmxnet3_probe_device(struct pci_dev *pdev, const struct pci_device_id *id)*/
6809 /* LDV_COMMENT_BEGIN_PREP */
6810 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
6811 #ifdef __BIG_ENDIAN_BITFIELD
6812 #endif
6813 #ifdef __BIG_ENDIAN_BITFIELD
6814 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
6815 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
6816 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
6817 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
6818 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
6819 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
6820 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
6821 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
6822 VMXNET3_TCD_GEN_SIZE)
6823 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
6824 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
6825 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
6826 (dstrcd) = (tmp); \
6827 vmxnet3_RxCompToCPU((rcd), (tmp)); \
6828 } while (0)
6829 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
6830 (dstrxd) = (tmp); \
6831 vmxnet3_RxDescToCPU((rxd), (tmp)); \
6832 } while (0)
6833 #else
6834 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
6835 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
6836 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
6837 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
6838 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
6839 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
6840 #endif
6841 #ifdef __BIG_ENDIAN_BITFIELD
6842 #endif
6843 #ifdef __BIG_ENDIAN_BITFIELD
6844 #else
6845 #endif
6846 #ifdef __BIG_ENDIAN_BITFIELD
6847 #endif
6848 #ifdef __BIG_ENDIAN_BITFIELD
6849 #endif
6850 #ifdef VMXNET3_RSS
6851 #endif
6852 #ifdef __BIG_ENDIAN_BITFIELD
6853 #endif
6854 #ifdef CONFIG_PCI_MSI
6855 #endif
6856 #ifdef CONFIG_NET_POLL_CONTROLLER
6857 #ifdef CONFIG_PCI_MSI
6858 #endif
6859 #endif
6860 #ifdef CONFIG_PCI_MSI
6861 #endif
6862 #ifdef CONFIG_PCI_MSI
6863 #endif
6864 #ifdef CONFIG_PCI_MSI
6865 #endif
6866 #ifdef VMXNET3_RSS
6867 #endif
6868 #ifdef CONFIG_PCI_MSI
6869 #endif
6870 #ifdef CONFIG_PCI_MSI
6871 #endif
6872 /* LDV_COMMENT_END_PREP */
6873 /* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "vmxnet3_driver". Standard function test for correct return result. */
6874 res_vmxnet3_probe_device_84 = vmxnet3_probe_device( var_group4, var_vmxnet3_probe_device_84_p1);
6875 ldv_check_return_value(res_vmxnet3_probe_device_84);
6876 ldv_check_return_value_probe(res_vmxnet3_probe_device_84);
6877 if(res_vmxnet3_probe_device_84)
6878 goto ldv_module_exit;
6879 /* LDV_COMMENT_BEGIN_PREP */
6880 #ifdef VMXNET3_RSS
6881 #endif
6882 #ifdef VMXNET3_RSS
6883 #endif
6884 #ifdef CONFIG_PM
6885 #endif
6886 #ifdef CONFIG_PM
6887 #endif
6888 /* LDV_COMMENT_END_PREP */
6889 ldv_s_vmxnet3_driver_pci_driver++;
6890
6891 }
6892
6893 }
6894
6895 break;
6896 case 13: {
6897
6898 /** STRUCT: struct type: pci_driver, struct name: vmxnet3_driver **/
6899 if(ldv_s_vmxnet3_driver_pci_driver==1) {
6900
6901 /* content: static void vmxnet3_remove_device(struct pci_dev *pdev)*/
6902 /* LDV_COMMENT_BEGIN_PREP */
6903 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
6904 #ifdef __BIG_ENDIAN_BITFIELD
6905 #endif
6906 #ifdef __BIG_ENDIAN_BITFIELD
6907 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
6908 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
6909 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
6910 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
6911 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
6912 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
6913 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
6914 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
6915 VMXNET3_TCD_GEN_SIZE)
6916 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
6917 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
6918 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
6919 (dstrcd) = (tmp); \
6920 vmxnet3_RxCompToCPU((rcd), (tmp)); \
6921 } while (0)
6922 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
6923 (dstrxd) = (tmp); \
6924 vmxnet3_RxDescToCPU((rxd), (tmp)); \
6925 } while (0)
6926 #else
6927 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
6928 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
6929 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
6930 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
6931 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
6932 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
6933 #endif
6934 #ifdef __BIG_ENDIAN_BITFIELD
6935 #endif
6936 #ifdef __BIG_ENDIAN_BITFIELD
6937 #else
6938 #endif
6939 #ifdef __BIG_ENDIAN_BITFIELD
6940 #endif
6941 #ifdef __BIG_ENDIAN_BITFIELD
6942 #endif
6943 #ifdef VMXNET3_RSS
6944 #endif
6945 #ifdef __BIG_ENDIAN_BITFIELD
6946 #endif
6947 #ifdef CONFIG_PCI_MSI
6948 #endif
6949 #ifdef CONFIG_NET_POLL_CONTROLLER
6950 #ifdef CONFIG_PCI_MSI
6951 #endif
6952 #endif
6953 #ifdef CONFIG_PCI_MSI
6954 #endif
6955 #ifdef CONFIG_PCI_MSI
6956 #endif
6957 #ifdef CONFIG_PCI_MSI
6958 #endif
6959 #ifdef VMXNET3_RSS
6960 #endif
6961 #ifdef CONFIG_PCI_MSI
6962 #endif
6963 #ifdef CONFIG_PCI_MSI
6964 #endif
6965 #ifdef CONFIG_NET_POLL_CONTROLLER
6966 #endif
6967 #ifdef VMXNET3_RSS
6968 #endif
6969 #ifdef VMXNET3_RSS
6970 #endif
6971 #ifdef VMXNET3_RSS
6972 #endif
6973 #ifdef VMXNET3_RSS
6974 #endif
6975 /* LDV_COMMENT_END_PREP */
6976 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "vmxnet3_driver" */
6977 ldv_handler_precall();
6978 vmxnet3_remove_device( var_group4);
6979 /* LDV_COMMENT_BEGIN_PREP */
6980 #ifdef CONFIG_PM
6981 #endif
6982 #ifdef CONFIG_PM
6983 #endif
6984 /* LDV_COMMENT_END_PREP */
6985 ldv_s_vmxnet3_driver_pci_driver=0;
6986
6987 }
6988
6989 }
6990
6991 break;
6992 case 14: {
6993
6994 /** STRUCT: struct type: pci_driver, struct name: vmxnet3_driver **/
6995
6996
6997 /* content: static void vmxnet3_shutdown_device(struct pci_dev *pdev)*/
6998 /* LDV_COMMENT_BEGIN_PREP */
6999 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
7000 #ifdef __BIG_ENDIAN_BITFIELD
7001 #endif
7002 #ifdef __BIG_ENDIAN_BITFIELD
7003 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
7004 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
7005 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
7006 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
7007 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
7008 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
7009 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
7010 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
7011 VMXNET3_TCD_GEN_SIZE)
7012 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
7013 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
7014 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
7015 (dstrcd) = (tmp); \
7016 vmxnet3_RxCompToCPU((rcd), (tmp)); \
7017 } while (0)
7018 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
7019 (dstrxd) = (tmp); \
7020 vmxnet3_RxDescToCPU((rxd), (tmp)); \
7021 } while (0)
7022 #else
7023 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
7024 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
7025 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
7026 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
7027 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
7028 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
7029 #endif
7030 #ifdef __BIG_ENDIAN_BITFIELD
7031 #endif
7032 #ifdef __BIG_ENDIAN_BITFIELD
7033 #else
7034 #endif
7035 #ifdef __BIG_ENDIAN_BITFIELD
7036 #endif
7037 #ifdef __BIG_ENDIAN_BITFIELD
7038 #endif
7039 #ifdef VMXNET3_RSS
7040 #endif
7041 #ifdef __BIG_ENDIAN_BITFIELD
7042 #endif
7043 #ifdef CONFIG_PCI_MSI
7044 #endif
7045 #ifdef CONFIG_NET_POLL_CONTROLLER
7046 #ifdef CONFIG_PCI_MSI
7047 #endif
7048 #endif
7049 #ifdef CONFIG_PCI_MSI
7050 #endif
7051 #ifdef CONFIG_PCI_MSI
7052 #endif
7053 #ifdef CONFIG_PCI_MSI
7054 #endif
7055 #ifdef VMXNET3_RSS
7056 #endif
7057 #ifdef CONFIG_PCI_MSI
7058 #endif
7059 #ifdef CONFIG_PCI_MSI
7060 #endif
7061 #ifdef CONFIG_NET_POLL_CONTROLLER
7062 #endif
7063 #ifdef VMXNET3_RSS
7064 #endif
7065 #ifdef VMXNET3_RSS
7066 #endif
7067 #ifdef VMXNET3_RSS
7068 #endif
7069 #ifdef VMXNET3_RSS
7070 #endif
7071 #ifdef VMXNET3_RSS
7072 #endif
7073 #ifdef VMXNET3_RSS
7074 #endif
7075 /* LDV_COMMENT_END_PREP */
7076 /* LDV_COMMENT_FUNCTION_CALL Function from field "shutdown" from driver structure with callbacks "vmxnet3_driver" */
7077 ldv_handler_precall();
7078 vmxnet3_shutdown_device( var_group4);
7079 /* LDV_COMMENT_BEGIN_PREP */
7080 #ifdef CONFIG_PM
7081 #endif
7082 #ifdef CONFIG_PM
7083 #endif
7084 /* LDV_COMMENT_END_PREP */
7085
7086
7087
7088
7089 }
7090
7091 break;
7092 case 15: {
7093
7094 /** CALLBACK SECTION request_irq **/
7095 LDV_IN_INTERRUPT=2;
7096
7097 /* content: static irqreturn_t vmxnet3_intr(int irq, void *dev_id)*/
7098 /* LDV_COMMENT_BEGIN_PREP */
7099 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
7100 #ifdef __BIG_ENDIAN_BITFIELD
7101 #endif
7102 #ifdef __BIG_ENDIAN_BITFIELD
7103 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
7104 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
7105 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
7106 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
7107 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
7108 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
7109 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
7110 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
7111 VMXNET3_TCD_GEN_SIZE)
7112 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
7113 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
7114 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
7115 (dstrcd) = (tmp); \
7116 vmxnet3_RxCompToCPU((rcd), (tmp)); \
7117 } while (0)
7118 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
7119 (dstrxd) = (tmp); \
7120 vmxnet3_RxDescToCPU((rxd), (tmp)); \
7121 } while (0)
7122 #else
7123 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
7124 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
7125 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
7126 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
7127 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
7128 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
7129 #endif
7130 #ifdef __BIG_ENDIAN_BITFIELD
7131 #endif
7132 #ifdef __BIG_ENDIAN_BITFIELD
7133 #else
7134 #endif
7135 #ifdef __BIG_ENDIAN_BITFIELD
7136 #endif
7137 #ifdef __BIG_ENDIAN_BITFIELD
7138 #endif
7139 #ifdef VMXNET3_RSS
7140 #endif
7141 #ifdef __BIG_ENDIAN_BITFIELD
7142 #endif
7143 #ifdef CONFIG_PCI_MSI
7144 #endif
7145 /* LDV_COMMENT_END_PREP */
7146 /* LDV_COMMENT_FUNCTION_CALL */
7147 ldv_handler_precall();
7148 vmxnet3_intr( var_vmxnet3_intr_52_p0, var_vmxnet3_intr_52_p1);
7149 /* LDV_COMMENT_BEGIN_PREP */
7150 #ifdef CONFIG_NET_POLL_CONTROLLER
7151 #ifdef CONFIG_PCI_MSI
7152 #endif
7153 #endif
7154 #ifdef CONFIG_PCI_MSI
7155 #endif
7156 #ifdef CONFIG_PCI_MSI
7157 #endif
7158 #ifdef CONFIG_PCI_MSI
7159 #endif
7160 #ifdef VMXNET3_RSS
7161 #endif
7162 #ifdef CONFIG_PCI_MSI
7163 #endif
7164 #ifdef CONFIG_PCI_MSI
7165 #endif
7166 #ifdef CONFIG_NET_POLL_CONTROLLER
7167 #endif
7168 #ifdef VMXNET3_RSS
7169 #endif
7170 #ifdef VMXNET3_RSS
7171 #endif
7172 #ifdef VMXNET3_RSS
7173 #endif
7174 #ifdef VMXNET3_RSS
7175 #endif
7176 #ifdef VMXNET3_RSS
7177 #endif
7178 #ifdef VMXNET3_RSS
7179 #endif
7180 #ifdef CONFIG_PM
7181 #endif
7182 #ifdef CONFIG_PM
7183 #endif
7184 /* LDV_COMMENT_END_PREP */
7185 LDV_IN_INTERRUPT=1;
7186
7187
7188
7189 }
7190
7191 break;
7192 case 16: {
7193
7194 /** CALLBACK SECTION request_irq **/
7195 LDV_IN_INTERRUPT=2;
7196
7197 /* content: static irqreturn_t vmxnet3_msix_event(int irq, void *data)*/
7198 /* LDV_COMMENT_BEGIN_PREP */
7199 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
7200 #ifdef __BIG_ENDIAN_BITFIELD
7201 #endif
7202 #ifdef __BIG_ENDIAN_BITFIELD
7203 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
7204 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
7205 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
7206 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
7207 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
7208 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
7209 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
7210 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
7211 VMXNET3_TCD_GEN_SIZE)
7212 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
7213 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
7214 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
7215 (dstrcd) = (tmp); \
7216 vmxnet3_RxCompToCPU((rcd), (tmp)); \
7217 } while (0)
7218 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
7219 (dstrxd) = (tmp); \
7220 vmxnet3_RxDescToCPU((rxd), (tmp)); \
7221 } while (0)
7222 #else
7223 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
7224 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
7225 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
7226 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
7227 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
7228 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
7229 #endif
7230 #ifdef __BIG_ENDIAN_BITFIELD
7231 #endif
7232 #ifdef __BIG_ENDIAN_BITFIELD
7233 #else
7234 #endif
7235 #ifdef __BIG_ENDIAN_BITFIELD
7236 #endif
7237 #ifdef __BIG_ENDIAN_BITFIELD
7238 #endif
7239 #ifdef VMXNET3_RSS
7240 #endif
7241 #ifdef __BIG_ENDIAN_BITFIELD
7242 #endif
7243 #ifdef CONFIG_PCI_MSI
7244 /* LDV_COMMENT_END_PREP */
7245 /* LDV_COMMENT_FUNCTION_CALL */
7246 ldv_handler_precall();
7247 vmxnet3_msix_event( var_vmxnet3_msix_event_51_p0, var_vmxnet3_msix_event_51_p1);
7248 /* LDV_COMMENT_BEGIN_PREP */
7249 #endif
7250 #ifdef CONFIG_NET_POLL_CONTROLLER
7251 #ifdef CONFIG_PCI_MSI
7252 #endif
7253 #endif
7254 #ifdef CONFIG_PCI_MSI
7255 #endif
7256 #ifdef CONFIG_PCI_MSI
7257 #endif
7258 #ifdef CONFIG_PCI_MSI
7259 #endif
7260 #ifdef VMXNET3_RSS
7261 #endif
7262 #ifdef CONFIG_PCI_MSI
7263 #endif
7264 #ifdef CONFIG_PCI_MSI
7265 #endif
7266 #ifdef CONFIG_NET_POLL_CONTROLLER
7267 #endif
7268 #ifdef VMXNET3_RSS
7269 #endif
7270 #ifdef VMXNET3_RSS
7271 #endif
7272 #ifdef VMXNET3_RSS
7273 #endif
7274 #ifdef VMXNET3_RSS
7275 #endif
7276 #ifdef VMXNET3_RSS
7277 #endif
7278 #ifdef VMXNET3_RSS
7279 #endif
7280 #ifdef CONFIG_PM
7281 #endif
7282 #ifdef CONFIG_PM
7283 #endif
7284 /* LDV_COMMENT_END_PREP */
7285 LDV_IN_INTERRUPT=1;
7286
7287
7288
7289 }
7290
7291 break;
7292 case 17: {
7293
7294 /** CALLBACK SECTION request_irq **/
7295 LDV_IN_INTERRUPT=2;
7296
7297 /* content: static irqreturn_t vmxnet3_msix_rx(int irq, void *data)*/
7298 /* LDV_COMMENT_BEGIN_PREP */
7299 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
7300 #ifdef __BIG_ENDIAN_BITFIELD
7301 #endif
7302 #ifdef __BIG_ENDIAN_BITFIELD
7303 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
7304 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
7305 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
7306 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
7307 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
7308 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
7309 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
7310 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
7311 VMXNET3_TCD_GEN_SIZE)
7312 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
7313 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
7314 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
7315 (dstrcd) = (tmp); \
7316 vmxnet3_RxCompToCPU((rcd), (tmp)); \
7317 } while (0)
7318 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
7319 (dstrxd) = (tmp); \
7320 vmxnet3_RxDescToCPU((rxd), (tmp)); \
7321 } while (0)
7322 #else
7323 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
7324 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
7325 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
7326 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
7327 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
7328 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
7329 #endif
7330 #ifdef __BIG_ENDIAN_BITFIELD
7331 #endif
7332 #ifdef __BIG_ENDIAN_BITFIELD
7333 #else
7334 #endif
7335 #ifdef __BIG_ENDIAN_BITFIELD
7336 #endif
7337 #ifdef __BIG_ENDIAN_BITFIELD
7338 #endif
7339 #ifdef VMXNET3_RSS
7340 #endif
7341 #ifdef __BIG_ENDIAN_BITFIELD
7342 #endif
7343 #ifdef CONFIG_PCI_MSI
7344 /* LDV_COMMENT_END_PREP */
7345 /* LDV_COMMENT_FUNCTION_CALL */
7346 ldv_handler_precall();
7347 vmxnet3_msix_rx( var_vmxnet3_msix_rx_50_p0, var_vmxnet3_msix_rx_50_p1);
7348 /* LDV_COMMENT_BEGIN_PREP */
7349 #endif
7350 #ifdef CONFIG_NET_POLL_CONTROLLER
7351 #ifdef CONFIG_PCI_MSI
7352 #endif
7353 #endif
7354 #ifdef CONFIG_PCI_MSI
7355 #endif
7356 #ifdef CONFIG_PCI_MSI
7357 #endif
7358 #ifdef CONFIG_PCI_MSI
7359 #endif
7360 #ifdef VMXNET3_RSS
7361 #endif
7362 #ifdef CONFIG_PCI_MSI
7363 #endif
7364 #ifdef CONFIG_PCI_MSI
7365 #endif
7366 #ifdef CONFIG_NET_POLL_CONTROLLER
7367 #endif
7368 #ifdef VMXNET3_RSS
7369 #endif
7370 #ifdef VMXNET3_RSS
7371 #endif
7372 #ifdef VMXNET3_RSS
7373 #endif
7374 #ifdef VMXNET3_RSS
7375 #endif
7376 #ifdef VMXNET3_RSS
7377 #endif
7378 #ifdef VMXNET3_RSS
7379 #endif
7380 #ifdef CONFIG_PM
7381 #endif
7382 #ifdef CONFIG_PM
7383 #endif
7384 /* LDV_COMMENT_END_PREP */
7385 LDV_IN_INTERRUPT=1;
7386
7387
7388
7389 }
7390
7391 break;
7392 case 18: {
7393
7394 /** CALLBACK SECTION request_irq **/
7395 LDV_IN_INTERRUPT=2;
7396
7397 /* content: static irqreturn_t vmxnet3_msix_tx(int irq, void *data)*/
7398 /* LDV_COMMENT_BEGIN_PREP */
7399 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
7400 #ifdef __BIG_ENDIAN_BITFIELD
7401 #endif
7402 #ifdef __BIG_ENDIAN_BITFIELD
7403 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
7404 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
7405 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
7406 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
7407 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
7408 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
7409 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
7410 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
7411 VMXNET3_TCD_GEN_SIZE)
7412 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
7413 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
7414 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
7415 (dstrcd) = (tmp); \
7416 vmxnet3_RxCompToCPU((rcd), (tmp)); \
7417 } while (0)
7418 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
7419 (dstrxd) = (tmp); \
7420 vmxnet3_RxDescToCPU((rxd), (tmp)); \
7421 } while (0)
7422 #else
7423 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
7424 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
7425 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
7426 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
7427 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
7428 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
7429 #endif
7430 #ifdef __BIG_ENDIAN_BITFIELD
7431 #endif
7432 #ifdef __BIG_ENDIAN_BITFIELD
7433 #else
7434 #endif
7435 #ifdef __BIG_ENDIAN_BITFIELD
7436 #endif
7437 #ifdef __BIG_ENDIAN_BITFIELD
7438 #endif
7439 #ifdef VMXNET3_RSS
7440 #endif
7441 #ifdef __BIG_ENDIAN_BITFIELD
7442 #endif
7443 #ifdef CONFIG_PCI_MSI
7444 /* LDV_COMMENT_END_PREP */
7445 /* LDV_COMMENT_FUNCTION_CALL */
7446 ldv_handler_precall();
7447 vmxnet3_msix_tx( var_vmxnet3_msix_tx_49_p0, var_vmxnet3_msix_tx_49_p1);
7448 /* LDV_COMMENT_BEGIN_PREP */
7449 #endif
7450 #ifdef CONFIG_NET_POLL_CONTROLLER
7451 #ifdef CONFIG_PCI_MSI
7452 #endif
7453 #endif
7454 #ifdef CONFIG_PCI_MSI
7455 #endif
7456 #ifdef CONFIG_PCI_MSI
7457 #endif
7458 #ifdef CONFIG_PCI_MSI
7459 #endif
7460 #ifdef VMXNET3_RSS
7461 #endif
7462 #ifdef CONFIG_PCI_MSI
7463 #endif
7464 #ifdef CONFIG_PCI_MSI
7465 #endif
7466 #ifdef CONFIG_NET_POLL_CONTROLLER
7467 #endif
7468 #ifdef VMXNET3_RSS
7469 #endif
7470 #ifdef VMXNET3_RSS
7471 #endif
7472 #ifdef VMXNET3_RSS
7473 #endif
7474 #ifdef VMXNET3_RSS
7475 #endif
7476 #ifdef VMXNET3_RSS
7477 #endif
7478 #ifdef VMXNET3_RSS
7479 #endif
7480 #ifdef CONFIG_PM
7481 #endif
7482 #ifdef CONFIG_PM
7483 #endif
7484 /* LDV_COMMENT_END_PREP */
7485 LDV_IN_INTERRUPT=1;
7486
7487
7488
7489 }
7490
7491 break;
7492 default: break;
7493
7494 }
7495
7496 }
7497
7498 ldv_module_exit:
7499
7500 /** INIT: init_type: ST_MODULE_EXIT **/
7501 /* content: static void vmxnet3_exit_module(void)*/
7502 /* LDV_COMMENT_BEGIN_PREP */
7503 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
7504 #ifdef __BIG_ENDIAN_BITFIELD
7505 #endif
7506 #ifdef __BIG_ENDIAN_BITFIELD
7507 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
7508 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
7509 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
7510 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
7511 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
7512 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
7513 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
7514 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
7515 VMXNET3_TCD_GEN_SIZE)
7516 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
7517 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
7518 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
7519 (dstrcd) = (tmp); \
7520 vmxnet3_RxCompToCPU((rcd), (tmp)); \
7521 } while (0)
7522 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
7523 (dstrxd) = (tmp); \
7524 vmxnet3_RxDescToCPU((rxd), (tmp)); \
7525 } while (0)
7526 #else
7527 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
7528 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
7529 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
7530 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
7531 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
7532 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
7533 #endif
7534 #ifdef __BIG_ENDIAN_BITFIELD
7535 #endif
7536 #ifdef __BIG_ENDIAN_BITFIELD
7537 #else
7538 #endif
7539 #ifdef __BIG_ENDIAN_BITFIELD
7540 #endif
7541 #ifdef __BIG_ENDIAN_BITFIELD
7542 #endif
7543 #ifdef VMXNET3_RSS
7544 #endif
7545 #ifdef __BIG_ENDIAN_BITFIELD
7546 #endif
7547 #ifdef CONFIG_PCI_MSI
7548 #endif
7549 #ifdef CONFIG_NET_POLL_CONTROLLER
7550 #ifdef CONFIG_PCI_MSI
7551 #endif
7552 #endif
7553 #ifdef CONFIG_PCI_MSI
7554 #endif
7555 #ifdef CONFIG_PCI_MSI
7556 #endif
7557 #ifdef CONFIG_PCI_MSI
7558 #endif
7559 #ifdef VMXNET3_RSS
7560 #endif
7561 #ifdef CONFIG_PCI_MSI
7562 #endif
7563 #ifdef CONFIG_PCI_MSI
7564 #endif
7565 #ifdef CONFIG_NET_POLL_CONTROLLER
7566 #endif
7567 #ifdef VMXNET3_RSS
7568 #endif
7569 #ifdef VMXNET3_RSS
7570 #endif
7571 #ifdef VMXNET3_RSS
7572 #endif
7573 #ifdef VMXNET3_RSS
7574 #endif
7575 #ifdef VMXNET3_RSS
7576 #endif
7577 #ifdef VMXNET3_RSS
7578 #endif
7579 #ifdef CONFIG_PM
7580 #endif
7581 #ifdef CONFIG_PM
7582 #endif
7583 /* LDV_COMMENT_END_PREP */
7584 /* LDV_COMMENT_FUNCTION_CALL Kernel calls the driver release function before the driver is unloaded from the kernel. This function is declared as "MODULE_EXIT(function name)". */
7585 ldv_handler_precall();
7586 vmxnet3_exit_module();
7587
7588 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver is unloaded. */
7589 ldv_final: ldv_check_final_state();
7590
7591 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
7592 return;
7593
7594 }
7595 #endif
7596
7597 /* LDV_COMMENT_END_MAIN */
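The generated main above follows the usual LDV environment-model pattern: once module initialization succeeds, an unbounded loop picks a nondeterministic case on each iteration and invokes one driver callback, so the verifier can explore arbitrary orderings of the entry points before module exit and the final state check. Below is a minimal sketch of that pattern, added for illustration and not part of the trace; the my_* functions are hypothetical placeholders for the real callbacks.

int ldv_undef_int(void);                       /* nondeterministic value supplied by the verifier */

static int  my_init(void) { return 0; }        /* placeholder for the real module_init handler */
static void my_open(void) { }                  /* placeholder driver callbacks */
static void my_stop(void) { }
static void my_exit(void) { }                  /* placeholder for the real module_exit handler */

static void ldv_environment_model_sketch(void)
{
        if (my_init())                         /* init failed: no callbacks may be invoked */
                return;
        while (ldv_undef_int()) {              /* nondeterministic number of events */
                switch (ldv_undef_int()) {     /* nondeterministic choice of callback */
                case 0: my_open(); break;
                case 1: my_stop(); break;
                default: break;
                }
        }
        my_exit();                             /* unload before the final state check */
}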
7598
7599 #line 10 "/home/ldvuser/ldv/ref_launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/11688/dscv_tempdir/dscv/ri/331_1a/drivers/net/vmxnet3/vmxnet3_drv.o.c.prepared" 1
2 #include <verifier/rcv.h>
3 #include <kernel-model/ERR.inc>
4
5 int LDV_DMA_MAP_CALLS = 0;
6
7 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_map_page') maps page */
8 void ldv_dma_map_page(void) {
9 /* LDV_COMMENT_ASSERT Check that the previous dma_mapping call was checked */
10 ldv_assert(LDV_DMA_MAP_CALLS == 0);
11 /* LDV_COMMENT_CHANGE_STATE Increase dma_mapping counter */
12 LDV_DMA_MAP_CALLS++;
13 }
14
15 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_mapping_error') unmaps page */
16 void ldv_dma_mapping_error(void) {
17 /* LDV_COMMENT_ASSERT No dma_mapping calls to verify */
18 ldv_assert(LDV_DMA_MAP_CALLS != 0);
19 /* LDV_COMMENT_CHANGE_STATE Decrease dma_mapping counter: the previous dma_mapping call has now been checked */
20 LDV_DMA_MAP_CALLS--;
21 }
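/*
 * Editor's sketch (not part of the trace): how the instrumented driver is assumed
 * to reach the two model functions above. LDV instrumentation typically redirects
 * the kernel DMA API through wrappers of this shape, so that every dma_map_page()
 * bumps the counter and every dma_mapping_error() check releases it; the wrapper
 * names below are hypothetical.
 */
#include <linux/dma-mapping.h>

void ldv_dma_map_page(void);
void ldv_dma_mapping_error(void);

static dma_addr_t ldv_wrapped_dma_map_page(struct device *dev, struct page *page,
                                           size_t offset, size_t size,
                                           enum dma_data_direction dir)
{
        ldv_dma_map_page();              /* model: one more unchecked mapping */
        return dma_map_page(dev, page, offset, size, dir);
}

static int ldv_wrapped_dma_mapping_error(struct device *dev, dma_addr_t addr)
{
        ldv_dma_mapping_error();         /* model: the pending mapping has been checked */
        return dma_mapping_error(dev, addr);
}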
22
23 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that the model counter has its initial value at the end */
24 void ldv_check_final_state(void) {
25 /* LDV_COMMENT_ASSERT Every DMA mapping made by the module must have been checked before module unloading */
26 ldv_assert(LDV_DMA_MAP_CALLS == 0);
27 }
1 #ifndef _LDV_RCV_H_
2 #define _LDV_RCV_H_
3
4 /* If expr evaluates to zero, ldv_assert() causes a program to reach the error
5 label like the standard assert(). */
6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
7
8 /* The error label wrapper. It is used because some static verifiers (like
9 BLAST) don't accept multiple error labels in a program. */
10 static inline void ldv_error(void)
11 {
12 LDV_ERROR: goto LDV_ERROR;
13 }
14
15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
16 avoided by verifiers. */
17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
18
19 /* Infinite loop that causes verifiers to skip such paths. */
20 static inline void ldv_stop(void) {
21 LDV_STOP: goto LDV_STOP;
22 }
23
24 /* Special nondeterministic functions. */
25 int ldv_undef_int(void);
26 void *ldv_undef_ptr(void);
27 unsigned long ldv_undef_ulong(void);
28 long ldv_undef_long(void);
29 /* Return a nondeterministic negative integer. */
30 static inline int ldv_undef_int_negative(void)
31 {
32 int ret = ldv_undef_int();
33
34 ldv_assume(ret < 0);
35
36 return ret;
37 }
38 /* Return a nondeterministic nonpositive integer. */
39 static inline int ldv_undef_int_nonpositive(void)
40 {
41 int ret = ldv_undef_int();
42
43 ldv_assume(ret <= 0);
44
45 return ret;
46 }
47
48 /* Add an explicit model for the __builtin_expect GCC function. Without the model the
49 return value will be treated as nondeterministic by verifiers. */
50 static inline long __builtin_expect(long exp, long c)
51 {
52 return exp;
53 }
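/*
 * Editor's note (illustrative, not part of this header): kernel code reaches
 * __builtin_expect() almost exclusively through the likely()/unlikely()
 * branch-prediction macros from <linux/compiler.h>, reproduced below. With the
 * identity model above, unlikely(err) simply evaluates to !!(err), so the
 * verifier sees the real branch condition instead of a nondeterministic value.
 */
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* Example: with the model, this branch depends only on the value of err. */
/* if (unlikely(err)) return err; */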
54
55 /* This function causes the program to exit abnormally. GCC implements this
56 function by using a target-dependent mechanism (such as intentionally executing
57 an illegal instruction) or by calling abort. The mechanism used may vary from
58 release to release so you should not rely on any particular implementation.
59 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
60 static inline void __builtin_trap(void)
61 {
62 ldv_assert(0);
63 }
64
65 /* The constant is used to simulate an error return from the ldv_undef_ptr() function. */
66 #define LDV_PTR_MAX 2012
67
68 #endif /* _LDV_RCV_H_ */
1 /*
2 * device.h - generic, centralized driver model
3 *
4 * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
5 * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
6 * Copyright (c) 2008-2009 Novell Inc.
7 *
8 * This file is released under the GPLv2
9 *
10 * See Documentation/driver-model/ for more information.
11 */
12
13 #ifndef _DEVICE_H_
14 #define _DEVICE_H_
15
16 #include <linux/ioport.h>
17 #include <linux/kobject.h>
18 #include <linux/klist.h>
19 #include <linux/list.h>
20 #include <linux/lockdep.h>
21 #include <linux/compiler.h>
22 #include <linux/types.h>
23 #include <linux/mutex.h>
24 #include <linux/pinctrl/devinfo.h>
25 #include <linux/pm.h>
26 #include <linux/atomic.h>
27 #include <linux/ratelimit.h>
28 #include <linux/uidgid.h>
29 #include <linux/gfp.h>
30 #include <asm/device.h>
31
32 struct device;
33 struct device_private;
34 struct device_driver;
35 struct driver_private;
36 struct module;
37 struct class;
38 struct subsys_private;
39 struct bus_type;
40 struct device_node;
41 struct fwnode_handle;
42 struct iommu_ops;
43 struct iommu_group;
44
45 struct bus_attribute {
46 struct attribute attr;
47 ssize_t (*show)(struct bus_type *bus, char *buf);
48 ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
49 };
50
51 #define BUS_ATTR(_name, _mode, _show, _store) \
52 struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store)
53 #define BUS_ATTR_RW(_name) \
54 struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
55 #define BUS_ATTR_RO(_name) \
56 struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
57
58 extern int __must_check bus_create_file(struct bus_type *,
59 struct bus_attribute *);
60 extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
61
62 /**
63 * struct bus_type - The bus type of the device
64 *
65 * @name: The name of the bus.
66 * @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id).
67 * @dev_root: Default device to use as the parent.
68 * @dev_attrs: Default attributes of the devices on the bus.
69 * @bus_groups: Default attributes of the bus.
70 * @dev_groups: Default attributes of the devices on the bus.
71 * @drv_groups: Default attributes of the device drivers on the bus.
72 * @match: Called, perhaps multiple times, whenever a new device or driver
73 * is added for this bus. It should return a positive value if the
74 * given device can be handled by the given driver and zero
75 * otherwise. It may also return an error code if determining that
76 * the driver supports the device is not possible. In case of
77 * -EPROBE_DEFER it will queue the device for deferred probing.
78 * @uevent: Called when a device is added, removed, or a few other things
79 * that generate uevents to add the environment variables.
80 * @probe: Called when a new device or driver is added to this bus; calls
81 * the specific driver's probe to initialize the matched device.
82 * @remove: Called when a device is removed from this bus.
83 * @shutdown: Called at shut-down time to quiesce the device.
84 *
85 * @online: Called to put the device back online (after offlining it).
86 * @offline: Called to put the device offline for hot-removal. May fail.
87 *
88 * @suspend: Called when a device on this bus wants to go to sleep mode.
89 * @resume: Called to bring a device on this bus out of sleep mode.
90 * @pm: Power management operations of this bus, callback the specific
91 * device driver's pm-ops.
92 * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
93 * driver implementations to a bus and allow the driver to do
94 * bus-specific setup
95 * @p: The private data of the driver core, only the driver core can
96 * touch this.
97 * @lock_key: Lock class key for use by the lock validator
98 *
99 * A bus is a channel between the processor and one or more devices. For the
100 * purposes of the device model, all devices are connected via a bus, even if
101 * it is an internal, virtual, "platform" bus. Buses can plug into each other.
102 * A USB controller is usually a PCI device, for example. The device model
103 * represents the actual connections between buses and the devices they control.
104 * A bus is represented by the bus_type structure. It contains the name, the
105 * default attributes, the bus' methods, PM operations, and the driver core's
106 * private data.
107 */
108 struct bus_type {
109 const char *name;
110 const char *dev_name;
111 struct device *dev_root;
112 struct device_attribute *dev_attrs; /* use dev_groups instead */
113 const struct attribute_group **bus_groups;
114 const struct attribute_group **dev_groups;
115 const struct attribute_group **drv_groups;
116
117 int (*match)(struct device *dev, struct device_driver *drv);
118 int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
119 int (*probe)(struct device *dev);
120 int (*remove)(struct device *dev);
121 void (*shutdown)(struct device *dev);
122
123 int (*online)(struct device *dev);
124 int (*offline)(struct device *dev);
125
126 int (*suspend)(struct device *dev, pm_message_t state);
127 int (*resume)(struct device *dev);
128
129 const struct dev_pm_ops *pm;
130
131 const struct iommu_ops *iommu_ops;
132
133 struct subsys_private *p;
134 struct lock_class_key lock_key;
135 };
136
137 extern int __must_check bus_register(struct bus_type *bus);
138
139 extern void bus_unregister(struct bus_type *bus);
140
141 extern int __must_check bus_rescan_devices(struct bus_type *bus);
142
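A rough sketch, not taken from this header, of what a minimal bus looks like; the bus name and the name-based match rule are illustrative assumptions:

#include <linux/device.h>
#include <linux/string.h>

/* Hypothetical bus: a driver matches a device when their names are equal. */
static int example_match(struct device *dev, struct device_driver *drv)
{
        return strcmp(dev_name(dev), drv->name) == 0;
}

struct bus_type example_bus_type = {
        .name  = "example",
        .match = example_match,
};

/* Registered once, typically from an initcall:
 *     ret = bus_register(&example_bus_type);
 * and torn down with bus_unregister(&example_bus_type). */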
143 /* iterator helpers for buses */
144 struct subsys_dev_iter {
145 struct klist_iter ki;
146 const struct device_type *type;
147 };
148 void subsys_dev_iter_init(struct subsys_dev_iter *iter,
149 struct bus_type *subsys,
150 struct device *start,
151 const struct device_type *type);
152 struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
153 void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
154
155 int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
156 int (*fn)(struct device *dev, void *data));
157 struct device *bus_find_device(struct bus_type *bus, struct device *start,
158 void *data,
159 int (*match)(struct device *dev, void *data));
160 struct device *bus_find_device_by_name(struct bus_type *bus,
161 struct device *start,
162 const char *name);
163 struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id,
164 struct device *hint);
165 int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
166 void *data, int (*fn)(struct device_driver *, void *));
167 void bus_sort_breadthfirst(struct bus_type *bus,
168 int (*compare)(const struct device *a,
169 const struct device *b));
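For illustration only, a sketch of walking a bus with bus_for_each_dev(); example_bus_type is the hypothetical bus from the sketch above:

/* Callback invoked for every device; a non-zero return stops the walk. */
static int example_count_one(struct device *dev, void *data)
{
        (*(int *)data)++;
        return 0;
}

static int example_count_devices(void)
{
        int count = 0;

        bus_for_each_dev(&example_bus_type, NULL, &count, example_count_one);
        return count;
}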
170 /*
171 * Bus notifiers: Get notified of addition/removal of devices
172 * and binding/unbinding of drivers to devices.
173 * In the long run, it should be a replacement for the platform
174 * notify hooks.
175 */
176 struct notifier_block;
177
178 extern int bus_register_notifier(struct bus_type *bus,
179 struct notifier_block *nb);
180 extern int bus_unregister_notifier(struct bus_type *bus,
181 struct notifier_block *nb);
182
183 /* All of the notifiers below get called with the target struct device *
184 * as an argument. Note that those functions are likely to be called
185 * with the device lock held in the core, so be careful.
186 */
187 #define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */
188 #define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device to be removed */
189 #define BUS_NOTIFY_REMOVED_DEVICE 0x00000003 /* device removed */
190 #define BUS_NOTIFY_BIND_DRIVER 0x00000004 /* driver about to be
191 bound */
192 #define BUS_NOTIFY_BOUND_DRIVER 0x00000005 /* driver bound to device */
193 #define BUS_NOTIFY_UNBIND_DRIVER 0x00000006 /* driver about to be
194 unbound */
195 #define BUS_NOTIFY_UNBOUND_DRIVER 0x00000007 /* driver is unbound
196 from the device */
197 #define BUS_NOTIFY_DRIVER_NOT_BOUND 0x00000008 /* driver fails to be bound */
198
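A hedged sketch of a bus notifier that logs BUS_NOTIFY_ADD_DEVICE events on the hypothetical example_bus_type; the message is an assumption:

#include <linux/notifier.h>

static int example_bus_notify(struct notifier_block *nb,
                              unsigned long action, void *data)
{
        struct device *dev = data;      /* bus notifiers receive the device */

        if (action == BUS_NOTIFY_ADD_DEVICE)
                dev_info(dev, "added to example bus\n");
        return NOTIFY_DONE;
}

static struct notifier_block example_bus_nb = {
        .notifier_call = example_bus_notify,
};

/* bus_register_notifier(&example_bus_type, &example_bus_nb);
 * bus_unregister_notifier(&example_bus_type, &example_bus_nb); */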
199 extern struct kset *bus_get_kset(struct bus_type *bus);
200 extern struct klist *bus_get_device_klist(struct bus_type *bus);
201
202 /**
203 * enum probe_type - device driver probe type to try
204 * Device drivers may opt in for special handling of their
205 * respective probe routines. This tells the core what to
206 * expect and prefer.
207 *
208 * @PROBE_DEFAULT_STRATEGY: Used by drivers that work equally well
209 * whether probed synchronously or asynchronously.
210 * @PROBE_PREFER_ASYNCHRONOUS: Drivers for "slow" devices whose
211 * probing order is not essential for booting the system may
212 * opt into executing their probes asynchronously.
213 * @PROBE_FORCE_SYNCHRONOUS: Use this to annotate drivers that need
214 * their probe routines to run synchronously with driver and
215 * device registration (with the exception of -EPROBE_DEFER
216 * handling - re-probing always ends up being done asynchronously).
217 *
218 * Note that the end goal is to switch the kernel to use asynchronous
219 * probing by default, so annotating drivers with
220 * %PROBE_PREFER_ASYNCHRONOUS is a temporary measure that allows us
221 * to speed up the boot process while we are validating the rest of the
222 * drivers.
223 */
224 enum probe_type {
225 PROBE_DEFAULT_STRATEGY,
226 PROBE_PREFER_ASYNCHRONOUS,
227 PROBE_FORCE_SYNCHRONOUS,
228 };
229
230 /**
231 * struct device_driver - The basic device driver structure
232 * @name: Name of the device driver.
233 * @bus: The bus to which the devices of this driver belong.
234 * @owner: The module owner.
235 * @mod_name: Used for built-in modules.
236 * @suppress_bind_attrs: Disables bind/unbind via sysfs.
237 * @probe_type: Type of the probe (synchronous or asynchronous) to use.
238 * @of_match_table: The open firmware table.
239 * @acpi_match_table: The ACPI match table.
240 * @probe: Called to query the existence of a specific device,
241 * whether this driver can work with it, and bind the driver
242 * to a specific device.
243 * @remove: Called when the device is removed from the system to
244 * unbind a device from this driver.
245 * @shutdown: Called at shut-down time to quiesce the device.
246 * @suspend: Called to put the device into sleep mode, usually a
247 * low-power state.
248 * @resume: Called to bring a device back out of sleep mode.
249 * @groups: Default attributes that get created by the driver core
250 * automatically.
251 * @pm: Power management operations of the device which matched
252 * this driver.
253 * @p: Driver core's private data, no one other than the driver
254 * core can touch this.
255 *
256 * The device driver-model tracks all of the drivers known to the system.
257 * The main reason for this tracking is to enable the driver core to match
258 * up drivers with new devices. Once drivers are known objects within the
259 * system, however, a number of other things become possible. Device drivers
260 * can export information and configuration variables that are independent
261 * of any specific device.
262 */
263 struct device_driver {
264 const char *name;
265 struct bus_type *bus;
266
267 struct module *owner;
268 const char *mod_name; /* used for built-in modules */
269
270 bool suppress_bind_attrs; /* disables bind/unbind via sysfs */
271 enum probe_type probe_type;
272
273 const struct of_device_id *of_match_table;
274 const struct acpi_device_id *acpi_match_table;
275
276 int (*probe) (struct device *dev);
277 int (*remove) (struct device *dev);
278 void (*shutdown) (struct device *dev);
279 int (*suspend) (struct device *dev, pm_message_t state);
280 int (*resume) (struct device *dev);
281 const struct attribute_group **groups;
282
283 const struct dev_pm_ops *pm;
284
285 struct driver_private *p;
286 };
287
288
289 extern int __must_check driver_register(struct device_driver *drv);
290 extern void driver_unregister(struct device_driver *drv);
291
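A hedged sketch of a bare driver on the hypothetical example_bus_type; real drivers normally embed struct device_driver inside a bus-specific driver type (platform_driver, pci_driver, ...), so the flat form below is for illustration only:

#include <linux/device.h>
#include <linux/module.h>

static int example_probe(struct device *dev)
{
        dev_info(dev, "bound to example driver\n");
        return 0;
}

static struct device_driver example_driver = {
        .name       = "example",
        .bus        = &example_bus_type,
        .owner      = THIS_MODULE,
        .probe_type = PROBE_PREFER_ASYNCHRONOUS,  /* probe may run asynchronously */
        .probe      = example_probe,
};

/* driver_register(&example_driver) in module init,
 * driver_unregister(&example_driver) in module exit. */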
292 extern struct device_driver *driver_find(const char *name,
293 struct bus_type *bus);
294 extern int driver_probe_done(void);
295 extern void wait_for_device_probe(void);
296
297
298 /* sysfs interface for exporting driver attributes */
299
300 struct driver_attribute {
301 struct attribute attr;
302 ssize_t (*show)(struct device_driver *driver, char *buf);
303 ssize_t (*store)(struct device_driver *driver, const char *buf,
304 size_t count);
305 };
306
307 #define DRIVER_ATTR(_name, _mode, _show, _store) \
308 struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store)
309 #define DRIVER_ATTR_RW(_name) \
310 struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
311 #define DRIVER_ATTR_RO(_name) \
312 struct driver_attribute driver_attr_##_name = __ATTR_RO(_name)
313 #define DRIVER_ATTR_WO(_name) \
314 struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)
315
316 extern int __must_check driver_create_file(struct device_driver *driver,
317 const struct driver_attribute *attr);
318 extern void driver_remove_file(struct device_driver *driver,
319 const struct driver_attribute *attr);
320
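A sketch of a read-only driver attribute (the attribute name and value are assumptions); it would show up under /sys/bus/<bus>/drivers/<driver>/:

static ssize_t version_show(struct device_driver *drv, char *buf)
{
        return sprintf(buf, "1.0\n");
}
static DRIVER_ATTR_RO(version);   /* emits struct driver_attribute driver_attr_version */

/* driver_create_file(&example_driver, &driver_attr_version);
 * driver_remove_file(&example_driver, &driver_attr_version); */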
321 extern int __must_check driver_for_each_device(struct device_driver *drv,
322 struct device *start,
323 void *data,
324 int (*fn)(struct device *dev,
325 void *));
326 struct device *driver_find_device(struct device_driver *drv,
327 struct device *start, void *data,
328 int (*match)(struct device *dev, void *data));
329
330 /**
331 * struct subsys_interface - interfaces to device functions
332 * @name: name of the device function
333 * @subsys: subsystem of the devices to attach to
334 * @node: the list of functions registered at the subsystem
335 * @add_dev: called for each device already in, or later added to, the subsystem
336 * @remove_dev: called for each device removed from the subsystem
337 *
338 * Simple interfaces attached to a subsystem. Multiple interfaces can
339 * attach to a subsystem and its devices. Unlike drivers, they do not
340 * exclusively claim or control devices. Interfaces usually represent
341 * a specific functionality of a subsystem/class of devices.
342 */
343 struct subsys_interface {
344 const char *name;
345 struct bus_type *subsys;
346 struct list_head node;
347 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
348 void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
349 };
350
351 int subsys_interface_register(struct subsys_interface *sif);
352 void subsys_interface_unregister(struct subsys_interface *sif);
353
354 int subsys_system_register(struct bus_type *subsys,
355 const struct attribute_group **groups);
356 int subsys_virtual_register(struct bus_type *subsys,
357 const struct attribute_group **groups);
358
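A hedged sketch of a subsystem interface attached to the hypothetical example_bus_type; add_dev() fires for devices already present at registration time and for any added later:

static int example_add_dev(struct device *dev, struct subsys_interface *sif)
{
        dev_info(dev, "visible to interface %s\n", sif->name);
        return 0;
}

static void example_remove_dev(struct device *dev, struct subsys_interface *sif)
{
        dev_info(dev, "leaving interface %s\n", sif->name);
}

static struct subsys_interface example_sif = {
        .name       = "example_iface",
        .subsys     = &example_bus_type,
        .add_dev    = example_add_dev,
        .remove_dev = example_remove_dev,
};

/* subsys_interface_register(&example_sif);
 * subsys_interface_unregister(&example_sif); */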
359 /**
360 * struct class - device classes
361 * @name: Name of the class.
362 * @owner: The module owner.
363 * @class_attrs: Default attributes of this class.
364 * @dev_groups: Default attributes of the devices that belong to the class.
365 * @dev_kobj: The kobject that represents this class and links it into the hierarchy.
366 * @dev_uevent: Called when a device is added, removed from this class, or a
367 * few other things that generate uevents to add the environment
368 * variables.
369 * @devnode: Callback to provide the device node name used by devtmpfs.
370 * @class_release: Called to release this class.
371 * @dev_release: Called to release the device.
372 * @suspend: Used to put the device into sleep mode, usually a low-power
373 * state.
374 * @resume: Used to bring the device back out of sleep mode.
375 * @ns_type: Callbacks so sysfs can determine namespaces.
376 * @namespace: Namespace of the devices that belong to this class.
377 * @pm: The default device power management operations of this class.
378 * @p: The private data of the driver core, no one other than the
379 * driver core can touch this.
380 *
381 * A class is a higher-level view of a device that abstracts out low-level
382 * implementation details. Drivers may see a SCSI disk or an ATA disk, but,
383 * at the class level, they are all simply disks. Classes allow user space
384 * to work with devices based on what they do, rather than how they are
385 * connected or how they work.
386 */
387 struct class {
388 const char *name;
389 struct module *owner;
390
391 struct class_attribute *class_attrs;
392 const struct attribute_group **dev_groups;
393 struct kobject *dev_kobj;
394
395 int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
396 char *(*devnode)(struct device *dev, umode_t *mode);
397
398 void (*class_release)(struct class *class);
399 void (*dev_release)(struct device *dev);
400
401 int (*suspend)(struct device *dev, pm_message_t state);
402 int (*resume)(struct device *dev);
403
404 const struct kobj_ns_type_operations *ns_type;
405 const void *(*namespace)(struct device *dev);
406
407 const struct dev_pm_ops *pm;
408
409 struct subsys_private *p;
410 };
411
412 struct class_dev_iter {
413 struct klist_iter ki;
414 const struct device_type *type;
415 };
416
417 extern struct kobject *sysfs_dev_block_kobj;
418 extern struct kobject *sysfs_dev_char_kobj;
419 extern int __must_check __class_register(struct class *class,
420 struct lock_class_key *key);
421 extern void class_unregister(struct class *class);
422
423 /* This is a #define to keep the compiler from merging different
424 * instances of the __key variable */
425 #define class_register(class) \
426 ({ \
427 static struct lock_class_key __key; \
428 __class_register(class, &__key); \
429 })
430
431 struct class_compat;
432 struct class_compat *class_compat_register(const char *name);
433 void class_compat_unregister(struct class_compat *cls);
434 int class_compat_create_link(struct class_compat *cls, struct device *dev,
435 struct device *device_link);
436 void class_compat_remove_link(struct class_compat *cls, struct device *dev,
437 struct device *device_link);
438
439 extern void class_dev_iter_init(struct class_dev_iter *iter,
440 struct class *class,
441 struct device *start,
442 const struct device_type *type);
443 extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
444 extern void class_dev_iter_exit(struct class_dev_iter *iter);
445
446 extern int class_for_each_device(struct class *class, struct device *start,
447 void *data,
448 int (*fn)(struct device *dev, void *data));
449 extern struct device *class_find_device(struct class *class,
450 struct device *start, const void *data,
451 int (*match)(struct device *, const void *));
452
453 struct class_attribute {
454 struct attribute attr;
455 ssize_t (*show)(struct class *class, struct class_attribute *attr,
456 char *buf);
457 ssize_t (*store)(struct class *class, struct class_attribute *attr,
458 const char *buf, size_t count);
459 };
460
461 #define CLASS_ATTR(_name, _mode, _show, _store) \
462 struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store)
463 #define CLASS_ATTR_RW(_name) \
464 struct class_attribute class_attr_##_name = __ATTR_RW(_name)
465 #define CLASS_ATTR_RO(_name) \
466 struct class_attribute class_attr_##_name = __ATTR_RO(_name)
467
468 extern int __must_check class_create_file_ns(struct class *class,
469 const struct class_attribute *attr,
470 const void *ns);
471 extern void class_remove_file_ns(struct class *class,
472 const struct class_attribute *attr,
473 const void *ns);
474
475 static inline int __must_check class_create_file(struct class *class,
476 const struct class_attribute *attr)
477 {
478 return class_create_file_ns(class, attr, NULL);
479 }
480
481 static inline void class_remove_file(struct class *class,
482 const struct class_attribute *attr)
483 {
484 return class_remove_file_ns(class, attr, NULL);
485 }
486
487 /* Simple class attribute that is just a static string */
488 struct class_attribute_string {
489 struct class_attribute attr;
490 char *str;
491 };
492
493 /* Currently only a read-only variant is provided */
494 #define _CLASS_ATTR_STRING(_name, _mode, _str) \
495 { __ATTR(_name, _mode, show_class_attr_string, NULL), _str }
496 #define CLASS_ATTR_STRING(_name, _mode, _str) \
497 struct class_attribute_string class_attr_##_name = \
498 _CLASS_ATTR_STRING(_name, _mode, _str)
499
500 extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr,
501 char *buf);
502
503 struct class_interface {
504 struct list_head node;
505 struct class *class;
506
507 int (*add_dev) (struct device *, struct class_interface *);
508 void (*remove_dev) (struct device *, struct class_interface *);
509 };
510
511 extern int __must_check class_interface_register(struct class_interface *);
512 extern void class_interface_unregister(struct class_interface *);
513
514 extern struct class * __must_check __class_create(struct module *owner,
515 const char *name,
516 struct lock_class_key *key);
517 extern void class_destroy(struct class *cls);
518
519 /* This is a #define to keep the compiler from merging different
520 * instances of the __key variable */
521 #define class_create(owner, name) \
522 ({ \
523 static struct lock_class_key __key; \
524 __class_create(owner, name, &__key); \
525 })
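A hedged sketch of creating and destroying a class from module init/exit; the "widget" class name is an assumption for illustration:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>

static struct class *widget_class;

static int __init widget_init(void)
{
        widget_class = class_create(THIS_MODULE, "widget");
        if (IS_ERR(widget_class))
                return PTR_ERR(widget_class);   /* class_create() returns ERR_PTR on failure */
        return 0;
}

static void __exit widget_exit(void)
{
        class_destroy(widget_class);
}

module_init(widget_init);
module_exit(widget_exit);
MODULE_LICENSE("GPL");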
526
527 /*
528 * The type of device a "struct device" is embedded in. A class
529 * or bus can contain devices of different types
530 * like "partitions" and "disks", "mouse" and "event".
531 * This identifies the device type and carries type-specific
532 * information, equivalent to the kobj_type of a kobject.
533 * If "name" is specified, the uevent will contain it in
534 * the DEVTYPE variable.
535 */
536 struct device_type {
537 const char *name;
538 const struct attribute_group **groups;
539 int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
540 char *(*devnode)(struct device *dev, umode_t *mode,
541 kuid_t *uid, kgid_t *gid);
542 void (*release)(struct device *dev);
543
544 const struct dev_pm_ops *pm;
545 };
546
547 /* interface for exporting device attributes */
548 struct device_attribute {
549 struct attribute attr;
550 ssize_t (*show)(struct device *dev, struct device_attribute *attr,
551 char *buf);
552 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
553 const char *buf, size_t count);
554 };
555
556 struct dev_ext_attribute {
557 struct device_attribute attr;
558 void *var;
559 };
560
561 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
562 char *buf);
563 ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr,
564 const char *buf, size_t count);
565 ssize_t device_show_int(struct device *dev, struct device_attribute *attr,
566 char *buf);
567 ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
568 const char *buf, size_t count);
569 ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
570 char *buf);
571 ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
572 const char *buf, size_t count);
573
574 #define DEVICE_ATTR(_name, _mode, _show, _store) \
575 struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
576 #define DEVICE_ATTR_RW(_name) \
577 struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
578 #define DEVICE_ATTR_RO(_name) \
579 struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
580 #define DEVICE_ATTR_WO(_name) \
581 struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
582 #define DEVICE_ULONG_ATTR(_name, _mode, _var) \
583 struct dev_ext_attribute dev_attr_##_name = \
584 { __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
585 #define DEVICE_INT_ATTR(_name, _mode, _var) \
586 struct dev_ext_attribute dev_attr_##_name = \
587 { __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }
588 #define DEVICE_BOOL_ATTR(_name, _mode, _var) \
589 struct dev_ext_attribute dev_attr_##_name = \
590 { __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }
591 #define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
592 struct device_attribute dev_attr_##_name = \
593 __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
594
595 extern int device_create_file(struct device *device,
596 const struct device_attribute *entry);
597 extern void device_remove_file(struct device *dev,
598 const struct device_attribute *attr);
599 extern bool device_remove_file_self(struct device *dev,
600 const struct device_attribute *attr);
601 extern int __must_check device_create_bin_file(struct device *dev,
602 const struct bin_attribute *attr);
603 extern void device_remove_bin_file(struct device *dev,
604 const struct bin_attribute *attr);
605
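A hedged sketch of a read/write device attribute built from the macros above; the private structure and the "threshold" name are assumptions:

struct example_priv {
        unsigned int threshold;
};

static ssize_t threshold_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct example_priv *priv = dev_get_drvdata(dev);

        return sprintf(buf, "%u\n", priv->threshold);
}

static ssize_t threshold_store(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t count)
{
        struct example_priv *priv = dev_get_drvdata(dev);

        if (kstrtouint(buf, 0, &priv->threshold))
                return -EINVAL;
        return count;
}
static DEVICE_ATTR_RW(threshold);   /* emits struct device_attribute dev_attr_threshold */

/* device_create_file(dev, &dev_attr_threshold) in probe,
 * device_remove_file(dev, &dev_attr_threshold) in remove. */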
606 /* device resource management */
607 typedef void (*dr_release_t)(struct device *dev, void *res);
608 typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
609
610 #ifdef CONFIG_DEBUG_DEVRES
611 extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
612 int nid, const char *name) __malloc;
613 #define devres_alloc(release, size, gfp) \
614 __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
615 #define devres_alloc_node(release, size, gfp, nid) \
616 __devres_alloc_node(release, size, gfp, nid, #release)
617 #else
618 extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
619 int nid) __malloc;
620 static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
621 {
622 return devres_alloc_node(release, size, gfp, NUMA_NO_NODE);
623 }
624 #endif
625
626 extern void devres_for_each_res(struct device *dev, dr_release_t release,
627 dr_match_t match, void *match_data,
628 void (*fn)(struct device *, void *, void *),
629 void *data);
630 extern void devres_free(void *res);
631 extern void devres_add(struct device *dev, void *res);
632 extern void *devres_find(struct device *dev, dr_release_t release,
633 dr_match_t match, void *match_data);
634 extern void *devres_get(struct device *dev, void *new_res,
635 dr_match_t match, void *match_data);
636 extern void *devres_remove(struct device *dev, dr_release_t release,
637 dr_match_t match, void *match_data);
638 extern int devres_destroy(struct device *dev, dr_release_t release,
639 dr_match_t match, void *match_data);
640 extern int devres_release(struct device *dev, dr_release_t release,
641 dr_match_t match, void *match_data);
642
643 /* devres group */
644 extern void * __must_check devres_open_group(struct device *dev, void *id,
645 gfp_t gfp);
646 extern void devres_close_group(struct device *dev, void *id);
647 extern void devres_remove_group(struct device *dev, void *id);
648 extern int devres_release_group(struct device *dev, void *id);
649
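To show how these pieces fit together, a hedged sketch of a custom managed resource built from devres_alloc()/devres_add(); the release callback runs automatically when the device is unbound. The helper name and the ioremap details are assumptions:

#include <linux/io.h>

struct example_iomap_res {
        void __iomem *base;
};

static void example_iomap_release(struct device *dev, void *res)
{
        struct example_iomap_res *r = res;

        iounmap(r->base);
}

static void __iomem *example_iomap_managed(struct device *dev,
                                           resource_size_t start, size_t len)
{
        struct example_iomap_res *r;

        r = devres_alloc(example_iomap_release, sizeof(*r), GFP_KERNEL);
        if (!r)
                return NULL;

        r->base = ioremap(start, len);
        if (!r->base) {
                devres_free(r);                 /* nothing registered yet */
                return NULL;
        }

        devres_add(dev, r);                     /* released on driver detach */
        return r->base;
}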
650 /* managed devm_k.alloc/kfree for device drivers */
651 extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc;
652 extern __printf(3, 0)
653 char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
654 va_list ap) __malloc;
655 extern __printf(3, 4)
656 char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) __malloc;
657 static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
658 {
659 return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
660 }
661 static inline void *devm_kmalloc_array(struct device *dev,
662 size_t n, size_t size, gfp_t flags)
663 {
664 if (size != 0 && n > SIZE_MAX / size)
665 return NULL;
666 return devm_kmalloc(dev, n * size, flags);
667 }
668 static inline void *devm_kcalloc(struct device *dev,
669 size_t n, size_t size, gfp_t flags)
670 {
671 return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
672 }
673 extern void devm_kfree(struct device *dev, void *p);
674 extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
675 extern void *devm_kmemdup(struct device *dev, const void *src, size_t len,
676 gfp_t gfp);
677
678 extern unsigned long devm_get_free_pages(struct device *dev,
679 gfp_t gfp_mask, unsigned int order);
680 extern void devm_free_pages(struct device *dev, unsigned long addr);
681
682 void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
683
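A hedged probe-path sketch using the managed allocators above; the private structure is hypothetical and the struct resource is assumed to come from the caller (for example via a bus helper such as platform_get_resource()):

#include <linux/err.h>
#include <linux/ioport.h>

struct widget_priv {
        void __iomem *regs;
};

static int widget_setup(struct device *dev, struct resource *res)
{
        struct widget_priv *priv;

        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->regs = devm_ioremap_resource(dev, res);   /* unmapped automatically */
        if (IS_ERR(priv->regs))
                return PTR_ERR(priv->regs);

        dev_set_drvdata(dev, priv);
        return 0;       /* both allocations are freed when the device is unbound */
}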
684 /* allows adding/removing a custom action to the devres stack */
685 int devm_add_action(struct device *dev, void (*action)(void *), void *data);
686 void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
687
688 static inline int devm_add_action_or_reset(struct device *dev,
689 void (*action)(void *), void *data)
690 {
691 int ret;
692
693 ret = devm_add_action(dev, action, data);
694 if (ret)
695 action(data);
696
697 return ret;
698 }
699
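A short sketch of hooking an arbitrary cleanup into the devres stack; the hardware enable/disable helpers are assumptions:

static void example_hw_disable(void *data)
{
        /* undo whatever example_hw_enable() did (hypothetical helper) */
}

static int example_hw_enable_managed(struct device *dev, void *hw)
{
        /* example_hw_enable(hw); */

        /* On failure the action runs immediately, so no manual unwind is needed. */
        return devm_add_action_or_reset(dev, example_hw_disable, hw);
}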
700 struct device_dma_parameters {
701 /*
702 * a low level driver may set these to teach IOMMU code about
703 * sg limitations.
704 */
705 unsigned int max_segment_size;
706 unsigned long segment_boundary_mask;
707 };
708
709 /**
710 * struct device - The basic device structure
711 * @parent: The device's "parent" device, the device to which it is attached.
712 * In most cases, a parent device is some sort of bus or host
713 * controller. If parent is NULL, the device is a top-level device,
714 * which is not usually what you want.
715 * @p: Holds the private data of the driver core portions of the device.
716 * See the comment of the struct device_private for detail.
717 * @kobj: A top-level, abstract class from which other classes are derived.
718 * @init_name: Initial name of the device.
719 * @type: The type of device.
720 * This identifies the device type and carries type-specific
721 * information.
722 * @mutex: Mutex to synchronize calls to its driver.
723 * @bus: Type of bus device is on.
724 * @driver: Which driver has allocated this device.
725 * @platform_data: Platform data specific to the device.
726 * Example: For devices on custom boards, as typical of embedded
727 * and SOC based hardware, Linux often uses platform_data to point
728 * to board-specific structures describing devices and how they
729 * are wired. That can include what ports are available, chip
730 * variants, which GPIO pins act in what additional roles, and so
731 * on. This shrinks the "Board Support Packages" (BSPs) and
732 * minimizes board-specific #ifdefs in drivers.
733 * @driver_data: Private pointer for driver specific info.
734 * @power: For device power management.
735 * See Documentation/power/devices.txt for details.
736 * @pm_domain: Provide callbacks that are executed during system suspend,
737 * hibernation, system resume and during runtime PM transitions
738 * along with subsystem-level and driver-level callbacks.
739 * @pins: For device pin management.
740 * See Documentation/pinctrl.txt for details.
741 * @msi_list: Hosts MSI descriptors
742 * @msi_domain: The generic MSI domain this device is using.
743 * @numa_node: NUMA node this device is close to.
744 * @dma_mask: Dma mask (if dma'ble device).
745 * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
746 * hardware supports 64-bit addresses for consistent allocations
747 * such as descriptors.
748 * @dma_pfn_offset: offset of the DMA memory range relative to RAM
749 * @dma_parms: A low level driver may set these to teach IOMMU code about
750 * segment limitations.
751 * @dma_pools: Dma pools (if dma'ble device).
752 * @dma_mem: Internal for coherent mem override.
753 * @cma_area: Contiguous memory area for dma allocations
754 * @archdata: For arch-specific additions.
755 * @of_node: Associated device tree node.
756 * @fwnode: Associated device node supplied by platform firmware.
757 * @devt: For creating the sysfs "dev".
758 * @id: device instance
759 * @devres_lock: Spinlock to protect the resource of the device.
760 * @devres_head: The resources list of the device.
761 * @knode_class: The node used to add the device to the class list.
762 * @class: The class of the device.
763 * @groups: Optional attribute groups.
764 * @release: Callback to free the device after all references have
765 * gone away. This should be set by the allocator of the
766 * device (i.e. the bus driver that discovered the device).
767 * @iommu_group: IOMMU group the device belongs to.
768 *
769 * @offline_disabled: If set, the device is permanently online.
770 * @offline: Set after successful invocation of bus type's .offline().
771 *
772 * At the lowest level, every device in a Linux system is represented by an
773 * instance of struct device. The device structure contains the information
774 * that the device model core needs to model the system. Most subsystems,
775 * however, track additional information about the devices they host. As a
776 * result, it is rare for devices to be represented by bare device structures;
777 * instead, that structure, like kobject structures, is usually embedded within
778 * a higher-level representation of the device.
779 */
780 struct device {
781 struct device *parent;
782
783 struct device_private *p;
784
785 struct kobject kobj;
786 const char *init_name; /* initial name of the device */
787 const struct device_type *type;
788
789 struct mutex mutex; /* mutex to synchronize calls to
790 * its driver.
791 */
792
793 struct bus_type *bus; /* type of bus device is on */
794 struct device_driver *driver; /* which driver has allocated this
795 device */
796 void *platform_data; /* Platform specific data, device
797 core doesn't touch it */
798 void *driver_data; /* Driver data, set and get with
799 dev_set/get_drvdata */
800 struct dev_pm_info power;
801 struct dev_pm_domain *pm_domain;
802
803 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
804 struct irq_domain *msi_domain;
805 #endif
806 #ifdef CONFIG_PINCTRL
807 struct dev_pin_info *pins;
808 #endif
809 #ifdef CONFIG_GENERIC_MSI_IRQ
810 struct list_head msi_list;
811 #endif
812
813 #ifdef CONFIG_NUMA
814 int numa_node; /* NUMA node this device is close to */
815 #endif
816 u64 *dma_mask; /* dma mask (if dma'able device) */
817 u64 coherent_dma_mask;/* Like dma_mask, but for
818 alloc_coherent mappings as
819 not all hardware supports
820 64 bit addresses for consistent
821 allocations such as descriptors. */
822 unsigned long dma_pfn_offset;
823
824 struct device_dma_parameters *dma_parms;
825
826 struct list_head dma_pools; /* dma pools (if dma'ble) */
827
828 struct dma_coherent_mem *dma_mem; /* internal for coherent mem
829 override */
830 #ifdef CONFIG_DMA_CMA
831 struct cma *cma_area; /* contiguous memory area for dma
832 allocations */
833 #endif
834 /* arch specific additions */
835 struct dev_archdata archdata;
836
837 struct device_node *of_node; /* associated device tree node */
838 struct fwnode_handle *fwnode; /* firmware device node */
839
840 dev_t devt; /* dev_t, creates the sysfs "dev" */
841 u32 id; /* device instance */
842
843 spinlock_t devres_lock;
844 struct list_head devres_head;
845
846 struct klist_node knode_class;
847 struct class *class;
848 const struct attribute_group **groups; /* optional groups */
849
850 void (*release)(struct device *dev);
851 struct iommu_group *iommu_group;
852
853 bool offline_disabled:1;
854 bool offline:1;
855 };
856
857 static inline struct device *kobj_to_dev(struct kobject *kobj)
858 {
859 return container_of(kobj, struct device, kobj);
860 }
861
862 /* Get the wakeup routines, which depend on struct device */
863 #include <linux/pm_wakeup.h>
864
865 static inline const char *dev_name(const struct device *dev)
866 {
867 /* Use the init name until the kobject becomes available */
868 if (dev->init_name)
869 return dev->init_name;
870
871 return kobject_name(&dev->kobj);
872 }
873
874 extern __printf(2, 3)
875 int dev_set_name(struct device *dev, const char *name, ...);
876
877 #ifdef CONFIG_NUMA
878 static inline int dev_to_node(struct device *dev)
879 {
880 return dev->numa_node;
881 }
882 static inline void set_dev_node(struct device *dev, int node)
883 {
884 dev->numa_node = node;
885 }
886 #else
887 static inline int dev_to_node(struct device *dev)
888 {
889 return -1;
890 }
891 static inline void set_dev_node(struct device *dev, int node)
892 {
893 }
894 #endif
895
896 static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
897 {
898 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
899 return dev->msi_domain;
900 #else
901 return NULL;
902 #endif
903 }
904
905 static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
906 {
907 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
908 dev->msi_domain = d;
909 #endif
910 }
911
912 static inline void *dev_get_drvdata(const struct device *dev)
913 {
914 return dev->driver_data;
915 }
916
917 static inline void dev_set_drvdata(struct device *dev, void *data)
918 {
919 dev->driver_data = data;
920 }
921
922 static inline struct pm_subsys_data *dev_to_psd(struct device *dev)
923 {
924 return dev ? dev->power.subsys_data : NULL;
925 }
926
927 static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
928 {
929 return dev->kobj.uevent_suppress;
930 }
931
932 static inline void dev_set_uevent_suppress(struct device *dev, int val)
933 {
934 dev->kobj.uevent_suppress = val;
935 }
936
937 static inline int device_is_registered(struct device *dev)
938 {
939 return dev->kobj.state_in_sysfs;
940 }
941
942 static inline void device_enable_async_suspend(struct device *dev)
943 {
944 if (!dev->power.is_prepared)
945 dev->power.async_suspend = true;
946 }
947
948 static inline void device_disable_async_suspend(struct device *dev)
949 {
950 if (!dev->power.is_prepared)
951 dev->power.async_suspend = false;
952 }
953
954 static inline bool device_async_suspend_enabled(struct device *dev)
955 {
956 return !!dev->power.async_suspend;
957 }
958
959 static inline void dev_pm_syscore_device(struct device *dev, bool val)
960 {
961 #ifdef CONFIG_PM_SLEEP
962 dev->power.syscore = val;
963 #endif
964 }
965
966 static inline void device_lock(struct device *dev)
967 {
968 mutex_lock(&dev->mutex);
969 }
970
971 static inline int device_lock_interruptible(struct device *dev)
972 {
973 return mutex_lock_interruptible(&dev->mutex);
974 }
975
976 static inline int device_trylock(struct device *dev)
977 {
978 return mutex_trylock(&dev->mutex);
979 }
980
981 static inline void device_unlock(struct device *dev)
982 {
983 mutex_unlock(&dev->mutex);
984 }
985
986 static inline void device_lock_assert(struct device *dev)
987 {
988 lockdep_assert_held(&dev->mutex);
989 }
990
991 static inline struct device_node *dev_of_node(struct device *dev)
992 {
993 if (!IS_ENABLED(CONFIG_OF))
994 return NULL;
995 return dev->of_node;
996 }
997
998 void driver_init(void);
999
1000 /*
1001 * High level routines for use by the bus drivers
1002 */
1003 extern int __must_check device_register(struct device *dev);
1004 extern void device_unregister(struct device *dev);
1005 extern void device_initialize(struct device *dev);
1006 extern int __must_check device_add(struct device *dev);
1007 extern void device_del(struct device *dev);
1008 extern int device_for_each_child(struct device *dev, void *data,
1009 int (*fn)(struct device *dev, void *data));
1010 extern int device_for_each_child_reverse(struct device *dev, void *data,
1011 int (*fn)(struct device *dev, void *data));
1012 extern struct device *device_find_child(struct device *dev, void *data,
1013 int (*match)(struct device *dev, void *data));
1014 extern int device_rename(struct device *dev, const char *new_name);
1015 extern int device_move(struct device *dev, struct device *new_parent,
1016 enum dpm_order dpm_order);
1017 extern const char *device_get_devnode(struct device *dev,
1018 umode_t *mode, kuid_t *uid, kgid_t *gid,
1019 const char **tmp);
1020
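A hedged sketch of the split registration path: device_initialize() plus device_add() is what device_register() does internally, but the split lets the caller fill in fields in between. The bus, the name and the empty release callback are illustrative assumptions (example_bus_type is the hypothetical bus from the earlier sketch):

static void example_dev_release(struct device *dev)
{
        /* nothing dynamically allocated in this static example */
}

static struct device example_dev;

static int example_add_device(void)
{
        int ret;

        device_initialize(&example_dev);
        example_dev.bus = &example_bus_type;
        example_dev.release = example_dev_release;

        ret = dev_set_name(&example_dev, "example0");
        if (ret)
                goto out_put;

        ret = device_add(&example_dev);
        if (ret)
                goto out_put;
        return 0;

out_put:
        put_device(&example_dev);  /* drops the reference taken by device_initialize() */
        return ret;
}

/* Teardown is device_del(&example_dev); put_device(&example_dev);
 * or simply device_unregister(&example_dev). */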
1021 static inline bool device_supports_offline(struct device *dev)
1022 {
1023 return dev->bus && dev->bus->offline && dev->bus->online;
1024 }
1025
1026 extern void lock_device_hotplug(void);
1027 extern void unlock_device_hotplug(void);
1028 extern int lock_device_hotplug_sysfs(void);
1029 extern int device_offline(struct device *dev);
1030 extern int device_online(struct device *dev);
1031 extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
1032 extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
1033
1034 /*
1035 * Root device objects for grouping under /sys/devices
1036 */
1037 extern struct device *__root_device_register(const char *name,
1038 struct module *owner);
1039
1040 /* This is a macro to avoid include problems with THIS_MODULE */
1041 #define root_device_register(name) \
1042 __root_device_register(name, THIS_MODULE)
1043
1044 extern void root_device_unregister(struct device *root);
1045
1046 static inline void *dev_get_platdata(const struct device *dev)
1047 {
1048 return dev->platform_data;
1049 }
1050
1051 /*
1052 * Manual binding of a device to driver. See drivers/base/bus.c
1053 * for information on use.
1054 */
1055 extern int __must_check device_bind_driver(struct device *dev);
1056 extern void device_release_driver(struct device *dev);
1057 extern int __must_check device_attach(struct device *dev);
1058 extern int __must_check driver_attach(struct device_driver *drv);
1059 extern void device_initial_probe(struct device *dev);
1060 extern int __must_check device_reprobe(struct device *dev);
1061
1062 extern bool device_is_bound(struct device *dev);
1063
1064 /*
1065 * Easy functions for dynamically creating devices on the fly
1066 */
1067 extern __printf(5, 0)
1068 struct device *device_create_vargs(struct class *cls, struct device *parent,
1069 dev_t devt, void *drvdata,
1070 const char *fmt, va_list vargs);
1071 extern __printf(5, 6)
1072 struct device *device_create(struct class *cls, struct device *parent,
1073 dev_t devt, void *drvdata,
1074 const char *fmt, ...);
1075 extern __printf(6, 7)
1076 struct device *device_create_with_groups(struct class *cls,
1077 struct device *parent, dev_t devt, void *drvdata,
1078 const struct attribute_group **groups,
1079 const char *fmt, ...);
1080 extern void device_destroy(struct class *cls, dev_t devt);
1081
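A hedged sketch of device_create()/device_destroy() against the hypothetical widget_class from the earlier class sketch; the dev_t and the name format are assumptions:

#include <linux/kdev_t.h>

static struct device *widget_create_node(struct class *cls,
                                         struct device *parent, dev_t devt)
{
        struct device *d;

        d = device_create(cls, parent, devt, NULL, "widget%d", MINOR(devt));
        return d;       /* IS_ERR(d) on failure */
}

/* Removed later with device_destroy(cls, devt). */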
1082 /*
1083 * Platform "fixup" functions - allow the platform to have its say
1084 * about devices and actions that the general device layer doesn't
1085 * know about.
1086 */
1087 /* Notify platform of device discovery */
1088 extern int (*platform_notify)(struct device *dev);
1089
1090 extern int (*platform_notify_remove)(struct device *dev);
1091
1092
1093 /*
1094 * get_device - atomically increment the reference count for the device.
1095 *
1096 */
1097 extern struct device *get_device(struct device *dev);
1098 extern void put_device(struct device *dev);
1099
1100 #ifdef CONFIG_DEVTMPFS
1101 extern int devtmpfs_create_node(struct device *dev);
1102 extern int devtmpfs_delete_node(struct device *dev);
1103 extern int devtmpfs_mount(const char *mntdir);
1104 #else
1105 static inline int devtmpfs_create_node(struct device *dev) { return 0; }
1106 static inline int devtmpfs_delete_node(struct device *dev) { return 0; }
1107 static inline int devtmpfs_mount(const char *mountpoint) { return 0; }
1108 #endif
1109
1110 /* drivers/base/power/shutdown.c */
1111 extern void device_shutdown(void);
1112
1113 /* debugging and troubleshooting/diagnostic helpers. */
1114 extern const char *dev_driver_string(const struct device *dev);
1115
1116
1117 #ifdef CONFIG_PRINTK
1118
1119 extern __printf(3, 0)
1120 int dev_vprintk_emit(int level, const struct device *dev,
1121 const char *fmt, va_list args);
1122 extern __printf(3, 4)
1123 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
1124
1125 extern __printf(3, 4)
1126 void dev_printk(const char *level, const struct device *dev,
1127 const char *fmt, ...);
1128 extern __printf(2, 3)
1129 void dev_emerg(const struct device *dev, const char *fmt, ...);
1130 extern __printf(2, 3)
1131 void dev_alert(const struct device *dev, const char *fmt, ...);
1132 extern __printf(2, 3)
1133 void dev_crit(const struct device *dev, const char *fmt, ...);
1134 extern __printf(2, 3)
1135 void dev_err(const struct device *dev, const char *fmt, ...);
1136 extern __printf(2, 3)
1137 void dev_warn(const struct device *dev, const char *fmt, ...);
1138 extern __printf(2, 3)
1139 void dev_notice(const struct device *dev, const char *fmt, ...);
1140 extern __printf(2, 3)
1141 void _dev_info(const struct device *dev, const char *fmt, ...);
1142
1143 #else
1144
1145 static inline __printf(3, 0)
1146 int dev_vprintk_emit(int level, const struct device *dev,
1147 const char *fmt, va_list args)
1148 { return 0; }
1149 static inline __printf(3, 4)
1150 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
1151 { return 0; }
1152
1153 static inline void __dev_printk(const char *level, const struct device *dev,
1154 struct va_format *vaf)
1155 {}
1156 static inline __printf(3, 4)
1157 void dev_printk(const char *level, const struct device *dev,
1158 const char *fmt, ...)
1159 {}
1160
1161 static inline __printf(2, 3)
1162 void dev_emerg(const struct device *dev, const char *fmt, ...)
1163 {}
1164 static inline __printf(2, 3)
1165 void dev_crit(const struct device *dev, const char *fmt, ...)
1166 {}
1167 static inline __printf(2, 3)
1168 void dev_alert(const struct device *dev, const char *fmt, ...)
1169 {}
1170 static inline __printf(2, 3)
1171 void dev_err(const struct device *dev, const char *fmt, ...)
1172 {}
1173 static inline __printf(2, 3)
1174 void dev_warn(const struct device *dev, const char *fmt, ...)
1175 {}
1176 static inline __printf(2, 3)
1177 void dev_notice(const struct device *dev, const char *fmt, ...)
1178 {}
1179 static inline __printf(2, 3)
1180 void _dev_info(const struct device *dev, const char *fmt, ...)
1181 {}
1182
1183 #endif
1184
1185 /*
1186 * Hackaround for existing non-printk uses of dev_info
1187 *
1188 * Note that the definition of dev_info below is actually _dev_info
1189 * and a macro is used to avoid redefining dev_info
1190 */
1191
1192 #define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg)
1193
1194 #if defined(CONFIG_DYNAMIC_DEBUG)
1195 #define dev_dbg(dev, format, ...) \
1196 do { \
1197 dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
1198 } while (0)
1199 #elif defined(DEBUG)
1200 #define dev_dbg(dev, format, arg...) \
1201 dev_printk(KERN_DEBUG, dev, format, ##arg)
1202 #else
1203 #define dev_dbg(dev, format, arg...) \
1204 ({ \
1205 if (0) \
1206 dev_printk(KERN_DEBUG, dev, format, ##arg); \
1207 })
1208 #endif
1209
1210 #ifdef CONFIG_PRINTK
1211 #define dev_level_once(dev_level, dev, fmt, ...) \
1212 do { \
1213 static bool __print_once __read_mostly; \
1214 \
1215 if (!__print_once) { \
1216 __print_once = true; \
1217 dev_level(dev, fmt, ##__VA_ARGS__); \
1218 } \
1219 } while (0)
1220 #else
1221 #define dev_level_once(dev_level, dev, fmt, ...) \
1222 do { \
1223 if (0) \
1224 dev_level(dev, fmt, ##__VA_ARGS__); \
1225 } while (0)
1226 #endif
1227
1228 #define dev_emerg_once(dev, fmt, ...) \
1229 dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__)
1230 #define dev_alert_once(dev, fmt, ...) \
1231 dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__)
1232 #define dev_crit_once(dev, fmt, ...) \
1233 dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__)
1234 #define dev_err_once(dev, fmt, ...) \
1235 dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__)
1236 #define dev_warn_once(dev, fmt, ...) \
1237 dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__)
1238 #define dev_notice_once(dev, fmt, ...) \
1239 dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__)
1240 #define dev_info_once(dev, fmt, ...) \
1241 dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
1242 #define dev_dbg_once(dev, fmt, ...) \
1243 dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__)
1244
1245 #define dev_level_ratelimited(dev_level, dev, fmt, ...) \
1246 do { \
1247 static DEFINE_RATELIMIT_STATE(_rs, \
1248 DEFAULT_RATELIMIT_INTERVAL, \
1249 DEFAULT_RATELIMIT_BURST); \
1250 if (__ratelimit(&_rs)) \
1251 dev_level(dev, fmt, ##__VA_ARGS__); \
1252 } while (0)
1253
1254 #define dev_emerg_ratelimited(dev, fmt, ...) \
1255 dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__)
1256 #define dev_alert_ratelimited(dev, fmt, ...) \
1257 dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__)
1258 #define dev_crit_ratelimited(dev, fmt, ...) \
1259 dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__)
1260 #define dev_err_ratelimited(dev, fmt, ...) \
1261 dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__)
1262 #define dev_warn_ratelimited(dev, fmt, ...) \
1263 dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__)
1264 #define dev_notice_ratelimited(dev, fmt, ...) \
1265 dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
1266 #define dev_info_ratelimited(dev, fmt, ...) \
1267 dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
1268 #if defined(CONFIG_DYNAMIC_DEBUG)
1269 /* descriptor check is first to prevent flooding with "callbacks suppressed" */
1270 #define dev_dbg_ratelimited(dev, fmt, ...) \
1271 do { \
1272 static DEFINE_RATELIMIT_STATE(_rs, \
1273 DEFAULT_RATELIMIT_INTERVAL, \
1274 DEFAULT_RATELIMIT_BURST); \
1275 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
1276 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
1277 __ratelimit(&_rs)) \
1278 __dynamic_dev_dbg(&descriptor, dev, fmt, \
1279 ##__VA_ARGS__); \
1280 } while (0)
1281 #elif defined(DEBUG)
1282 #define dev_dbg_ratelimited(dev, fmt, ...) \
1283 do { \
1284 static DEFINE_RATELIMIT_STATE(_rs, \
1285 DEFAULT_RATELIMIT_INTERVAL, \
1286 DEFAULT_RATELIMIT_BURST); \
1287 if (__ratelimit(&_rs)) \
1288 dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
1289 } while (0)
1290 #else
1291 #define dev_dbg_ratelimited(dev, fmt, ...) \
1292 do { \
1293 if (0) \
1294 dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
1295 } while (0)
1296 #endif
1297
1298 #ifdef VERBOSE_DEBUG
1299 #define dev_vdbg dev_dbg
1300 #else
1301 #define dev_vdbg(dev, format, arg...) \
1302 ({ \
1303 if (0) \
1304 dev_printk(KERN_DEBUG, dev, format, ##arg); \
1305 })
1306 #endif
1307
1308 /*
1309 * dev_WARN*() acts like dev_printk(), but with the key difference of
1310 * using WARN/WARN_ONCE to include file/line information and a backtrace.
1311 */
1312 #define dev_WARN(dev, format, arg...) \
1313 WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg)
1314
1315 #define dev_WARN_ONCE(dev, condition, format, arg...) \
1316 WARN_ONCE(condition, "%s %s: " format, \
1317 dev_driver_string(dev), dev_name(dev), ## arg)
1318
1319 /* Create alias so the module can be autoloaded. */
1320 #define MODULE_ALIAS_CHARDEV(major,minor) \
1321 MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
1322 #define MODULE_ALIAS_CHARDEV_MAJOR(major) \
1323 MODULE_ALIAS("char-major-" __stringify(major) "-*")
1324
1325 #ifdef CONFIG_SYSFS_DEPRECATED
1326 extern long sysfs_deprecated;
1327 #else
1328 #define sysfs_deprecated 0
1329 #endif
1330
1331 /**
1332 * module_driver() - Helper macro for drivers that don't do anything
1333 * special in module init/exit. This eliminates a lot of boilerplate.
1334 * Each module may only use this macro once, and calling it replaces
1335 * module_init() and module_exit().
1336 *
1337 * @__driver: driver name
1338 * @__register: register function for this driver type
1339 * @__unregister: unregister function for this driver type
1340 * @...: Additional arguments to be passed to __register and __unregister.
1341 *
1342 * Use this macro to construct bus specific macros for registering
1343 * drivers, and do not use it on its own.
1344 */
1345 #define module_driver(__driver, __register, __unregister, ...) \
1346 static int __init __driver##_init(void) \
1347 { \
1348 return __register(&(__driver) , ##__VA_ARGS__); \
1349 } \
1350 module_init(__driver##_init); \
1351 static void __exit __driver##_exit(void) \
1352 { \
1353 __unregister(&(__driver) , ##__VA_ARGS__); \
1354 } \
1355 module_exit(__driver##_exit);
1356
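As a hedged sketch of the intended use, a bus would wrap module_driver() in its own helper, along the lines of what module_platform_driver() does; struct example_driver and its register/unregister functions are assumptions:

/* Hypothetical bus-specific registration helpers. */
struct example_driver;
extern int example_driver_register(struct example_driver *drv);
extern void example_driver_unregister(struct example_driver *drv);

/* Bus-provided convenience macro built on module_driver(). */
#define module_example_driver(__drv) \
        module_driver(__drv, example_driver_register, example_driver_unregister)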
1357 /**
1358 * builtin_driver() - Helper macro for drivers that don't do anything
1359 * special in init and have no exit. This eliminates some boilerplate.
1360 * Each driver may only use this macro once, and calling it replaces
1361 * device_initcall (or in some cases, the legacy __initcall). This is
1362 * meant to be a direct parallel of module_driver() above but without
1363 * the __exit stuff that is not used for builtin cases.
1364 *
1365 * @__driver: driver name
1366 * @__register: register function for this driver type
1367 * @...: Additional arguments to be passed to __register
1368 *
1369 * Use this macro to construct bus specific macros for registering
1370 * drivers, and do not use it on its own.
1371 */
1372 #define builtin_driver(__driver, __register, ...) \
1373 static int __init __driver##_init(void) \
1374 { \
1375 return __register(&(__driver) , ##__VA_ARGS__); \
1376 } \
1377 device_initcall(__driver##_init);
1378
1379 #endif /* _DEVICE_H_ */
1 /* interrupt.h */
2 #ifndef _LINUX_INTERRUPT_H
3 #define _LINUX_INTERRUPT_H
4
5 #include <linux/kernel.h>
6 #include <linux/linkage.h>
7 #include <linux/bitops.h>
8 #include <linux/preempt.h>
9 #include <linux/cpumask.h>
10 #include <linux/irqreturn.h>
11 #include <linux/irqnr.h>
12 #include <linux/hardirq.h>
13 #include <linux/irqflags.h>
14 #include <linux/hrtimer.h>
15 #include <linux/kref.h>
16 #include <linux/workqueue.h>
17
18 #include <linux/atomic.h>
19 #include <asm/ptrace.h>
20 #include <asm/irq.h>
21
22 /*
23 * These correspond to the IORESOURCE_IRQ_* defines in
24 * linux/ioport.h to select the interrupt line behaviour. When
25 * requesting an interrupt without specifying an IRQF_TRIGGER, the
26 * setting should be assumed to be "as already configured", which
27 * may be as per machine or firmware initialisation.
28 */
29 #define IRQF_TRIGGER_NONE 0x00000000
30 #define IRQF_TRIGGER_RISING 0x00000001
31 #define IRQF_TRIGGER_FALLING 0x00000002
32 #define IRQF_TRIGGER_HIGH 0x00000004
33 #define IRQF_TRIGGER_LOW 0x00000008
34 #define IRQF_TRIGGER_MASK (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
35 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
36 #define IRQF_TRIGGER_PROBE 0x00000010
37
38 /*
39 * These flags are used only by the kernel as part of the
40 * irq handling routines.
41 *
42 * IRQF_SHARED - allow sharing the irq among several devices
43 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
44 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
45 * IRQF_PERCPU - Interrupt is per cpu
46 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
47 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
48 * registered first in a shared interrupt is considered for
49 * performance reasons)
50 * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler has finished.
51 * Used by threaded interrupts which need to keep the
52 * irq line disabled until the threaded handler has been run.
53 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
54 * that this interrupt will wake the system from a suspended
55 * state. See Documentation/power/suspend-and-interrupts.txt
56 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
57 * IRQF_NO_THREAD - Interrupt cannot be threaded
58 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
59 * resume time.
60 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
61 * interrupt handler after suspending interrupts. For system
62 * wakeup devices users need to implement wakeup detection in
63 * their interrupt handlers.
64 */
65 #define IRQF_SHARED 0x00000080
66 #define IRQF_PROBE_SHARED 0x00000100
67 #define __IRQF_TIMER 0x00000200
68 #define IRQF_PERCPU 0x00000400
69 #define IRQF_NOBALANCING 0x00000800
70 #define IRQF_IRQPOLL 0x00001000
71 #define IRQF_ONESHOT 0x00002000
72 #define IRQF_NO_SUSPEND 0x00004000
73 #define IRQF_FORCE_RESUME 0x00008000
74 #define IRQF_NO_THREAD 0x00010000
75 #define IRQF_EARLY_RESUME 0x00020000
76 #define IRQF_COND_SUSPEND 0x00040000
77
78 #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
79
80 /*
81 * These values can be returned by request_any_context_irq() and
82 * describe the context the interrupt will be run in.
83 *
84 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
85 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
86 */
87 enum {
88 IRQC_IS_HARDIRQ = 0,
89 IRQC_IS_NESTED,
90 };
91
92 typedef irqreturn_t (*irq_handler_t)(int, void *);
93
94 /**
95 * struct irqaction - per interrupt action descriptor
96 * @handler: interrupt handler function
97 * @name: name of the device
98 * @dev_id: cookie to identify the device
99 * @percpu_dev_id: cookie to identify the device
100 * @next: pointer to the next irqaction for shared interrupts
101 * @irq: interrupt number
102 * @flags: flags (see IRQF_* above)
103 * @thread_fn: interrupt handler function for threaded interrupts
104 * @thread: thread pointer for threaded interrupts
105 * @secondary: pointer to secondary irqaction (force threading)
106 * @thread_flags: flags related to @thread
107 * @thread_mask: bitmask for keeping track of @thread activity
108 * @dir: pointer to the proc/irq/NN/name entry
109 */
110 struct irqaction {
111 irq_handler_t handler;
112 void *dev_id;
113 void __percpu *percpu_dev_id;
114 struct irqaction *next;
115 irq_handler_t thread_fn;
116 struct task_struct *thread;
117 struct irqaction *secondary;
118 unsigned int irq;
119 unsigned int flags;
120 unsigned long thread_flags;
121 unsigned long thread_mask;
122 const char *name;
123 struct proc_dir_entry *dir;
124 } ____cacheline_internodealigned_in_smp;
125
126 extern irqreturn_t no_action(int cpl, void *dev_id);
127
128 /*
129 * If a (PCI) device interrupt is not connected we set dev->irq to
130 * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
131 * can distinguish that case from other error returns.
132 *
133 * 0x80000000 is guaranteed to be outside the available range of interrupts
134 * and easy to distinguish from other possible incorrect values.
135 */
136 #define IRQ_NOTCONNECTED (1U << 31)
137
138 extern int __must_check
139 request_threaded_irq(unsigned int irq, irq_handler_t handler,
140 irq_handler_t thread_fn,
141 unsigned long flags, const char *name, void *dev);
142
143 static inline int __must_check
144 request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
145 const char *name, void *dev)
146 {
147 return request_threaded_irq(irq, handler, NULL, flags, name, dev);
148 }
149
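A hedged sketch of the threaded-IRQ pattern these declarations support; the acknowledge step and the contents of dev_id are assumptions:

static irqreturn_t example_hardirq(int irq, void *dev_id)
{
        /* quickly acknowledge/mask the hardware here (hypothetical) */
        return IRQ_WAKE_THREAD;         /* hand off to example_thread_fn */
}

static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
        /* runs in process context and may sleep */
        return IRQ_HANDLED;
}

/* ret = request_threaded_irq(irq, example_hardirq, example_thread_fn,
 *                            IRQF_ONESHOT, "example", dev_id);
 * IRQF_ONESHOT keeps the line masked until the thread has run. */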
150 extern int __must_check
151 request_any_context_irq(unsigned int irq, irq_handler_t handler,
152 unsigned long flags, const char *name, void *dev_id);
153
154 extern int __must_check
155 request_percpu_irq(unsigned int irq, irq_handler_t handler,
156 const char *devname, void __percpu *percpu_dev_id);
157
158 extern void free_irq(unsigned int, void *);
159 extern void free_percpu_irq(unsigned int, void __percpu *);
160
161 struct device;
162
163 extern int __must_check
164 devm_request_threaded_irq(struct device *dev, unsigned int irq,
165 irq_handler_t handler, irq_handler_t thread_fn,
166 unsigned long irqflags, const char *devname,
167 void *dev_id);
168
169 static inline int __must_check
170 devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
171 unsigned long irqflags, const char *devname, void *dev_id)
172 {
173 return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
174 devname, dev_id);
175 }
176
177 extern int __must_check
178 devm_request_any_context_irq(struct device *dev, unsigned int irq,
179 irq_handler_t handler, unsigned long irqflags,
180 const char *devname, void *dev_id);
181
182 extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
183
184 /*
185 * On lockdep we don't want to enable hardirqs in hardirq
186 * context. Use local_irq_enable_in_hardirq() to annotate
187 * kernel code that has to do this nevertheless (pretty much
188 * the only valid case is for old/broken hardware that is
189 * insanely slow).
190 *
191 * NOTE: in theory this might break fragile code that relies
192 * on hardirq delivery - in practice we don't seem to have such
193 * places left. So the only effect should be slightly increased
194 * irqs-off latencies.
195 */
196 #ifdef CONFIG_LOCKDEP
197 # define local_irq_enable_in_hardirq() do { } while (0)
198 #else
199 # define local_irq_enable_in_hardirq() local_irq_enable()
200 #endif
201
202 extern void disable_irq_nosync(unsigned int irq);
203 extern bool disable_hardirq(unsigned int irq);
204 extern void disable_irq(unsigned int irq);
205 extern void disable_percpu_irq(unsigned int irq);
206 extern void enable_irq(unsigned int irq);
207 extern void enable_percpu_irq(unsigned int irq, unsigned int type);
208 extern bool irq_percpu_is_enabled(unsigned int irq);
209 extern void irq_wake_thread(unsigned int irq, void *dev_id);
210
211 /* The following three functions are for the core kernel use only. */
212 extern void suspend_device_irqs(void);
213 extern void resume_device_irqs(void);
214
215 /**
216 * struct irq_affinity_notify - context for notification of IRQ affinity changes
217 * @irq: Interrupt to which notification applies
218 * @kref: Reference count, for internal use
219 * @work: Work item, for internal use
220 * @notify: Function to be called on change. This will be
221 * called in process context.
222 * @release: Function to be called on release. This will be
223 * called in process context. Once registered, the
224 * structure must only be freed when this function is
225 * called or later.
226 */
227 struct irq_affinity_notify {
228 unsigned int irq;
229 struct kref kref;
230 struct work_struct work;
231 void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
232 void (*release)(struct kref *ref);
233 };
234
235 #if defined(CONFIG_SMP)
236
237 extern cpumask_var_t irq_default_affinity;
238
239 /* Internal implementation. Use the helpers below */
240 extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
241 bool force);
242
243 /**
244 * irq_set_affinity - Set the irq affinity of a given irq
245 * @irq: Interrupt to set affinity
246 * @cpumask: cpumask
247 *
248 * Fails if cpumask does not contain an online CPU
249 */
250 static inline int
251 irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
252 {
253 return __irq_set_affinity(irq, cpumask, false);
254 }
255
256 /**
257 * irq_force_affinity - Force the irq affinity of a given irq
258 * @irq: Interrupt to set affinity
259 * @cpumask: cpumask
260 *
261 * Same as irq_set_affinity, but without checking the mask against
262 * online cpus.
263 *
264 * Solely for low level cpu hotplug code, where we need to make per
265 * cpu interrupts affine before the cpu becomes online.
266 */
267 static inline int
268 irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
269 {
270 return __irq_set_affinity(irq, cpumask, true);
271 }
272
273 extern int irq_can_set_affinity(unsigned int irq);
274 extern int irq_select_affinity(unsigned int irq);
275
276 extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
277
278 extern int
279 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
280
281 struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs);
282
283 #else /* CONFIG_SMP */
284
285 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
286 {
287 return -EINVAL;
288 }
289
290 static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
291 {
292 return 0;
293 }
294
295 static inline int irq_can_set_affinity(unsigned int irq)
296 {
297 return 0;
298 }
299
300 static inline int irq_select_affinity(unsigned int irq) { return 0; }
301
302 static inline int irq_set_affinity_hint(unsigned int irq,
303 const struct cpumask *m)
304 {
305 return -EINVAL;
306 }
307
308 static inline int
309 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
310 {
311 return 0;
312 }
313
314 static inline struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs)
315 {
316 *nr_vecs = 1;
317 return NULL;
318 }
319 #endif /* CONFIG_SMP */
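A minimal sketch of the affinity helpers documented above, assuming a hypothetical driver that wants its interrupt serviced on CPU 2:

static int my_pin_irq_to_cpu2(unsigned int irq)
{
	if (!irq_can_set_affinity(irq))
		return -EINVAL;

	/* cpumask_of() yields a mask with only the given CPU set. */
	return irq_set_affinity(irq, cpumask_of(2));
}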
320
321 /*
322 * Special lockdep variants of irq disabling/enabling.
323 * These should be used for locking constructs that
324 * know that a particular irq context which is disabled,
325 * and which is the only irq-context user of a lock,
326 * that it's safe to take the lock in the irq-disabled
327 * section without disabling hardirqs.
328 *
329 * On !CONFIG_LOCKDEP they are equivalent to the normal
330 * irq disable/enable methods.
331 */
332 static inline void disable_irq_nosync_lockdep(unsigned int irq)
333 {
334 disable_irq_nosync(irq);
335 #ifdef CONFIG_LOCKDEP
336 local_irq_disable();
337 #endif
338 }
339
340 static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
341 {
342 disable_irq_nosync(irq);
343 #ifdef CONFIG_LOCKDEP
344 local_irq_save(*flags);
345 #endif
346 }
347
348 static inline void disable_irq_lockdep(unsigned int irq)
349 {
350 disable_irq(irq);
351 #ifdef CONFIG_LOCKDEP
352 local_irq_disable();
353 #endif
354 }
355
356 static inline void enable_irq_lockdep(unsigned int irq)
357 {
358 #ifdef CONFIG_LOCKDEP
359 local_irq_enable();
360 #endif
361 enable_irq(irq);
362 }
363
364 static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
365 {
366 #ifdef CONFIG_LOCKDEP
367 local_irq_restore(*flags);
368 #endif
369 enable_irq(irq);
370 }
371
372 /* IRQ wakeup (PM) control: */
373 extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
374
375 static inline int enable_irq_wake(unsigned int irq)
376 {
377 return irq_set_irq_wake(irq, 1);
378 }
379
380 static inline int disable_irq_wake(unsigned int irq)
381 {
382 return irq_set_irq_wake(irq, 0);
383 }
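A sketch of the wake-IRQ helpers in a hypothetical driver's suspend/resume callbacks (my_irq is an assumed, driver-specific IRQ number):

static int my_suspend(struct device *dev)
{
	if (device_may_wakeup(dev))
		enable_irq_wake(my_irq);	/* arm the line as a wakeup source */
	return 0;
}

static int my_resume(struct device *dev)
{
	if (device_may_wakeup(dev))
		disable_irq_wake(my_irq);	/* disarm it again */
	return 0;
}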
384
385 /*
386 * irq_get_irqchip_state/irq_set_irqchip_state specific flags
387 */
388 enum irqchip_irq_state {
389 IRQCHIP_STATE_PENDING, /* Is interrupt pending? */
390 IRQCHIP_STATE_ACTIVE, /* Is interrupt in progress? */
391 IRQCHIP_STATE_MASKED, /* Is interrupt masked? */
392 IRQCHIP_STATE_LINE_LEVEL, /* Is IRQ line high? */
393 };
394
395 extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
396 bool *state);
397 extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
398 bool state);
399
400 #ifdef CONFIG_IRQ_FORCED_THREADING
401 extern bool force_irqthreads;
402 #else
403 #define force_irqthreads (0)
404 #endif
405
406 #ifndef __ARCH_SET_SOFTIRQ_PENDING
407 #define set_softirq_pending(x) (local_softirq_pending() = (x))
408 #define or_softirq_pending(x) (local_softirq_pending() |= (x))
409 #endif
410
411 /* Some architectures might implement lazy enabling/disabling of
412 * interrupts. In some cases, such as stop_machine, we might want
413 * to ensure that after a local_irq_disable(), interrupts have
414 * really been disabled in hardware. Such architectures need to
415 * implement the following hook.
416 */
417 #ifndef hard_irq_disable
418 #define hard_irq_disable() do { } while(0)
419 #endif
420
421 /* PLEASE avoid allocating new softirqs unless you really need
422 high-frequency threaded job scheduling. For almost all purposes
423 tasklets are more than enough. E.g. all serial device BHs et
424 al. should be converted to tasklets, not to softirqs.
425 */
426
427 enum
428 {
429 HI_SOFTIRQ=0,
430 TIMER_SOFTIRQ,
431 NET_TX_SOFTIRQ,
432 NET_RX_SOFTIRQ,
433 BLOCK_SOFTIRQ,
434 IRQ_POLL_SOFTIRQ,
435 TASKLET_SOFTIRQ,
436 SCHED_SOFTIRQ,
437 HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
438 numbering. Sigh! */
439 RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */
440
441 NR_SOFTIRQS
442 };
443
444 #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
445
446 /* map softirq index to softirq name. update 'softirq_to_name' in
447 * kernel/softirq.c when adding a new softirq.
448 */
449 extern const char * const softirq_to_name[NR_SOFTIRQS];
450
451 /* softirq mask and active fields moved to irq_cpustat_t in
452 * asm/hardirq.h to get better cache usage. KAO
453 */
454
455 struct softirq_action
456 {
457 void (*action)(struct softirq_action *);
458 };
459
460 asmlinkage void do_softirq(void);
461 asmlinkage void __do_softirq(void);
462
463 #ifdef __ARCH_HAS_DO_SOFTIRQ
464 void do_softirq_own_stack(void);
465 #else
466 static inline void do_softirq_own_stack(void)
467 {
468 __do_softirq();
469 }
470 #endif
471
472 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
473 extern void softirq_init(void);
474 extern void __raise_softirq_irqoff(unsigned int nr);
475
476 extern void raise_softirq_irqoff(unsigned int nr);
477 extern void raise_softirq(unsigned int nr);
478
479 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
480
481 static inline struct task_struct *this_cpu_ksoftirqd(void)
482 {
483 return this_cpu_read(ksoftirqd);
484 }
485
486 /* Tasklets --- multithreaded analogue of BHs.
487
488 The main feature distinguishing them from generic softirqs: a given
489 tasklet runs on only one CPU at a time.
490
491 The main feature distinguishing them from BHs: different tasklets
492 may run simultaneously on different CPUs.
493
494 Properties:
495 * If tasklet_schedule() is called, the tasklet is guaranteed
496 to be executed on some cpu at least once after this.
497 * If the tasklet is already scheduled, but its execution has not yet
498 started, it will be executed only once.
499 * If the tasklet is already running on another CPU (or schedule is called
500 from the tasklet itself), it is rescheduled for later.
501 * A tasklet is strictly serialized wrt itself, but not
502 wrt other tasklets. If a client needs intertask synchronization,
503 it must provide it with spinlocks.
504 */
505
506 struct tasklet_struct
507 {
508 struct tasklet_struct *next;
509 unsigned long state;
510 atomic_t count;
511 void (*func)(unsigned long);
512 unsigned long data;
513 };
514
515 #define DECLARE_TASKLET(name, func, data) \
516 struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
517
518 #define DECLARE_TASKLET_DISABLED(name, func, data) \
519 struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
520
521
522 enum
523 {
524 TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
525 TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
526 };
527
528 #ifdef CONFIG_SMP
529 static inline int tasklet_trylock(struct tasklet_struct *t)
530 {
531 return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
532 }
533
534 static inline void tasklet_unlock(struct tasklet_struct *t)
535 {
536 smp_mb__before_atomic();
537 clear_bit(TASKLET_STATE_RUN, &(t)->state);
538 }
539
540 static inline void tasklet_unlock_wait(struct tasklet_struct *t)
541 {
542 while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
543 }
544 #else
545 #define tasklet_trylock(t) 1
546 #define tasklet_unlock_wait(t) do { } while (0)
547 #define tasklet_unlock(t) do { } while (0)
548 #endif
549
550 extern void __tasklet_schedule(struct tasklet_struct *t);
551
552 static inline void tasklet_schedule(struct tasklet_struct *t)
553 {
554 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
555 __tasklet_schedule(t);
556 }
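A minimal sketch of the usual tasklet pattern described above, with hypothetical names (my_tasklet_fn, my_irq_handler); an interrupt handler defers work to the tasklet:

static void my_tasklet_fn(unsigned long data)
{
	/* Softirq context: keep it short and never sleep here. */
}

static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	/* Acknowledge the hardware, then defer the rest to the tasklet. */
	tasklet_schedule(&my_tasklet);
	return IRQ_HANDLED;
}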
557
558 extern void __tasklet_hi_schedule(struct tasklet_struct *t);
559
560 static inline void tasklet_hi_schedule(struct tasklet_struct *t)
561 {
562 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
563 __tasklet_hi_schedule(t);
564 }
565
566 extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
567
568 /*
569 * This version avoids touching any other tasklets. Needed for kmemcheck
570 * in order not to take any page faults while enqueueing this tasklet;
571 * consider VERY carefully whether you really need this or
572 * tasklet_hi_schedule()...
573 */
574 static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
575 {
576 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
577 __tasklet_hi_schedule_first(t);
578 }
579
580
581 static inline void tasklet_disable_nosync(struct tasklet_struct *t)
582 {
583 atomic_inc(&t->count);
584 smp_mb__after_atomic();
585 }
586
587 static inline void tasklet_disable(struct tasklet_struct *t)
588 {
589 tasklet_disable_nosync(t);
590 tasklet_unlock_wait(t);
591 smp_mb();
592 }
593
594 static inline void tasklet_enable(struct tasklet_struct *t)
595 {
596 smp_mb__before_atomic();
597 atomic_dec(&t->count);
598 }
599
600 extern void tasklet_kill(struct tasklet_struct *t);
601 extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
602 extern void tasklet_init(struct tasklet_struct *t,
603 void (*func)(unsigned long), unsigned long data);
604
605 struct tasklet_hrtimer {
606 struct hrtimer timer;
607 struct tasklet_struct tasklet;
608 enum hrtimer_restart (*function)(struct hrtimer *);
609 };
610
611 extern void
612 tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
613 enum hrtimer_restart (*function)(struct hrtimer *),
614 clockid_t which_clock, enum hrtimer_mode mode);
615
616 static inline
617 void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
618 const enum hrtimer_mode mode)
619 {
620 hrtimer_start(&ttimer->timer, time, mode);
621 }
622
623 static inline
624 void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
625 {
626 hrtimer_cancel(&ttimer->timer);
627 tasklet_kill(&ttimer->tasklet);
628 }
629
630 /*
631 * Autoprobing for irqs:
632 *
633 * probe_irq_on() and probe_irq_off() provide robust primitives
634 * for accurate IRQ probing during kernel initialization. They are
635 * reasonably simple to use, are not "fooled" by spurious interrupts,
636 * and, unlike other attempts at IRQ probing, they do not get hung on
637 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
638 *
639 * For reasonably foolproof probing, use them as follows:
640 *
641 * 1. clear and/or mask the device's internal interrupt.
642 * 2. sti();
643 * 3. irqs = probe_irq_on(); // "take over" all unassigned idle IRQs
644 * 4. enable the device and cause it to trigger an interrupt.
645 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
646 * 6. irq = probe_irq_off(irqs); // get IRQ number, 0=none, negative=multiple
647 * 7. service the device to clear its pending interrupt.
648 * 8. loop again if paranoia is required.
649 *
650 * probe_irq_on() returns a mask of allocated irq's.
651 *
652 * probe_irq_off() takes the mask as a parameter,
653 * and returns the irq number which occurred,
654 * or zero if none occurred, or a negative irq number
655 * if more than one irq occurred.
656 */
657
658 #if !defined(CONFIG_GENERIC_IRQ_PROBE)
659 static inline unsigned long probe_irq_on(void)
660 {
661 return 0;
662 }
663 static inline int probe_irq_off(unsigned long val)
664 {
665 return 0;
666 }
667 static inline unsigned int probe_irq_mask(unsigned long val)
668 {
669 return 0;
670 }
671 #else
672 extern unsigned long probe_irq_on(void); /* returns 0 on failure */
673 extern int probe_irq_off(unsigned long); /* returns 0 or negative on failure */
674 extern unsigned int probe_irq_mask(unsigned long); /* returns mask of ISA interrupts */
675 #endif
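A sketch of the probing sequence listed in the comment above, for a hypothetical legacy device; the my_hw_* helpers are assumed device-specific operations:

static int my_autoprobe_irq(void)
{
	unsigned long mask;
	int irq;

	my_hw_mask_irq();		/* 1. quiesce the device          */
	mask = probe_irq_on();		/* 3. claim all idle IRQ lines    */
	my_hw_trigger_irq();		/* 4. make the device interrupt   */
	mdelay(20);			/* 5. give the interrupt time     */
	irq = probe_irq_off(mask);	/* 6. 0 = none, negative = many   */
	my_hw_ack_irq();		/* 7. clear the pending interrupt */

	return irq > 0 ? irq : -ENODEV;
}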
676
677 #ifdef CONFIG_PROC_FS
678 /* Initialize /proc/irq/ */
679 extern void init_irq_proc(void);
680 #else
681 static inline void init_irq_proc(void)
682 {
683 }
684 #endif
685
686 struct seq_file;
687 int show_interrupts(struct seq_file *p, void *v);
688 int arch_show_interrupts(struct seq_file *p, int prec);
689
690 extern int early_irq_init(void);
691 extern int arch_probe_nr_irqs(void);
692 extern int arch_early_irq_init(void);
693
694 #if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
695 /*
696 * We want to know which function is an entrypoint of a hardirq or a softirq.
697 */
698 #define __irq_entry __attribute__((__section__(".irqentry.text")))
699 #define __softirq_entry \
700 __attribute__((__section__(".softirqentry.text")))
701
702 /* Limits of hardirq entrypoints */
703 extern char __irqentry_text_start[];
704 extern char __irqentry_text_end[];
705 /* Limits of softirq entrypoints */
706 extern char __softirqentry_text_start[];
707 extern char __softirqentry_text_end[];
708
709 #else
710 #define __irq_entry
711 #define __softirq_entry
712 #endif
713
714 #endif
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Definitions for the Interfaces handler.
7 *
8 * Version: @(#)dev.h 1.0.10 08/12/93
9 *
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Corey Minyard <wf-rch!minyard@relay.EU.net>
13 * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
14 * Alan Cox, <alan@lxorguk.ukuu.org.uk>
15 * Bjorn Ekwall. <bj0rn@blox.se>
16 * Pekka Riikonen <priikone@poseidon.pspt.fi>
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 *
23 * Moved to /usr/include/linux for NET3
24 */
25 #ifndef _LINUX_NETDEVICE_H
26 #define _LINUX_NETDEVICE_H
27
28 #include <linux/timer.h>
29 #include <linux/bug.h>
30 #include <linux/delay.h>
31 #include <linux/atomic.h>
32 #include <linux/prefetch.h>
33 #include <asm/cache.h>
34 #include <asm/byteorder.h>
35
36 #include <linux/percpu.h>
37 #include <linux/rculist.h>
38 #include <linux/dmaengine.h>
39 #include <linux/workqueue.h>
40 #include <linux/dynamic_queue_limits.h>
41
42 #include <linux/ethtool.h>
43 #include <net/net_namespace.h>
44 #include <net/dsa.h>
45 #ifdef CONFIG_DCB
46 #include <net/dcbnl.h>
47 #endif
48 #include <net/netprio_cgroup.h>
49
50 #include <linux/netdev_features.h>
51 #include <linux/neighbour.h>
52 #include <uapi/linux/netdevice.h>
53 #include <uapi/linux/if_bonding.h>
54 #include <uapi/linux/pkt_cls.h>
55
56 struct netpoll_info;
57 struct device;
58 struct phy_device;
59 /* 802.11 specific */
60 struct wireless_dev;
61 /* 802.15.4 specific */
62 struct wpan_dev;
63 struct mpls_dev;
64 /* UDP Tunnel offloads */
65 struct udp_tunnel_info;
66 struct bpf_prog;
67
68 void netdev_set_default_ethtool_ops(struct net_device *dev,
69 const struct ethtool_ops *ops);
70
71 /* Backlog congestion levels */
72 #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
73 #define NET_RX_DROP 1 /* packet dropped */
74
75 /*
76 * Transmit return codes: transmit return codes originate from three different
77 * namespaces:
78 *
79 * - qdisc return codes
80 * - driver transmit return codes
81 * - errno values
82 *
83 * Drivers are allowed to return any one of those in their hard_start_xmit()
84 * function. Real network devices commonly used with qdiscs should only return
85 * the driver transmit return codes though - when qdiscs are used, the actual
86 * transmission happens asynchronously, so the value is not propagated to
87 * higher layers. Virtual network devices transmit synchronously; in this case
88 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
89 * others are propagated to higher layers.
90 */
91
92 /* qdisc ->enqueue() return codes. */
93 #define NET_XMIT_SUCCESS 0x00
94 #define NET_XMIT_DROP 0x01 /* skb dropped */
95 #define NET_XMIT_CN 0x02 /* congestion notification */
96 #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */
97
98 /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
99 * indicates that the device will soon be dropping packets, or already drops
100 * some packets of the same priority; prompting us to send less aggressively. */
101 #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))
102 #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
103
104 /* Driver transmit return codes */
105 #define NETDEV_TX_MASK 0xf0
106
107 enum netdev_tx {
108 __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */
109 NETDEV_TX_OK = 0x00, /* driver took care of packet */
110 NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/
111 };
112 typedef enum netdev_tx netdev_tx_t;
113
114 /*
115 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
116 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
117 */
118 static inline bool dev_xmit_complete(int rc)
119 {
120 /*
121 * Positive cases with an skb consumed by a driver:
122 * - successful transmission (rc == NETDEV_TX_OK)
123 * - error while transmitting (rc < 0)
124 * - error while queueing to a different device (rc & NET_XMIT_MASK)
125 */
126 if (likely(rc < NET_XMIT_MASK))
127 return true;
128
129 return false;
130 }
131
132 /*
133 * Compute the worst-case header length according to the protocols
134 * used.
135 */
136
137 #if defined(CONFIG_HYPERV_NET)
138 # define LL_MAX_HEADER 128
139 #elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
140 # if defined(CONFIG_MAC80211_MESH)
141 # define LL_MAX_HEADER 128
142 # else
143 # define LL_MAX_HEADER 96
144 # endif
145 #else
146 # define LL_MAX_HEADER 32
147 #endif
148
149 #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
150 !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
151 #define MAX_HEADER LL_MAX_HEADER
152 #else
153 #define MAX_HEADER (LL_MAX_HEADER + 48)
154 #endif
155
156 /*
157 * Old network device statistics. Fields are native words
158 * (unsigned long) so they can be read and written atomically.
159 */
160
161 struct net_device_stats {
162 unsigned long rx_packets;
163 unsigned long tx_packets;
164 unsigned long rx_bytes;
165 unsigned long tx_bytes;
166 unsigned long rx_errors;
167 unsigned long tx_errors;
168 unsigned long rx_dropped;
169 unsigned long tx_dropped;
170 unsigned long multicast;
171 unsigned long collisions;
172 unsigned long rx_length_errors;
173 unsigned long rx_over_errors;
174 unsigned long rx_crc_errors;
175 unsigned long rx_frame_errors;
176 unsigned long rx_fifo_errors;
177 unsigned long rx_missed_errors;
178 unsigned long tx_aborted_errors;
179 unsigned long tx_carrier_errors;
180 unsigned long tx_fifo_errors;
181 unsigned long tx_heartbeat_errors;
182 unsigned long tx_window_errors;
183 unsigned long rx_compressed;
184 unsigned long tx_compressed;
185 };
186
187
188 #include <linux/cache.h>
189 #include <linux/skbuff.h>
190
191 #ifdef CONFIG_RPS
192 #include <linux/static_key.h>
193 extern struct static_key rps_needed;
194 #endif
195
196 struct neighbour;
197 struct neigh_parms;
198 struct sk_buff;
199
200 struct netdev_hw_addr {
201 struct list_head list;
202 unsigned char addr[MAX_ADDR_LEN];
203 unsigned char type;
204 #define NETDEV_HW_ADDR_T_LAN 1
205 #define NETDEV_HW_ADDR_T_SAN 2
206 #define NETDEV_HW_ADDR_T_SLAVE 3
207 #define NETDEV_HW_ADDR_T_UNICAST 4
208 #define NETDEV_HW_ADDR_T_MULTICAST 5
209 bool global_use;
210 int sync_cnt;
211 int refcount;
212 int synced;
213 struct rcu_head rcu_head;
214 };
215
216 struct netdev_hw_addr_list {
217 struct list_head list;
218 int count;
219 };
220
221 #define netdev_hw_addr_list_count(l) ((l)->count)
222 #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
223 #define netdev_hw_addr_list_for_each(ha, l) \
224 list_for_each_entry(ha, &(l)->list, list)
225
226 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
227 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
228 #define netdev_for_each_uc_addr(ha, dev) \
229 netdev_hw_addr_list_for_each(ha, &(dev)->uc)
230
231 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
232 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
233 #define netdev_for_each_mc_addr(ha, dev) \
234 netdev_hw_addr_list_for_each(ha, &(dev)->mc)
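A short sketch of the multicast iterator above, as a hypothetical .ndo_set_rx_mode implementation (my_hw_* helpers are assumed driver internals):

static void my_set_rx_mode(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	my_hw_clear_mc_filter(dev);
	netdev_for_each_mc_addr(ha, dev)
		my_hw_add_mc_filter(dev, ha->addr);	/* program each entry */
}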
235
236 struct hh_cache {
237 u16 hh_len;
238 u16 __pad;
239 seqlock_t hh_lock;
240
241 /* cached hardware header; allow for machine alignment needs. */
242 #define HH_DATA_MOD 16
243 #define HH_DATA_OFF(__len) \
244 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
245 #define HH_DATA_ALIGN(__len) \
246 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
247 unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
248 };
249
250 /* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
251 * Alternative is:
252 * dev->hard_header_len ? (dev->hard_header_len +
253 * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
254 *
255 * We could use other alignment values, but we must maintain the
256 * relationship HH alignment <= LL alignment.
257 */
258 #define LL_RESERVED_SPACE(dev) \
259 ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
260 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
261 ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
262
263 struct header_ops {
264 int (*create) (struct sk_buff *skb, struct net_device *dev,
265 unsigned short type, const void *daddr,
266 const void *saddr, unsigned int len);
267 int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
268 int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
269 void (*cache_update)(struct hh_cache *hh,
270 const struct net_device *dev,
271 const unsigned char *haddr);
272 bool (*validate)(const char *ll_header, unsigned int len);
273 };
274
275 /* These flag bits are private to the generic network queueing
276 * layer; they may not be explicitly referenced by any other
277 * code.
278 */
279
280 enum netdev_state_t {
281 __LINK_STATE_START,
282 __LINK_STATE_PRESENT,
283 __LINK_STATE_NOCARRIER,
284 __LINK_STATE_LINKWATCH_PENDING,
285 __LINK_STATE_DORMANT,
286 };
287
288
289 /*
290 * This structure holds boot-time configured netdevice settings. They
291 * are then used in the device probing.
292 */
293 struct netdev_boot_setup {
294 char name[IFNAMSIZ];
295 struct ifmap map;
296 };
297 #define NETDEV_BOOT_SETUP_MAX 8
298
299 int __init netdev_boot_setup(char *str);
300
301 /*
302 * Structure for NAPI scheduling similar to tasklet but with weighting
303 */
304 struct napi_struct {
305 /* The poll_list must only be managed by the entity which
306 * changes the state of the NAPI_STATE_SCHED bit. This means
307 * whoever atomically sets that bit can add this napi_struct
308 * to the per-CPU poll_list, and whoever clears that bit
309 * can remove from the list right before clearing the bit.
310 */
311 struct list_head poll_list;
312
313 unsigned long state;
314 int weight;
315 unsigned int gro_count;
316 int (*poll)(struct napi_struct *, int);
317 #ifdef CONFIG_NETPOLL
318 spinlock_t poll_lock;
319 int poll_owner;
320 #endif
321 struct net_device *dev;
322 struct sk_buff *gro_list;
323 struct sk_buff *skb;
324 struct hrtimer timer;
325 struct list_head dev_list;
326 struct hlist_node napi_hash_node;
327 unsigned int napi_id;
328 };
329
330 enum {
331 NAPI_STATE_SCHED, /* Poll is scheduled */
332 NAPI_STATE_DISABLE, /* Disable pending */
333 NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
334 NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */
335 NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
336 };
337
338 enum gro_result {
339 GRO_MERGED,
340 GRO_MERGED_FREE,
341 GRO_HELD,
342 GRO_NORMAL,
343 GRO_DROP,
344 };
345 typedef enum gro_result gro_result_t;
346
347 /*
348 * enum rx_handler_result - Possible return values for rx_handlers.
349 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
350 * further.
351 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
352 * case skb->dev was changed by rx_handler.
353 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
354 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
355 *
356 * rx_handlers are functions called from inside __netif_receive_skb(), to do
357 * special processing of the skb, prior to delivery to protocol handlers.
358 *
359 * Currently, a net_device can only have a single rx_handler registered. Trying
360 * to register a second rx_handler will return -EBUSY.
361 *
362 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
363 * To unregister a rx_handler on a net_device, use
364 * netdev_rx_handler_unregister().
365 *
366 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
367 * do with the skb.
368 *
369 * If the rx_handler consumed the skb in some way, it should return
370 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
371 * the skb to be delivered in some other way.
372 *
373 * If the rx_handler changed skb->dev, to divert the skb to another
374 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
375 * new device will be called if it exists.
376 *
377 * If the rx_handler decides the skb should be ignored, it should return
378 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
379 * are registered on exact device (ptype->dev == skb->dev).
380 *
381 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
382 * delivered, it should return RX_HANDLER_PASS.
383 *
384 * A device without a registered rx_handler will behave as if rx_handler
385 * returned RX_HANDLER_PASS.
386 */
387
388 enum rx_handler_result {
389 RX_HANDLER_CONSUMED,
390 RX_HANDLER_ANOTHER,
391 RX_HANDLER_EXACT,
392 RX_HANDLER_PASS,
393 };
394 typedef enum rx_handler_result rx_handler_result_t;
395 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
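A sketch of an rx_handler following the contract described above, assuming a hypothetical aggregation driver (my_frame_is_for_upper_dev and my_upper_dev are illustrative); registration itself would go through netdev_rx_handler_register():

static rx_handler_result_t my_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (!my_frame_is_for_upper_dev(skb))
		return RX_HANDLER_PASS;		/* deliver normally */

	skb->dev = my_upper_dev(skb->dev);	/* divert to the upper device */
	*pskb = skb;
	return RX_HANDLER_ANOTHER;		/* re-run the receive path */
}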
396
397 void __napi_schedule(struct napi_struct *n);
398 void __napi_schedule_irqoff(struct napi_struct *n);
399
400 static inline bool napi_disable_pending(struct napi_struct *n)
401 {
402 return test_bit(NAPI_STATE_DISABLE, &n->state);
403 }
404
405 /**
406 * napi_schedule_prep - check if NAPI can be scheduled
407 * @n: NAPI context
408 *
409 * Test if NAPI routine is already running, and if not mark
410 * it as running. This is used as a condition variable to
411 * ensure only one NAPI poll instance runs. We also make
412 * sure there is no pending NAPI disable.
413 */
414 static inline bool napi_schedule_prep(struct napi_struct *n)
415 {
416 return !napi_disable_pending(n) &&
417 !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
418 }
419
420 /**
421 * napi_schedule - schedule NAPI poll
422 * @n: NAPI context
423 *
424 * Schedule NAPI poll routine to be called if it is not already
425 * running.
426 */
427 static inline void napi_schedule(struct napi_struct *n)
428 {
429 if (napi_schedule_prep(n))
430 __napi_schedule(n);
431 }
432
433 /**
434 * napi_schedule_irqoff - schedule NAPI poll
435 * @n: NAPI context
436 *
437 * Variant of napi_schedule(), assuming hard irqs are masked.
438 */
439 static inline void napi_schedule_irqoff(struct napi_struct *n)
440 {
441 if (napi_schedule_prep(n))
442 __napi_schedule_irqoff(n);
443 }
444
445 /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
446 static inline bool napi_reschedule(struct napi_struct *napi)
447 {
448 if (napi_schedule_prep(napi)) {
449 __napi_schedule(napi);
450 return true;
451 }
452 return false;
453 }
454
455 void __napi_complete(struct napi_struct *n);
456 void napi_complete_done(struct napi_struct *n, int work_done);
457 /**
458 * napi_complete - NAPI processing complete
459 * @n: NAPI context
460 *
461 * Mark NAPI processing as complete.
462 * Consider using napi_complete_done() instead.
463 */
464 static inline void napi_complete(struct napi_struct *n)
465 {
466 return napi_complete_done(n, 0);
467 }
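A minimal sketch of the usual NAPI pattern built from the helpers above: the hard interrupt only schedules the poll, and the poll completes when the budget is not exhausted. struct my_priv and the my_hw_*/my_process_rx helpers are assumptions for illustration.

static irqreturn_t my_netdev_irq(int irq, void *dev_id)
{
	struct my_priv *priv = dev_id;

	my_hw_disable_rx_irq(priv);		/* mask RX irqs until poll is done */
	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int done = my_process_rx(priv, budget);

	if (done < budget) {
		napi_complete_done(napi, done);
		my_hw_enable_rx_irq(priv);	/* re-arm the device interrupt */
	}
	return done;
}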
468
469 /**
470 * napi_hash_add - add a NAPI to global hashtable
471 * @napi: NAPI context
472 *
473 * Generate a new napi_id and store a @napi under it in napi_hash.
474 * Used for busy polling (CONFIG_NET_RX_BUSY_POLL).
475 * Note: This is normally automatically done from netif_napi_add(),
476 * so might disappear in a future Linux version.
477 */
478 void napi_hash_add(struct napi_struct *napi);
479
480 /**
481 * napi_hash_del - remove a NAPI from global table
482 * @napi: NAPI context
483 *
484 * Warning: caller must observe RCU grace period
485 * before freeing memory containing @napi, if
486 * this function returns true.
487 * Note: core networking stack automatically calls it
488 * from netif_napi_del().
489 * Drivers might want to call this helper to combine all
490 * the needed RCU grace periods into a single one.
491 */
492 bool napi_hash_del(struct napi_struct *napi);
493
494 /**
495 * napi_disable - prevent NAPI from scheduling
496 * @n: NAPI context
497 *
498 * Stop NAPI from being scheduled on this context.
499 * Waits till any outstanding processing completes.
500 */
501 void napi_disable(struct napi_struct *n);
502
503 /**
504 * napi_enable - enable NAPI scheduling
505 * @n: NAPI context
506 *
507 * Resume NAPI scheduling on this context.
508 * Must be paired with napi_disable.
509 */
510 static inline void napi_enable(struct napi_struct *n)
511 {
512 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
513 smp_mb__before_atomic();
514 clear_bit(NAPI_STATE_SCHED, &n->state);
515 clear_bit(NAPI_STATE_NPSVC, &n->state);
516 }
517
518 /**
519 * napi_synchronize - wait until NAPI is not running
520 * @n: NAPI context
521 *
522 * Wait until NAPI is done being scheduled on this context.
523 * Waits till any outstanding processing completes but
524 * does not disable future activations.
525 */
526 static inline void napi_synchronize(const struct napi_struct *n)
527 {
528 if (IS_ENABLED(CONFIG_SMP))
529 while (test_bit(NAPI_STATE_SCHED, &n->state))
530 msleep(1);
531 else
532 barrier();
533 }
534
535 enum netdev_queue_state_t {
536 __QUEUE_STATE_DRV_XOFF,
537 __QUEUE_STATE_STACK_XOFF,
538 __QUEUE_STATE_FROZEN,
539 };
540
541 #define QUEUE_STATE_DRV_XOFF (1 << __QUEUE_STATE_DRV_XOFF)
542 #define QUEUE_STATE_STACK_XOFF (1 << __QUEUE_STATE_STACK_XOFF)
543 #define QUEUE_STATE_FROZEN (1 << __QUEUE_STATE_FROZEN)
544
545 #define QUEUE_STATE_ANY_XOFF (QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
546 #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
547 QUEUE_STATE_FROZEN)
548 #define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
549 QUEUE_STATE_FROZEN)
550
551 /*
552 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
553 * netif_tx_* functions below are used to manipulate this flag. The
554 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
555 * queue independently. The netif_xmit_*stopped functions below are called
556 * to check if the queue has been stopped by the driver or stack (either
557 * of the XOFF bits are set in the state). Drivers should not need to call
558 * netif_xmit*stopped functions, they should only be using netif_tx_*.
559 */
560
561 struct netdev_queue {
562 /*
563 * read-mostly part
564 */
565 struct net_device *dev;
566 struct Qdisc __rcu *qdisc;
567 struct Qdisc *qdisc_sleeping;
568 #ifdef CONFIG_SYSFS
569 struct kobject kobj;
570 #endif
571 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
572 int numa_node;
573 #endif
574 unsigned long tx_maxrate;
575 /*
576 * Number of TX timeouts for this queue
577 * (/sys/class/net/DEV/Q/trans_timeout)
578 */
579 unsigned long trans_timeout;
580 /*
581 * write-mostly part
582 */
583 spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
584 int xmit_lock_owner;
585 /*
586 * Time (in jiffies) of last Tx
587 */
588 unsigned long trans_start;
589
590 unsigned long state;
591
592 #ifdef CONFIG_BQL
593 struct dql dql;
594 #endif
595 } ____cacheline_aligned_in_smp;
596
597 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
598 {
599 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
600 return q->numa_node;
601 #else
602 return NUMA_NO_NODE;
603 #endif
604 }
605
606 static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
607 {
608 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
609 q->numa_node = node;
610 #endif
611 }
612
613 #ifdef CONFIG_RPS
614 /*
615 * This structure holds an RPS map which can be of variable length. The
616 * map is an array of CPUs.
617 */
618 struct rps_map {
619 unsigned int len;
620 struct rcu_head rcu;
621 u16 cpus[0];
622 };
623 #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
624
625 /*
626 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
627 * tail pointer for that CPU's input queue at the time of last enqueue, and
628 * a hardware filter index.
629 */
630 struct rps_dev_flow {
631 u16 cpu;
632 u16 filter;
633 unsigned int last_qtail;
634 };
635 #define RPS_NO_FILTER 0xffff
636
637 /*
638 * The rps_dev_flow_table structure contains a table of flow mappings.
639 */
640 struct rps_dev_flow_table {
641 unsigned int mask;
642 struct rcu_head rcu;
643 struct rps_dev_flow flows[0];
644 };
645 #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
646 ((_num) * sizeof(struct rps_dev_flow)))
647
648 /*
649 * The rps_sock_flow_table contains mappings of flows to the last CPU
650 * on which they were processed by the application (set in recvmsg).
651 * Each entry is a 32bit value. Upper part is the high-order bits
652 * of flow hash, lower part is CPU number.
653 * rps_cpu_mask is used to partition the space, depending on number of
654 * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
655 * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
656 * meaning we use 32-6=26 bits for the hash.
657 */
658 struct rps_sock_flow_table {
659 u32 mask;
660
661 u32 ents[0] ____cacheline_aligned_in_smp;
662 };
663 #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
664
665 #define RPS_NO_CPU 0xffff
666
667 extern u32 rps_cpu_mask;
668 extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
669
670 static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
671 u32 hash)
672 {
673 if (table && hash) {
674 unsigned int index = hash & table->mask;
675 u32 val = hash & ~rps_cpu_mask;
676
677 /* We only give a hint, preemption can change CPU under us */
678 val |= raw_smp_processor_id();
679
680 if (table->ents[index] != val)
681 table->ents[index] = val;
682 }
683 }
684
685 #ifdef CONFIG_RFS_ACCEL
686 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
687 u16 filter_id);
688 #endif
689 #endif /* CONFIG_RPS */
690
691 /* This structure contains an instance of an RX queue. */
692 struct netdev_rx_queue {
693 #ifdef CONFIG_RPS
694 struct rps_map __rcu *rps_map;
695 struct rps_dev_flow_table __rcu *rps_flow_table;
696 #endif
697 struct kobject kobj;
698 struct net_device *dev;
699 } ____cacheline_aligned_in_smp;
700
701 /*
702 * RX queue sysfs structures and functions.
703 */
704 struct rx_queue_attribute {
705 struct attribute attr;
706 ssize_t (*show)(struct netdev_rx_queue *queue,
707 struct rx_queue_attribute *attr, char *buf);
708 ssize_t (*store)(struct netdev_rx_queue *queue,
709 struct rx_queue_attribute *attr, const char *buf, size_t len);
710 };
711
712 #ifdef CONFIG_XPS
713 /*
714 * This structure holds an XPS map which can be of variable length. The
715 * map is an array of queues.
716 */
717 struct xps_map {
718 unsigned int len;
719 unsigned int alloc_len;
720 struct rcu_head rcu;
721 u16 queues[0];
722 };
723 #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
724 #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
725 - sizeof(struct xps_map)) / sizeof(u16))
726
727 /*
728 * This structure holds all XPS maps for device. Maps are indexed by CPU.
729 */
730 struct xps_dev_maps {
731 struct rcu_head rcu;
732 struct xps_map __rcu *cpu_map[0];
733 };
734 #define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
735 (nr_cpu_ids * sizeof(struct xps_map *)))
736 #endif /* CONFIG_XPS */
737
738 #define TC_MAX_QUEUE 16
739 #define TC_BITMASK 15
740 /* HW offloaded queuing disciplines txq count and offset maps */
741 struct netdev_tc_txq {
742 u16 count;
743 u16 offset;
744 };
745
746 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
747 /*
748 * This structure is to hold information about the device
749 * configured to run FCoE protocol stack.
750 */
751 struct netdev_fcoe_hbainfo {
752 char manufacturer[64];
753 char serial_number[64];
754 char hardware_version[64];
755 char driver_version[64];
756 char optionrom_version[64];
757 char firmware_version[64];
758 char model[256];
759 char model_description[256];
760 };
761 #endif
762
763 #define MAX_PHYS_ITEM_ID_LEN 32
764
765 /* This structure holds a unique identifier to identify some
766 * physical item (port for example) used by a netdevice.
767 */
768 struct netdev_phys_item_id {
769 unsigned char id[MAX_PHYS_ITEM_ID_LEN];
770 unsigned char id_len;
771 };
772
773 static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
774 struct netdev_phys_item_id *b)
775 {
776 return a->id_len == b->id_len &&
777 memcmp(a->id, b->id, a->id_len) == 0;
778 }
779
780 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
781 struct sk_buff *skb);
782
783 /* These structures hold the attributes of qdisc and classifiers
784 * that are being passed to the netdevice through the setup_tc op.
785 */
786 enum {
787 TC_SETUP_MQPRIO,
788 TC_SETUP_CLSU32,
789 TC_SETUP_CLSFLOWER,
790 TC_SETUP_MATCHALL,
791 };
792
793 struct tc_cls_u32_offload;
794
795 struct tc_to_netdev {
796 unsigned int type;
797 union {
798 u8 tc;
799 struct tc_cls_u32_offload *cls_u32;
800 struct tc_cls_flower_offload *cls_flower;
801 struct tc_cls_matchall_offload *cls_mall;
802 };
803 };
804
805 /* These structures hold the attributes of xdp state that are being passed
806 * to the netdevice through the xdp op.
807 */
808 enum xdp_netdev_command {
809 /* Set or clear a bpf program used in the earliest stages of packet
810 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
811 * is responsible for calling bpf_prog_put on any old progs that are
812 * stored. In case of error, the callee need not release the new prog
813 * reference, but on success it takes ownership and must bpf_prog_put
814 * when it is no longer used.
815 */
816 XDP_SETUP_PROG,
817 /* Check if a bpf program is set on the device. The callee should
818 * return true if a program is currently attached and running.
819 */
820 XDP_QUERY_PROG,
821 };
822
823 struct netdev_xdp {
824 enum xdp_netdev_command command;
825 union {
826 /* XDP_SETUP_PROG */
827 struct bpf_prog *prog;
828 /* XDP_QUERY_PROG */
829 bool prog_attached;
830 };
831 };
832
833 /*
834 * This structure defines the management hooks for network devices.
835 * The following hooks can be defined; unless noted otherwise, they are
836 * optional and can be filled with a null pointer.
837 *
838 * int (*ndo_init)(struct net_device *dev);
839 * This function is called once when a network device is registered.
840 * The network device can use this for any late stage initialization
841 * or semantic validation. It can fail with an error code which will
842 * be propagated back to register_netdev.
843 *
844 * void (*ndo_uninit)(struct net_device *dev);
845 * This function is called when device is unregistered or when registration
846 * fails. It is not called if init fails.
847 *
848 * int (*ndo_open)(struct net_device *dev);
849 * This function is called when a network device transitions to the up
850 * state.
851 *
852 * int (*ndo_stop)(struct net_device *dev);
853 * This function is called when a network device transitions to the down
854 * state.
855 *
856 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
857 * struct net_device *dev);
858 * Called when a packet needs to be transmitted.
859 * Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop
860 * the queue before that can happen; it's for obsolete devices and weird
861 * corner cases, but the stack really does a non-trivial amount
862 * of useless work if you return NETDEV_TX_BUSY.
863 * Required; cannot be NULL.
864 *
865 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
866 * netdev_features_t features);
867 * Adjusts the requested feature flags according to device-specific
868 * constraints, and returns the resulting flags. Must not modify
869 * the device state.
870 *
871 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
872 * void *accel_priv, select_queue_fallback_t fallback);
873 * Called to decide which queue to use when device supports multiple
874 * transmit queues.
875 *
876 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
877 * This function is called to allow device receiver to make
878 * changes to configuration when multicast or promiscuous is enabled.
879 *
880 * void (*ndo_set_rx_mode)(struct net_device *dev);
881 * This function is called when the device changes its address list
882 * filtering. If the driver handles unicast address filtering, it should set
883 * IFF_UNICAST_FLT in its priv_flags.
884 *
885 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
886 * This function is called when the Media Access Control address
887 * needs to be changed. If this interface is not defined, the
888 * MAC address can not be changed.
889 *
890 * int (*ndo_validate_addr)(struct net_device *dev);
891 * Test if Media Access Control address is valid for the device.
892 *
893 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
894 * Called when a user requests an ioctl which can't be handled by
895 * the generic interface code. If not defined, ioctls return
896 * a "not supported" error code.
897 *
898 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
899 * Used to set network devices bus interface parameters. This interface
900 * is retained for legacy reasons; new devices should use the bus
901 * interface (PCI) for low level management.
902 *
903 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
904 * Called when a user wants to change the Maximum Transfer Unit
905 * of a device. If not defined, any request to change the MTU
906 * will return an error.
907 *
908 * void (*ndo_tx_timeout)(struct net_device *dev);
909 * Callback used when the transmitter has not made any progress
910 * for dev->watchdog ticks.
911 *
912 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
913 * struct rtnl_link_stats64 *storage);
914 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
915 * Called when a user wants to get the network device usage
916 * statistics. Drivers must do one of the following:
917 * 1. Define @ndo_get_stats64 to fill in a zero-initialised
918 * rtnl_link_stats64 structure passed by the caller.
919 * 2. Define @ndo_get_stats to update a net_device_stats structure
920 * (which should normally be dev->stats) and return a pointer to
921 * it. The structure may be changed asynchronously only if each
922 * field is written atomically.
923 * 3. Update dev->stats asynchronously and atomically, and define
924 * neither operation.
925 *
926 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
927 * If device supports VLAN filtering this function is called when a
928 * VLAN id is registered.
929 *
930 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
931 * If device supports VLAN filtering this function is called when a
932 * VLAN id is unregistered.
933 *
934 * void (*ndo_poll_controller)(struct net_device *dev);
935 *
936 * SR-IOV management functions.
937 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
938 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
939 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
940 * int max_tx_rate);
941 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
942 * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
943 * int (*ndo_get_vf_config)(struct net_device *dev,
944 * int vf, struct ifla_vf_info *ivf);
945 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
946 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
947 * struct nlattr *port[]);
948 *
949 * Enable or disable the VF ability to query its RSS Redirection Table and
950 * Hash Key. This is needed since on some devices VF share this information
951 * with PF and querying it may introduce a theoretical security risk.
952 * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
953 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
954 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
955 * Called to setup 'tc' number of traffic classes in the net device. This
956 * is always called from the stack with the rtnl lock held and netif tx
957 * queues stopped. This allows the netdevice to perform queue management
958 * safely.
959 *
960 * Fiber Channel over Ethernet (FCoE) offload functions.
961 * int (*ndo_fcoe_enable)(struct net_device *dev);
962 * Called when the FCoE protocol stack wants to start using LLD for FCoE
963 * so the underlying device can perform whatever needed configuration or
964 * initialization to support acceleration of FCoE traffic.
965 *
966 * int (*ndo_fcoe_disable)(struct net_device *dev);
967 * Called when the FCoE protocol stack wants to stop using LLD for FCoE
968 * so the underlying device can perform whatever needed clean-ups to
969 * stop supporting acceleration of FCoE traffic.
970 *
971 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
972 * struct scatterlist *sgl, unsigned int sgc);
973 * Called when the FCoE Initiator wants to initialize an I/O that
974 * is a possible candidate for Direct Data Placement (DDP). The LLD can
975 * perform necessary setup and returns 1 to indicate the device is set up
976 * successfully to perform DDP on this I/O, otherwise this returns 0.
977 *
978 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
979 * Called when the FCoE Initiator/Target is done with the DDPed I/O as
980 * indicated by the FC exchange id 'xid', so the underlying device can
981 * clean up and reuse resources for later DDP requests.
982 *
983 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
984 * struct scatterlist *sgl, unsigned int sgc);
985 * Called when the FCoE Target wants to initialize an I/O that
986 * is a possible candidate for Direct Data Placement (DDP). The LLD can
987 * perform necessary setup and returns 1 to indicate the device is set up
988 * successfully to perform DDP on this I/O, otherwise this returns 0.
989 *
990 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
991 * struct netdev_fcoe_hbainfo *hbainfo);
992 * Called when the FCoE Protocol stack wants information on the underlying
993 * device. This information is utilized by the FCoE protocol stack to
994 * register attributes with Fiber Channel management service as per the
995 * FC-GS Fabric Device Management Information(FDMI) specification.
996 *
997 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
998 * Called when the underlying device wants to override default World Wide
999 * Name (WWN) generation mechanism in FCoE protocol stack to pass its own
1000 * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
1001 * protocol stack to use.
1002 *
1003 * RFS acceleration.
1004 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
1005 * u16 rxq_index, u32 flow_id);
1006 * Set hardware filter for RFS. rxq_index is the target queue index;
1007 * flow_id is a flow ID to be passed to rps_may_expire_flow() later.
1008 * Return the filter ID on success, or a negative error code.
1009 *
1010 * Slave management functions (for bridge, bonding, etc).
1011 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
1012 * Called to make another netdev an underling.
1013 *
1014 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
1015 * Called to release previously enslaved netdev.
1016 *
1017 * Feature/offload setting functions.
1018 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
1019 * Called to update device configuration to new features. Passed
1020 * feature set might be less than what was returned by ndo_fix_features().
1021 * Must return >0 or -errno if it changed dev->features itself.
1022 *
1023 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
1024 * struct net_device *dev,
1025 * const unsigned char *addr, u16 vid, u16 flags)
1026 * Adds an FDB entry to dev for addr.
1027 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
1028 * struct net_device *dev,
1029 * const unsigned char *addr, u16 vid)
1030 * Deletes the FDB entry from dev corresponding to addr.
1031 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
1032 * struct net_device *dev, struct net_device *filter_dev,
1033 * int idx)
1034 * Used to add FDB entries to dump requests. Implementers should add
1035 * entries to skb and update idx with the number of entries.
1036 *
1037 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
1038 * u16 flags)
1039 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
1040 * struct net_device *dev, u32 filter_mask,
1041 * int nlflags)
1042 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
1043 * u16 flags);
1044 *
1045 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
1046 * Called to change device carrier. Soft-devices (like dummy, team, etc)
1047 * which do not represent real hardware may define this to allow their
1048 * userspace components to manage their virtual carrier state. Devices
1049 * that determine carrier state from physical hardware properties (eg
1050 * network cables) or protocol-dependent mechanisms (eg
1051 * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
1052 *
1053 * int (*ndo_get_phys_port_id)(struct net_device *dev,
1054 * struct netdev_phys_item_id *ppid);
1055 * Called to get ID of physical port of this device. If driver does
1056 * not implement this, it is assumed that the hw is not able to have
1057 * multiple net devices on a single physical port.
1058 *
1059 * void (*ndo_udp_tunnel_add)(struct net_device *dev,
1060 * struct udp_tunnel_info *ti);
1061 * Called by UDP tunnel to notify a driver about the UDP port and socket
1062 * address family that a UDP tunnel is listening to. It is called only
1063 * when a new port starts listening. The operation is protected by the
1064 * RTNL.
1065 *
1066 * void (*ndo_udp_tunnel_del)(struct net_device *dev,
1067 * struct udp_tunnel_info *ti);
1068 * Called by UDP tunnel to notify the driver about a UDP port and socket
1069 * address family that the UDP tunnel is not listening to anymore. The
1070 * operation is protected by the RTNL.
1071 *
1072 * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1073 * struct net_device *dev)
1074 * Called by upper layer devices to accelerate switching or other
1075 * station functionality into hardware. 'pdev' is the lowerdev
1076 * to use for the offload and 'dev' is the net device that will
1077 * back the offload. Returns a pointer to the private structure
1078 * the upper layer will maintain.
1079 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
1080 * Called by upper layer device to delete the station created
1081 * by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
1082 * the station and priv is the structure returned by the add
1083 * operation.
1084 * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb,
1085 * struct net_device *dev,
1086 * void *priv);
1087 * Callback to use for xmit over the accelerated station. This
1088 * is used in place of ndo_start_xmit on accelerated net
1089 * devices.
1090 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
1091 * struct net_device *dev
1092 * netdev_features_t features);
1093 * Called by core transmit path to determine if device is capable of
1094 * performing offload operations on a given packet. This is to give
1095 * the device an opportunity to implement any restrictions that cannot
1096 * be otherwise expressed by feature flags. The check is called with
1097 * the set of features that the stack has calculated and it returns
1098 * those the driver believes to be appropriate.
1099 * int (*ndo_set_tx_maxrate)(struct net_device *dev,
1100 * int queue_index, u32 maxrate);
1101 * Called when a user wants to set a max-rate limitation of specific
1102 * TX queue.
1103 * int (*ndo_get_iflink)(const struct net_device *dev);
1104 * Called to get the iflink value of this device.
1105 * void (*ndo_change_proto_down)(struct net_device *dev,
1106 * bool proto_down);
1107 * This function is used to pass protocol port error state information
1108 * to the switch driver. The switch driver can react to the proto_down
1109 * by doing a phys down on the associated switch port.
1110 * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
1111 * This function is used to get egress tunnel information for a given skb.
1112 * This is useful for retrieving outer tunnel header parameters while
1113 * sampling a packet.
1114 * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
1115 * This function is used to specify the headroom that the skb must
1116 * consider when allocating an skb during packet reception. Setting
1117 * appropriate rx headroom value allows avoiding skb head copy on
1118 * forward. Setting a negative value resets the rx headroom to the
1119 * default value.
1120 * int (*ndo_xdp)(struct net_device *dev, struct netdev_xdp *xdp);
1121 * This function is used to set or query state related to XDP on the
1122 * netdevice. See definition of enum xdp_netdev_command for details.
1123 *
1124 */
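Before the full structure definition below, a minimal sketch (not from this header) of how a hypothetical driver might wire up a few of the hooks documented above; the my_* handlers are illustrative, while eth_mac_addr and eth_validate_addr are generic kernel helpers:

static const struct net_device_ops my_netdev_ops = {
	.ndo_open		= my_open,
	.ndo_stop		= my_stop,
	.ndo_start_xmit		= my_start_xmit,	/* the one required hook */
	.ndo_set_rx_mode	= my_set_rx_mode,
	.ndo_set_mac_address	= eth_mac_addr,		/* generic ethernet helper */
	.ndo_validate_addr	= eth_validate_addr,	/* generic ethernet helper */
	.ndo_change_mtu		= my_change_mtu,
	.ndo_get_stats64	= my_get_stats64,
};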
1125 struct net_device_ops {
1126 int (*ndo_init)(struct net_device *dev);
1127 void (*ndo_uninit)(struct net_device *dev);
1128 int (*ndo_open)(struct net_device *dev);
1129 int (*ndo_stop)(struct net_device *dev);
1130 netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
1131 struct net_device *dev);
1132 netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
1133 struct net_device *dev,
1134 netdev_features_t features);
1135 u16 (*ndo_select_queue)(struct net_device *dev,
1136 struct sk_buff *skb,
1137 void *accel_priv,
1138 select_queue_fallback_t fallback);
1139 void (*ndo_change_rx_flags)(struct net_device *dev,
1140 int flags);
1141 void (*ndo_set_rx_mode)(struct net_device *dev);
1142 int (*ndo_set_mac_address)(struct net_device *dev,
1143 void *addr);
1144 int (*ndo_validate_addr)(struct net_device *dev);
1145 int (*ndo_do_ioctl)(struct net_device *dev,
1146 struct ifreq *ifr, int cmd);
1147 int (*ndo_set_config)(struct net_device *dev,
1148 struct ifmap *map);
1149 int (*ndo_change_mtu)(struct net_device *dev,
1150 int new_mtu);
1151 int (*ndo_neigh_setup)(struct net_device *dev,
1152 struct neigh_parms *);
1153 void (*ndo_tx_timeout) (struct net_device *dev);
1154
1155 struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
1156 struct rtnl_link_stats64 *storage);
1157 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
1158
1159 int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
1160 __be16 proto, u16 vid);
1161 int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
1162 __be16 proto, u16 vid);
1163 #ifdef CONFIG_NET_POLL_CONTROLLER
1164 void (*ndo_poll_controller)(struct net_device *dev);
1165 int (*ndo_netpoll_setup)(struct net_device *dev,
1166 struct netpoll_info *info);
1167 void (*ndo_netpoll_cleanup)(struct net_device *dev);
1168 #endif
1169 #ifdef CONFIG_NET_RX_BUSY_POLL
1170 int (*ndo_busy_poll)(struct napi_struct *dev);
1171 #endif
1172 int (*ndo_set_vf_mac)(struct net_device *dev,
1173 int queue, u8 *mac);
1174 int (*ndo_set_vf_vlan)(struct net_device *dev,
1175 int queue, u16 vlan, u8 qos);
1176 int (*ndo_set_vf_rate)(struct net_device *dev,
1177 int vf, int min_tx_rate,
1178 int max_tx_rate);
1179 int (*ndo_set_vf_spoofchk)(struct net_device *dev,
1180 int vf, bool setting);
1181 int (*ndo_set_vf_trust)(struct net_device *dev,
1182 int vf, bool setting);
1183 int (*ndo_get_vf_config)(struct net_device *dev,
1184 int vf,
1185 struct ifla_vf_info *ivf);
1186 int (*ndo_set_vf_link_state)(struct net_device *dev,
1187 int vf, int link_state);
1188 int (*ndo_get_vf_stats)(struct net_device *dev,
1189 int vf,
1190 struct ifla_vf_stats
1191 *vf_stats);
1192 int (*ndo_set_vf_port)(struct net_device *dev,
1193 int vf,
1194 struct nlattr *port[]);
1195 int (*ndo_get_vf_port)(struct net_device *dev,
1196 int vf, struct sk_buff *skb);
1197 int (*ndo_set_vf_guid)(struct net_device *dev,
1198 int vf, u64 guid,
1199 int guid_type);
1200 int (*ndo_set_vf_rss_query_en)(
1201 struct net_device *dev,
1202 int vf, bool setting);
1203 int (*ndo_setup_tc)(struct net_device *dev,
1204 u32 handle,
1205 __be16 protocol,
1206 struct tc_to_netdev *tc);
1207 #if IS_ENABLED(CONFIG_FCOE)
1208 int (*ndo_fcoe_enable)(struct net_device *dev);
1209 int (*ndo_fcoe_disable)(struct net_device *dev);
1210 int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
1211 u16 xid,
1212 struct scatterlist *sgl,
1213 unsigned int sgc);
1214 int (*ndo_fcoe_ddp_done)(struct net_device *dev,
1215 u16 xid);
1216 int (*ndo_fcoe_ddp_target)(struct net_device *dev,
1217 u16 xid,
1218 struct scatterlist *sgl,
1219 unsigned int sgc);
1220 int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1221 struct netdev_fcoe_hbainfo *hbainfo);
1222 #endif
1223
1224 #if IS_ENABLED(CONFIG_LIBFCOE)
1225 #define NETDEV_FCOE_WWNN 0
1226 #define NETDEV_FCOE_WWPN 1
1227 int (*ndo_fcoe_get_wwn)(struct net_device *dev,
1228 u64 *wwn, int type);
1229 #endif
1230
1231 #ifdef CONFIG_RFS_ACCEL
1232 int (*ndo_rx_flow_steer)(struct net_device *dev,
1233 const struct sk_buff *skb,
1234 u16 rxq_index,
1235 u32 flow_id);
1236 #endif
1237 int (*ndo_add_slave)(struct net_device *dev,
1238 struct net_device *slave_dev);
1239 int (*ndo_del_slave)(struct net_device *dev,
1240 struct net_device *slave_dev);
1241 netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1242 netdev_features_t features);
1243 int (*ndo_set_features)(struct net_device *dev,
1244 netdev_features_t features);
1245 int (*ndo_neigh_construct)(struct net_device *dev,
1246 struct neighbour *n);
1247 void (*ndo_neigh_destroy)(struct net_device *dev,
1248 struct neighbour *n);
1249
1250 int (*ndo_fdb_add)(struct ndmsg *ndm,
1251 struct nlattr *tb[],
1252 struct net_device *dev,
1253 const unsigned char *addr,
1254 u16 vid,
1255 u16 flags);
1256 int (*ndo_fdb_del)(struct ndmsg *ndm,
1257 struct nlattr *tb[],
1258 struct net_device *dev,
1259 const unsigned char *addr,
1260 u16 vid);
1261 int (*ndo_fdb_dump)(struct sk_buff *skb,
1262 struct netlink_callback *cb,
1263 struct net_device *dev,
1264 struct net_device *filter_dev,
1265 int idx);
1266
1267 int (*ndo_bridge_setlink)(struct net_device *dev,
1268 struct nlmsghdr *nlh,
1269 u16 flags);
1270 int (*ndo_bridge_getlink)(struct sk_buff *skb,
1271 u32 pid, u32 seq,
1272 struct net_device *dev,
1273 u32 filter_mask,
1274 int nlflags);
1275 int (*ndo_bridge_dellink)(struct net_device *dev,
1276 struct nlmsghdr *nlh,
1277 u16 flags);
1278 int (*ndo_change_carrier)(struct net_device *dev,
1279 bool new_carrier);
1280 int (*ndo_get_phys_port_id)(struct net_device *dev,
1281 struct netdev_phys_item_id *ppid);
1282 int (*ndo_get_phys_port_name)(struct net_device *dev,
1283 char *name, size_t len);
1284 void (*ndo_udp_tunnel_add)(struct net_device *dev,
1285 struct udp_tunnel_info *ti);
1286 void (*ndo_udp_tunnel_del)(struct net_device *dev,
1287 struct udp_tunnel_info *ti);
1288 void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1289 struct net_device *dev);
1290 void (*ndo_dfwd_del_station)(struct net_device *pdev,
1291 void *priv);
1292
1293 netdev_tx_t (*ndo_dfwd_start_xmit) (struct sk_buff *skb,
1294 struct net_device *dev,
1295 void *priv);
1296 int (*ndo_get_lock_subclass)(struct net_device *dev);
1297 int (*ndo_set_tx_maxrate)(struct net_device *dev,
1298 int queue_index,
1299 u32 maxrate);
1300 int (*ndo_get_iflink)(const struct net_device *dev);
1301 int (*ndo_change_proto_down)(struct net_device *dev,
1302 bool proto_down);
1303 int (*ndo_fill_metadata_dst)(struct net_device *dev,
1304 struct sk_buff *skb);
1305 void (*ndo_set_rx_headroom)(struct net_device *dev,
1306 int needed_headroom);
1307 int (*ndo_xdp)(struct net_device *dev,
1308 struct netdev_xdp *xdp);
1309 };
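
As a hedged illustration only (the foo_* names are hypothetical and not part of this header), a minimal driver typically populates a handful of these callbacks and leaves the rest NULL:

/* Illustrative sketch: hypothetical foo driver. */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static int foo_open(struct net_device *dev)
{
        netif_start_queue(dev);         /* let the stack call ndo_start_xmit */
        return 0;
}

static int foo_stop(struct net_device *dev)
{
        netif_stop_queue(dev);
        return 0;
}

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* a real driver would post the skb to hardware here */
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}

static const struct net_device_ops foo_netdev_ops = {
        .ndo_open               = foo_open,
        .ndo_stop               = foo_stop,
        .ndo_start_xmit         = foo_start_xmit,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
};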
1310
1311 /**
1312 * enum netdev_priv_flags - &struct net_device priv_flags
1313 *
1314 * These are the &struct net_device priv_flags; they are only set internally
1315 * by drivers and used in the kernel. These flags are invisible to
1316 * userspace; this means that the order of these flags can change
1317 * during any kernel release.
1318 *
1319 * You should have a pretty good reason to be extending these flags.
1320 *
1321 * @IFF_802_1Q_VLAN: 802.1Q VLAN device
1322 * @IFF_EBRIDGE: Ethernet bridging device
1323 * @IFF_BONDING: bonding master or slave
1324 * @IFF_ISATAP: ISATAP interface (RFC4214)
1325 * @IFF_WAN_HDLC: WAN HDLC device
1326 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
1327 * release skb->dst
1328 * @IFF_DONT_BRIDGE: disallow bridging this ether dev
1329 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
1330 * @IFF_MACVLAN_PORT: device used as macvlan port
1331 * @IFF_BRIDGE_PORT: device used as bridge port
1332 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
1333 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
1334 * @IFF_UNICAST_FLT: Supports unicast filtering
1335 * @IFF_TEAM_PORT: device used as team port
1336 * @IFF_SUPP_NOFCS: device supports sending custom FCS
1337 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
1338 * change when it's running
1339 * @IFF_MACVLAN: Macvlan device
1340 * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
1341 * underlying stacked devices
1342 * @IFF_IPVLAN_MASTER: IPvlan master device
1343 * @IFF_IPVLAN_SLAVE: IPvlan slave device
1344 * @IFF_L3MDEV_MASTER: device is an L3 master device
1345 * @IFF_NO_QUEUE: device can run without qdisc attached
1346 * @IFF_OPENVSWITCH: device is an Open vSwitch master
1347 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
1348 * @IFF_TEAM: device is a team device
1349 * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
1350 * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
1351 * entity (i.e. the master device for bridged veth)
1352 * @IFF_MACSEC: device is a MACsec device
1353 */
1354 enum netdev_priv_flags {
1355 IFF_802_1Q_VLAN = 1<<0,
1356 IFF_EBRIDGE = 1<<1,
1357 IFF_BONDING = 1<<2,
1358 IFF_ISATAP = 1<<3,
1359 IFF_WAN_HDLC = 1<<4,
1360 IFF_XMIT_DST_RELEASE = 1<<5,
1361 IFF_DONT_BRIDGE = 1<<6,
1362 IFF_DISABLE_NETPOLL = 1<<7,
1363 IFF_MACVLAN_PORT = 1<<8,
1364 IFF_BRIDGE_PORT = 1<<9,
1365 IFF_OVS_DATAPATH = 1<<10,
1366 IFF_TX_SKB_SHARING = 1<<11,
1367 IFF_UNICAST_FLT = 1<<12,
1368 IFF_TEAM_PORT = 1<<13,
1369 IFF_SUPP_NOFCS = 1<<14,
1370 IFF_LIVE_ADDR_CHANGE = 1<<15,
1371 IFF_MACVLAN = 1<<16,
1372 IFF_XMIT_DST_RELEASE_PERM = 1<<17,
1373 IFF_IPVLAN_MASTER = 1<<18,
1374 IFF_IPVLAN_SLAVE = 1<<19,
1375 IFF_L3MDEV_MASTER = 1<<20,
1376 IFF_NO_QUEUE = 1<<21,
1377 IFF_OPENVSWITCH = 1<<22,
1378 IFF_L3MDEV_SLAVE = 1<<23,
1379 IFF_TEAM = 1<<24,
1380 IFF_RXFH_CONFIGURED = 1<<25,
1381 IFF_PHONY_HEADROOM = 1<<26,
1382 IFF_MACSEC = 1<<27,
1383 };
1384
1385 #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
1386 #define IFF_EBRIDGE IFF_EBRIDGE
1387 #define IFF_BONDING IFF_BONDING
1388 #define IFF_ISATAP IFF_ISATAP
1389 #define IFF_WAN_HDLC IFF_WAN_HDLC
1390 #define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE
1391 #define IFF_DONT_BRIDGE IFF_DONT_BRIDGE
1392 #define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL
1393 #define IFF_MACVLAN_PORT IFF_MACVLAN_PORT
1394 #define IFF_BRIDGE_PORT IFF_BRIDGE_PORT
1395 #define IFF_OVS_DATAPATH IFF_OVS_DATAPATH
1396 #define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING
1397 #define IFF_UNICAST_FLT IFF_UNICAST_FLT
1398 #define IFF_TEAM_PORT IFF_TEAM_PORT
1399 #define IFF_SUPP_NOFCS IFF_SUPP_NOFCS
1400 #define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
1401 #define IFF_MACVLAN IFF_MACVLAN
1402 #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
1403 #define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER
1404 #define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE
1405 #define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER
1406 #define IFF_NO_QUEUE IFF_NO_QUEUE
1407 #define IFF_OPENVSWITCH IFF_OPENVSWITCH
1408 #define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
1409 #define IFF_TEAM IFF_TEAM
1410 #define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
1411 #define IFF_MACSEC IFF_MACSEC
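
A brief sketch of how these flags are normally consulted inside the kernel (the helper below is hypothetical):

/* Sketch: priv_flags is invisible to userspace; test it only in kernel code. */
#include <linux/netdevice.h>

static bool is_bridge_or_ovs_port(const struct net_device *dev)
{
        return dev->priv_flags & (IFF_BRIDGE_PORT | IFF_OVS_DATAPATH);
}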
1412
1413 /**
1414 * struct net_device - The DEVICE structure.
1415 * Actually, this whole structure is a big mistake. It mixes I/O
1416 * data with strictly "high-level" data, and it has to know about
1417 * almost every data structure used in the INET module.
1418 *
1419 * @name: This is the first field of the "visible" part of this structure
1420 * (i.e. as seen by users in the "Space.c" file). It is the name
1421 * of the interface.
1422 *
1423 * @name_hlist: Device name hash chain, please keep it close to name[]
1424 * @ifalias: SNMP alias
1425 * @mem_end: Shared memory end
1426 * @mem_start: Shared memory start
1427 * @base_addr: Device I/O address
1428 * @irq: Device IRQ number
1429 *
1430 * @carrier_changes: Stats to monitor carrier on<->off transitions
1431 *
1432 * @state: Generic network queuing layer state, see netdev_state_t
1433 * @dev_list: The global list of network devices
1434 * @napi_list: List entry used for polling NAPI devices
1435 * @unreg_list: List entry when we are unregistering the
1436 * device; see the function unregister_netdev
1437 * @close_list: List entry used when we are closing the device
1438 * @ptype_all: Device-specific packet handlers for all protocols
1439 * @ptype_specific: Device-specific, protocol-specific packet handlers
1440 *
1441 * @adj_list: Directly linked devices, like slaves for bonding
1442 * @all_adj_list: All linked devices, *including* neighbours
1443 * @features: Currently active device features
1444 * @hw_features: User-changeable features
1445 *
1446 * @wanted_features: User-requested features
1447 * @vlan_features: Mask of features inheritable by VLAN devices
1448 *
1449 * @hw_enc_features: Mask of features inherited by encapsulating devices
1450 * This field indicates what encapsulation
1451 * offloads the hardware is capable of doing,
1452 * and drivers will need to set them appropriately.
1453 *
1454 * @mpls_features: Mask of features inheritable by MPLS
1455 *
1456 * @ifindex: interface index
1457 * @group: The group the device belongs to
1458 *
1459 * @stats: Statistics struct, kept only as a legacy; use
1460 * rtnl_link_stats64 instead
1461 *
1462 * @rx_dropped: Dropped packets by core network,
1463 * do not use this in drivers
1464 * @tx_dropped: Dropped packets by core network,
1465 * do not use this in drivers
1466 * @rx_nohandler: nohandler dropped packets by core network on
1467 * inactive devices, do not use this in drivers
1468 *
1469 * @wireless_handlers: List of functions to handle Wireless Extensions,
1470 * instead of ioctl,
1471 * see <net/iw_handler.h> for details.
1472 * @wireless_data: Instance data managed by the core of wireless extensions
1473 *
1474 * @netdev_ops: Includes several pointers to callbacks,
1475 * if one wants to override the ndo_*() functions
1476 * @ethtool_ops: Management operations
1477 * @ndisc_ops: Includes callbacks for different IPv6 neighbour
1478 * discovery handling. Necessary for e.g. 6LoWPAN.
1479 * @header_ops: Includes callbacks for creating, parsing, caching, etc.
1480 * of Layer 2 headers.
1481 *
1482 * @flags: Interface flags (a la BSD)
1483 * @priv_flags: Like 'flags' but invisible to userspace,
1484 * see if.h for the definitions
1485 * @gflags: Global flags (kept as legacy)
1486 * @padded: How much padding added by alloc_netdev()
1487 * @operstate: RFC2863 operstate
1488 * @link_mode: Mapping policy to operstate
1489 * @if_port: Selectable AUI, TP, ...
1490 * @dma: DMA channel
1491 * @mtu: Interface MTU value
1492 * @type: Interface hardware type
1493 * @hard_header_len: Maximum hardware header length.
1494 *
1495 * @needed_headroom: Extra headroom the hardware may need, but not in all
1496 * cases can this be guaranteed
1497 * @needed_tailroom: Extra tailroom the hardware may need, but not in all
1498 * cases can this be guaranteed. Some cases also use
1499 * LL_MAX_HEADER instead to allocate the skb
1500 *
1501 * interface address info:
1502 *
1503 * @perm_addr: Permanent hw address
1504 * @addr_assign_type: Hw address assignment type
1505 * @addr_len: Hardware address length
1506 * @neigh_priv_len: Used in neigh_alloc()
1507 * @dev_id: Used to differentiate devices that share
1508 * the same link layer address
1509 * @dev_port: Used to differentiate devices that share
1510 * the same function
1511 * @addr_list_lock: XXX: need comments on this one
1512 * @uc_promisc: Flag that indicates promiscuous mode
1513 * has been enabled due to the need to listen to
1514 * additional unicast addresses in a device that
1515 * does not implement ndo_set_rx_mode()
1516 * @uc: unicast mac addresses
1517 * @mc: multicast mac addresses
1518 * @dev_addrs: list of device hw addresses
1519 * @queues_kset: Group of all Kobjects in the Tx and RX queues
1520 * @promiscuity: Number of times the NIC is told to work in
1521 * promiscuous mode; if it becomes 0 the NIC will
1522 * exit promiscuous mode
1523 * @allmulti: Counter, enables or disables allmulticast mode
1524 *
1525 * @vlan_info: VLAN info
1526 * @dsa_ptr: dsa specific data
1527 * @tipc_ptr: TIPC specific data
1528 * @atalk_ptr: AppleTalk link
1529 * @ip_ptr: IPv4 specific data
1530 * @dn_ptr: DECnet specific data
1531 * @ip6_ptr: IPv6 specific data
1532 * @ax25_ptr: AX.25 specific data
1533 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
1534 *
1535 * @last_rx: Time of last Rx
1536 * @dev_addr: Hw address (before bcast,
1537 * because most packets are unicast)
1538 *
1539 * @_rx: Array of RX queues
1540 * @num_rx_queues: Number of RX queues
1541 * allocated at register_netdev() time
1542 * @real_num_rx_queues: Number of RX queues currently active in device
1543 *
1544 * @rx_handler: handler for received packets
1545 * @rx_handler_data: XXX: need comments on this one
1546 * @ingress_queue: XXX: need comments on this one
1547 * @broadcast: hw bcast address
1548 *
1549 * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
1550 * indexed by RX queue number. Assigned by driver.
1551 * This must only be set if the ndo_rx_flow_steer
1552 * operation is defined
1553 * @index_hlist: Device index hash chain
1554 *
1555 * @_tx: Array of TX queues
1556 * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time
1557 * @real_num_tx_queues: Number of TX queues currently active in device
1558 * @qdisc: Root qdisc from userspace point of view
1559 * @tx_queue_len: Max frames per queue allowed
1560 * @tx_global_lock: XXX: need comments on this one
1561 *
1562 * @xps_maps: XXX: need comments on this one
1563 *
1564 * @offload_fwd_mark: Offload device fwding mark
1565 *
1566 * @watchdog_timeo: Represents the timeout that is used by
1567 * the watchdog (see dev_watchdog())
1568 * @watchdog_timer: List of timers
1569 *
1570 * @pcpu_refcnt: Number of references to this device
1571 * @todo_list: Delayed register/unregister
1572 * @link_watch_list: XXX: need comments on this one
1573 *
1574 * @reg_state: Register/unregister state machine
1575 * @dismantle: Device is going to be freed
1576 * @rtnl_link_state: This enum represents the phases of creating
1577 * a new link
1578 *
1579 * @destructor: Called from unregister,
1580 * can be used to call free_netdev
1581 * @npinfo: XXX: need comments on this one
1582 * @nd_net: Network namespace this network device is inside
1583 *
1584 * @ml_priv: Mid-layer private
1585 * @lstats: Loopback statistics
1586 * @tstats: Tunnel statistics
1587 * @dstats: Dummy statistics
1588 * @vstats: Virtual ethernet statistics
1589 *
1590 * @garp_port: GARP
1591 * @mrp_port: MRP
1592 *
1593 * @dev: Class/net/name entry
1594 * @sysfs_groups: Space for optional device, statistics and wireless
1595 * sysfs groups
1596 *
1597 * @sysfs_rx_queue_group: Space for optional per-rx queue attributes
1598 * @rtnl_link_ops: Rtnl_link_ops
1599 *
1600 * @gso_max_size: Maximum size of generic segmentation offload
1601 * @gso_max_segs: Maximum number of segments that can be passed to the
1602 * NIC for GSO
1603 *
1604 * @dcbnl_ops: Data Center Bridging netlink ops
1605 * @num_tc: Number of traffic classes in the net device
1606 * @tc_to_txq: XXX: need comments on this one
1607 * @prio_tc_map: XXX: need comments on this one
1608 *
1609 * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp
1610 *
1611 * @priomap: XXX: need comments on this one
1612 * @phydev: Physical device may attach itself
1613 * for hardware timestamping
1614 *
1615 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
1616 * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
1617 *
1618 * @proto_down: protocol port state information can be sent to the
1619 * switch driver and used to set the phys state of the
1620 * switch port.
1621 *
1622 * FIXME: cleanup struct net_device such that network protocol info
1623 * moves out.
1624 */
1625
1626 struct net_device {
1627 char name[IFNAMSIZ];
1628 struct hlist_node name_hlist;
1629 char *ifalias;
1630 /*
1631 * I/O specific fields
1632 * FIXME: Merge these and struct ifmap into one
1633 */
1634 unsigned long mem_end;
1635 unsigned long mem_start;
1636 unsigned long base_addr;
1637 int irq;
1638
1639 atomic_t carrier_changes;
1640
1641 /*
1642 * Some hardware also needs these fields (state,dev_list,
1643 * napi_list,unreg_list,close_list) but they are not
1644 * part of the usual set specified in Space.c.
1645 */
1646
1647 unsigned long state;
1648
1649 struct list_head dev_list;
1650 struct list_head napi_list;
1651 struct list_head unreg_list;
1652 struct list_head close_list;
1653 struct list_head ptype_all;
1654 struct list_head ptype_specific;
1655
1656 struct {
1657 struct list_head upper;
1658 struct list_head lower;
1659 } adj_list;
1660
1661 struct {
1662 struct list_head upper;
1663 struct list_head lower;
1664 } all_adj_list;
1665
1666 netdev_features_t features;
1667 netdev_features_t hw_features;
1668 netdev_features_t wanted_features;
1669 netdev_features_t vlan_features;
1670 netdev_features_t hw_enc_features;
1671 netdev_features_t mpls_features;
1672 netdev_features_t gso_partial_features;
1673
1674 int ifindex;
1675 int group;
1676
1677 struct net_device_stats stats;
1678
1679 atomic_long_t rx_dropped;
1680 atomic_long_t tx_dropped;
1681 atomic_long_t rx_nohandler;
1682
1683 #ifdef CONFIG_WIRELESS_EXT
1684 const struct iw_handler_def *wireless_handlers;
1685 struct iw_public_data *wireless_data;
1686 #endif
1687 const struct net_device_ops *netdev_ops;
1688 const struct ethtool_ops *ethtool_ops;
1689 #ifdef CONFIG_NET_SWITCHDEV
1690 const struct switchdev_ops *switchdev_ops;
1691 #endif
1692 #ifdef CONFIG_NET_L3_MASTER_DEV
1693 const struct l3mdev_ops *l3mdev_ops;
1694 #endif
1695 #if IS_ENABLED(CONFIG_IPV6)
1696 const struct ndisc_ops *ndisc_ops;
1697 #endif
1698
1699 const struct header_ops *header_ops;
1700
1701 unsigned int flags;
1702 unsigned int priv_flags;
1703
1704 unsigned short gflags;
1705 unsigned short padded;
1706
1707 unsigned char operstate;
1708 unsigned char link_mode;
1709
1710 unsigned char if_port;
1711 unsigned char dma;
1712
1713 unsigned int mtu;
1714 unsigned short type;
1715 unsigned short hard_header_len;
1716
1717 unsigned short needed_headroom;
1718 unsigned short needed_tailroom;
1719
1720 /* Interface address info. */
1721 unsigned char perm_addr[MAX_ADDR_LEN];
1722 unsigned char addr_assign_type;
1723 unsigned char addr_len;
1724 unsigned short neigh_priv_len;
1725 unsigned short dev_id;
1726 unsigned short dev_port;
1727 spinlock_t addr_list_lock;
1728 unsigned char name_assign_type;
1729 bool uc_promisc;
1730 struct netdev_hw_addr_list uc;
1731 struct netdev_hw_addr_list mc;
1732 struct netdev_hw_addr_list dev_addrs;
1733
1734 #ifdef CONFIG_SYSFS
1735 struct kset *queues_kset;
1736 #endif
1737 unsigned int promiscuity;
1738 unsigned int allmulti;
1739
1740
1741 /* Protocol-specific pointers */
1742
1743 #if IS_ENABLED(CONFIG_VLAN_8021Q)
1744 struct vlan_info __rcu *vlan_info;
1745 #endif
1746 #if IS_ENABLED(CONFIG_NET_DSA)
1747 struct dsa_switch_tree *dsa_ptr;
1748 #endif
1749 #if IS_ENABLED(CONFIG_TIPC)
1750 struct tipc_bearer __rcu *tipc_ptr;
1751 #endif
1752 void *atalk_ptr;
1753 struct in_device __rcu *ip_ptr;
1754 struct dn_dev __rcu *dn_ptr;
1755 struct inet6_dev __rcu *ip6_ptr;
1756 void *ax25_ptr;
1757 struct wireless_dev *ieee80211_ptr;
1758 struct wpan_dev *ieee802154_ptr;
1759 #if IS_ENABLED(CONFIG_MPLS_ROUTING)
1760 struct mpls_dev __rcu *mpls_ptr;
1761 #endif
1762
1763 /*
1764 * Cache lines mostly used on receive path (including eth_type_trans())
1765 */
1766 unsigned long last_rx;
1767
1768 /* Interface address info used in eth_type_trans() */
1769 unsigned char *dev_addr;
1770
1771 #ifdef CONFIG_SYSFS
1772 struct netdev_rx_queue *_rx;
1773
1774 unsigned int num_rx_queues;
1775 unsigned int real_num_rx_queues;
1776 #endif
1777
1778 unsigned long gro_flush_timeout;
1779 rx_handler_func_t __rcu *rx_handler;
1780 void __rcu *rx_handler_data;
1781
1782 #ifdef CONFIG_NET_CLS_ACT
1783 struct tcf_proto __rcu *ingress_cl_list;
1784 #endif
1785 struct netdev_queue __rcu *ingress_queue;
1786 #ifdef CONFIG_NETFILTER_INGRESS
1787 struct list_head nf_hooks_ingress;
1788 #endif
1789
1790 unsigned char broadcast[MAX_ADDR_LEN];
1791 #ifdef CONFIG_RFS_ACCEL
1792 struct cpu_rmap *rx_cpu_rmap;
1793 #endif
1794 struct hlist_node index_hlist;
1795
1796 /*
1797 * Cache lines mostly used on transmit path
1798 */
1799 struct netdev_queue *_tx ____cacheline_aligned_in_smp;
1800 unsigned int num_tx_queues;
1801 unsigned int real_num_tx_queues;
1802 struct Qdisc *qdisc;
1803 unsigned long tx_queue_len;
1804 spinlock_t tx_global_lock;
1805 int watchdog_timeo;
1806
1807 #ifdef CONFIG_XPS
1808 struct xps_dev_maps __rcu *xps_maps;
1809 #endif
1810 #ifdef CONFIG_NET_CLS_ACT
1811 struct tcf_proto __rcu *egress_cl_list;
1812 #endif
1813 #ifdef CONFIG_NET_SWITCHDEV
1814 u32 offload_fwd_mark;
1815 #endif
1816
1817 /* These may be needed for future network-power-down code. */
1818 struct timer_list watchdog_timer;
1819
1820 int __percpu *pcpu_refcnt;
1821 struct list_head todo_list;
1822
1823 struct list_head link_watch_list;
1824
1825 enum { NETREG_UNINITIALIZED=0,
1826 NETREG_REGISTERED, /* completed register_netdevice */
1827 NETREG_UNREGISTERING, /* called unregister_netdevice */
1828 NETREG_UNREGISTERED, /* completed unregister todo */
1829 NETREG_RELEASED, /* called free_netdev */
1830 NETREG_DUMMY, /* dummy device for NAPI poll */
1831 } reg_state:8;
1832
1833 bool dismantle;
1834
1835 enum {
1836 RTNL_LINK_INITIALIZED,
1837 RTNL_LINK_INITIALIZING,
1838 } rtnl_link_state:16;
1839
1840 void (*destructor)(struct net_device *dev);
1841
1842 #ifdef CONFIG_NETPOLL
1843 struct netpoll_info __rcu *npinfo;
1844 #endif
1845
1846 possible_net_t nd_net;
1847
1848 /* mid-layer private */
1849 union {
1850 void *ml_priv;
1851 struct pcpu_lstats __percpu *lstats;
1852 struct pcpu_sw_netstats __percpu *tstats;
1853 struct pcpu_dstats __percpu *dstats;
1854 struct pcpu_vstats __percpu *vstats;
1855 };
1856
1857 struct garp_port __rcu *garp_port;
1858 struct mrp_port __rcu *mrp_port;
1859
1860 struct device dev;
1861 const struct attribute_group *sysfs_groups[4];
1862 const struct attribute_group *sysfs_rx_queue_group;
1863
1864 const struct rtnl_link_ops *rtnl_link_ops;
1865
1866 /* for setting kernel sock attribute on TCP connection setup */
1867 #define GSO_MAX_SIZE 65536
1868 unsigned int gso_max_size;
1869 #define GSO_MAX_SEGS 65535
1870 u16 gso_max_segs;
1871
1872 #ifdef CONFIG_DCB
1873 const struct dcbnl_rtnl_ops *dcbnl_ops;
1874 #endif
1875 u8 num_tc;
1876 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
1877 u8 prio_tc_map[TC_BITMASK + 1];
1878
1879 #if IS_ENABLED(CONFIG_FCOE)
1880 unsigned int fcoe_ddp_xid;
1881 #endif
1882 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
1883 struct netprio_map __rcu *priomap;
1884 #endif
1885 struct phy_device *phydev;
1886 struct lock_class_key *qdisc_tx_busylock;
1887 struct lock_class_key *qdisc_running_key;
1888 bool proto_down;
1889 };
1890 #define to_net_dev(d) container_of(d, struct net_device, dev)
1891
1892 #define NETDEV_ALIGN 32
1893
1894 static inline
1895 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
1896 {
1897 return dev->prio_tc_map[prio & TC_BITMASK];
1898 }
1899
1900 static inline
1901 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
1902 {
1903 if (tc >= dev->num_tc)
1904 return -EINVAL;
1905
1906 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
1907 return 0;
1908 }
1909
1910 static inline
1911 void netdev_reset_tc(struct net_device *dev)
1912 {
1913 dev->num_tc = 0;
1914 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
1915 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
1916 }
1917
1918 static inline
1919 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
1920 {
1921 if (tc >= dev->num_tc)
1922 return -EINVAL;
1923
1924 dev->tc_to_txq[tc].count = count;
1925 dev->tc_to_txq[tc].offset = offset;
1926 return 0;
1927 }
1928
1929 static inline
1930 int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
1931 {
1932 if (num_tc > TC_MAX_QUEUE)
1933 return -EINVAL;
1934
1935 dev->num_tc = num_tc;
1936 return 0;
1937 }
1938
1939 static inline
1940 int netdev_get_num_tc(struct net_device *dev)
1941 {
1942 return dev->num_tc;
1943 }
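
Putting the tc helpers above together, a driver might partition its queues roughly like this (illustrative values, hypothetical function name):

/* Sketch: map 2 traffic classes onto 8 TX queues. */
#include <linux/netdevice.h>

static int foo_setup_tc_example(struct net_device *dev)
{
        int err = netdev_set_num_tc(dev, 2);

        if (err)
                return err;

        /* TC0 -> queues 0..3, TC1 -> queues 4..7 */
        netdev_set_tc_queue(dev, 0, 4, 0);
        netdev_set_tc_queue(dev, 1, 4, 4);

        /* e.g. priority 0 -> TC0, priority 4 -> TC1 */
        netdev_set_prio_tc_map(dev, 0, 0);
        netdev_set_prio_tc_map(dev, 4, 1);
        return 0;
}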
1944
1945 static inline
1946 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
1947 unsigned int index)
1948 {
1949 return &dev->_tx[index];
1950 }
1951
1952 static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
1953 const struct sk_buff *skb)
1954 {
1955 return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
1956 }
1957
1958 static inline void netdev_for_each_tx_queue(struct net_device *dev,
1959 void (*f)(struct net_device *,
1960 struct netdev_queue *,
1961 void *),
1962 void *arg)
1963 {
1964 unsigned int i;
1965
1966 for (i = 0; i < dev->num_tx_queues; i++)
1967 f(dev, &dev->_tx[i], arg);
1968 }
1969
1970 #define netdev_lockdep_set_classes(dev) \
1971 { \
1972 static struct lock_class_key qdisc_tx_busylock_key; \
1973 static struct lock_class_key qdisc_running_key; \
1974 static struct lock_class_key qdisc_xmit_lock_key; \
1975 static struct lock_class_key dev_addr_list_lock_key; \
1976 unsigned int i; \
1977 \
1978 (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
1979 (dev)->qdisc_running_key = &qdisc_running_key; \
1980 lockdep_set_class(&(dev)->addr_list_lock, \
1981 &dev_addr_list_lock_key); \
1982 for (i = 0; i < (dev)->num_tx_queues; i++) \
1983 lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
1984 &qdisc_xmit_lock_key); \
1985 }
1986
1987 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
1988 struct sk_buff *skb,
1989 void *accel_priv);
1990
1991 /* returns the headroom that the master device needs to take into account
1992 * when forwarding to this dev
1993 */
1994 static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
1995 {
1996 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
1997 }
1998
1999 static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
2000 {
2001 if (dev->netdev_ops->ndo_set_rx_headroom)
2002 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
2003 }
2004
2005 /* set the device rx headroom to the dev's default */
2006 static inline void netdev_reset_rx_headroom(struct net_device *dev)
2007 {
2008 netdev_set_rx_headroom(dev, -1);
2009 }
2010
2011 /*
2012 * Net namespace inlines
2013 */
2014 static inline
2015 struct net *dev_net(const struct net_device *dev)
2016 {
2017 return read_pnet(&dev->nd_net);
2018 }
2019
2020 static inline
2021 void dev_net_set(struct net_device *dev, struct net *net)
2022 {
2023 write_pnet(&dev->nd_net, net);
2024 }
2025
2026 static inline bool netdev_uses_dsa(struct net_device *dev)
2027 {
2028 #if IS_ENABLED(CONFIG_NET_DSA)
2029 if (dev->dsa_ptr != NULL)
2030 return dsa_uses_tagged_protocol(dev->dsa_ptr);
2031 #endif
2032 return false;
2033 }
2034
2035 /**
2036 * netdev_priv - access network device private data
2037 * @dev: network device
2038 *
2039 * Get network device private data
2040 */
2041 static inline void *netdev_priv(const struct net_device *dev)
2042 {
2043 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
2044 }
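
A hedged sketch of the usual allocation pattern, assuming a hypothetical foo driver: the private area is co-allocated with the net_device and reached via netdev_priv():

/* Sketch: hypothetical foo driver private data. */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

struct foo_priv {
        void __iomem *regs;
        int irq;
};

static struct net_device *foo_create(void)
{
        struct net_device *dev = alloc_etherdev(sizeof(struct foo_priv));
        struct foo_priv *priv;

        if (!dev)
                return NULL;

        priv = netdev_priv(dev);        /* points just past struct net_device */
        priv->irq = -1;
        return dev;                     /* caller still has to register_netdev() */
}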
2045
2046 /* Set the sysfs physical device reference for the network logical device.
2047 * If set prior to registration, a symlink is created during initialization.
2048 */
2049 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
2050
2051 /* Set the sysfs device type for the network logical device to allow
2052 * fine-grained identification of different network device types. For
2053 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
2054 */
2055 #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
2056
2057 /* Default NAPI poll() weight
2058 * Device drivers are strongly advised not to use a bigger value
2059 */
2060 #define NAPI_POLL_WEIGHT 64
2061
2062 /**
2063 * netif_napi_add - initialize a NAPI context
2064 * @dev: network device
2065 * @napi: NAPI context
2066 * @poll: polling function
2067 * @weight: default weight
2068 *
2069 * netif_napi_add() must be used to initialize a NAPI context prior to calling
2070 * *any* of the other NAPI-related functions.
2071 */
2072 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2073 int (*poll)(struct napi_struct *, int), int weight);
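
A minimal sketch of NAPI registration, assuming a hypothetical foo driver and eliding the actual RX ring processing:

/* Sketch: hypothetical foo poll routine registered with netif_napi_add(). */
#include <linux/netdevice.h>

struct foo_priv {
        struct napi_struct napi;
};

static int foo_poll(struct napi_struct *napi, int budget)
{
        int work_done = 0;

        /* a real driver would pull up to 'budget' packets from the RX ring here */
        if (work_done < budget)
                napi_complete(napi);    /* then re-enable device interrupts */
        return work_done;
}

static void foo_init_napi(struct net_device *dev)
{
        struct foo_priv *priv = netdev_priv(dev);

        netif_napi_add(dev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
}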
2074
2075 /**
2076 * netif_tx_napi_add - initialize a NAPI context
2077 * @dev: network device
2078 * @napi: NAPI context
2079 * @poll: polling function
2080 * @weight: default weight
2081 *
2082 * This variant of netif_napi_add() should be used from drivers using NAPI
2083 * to exclusively poll a TX queue.
2084 * This avoids adding it to napi_hash[], which would pollute that hash table.
2085 */
2086 static inline void netif_tx_napi_add(struct net_device *dev,
2087 struct napi_struct *napi,
2088 int (*poll)(struct napi_struct *, int),
2089 int weight)
2090 {
2091 set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
2092 netif_napi_add(dev, napi, poll, weight);
2093 }
2094
2095 /**
2096 * netif_napi_del - remove a NAPI context
2097 * @napi: NAPI context
2098 *
2099 * netif_napi_del() removes a NAPI context from the network device NAPI list
2100 */
2101 void netif_napi_del(struct napi_struct *napi);
2102
2103 struct napi_gro_cb {
2104 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
2105 void *frag0;
2106
2107 /* Length of frag0. */
2108 unsigned int frag0_len;
2109
2110 /* This indicates where we are processing relative to skb->data. */
2111 int data_offset;
2112
2113 /* This is non-zero if the packet cannot be merged with the new skb. */
2114 u16 flush;
2115
2116 /* Save the IP ID here and check when we get to the transport layer */
2117 u16 flush_id;
2118
2119 /* Number of segments aggregated. */
2120 u16 count;
2121
2122 /* Start offset for remote checksum offload */
2123 u16 gro_remcsum_start;
2124
2125 /* jiffies when first packet was created/queued */
2126 unsigned long age;
2127
2128 /* Used in ipv6_gro_receive() and foo-over-udp */
2129 u16 proto;
2130
2131 /* This is non-zero if the packet may be of the same flow. */
2132 u8 same_flow:1;
2133
2134 /* Used in tunnel GRO receive */
2135 u8 encap_mark:1;
2136
2137 /* GRO checksum is valid */
2138 u8 csum_valid:1;
2139
2140 /* Number of checksums via CHECKSUM_UNNECESSARY */
2141 u8 csum_cnt:3;
2142
2143 /* Free the skb? */
2144 u8 free:2;
2145 #define NAPI_GRO_FREE 1
2146 #define NAPI_GRO_FREE_STOLEN_HEAD 2
2147
2148 /* Used in foo-over-udp, set in udp[46]_gro_receive */
2149 u8 is_ipv6:1;
2150
2151 /* Used in GRE, set in fou/gue_gro_receive */
2152 u8 is_fou:1;
2153
2154 /* Used to determine if flush_id can be ignored */
2155 u8 is_atomic:1;
2156
2157 /* 5 bit hole */
2158
2159 /* used to support CHECKSUM_COMPLETE for tunneling protocols */
2160 __wsum csum;
2161
2162 /* used in skb_gro_receive() slow path */
2163 struct sk_buff *last;
2164 };
2165
2166 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
2167
2168 struct packet_type {
2169 __be16 type; /* This is really htons(ether_type). */
2170 struct net_device *dev; /* NULL is wildcarded here */
2171 int (*func) (struct sk_buff *,
2172 struct net_device *,
2173 struct packet_type *,
2174 struct net_device *);
2175 bool (*id_match)(struct packet_type *ptype,
2176 struct sock *sk);
2177 void *af_packet_priv;
2178 struct list_head list;
2179 };
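
A hedged sketch of registering a protocol handler through dev_add_pack() (declared further below); the foo names and the choice of ETH_P_IP are illustrative only:

/* Sketch: hypothetical protocol tap. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

static int foo_rcv(struct sk_buff *skb, struct net_device *dev,
                   struct packet_type *pt, struct net_device *orig_dev)
{
        /* a real handler would parse the payload; here the skb is just dropped */
        kfree_skb(skb);
        return NET_RX_SUCCESS;
}

static struct packet_type foo_packet_type __read_mostly = {
        .type = cpu_to_be16(ETH_P_IP),  /* illustrative: hook IPv4 traffic */
        .func = foo_rcv,
};

static void foo_register_tap(void)
{
        dev_add_pack(&foo_packet_type);
}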
2180
2181 struct offload_callbacks {
2182 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
2183 netdev_features_t features);
2184 struct sk_buff **(*gro_receive)(struct sk_buff **head,
2185 struct sk_buff *skb);
2186 int (*gro_complete)(struct sk_buff *skb, int nhoff);
2187 };
2188
2189 struct packet_offload {
2190 __be16 type; /* This is really htons(ether_type). */
2191 u16 priority;
2192 struct offload_callbacks callbacks;
2193 struct list_head list;
2194 };
2195
2196 /* often modified stats are per-CPU, others are shared (netdev->stats) */
2197 struct pcpu_sw_netstats {
2198 u64 rx_packets;
2199 u64 rx_bytes;
2200 u64 tx_packets;
2201 u64 tx_bytes;
2202 struct u64_stats_sync syncp;
2203 };
2204
2205 #define __netdev_alloc_pcpu_stats(type, gfp) \
2206 ({ \
2207 typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
2208 if (pcpu_stats) { \
2209 int __cpu; \
2210 for_each_possible_cpu(__cpu) { \
2211 typeof(type) *stat; \
2212 stat = per_cpu_ptr(pcpu_stats, __cpu); \
2213 u64_stats_init(&stat->syncp); \
2214 } \
2215 } \
2216 pcpu_stats; \
2217 })
2218
2219 #define netdev_alloc_pcpu_stats(type) \
2220 __netdev_alloc_pcpu_stats(type, GFP_KERNEL)
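
A short sketch of the common usage: allocate the per-CPU counters once, then update them locklessly on the hot path (hypothetical foo helpers):

/* Sketch: per-CPU tunnel-style stats. */
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

static int foo_init_stats(struct net_device *dev)
{
        dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        return dev->tstats ? 0 : -ENOMEM;
}

static void foo_count_rx(struct net_device *dev, unsigned int len)
{
        struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

        u64_stats_update_begin(&stats->syncp);
        stats->rx_packets++;
        stats->rx_bytes += len;
        u64_stats_update_end(&stats->syncp);
}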
2221
2222 enum netdev_lag_tx_type {
2223 NETDEV_LAG_TX_TYPE_UNKNOWN,
2224 NETDEV_LAG_TX_TYPE_RANDOM,
2225 NETDEV_LAG_TX_TYPE_BROADCAST,
2226 NETDEV_LAG_TX_TYPE_ROUNDROBIN,
2227 NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
2228 NETDEV_LAG_TX_TYPE_HASH,
2229 };
2230
2231 struct netdev_lag_upper_info {
2232 enum netdev_lag_tx_type tx_type;
2233 };
2234
2235 struct netdev_lag_lower_state_info {
2236 u8 link_up : 1,
2237 tx_enabled : 1;
2238 };
2239
2240 #include <linux/notifier.h>
2241
2242 /* netdevice notifier chain. Please remember to update the rtnetlink
2243 * notification exclusion list in rtnetlink_event() when adding new
2244 * types.
2245 */
2246 #define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
2247 #define NETDEV_DOWN 0x0002
2248 #define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
2249 detected a hardware crash and restarted
2250 - we can use this eg to kick tcp sessions
2251 once done */
2252 #define NETDEV_CHANGE 0x0004 /* Notify device state change */
2253 #define NETDEV_REGISTER 0x0005
2254 #define NETDEV_UNREGISTER 0x0006
2255 #define NETDEV_CHANGEMTU 0x0007 /* notify after mtu change happened */
2256 #define NETDEV_CHANGEADDR 0x0008
2257 #define NETDEV_GOING_DOWN 0x0009
2258 #define NETDEV_CHANGENAME 0x000A
2259 #define NETDEV_FEAT_CHANGE 0x000B
2260 #define NETDEV_BONDING_FAILOVER 0x000C
2261 #define NETDEV_PRE_UP 0x000D
2262 #define NETDEV_PRE_TYPE_CHANGE 0x000E
2263 #define NETDEV_POST_TYPE_CHANGE 0x000F
2264 #define NETDEV_POST_INIT 0x0010
2265 #define NETDEV_UNREGISTER_FINAL 0x0011
2266 #define NETDEV_RELEASE 0x0012
2267 #define NETDEV_NOTIFY_PEERS 0x0013
2268 #define NETDEV_JOIN 0x0014
2269 #define NETDEV_CHANGEUPPER 0x0015
2270 #define NETDEV_RESEND_IGMP 0x0016
2271 #define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */
2272 #define NETDEV_CHANGEINFODATA 0x0018
2273 #define NETDEV_BONDING_INFO 0x0019
2274 #define NETDEV_PRECHANGEUPPER 0x001A
2275 #define NETDEV_CHANGELOWERSTATE 0x001B
2276 #define NETDEV_UDP_TUNNEL_PUSH_INFO 0x001C
2277 #define NETDEV_CHANGE_TX_QUEUE_LEN 0x001E
2278
2279 int register_netdevice_notifier(struct notifier_block *nb);
2280 int unregister_netdevice_notifier(struct notifier_block *nb);
2281
2282 struct netdev_notifier_info {
2283 struct net_device *dev;
2284 };
2285
2286 struct netdev_notifier_change_info {
2287 struct netdev_notifier_info info; /* must be first */
2288 unsigned int flags_changed;
2289 };
2290
2291 struct netdev_notifier_changeupper_info {
2292 struct netdev_notifier_info info; /* must be first */
2293 struct net_device *upper_dev; /* new upper dev */
2294 bool master; /* is upper dev master */
2295 bool linking; /* is the notification for link or unlink */
2296 void *upper_info; /* upper dev info */
2297 };
2298
2299 struct netdev_notifier_changelowerstate_info {
2300 struct netdev_notifier_info info; /* must be first */
2301 void *lower_state_info; /* is lower dev state */
2302 };
2303
2304 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
2305 struct net_device *dev)
2306 {
2307 info->dev = dev;
2308 }
2309
2310 static inline struct net_device *
2311 netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
2312 {
2313 return info->dev;
2314 }
2315
2316 int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
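
A hedged sketch of a netdevice notifier, assuming a hypothetical foo module; only a couple of the events defined above are handled:

/* Sketch: reacting to register/unregister events. */
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/init.h>

static int foo_netdev_event(struct notifier_block *nb,
                            unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        switch (event) {
        case NETDEV_REGISTER:
                netdev_info(dev, "registered\n");
                break;
        case NETDEV_UNREGISTER:
                netdev_info(dev, "going away\n");
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block foo_netdev_notifier = {
        .notifier_call = foo_netdev_event,
};

static int __init foo_init(void)
{
        return register_netdevice_notifier(&foo_netdev_notifier);
}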
2317
2318
2319 extern rwlock_t dev_base_lock; /* Device list lock */
2320
2321 #define for_each_netdev(net, d) \
2322 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
2323 #define for_each_netdev_reverse(net, d) \
2324 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
2325 #define for_each_netdev_rcu(net, d) \
2326 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
2327 #define for_each_netdev_safe(net, d, n) \
2328 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
2329 #define for_each_netdev_continue(net, d) \
2330 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
2331 #define for_each_netdev_continue_rcu(net, d) \
2332 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
2333 #define for_each_netdev_in_bond_rcu(bond, slave) \
2334 for_each_netdev_rcu(&init_net, slave) \
2335 if (netdev_master_upper_dev_get_rcu(slave) == (bond))
2336 #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
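
A small sketch of walking the device list with the RCU variant of these macros (hypothetical helper name):

/* Sketch: counting running devices in a namespace under RCU protection. */
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

static unsigned int foo_count_running(struct net *net)
{
        struct net_device *dev;
        unsigned int n = 0;

        rcu_read_lock();
        for_each_netdev_rcu(net, dev) {
                if (netif_running(dev))
                        n++;
        }
        rcu_read_unlock();
        return n;
}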
2337
2338 static inline struct net_device *next_net_device(struct net_device *dev)
2339 {
2340 struct list_head *lh;
2341 struct net *net;
2342
2343 net = dev_net(dev);
2344 lh = dev->dev_list.next;
2345 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2346 }
2347
2348 static inline struct net_device *next_net_device_rcu(struct net_device *dev)
2349 {
2350 struct list_head *lh;
2351 struct net *net;
2352
2353 net = dev_net(dev);
2354 lh = rcu_dereference(list_next_rcu(&dev->dev_list));
2355 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2356 }
2357
2358 static inline struct net_device *first_net_device(struct net *net)
2359 {
2360 return list_empty(&net->dev_base_head) ? NULL :
2361 net_device_entry(net->dev_base_head.next);
2362 }
2363
2364 static inline struct net_device *first_net_device_rcu(struct net *net)
2365 {
2366 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
2367
2368 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2369 }
2370
2371 int netdev_boot_setup_check(struct net_device *dev);
2372 unsigned long netdev_boot_base(const char *prefix, int unit);
2373 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
2374 const char *hwaddr);
2375 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
2376 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
2377 void dev_add_pack(struct packet_type *pt);
2378 void dev_remove_pack(struct packet_type *pt);
2379 void __dev_remove_pack(struct packet_type *pt);
2380 void dev_add_offload(struct packet_offload *po);
2381 void dev_remove_offload(struct packet_offload *po);
2382
2383 int dev_get_iflink(const struct net_device *dev);
2384 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
2385 struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
2386 unsigned short mask);
2387 struct net_device *dev_get_by_name(struct net *net, const char *name);
2388 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
2389 struct net_device *__dev_get_by_name(struct net *net, const char *name);
2390 int dev_alloc_name(struct net_device *dev, const char *name);
2391 int dev_open(struct net_device *dev);
2392 int dev_close(struct net_device *dev);
2393 int dev_close_many(struct list_head *head, bool unlink);
2394 void dev_disable_lro(struct net_device *dev);
2395 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
2396 int dev_queue_xmit(struct sk_buff *skb);
2397 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
2398 int register_netdevice(struct net_device *dev);
2399 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
2400 void unregister_netdevice_many(struct list_head *head);
2401 static inline void unregister_netdevice(struct net_device *dev)
2402 {
2403 unregister_netdevice_queue(dev, NULL);
2404 }
2405
2406 int netdev_refcnt_read(const struct net_device *dev);
2407 void free_netdev(struct net_device *dev);
2408 void netdev_freemem(struct net_device *dev);
2409 void synchronize_net(void);
2410 int init_dummy_netdev(struct net_device *dev);
2411
2412 DECLARE_PER_CPU(int, xmit_recursion);
2413 #define XMIT_RECURSION_LIMIT 10
2414
2415 static inline int dev_recursion_level(void)
2416 {
2417 return this_cpu_read(xmit_recursion);
2418 }
2419
2420 struct net_device *dev_get_by_index(struct net *net, int ifindex);
2421 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
2422 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
2423 int netdev_get_name(struct net *net, char *name, int ifindex);
2424 int dev_restart(struct net_device *dev);
2425 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
2426
2427 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
2428 {
2429 return NAPI_GRO_CB(skb)->data_offset;
2430 }
2431
2432 static inline unsigned int skb_gro_len(const struct sk_buff *skb)
2433 {
2434 return skb->len - NAPI_GRO_CB(skb)->data_offset;
2435 }
2436
2437 static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
2438 {
2439 NAPI_GRO_CB(skb)->data_offset += len;
2440 }
2441
2442 static inline void *skb_gro_header_fast(struct sk_buff *skb,
2443 unsigned int offset)
2444 {
2445 return NAPI_GRO_CB(skb)->frag0 + offset;
2446 }
2447
2448 static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
2449 {
2450 return NAPI_GRO_CB(skb)->frag0_len < hlen;
2451 }
2452
2453 static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
2454 unsigned int offset)
2455 {
2456 if (!pskb_may_pull(skb, hlen))
2457 return NULL;
2458
2459 NAPI_GRO_CB(skb)->frag0 = NULL;
2460 NAPI_GRO_CB(skb)->frag0_len = 0;
2461 return skb->data + offset;
2462 }
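
A hedged sketch of the header-access pattern these helpers are meant for inside a gro_receive callback; struct fooh and the handler are hypothetical, and the flow-matching logic is elided:

/* Sketch: fast/slow header lookup in a hypothetical gro_receive handler. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct fooh {                   /* hypothetical 4-byte protocol header */
        __be16 id;
        __be16 len;
};

static struct sk_buff **foo_gro_receive(struct sk_buff **head,
                                        struct sk_buff *skb)
{
        unsigned int off = skb_gro_offset(skb);
        unsigned int hlen = off + sizeof(struct fooh);
        struct fooh *fh;

        fh = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, hlen)) {
                fh = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!fh))
                        return NULL;    /* header not available, give up on GRO */
        }

        /* a real handler would match 'fh' against packets held on 'head'
         * and set NAPI_GRO_CB(skb)->flush / same_flow accordingly */
        return NULL;
}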
2463
2464 static inline void *skb_gro_network_header(struct sk_buff *skb)
2465 {
2466 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
2467 skb_network_offset(skb);
2468 }
2469
2470 static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
2471 const void *start, unsigned int len)
2472 {
2473 if (NAPI_GRO_CB(skb)->csum_valid)
2474 NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
2475 csum_partial(start, len, 0));
2476 }
2477
2478 /* GRO checksum functions. These are logical equivalents of the normal
2479 * checksum functions (in skbuff.h) except that they operate on the GRO
2480 * offsets and fields in sk_buff.
2481 */
2482
2483 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
2484
2485 static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
2486 {
2487 return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
2488 }
2489
2490 static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
2491 bool zero_okay,
2492 __sum16 check)
2493 {
2494 return ((skb->ip_summed != CHECKSUM_PARTIAL ||
2495 skb_checksum_start_offset(skb) <
2496 skb_gro_offset(skb)) &&
2497 !skb_at_gro_remcsum_start(skb) &&
2498 NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2499 (!zero_okay || check));
2500 }
2501
2502 static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
2503 __wsum psum)
2504 {
2505 if (NAPI_GRO_CB(skb)->csum_valid &&
2506 !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
2507 return 0;
2508
2509 NAPI_GRO_CB(skb)->csum = psum;
2510
2511 return __skb_gro_checksum_complete(skb);
2512 }
2513
2514 static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
2515 {
2516 if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
2517 /* Consume a checksum from CHECKSUM_UNNECESSARY */
2518 NAPI_GRO_CB(skb)->csum_cnt--;
2519 } else {
2520 /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
2521 * verified a new top level checksum or an encapsulated one
2522 * during GRO. This saves work if we fallback to normal path.
2523 */
2524 __skb_incr_checksum_unnecessary(skb);
2525 }
2526 }
2527
2528 #define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
2529 compute_pseudo) \
2530 ({ \
2531 __sum16 __ret = 0; \
2532 if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
2533 __ret = __skb_gro_checksum_validate_complete(skb, \
2534 compute_pseudo(skb, proto)); \
2535 if (__ret) \
2536 __skb_mark_checksum_bad(skb); \
2537 else \
2538 skb_gro_incr_csum_unnecessary(skb); \
2539 __ret; \
2540 })
2541
2542 #define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
2543 __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
2544
2545 #define skb_gro_checksum_validate_zero_check(skb, proto, check, \
2546 compute_pseudo) \
2547 __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
2548
2549 #define skb_gro_checksum_simple_validate(skb) \
2550 __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
2551
2552 static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
2553 {
2554 return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2555 !NAPI_GRO_CB(skb)->csum_valid);
2556 }
2557
2558 static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
2559 __sum16 check, __wsum pseudo)
2560 {
2561 NAPI_GRO_CB(skb)->csum = ~pseudo;
2562 NAPI_GRO_CB(skb)->csum_valid = 1;
2563 }
2564
2565 #define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \
2566 do { \
2567 if (__skb_gro_checksum_convert_check(skb)) \
2568 __skb_gro_checksum_convert(skb, check, \
2569 compute_pseudo(skb, proto)); \
2570 } while (0)
2571
2572 struct gro_remcsum {
2573 int offset;
2574 __wsum delta;
2575 };
2576
2577 static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
2578 {
2579 grc->offset = 0;
2580 grc->delta = 0;
2581 }
2582
2583 static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
2584 unsigned int off, size_t hdrlen,
2585 int start, int offset,
2586 struct gro_remcsum *grc,
2587 bool nopartial)
2588 {
2589 __wsum delta;
2590 size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
2591
2592 BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
2593
2594 if (!nopartial) {
2595 NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
2596 return ptr;
2597 }
2598
2599 ptr = skb_gro_header_fast(skb, off);
2600 if (skb_gro_header_hard(skb, off + plen)) {
2601 ptr = skb_gro_header_slow(skb, off + plen, off);
2602 if (!ptr)
2603 return NULL;
2604 }
2605
2606 delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
2607 start, offset);
2608
2609 /* Adjust skb->csum since we changed the packet */
2610 NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
2611
2612 grc->offset = off + hdrlen + offset;
2613 grc->delta = delta;
2614
2615 return ptr;
2616 }
2617
2618 static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
2619 struct gro_remcsum *grc)
2620 {
2621 void *ptr;
2622 size_t plen = grc->offset + sizeof(u16);
2623
2624 if (!grc->delta)
2625 return;
2626
2627 ptr = skb_gro_header_fast(skb, grc->offset);
2628 if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
2629 ptr = skb_gro_header_slow(skb, plen, grc->offset);
2630 if (!ptr)
2631 return;
2632 }
2633
2634 remcsum_unadjust((__sum16 *)ptr, grc->delta);
2635 }
2636
2637 struct skb_csum_offl_spec {
2638 __u16 ipv4_okay:1,
2639 ipv6_okay:1,
2640 encap_okay:1,
2641 ip_options_okay:1,
2642 ext_hdrs_okay:1,
2643 tcp_okay:1,
2644 udp_okay:1,
2645 sctp_okay:1,
2646 vlan_okay:1,
2647 no_encapped_ipv6:1,
2648 no_not_encapped:1;
2649 };
2650
2651 bool __skb_csum_offload_chk(struct sk_buff *skb,
2652 const struct skb_csum_offl_spec *spec,
2653 bool *csum_encapped,
2654 bool csum_help);
2655
2656 static inline bool skb_csum_offload_chk(struct sk_buff *skb,
2657 const struct skb_csum_offl_spec *spec,
2658 bool *csum_encapped,
2659 bool csum_help)
2660 {
2661 if (skb->ip_summed != CHECKSUM_PARTIAL)
2662 return false;
2663
2664 return __skb_csum_offload_chk(skb, spec, csum_encapped, csum_help);
2665 }
2666
2667 static inline bool skb_csum_offload_chk_help(struct sk_buff *skb,
2668 const struct skb_csum_offl_spec *spec)
2669 {
2670 bool csum_encapped;
2671
2672 return skb_csum_offload_chk(skb, spec, &csum_encapped, true);
2673 }
2674
2675 static inline bool skb_csum_off_chk_help_cmn(struct sk_buff *skb)
2676 {
2677 static const struct skb_csum_offl_spec csum_offl_spec = {
2678 .ipv4_okay = 1,
2679 .ip_options_okay = 1,
2680 .ipv6_okay = 1,
2681 .vlan_okay = 1,
2682 .tcp_okay = 1,
2683 .udp_okay = 1,
2684 };
2685
2686 return skb_csum_offload_chk_help(skb, &csum_offl_spec);
2687 }
2688
2689 static inline bool skb_csum_off_chk_help_cmn_v4_only(struct sk_buff *skb)
2690 {
2691 static const struct skb_csum_offl_spec csum_offl_spec = {
2692 .ipv4_okay = 1,
2693 .ip_options_okay = 1,
2694 .tcp_okay = 1,
2695 .udp_okay = 1,
2696 .vlan_okay = 1,
2697 };
2698
2699 return skb_csum_offload_chk_help(skb, &csum_offl_spec);
2700 }
2701
2702 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
2703 unsigned short type,
2704 const void *daddr, const void *saddr,
2705 unsigned int len)
2706 {
2707 if (!dev->header_ops || !dev->header_ops->create)
2708 return 0;
2709
2710 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
2711 }
2712
2713 static inline int dev_parse_header(const struct sk_buff *skb,
2714 unsigned char *haddr)
2715 {
2716 const struct net_device *dev = skb->dev;
2717
2718 if (!dev->header_ops || !dev->header_ops->parse)
2719 return 0;
2720 return dev->header_ops->parse(skb, haddr);
2721 }
2722
2723 /* ll_header must have at least hard_header_len allocated */
2724 static inline bool dev_validate_header(const struct net_device *dev,
2725 char *ll_header, int len)
2726 {
2727 if (likely(len >= dev->hard_header_len))
2728 return true;
2729
2730 if (capable(CAP_SYS_RAWIO)) {
2731 memset(ll_header + len, 0, dev->hard_header_len - len);
2732 return true;
2733 }
2734
2735 if (dev->header_ops && dev->header_ops->validate)
2736 return dev->header_ops->validate(ll_header, len);
2737
2738 return false;
2739 }
2740
2741 typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
2742 int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
2743 static inline int unregister_gifconf(unsigned int family)
2744 {
2745 return register_gifconf(family, NULL);
2746 }
2747
2748 #ifdef CONFIG_NET_FLOW_LIMIT
2749 #define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */
2750 struct sd_flow_limit {
2751 u64 count;
2752 unsigned int num_buckets;
2753 unsigned int history_head;
2754 u16 history[FLOW_LIMIT_HISTORY];
2755 u8 buckets[];
2756 };
2757
2758 extern int netdev_flow_limit_table_len;
2759 #endif /* CONFIG_NET_FLOW_LIMIT */
2760
2761 /*
2762 * Incoming packets are placed on per-CPU queues
2763 */
2764 struct softnet_data {
2765 struct list_head poll_list;
2766 struct sk_buff_head process_queue;
2767
2768 /* stats */
2769 unsigned int processed;
2770 unsigned int time_squeeze;
2771 unsigned int received_rps;
2772 #ifdef CONFIG_RPS
2773 struct softnet_data *rps_ipi_list;
2774 #endif
2775 #ifdef CONFIG_NET_FLOW_LIMIT
2776 struct sd_flow_limit __rcu *flow_limit;
2777 #endif
2778 struct Qdisc *output_queue;
2779 struct Qdisc **output_queue_tailp;
2780 struct sk_buff *completion_queue;
2781
2782 #ifdef CONFIG_RPS
2783 /* input_queue_head should be written by cpu owning this struct,
2784 * and only read by other cpus. Worth using a cache line.
2785 */
2786 unsigned int input_queue_head ____cacheline_aligned_in_smp;
2787
2788 /* Elements below can be accessed between CPUs for RPS/RFS */
2789 struct call_single_data csd ____cacheline_aligned_in_smp;
2790 struct softnet_data *rps_ipi_next;
2791 unsigned int cpu;
2792 unsigned int input_queue_tail;
2793 #endif
2794 unsigned int dropped;
2795 struct sk_buff_head input_pkt_queue;
2796 struct napi_struct backlog;
2797
2798 };
2799
2800 static inline void input_queue_head_incr(struct softnet_data *sd)
2801 {
2802 #ifdef CONFIG_RPS
2803 sd->input_queue_head++;
2804 #endif
2805 }
2806
2807 static inline void input_queue_tail_incr_save(struct softnet_data *sd,
2808 unsigned int *qtail)
2809 {
2810 #ifdef CONFIG_RPS
2811 *qtail = ++sd->input_queue_tail;
2812 #endif
2813 }
2814
2815 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
2816
2817 void __netif_schedule(struct Qdisc *q);
2818 void netif_schedule_queue(struct netdev_queue *txq);
2819
2820 static inline void netif_tx_schedule_all(struct net_device *dev)
2821 {
2822 unsigned int i;
2823
2824 for (i = 0; i < dev->num_tx_queues; i++)
2825 netif_schedule_queue(netdev_get_tx_queue(dev, i));
2826 }
2827
2828 static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
2829 {
2830 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2831 }
2832
2833 /**
2834 * netif_start_queue - allow transmit
2835 * @dev: network device
2836 *
2837 * Allow upper layers to call the device hard_start_xmit routine.
2838 */
2839 static inline void netif_start_queue(struct net_device *dev)
2840 {
2841 netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
2842 }
2843
2844 static inline void netif_tx_start_all_queues(struct net_device *dev)
2845 {
2846 unsigned int i;
2847
2848 for (i = 0; i < dev->num_tx_queues; i++) {
2849 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2850 netif_tx_start_queue(txq);
2851 }
2852 }
2853
2854 void netif_tx_wake_queue(struct netdev_queue *dev_queue);
2855
2856 /**
2857 * netif_wake_queue - restart transmit
2858 * @dev: network device
2859 *
2860 * Allow upper layers to call the device hard_start_xmit routine.
2861 * Used for flow control when transmit resources are available.
2862 */
2863 static inline void netif_wake_queue(struct net_device *dev)
2864 {
2865 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
2866 }
2867
2868 static inline void netif_tx_wake_all_queues(struct net_device *dev)
2869 {
2870 unsigned int i;
2871
2872 for (i = 0; i < dev->num_tx_queues; i++) {
2873 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2874 netif_tx_wake_queue(txq);
2875 }
2876 }
2877
2878 static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
2879 {
2880 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2881 }
2882
2883 /**
2884 * netif_stop_queue - stop the transmit queue
2885 * @dev: network device
2886 *
2887 * Stop upper layers calling the device hard_start_xmit routine.
2888 * Used for flow control when transmit resources are unavailable.
2889 */
2890 static inline void netif_stop_queue(struct net_device *dev)
2891 {
2892 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
2893 }
2894
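/*
 * Illustrative sketch, not part of the traced header: how a hypothetical
 * single-queue driver might pair netif_stop_queue()/netif_wake_queue() for
 * flow control. struct my_priv, my_tx_ring_full() and my_tx_ring_has_room()
 * are assumptions made up for this example.
 */
static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	/* post the skb to the hardware ring (details omitted) */

	if (my_tx_ring_full(priv))
		netif_stop_queue(dev);	/* transmit resources exhausted */

	return NETDEV_TX_OK;
}

static void my_tx_complete(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	/* reclaim completed descriptors (details omitted) */

	if (netif_queue_stopped(dev) && my_tx_ring_has_room(priv))
		netif_wake_queue(dev);	/* transmit resources available again */
}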
2895 void netif_tx_stop_all_queues(struct net_device *dev);
2896
2897 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
2898 {
2899 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2900 }
2901
2902 /**
2903 * netif_queue_stopped - test if transmit queue is flow blocked
2904 * @dev: network device
2905 *
2906 * Test if transmit queue on device is currently unable to send.
2907 */
2908 static inline bool netif_queue_stopped(const struct net_device *dev)
2909 {
2910 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
2911 }
2912
2913 static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
2914 {
2915 return dev_queue->state & QUEUE_STATE_ANY_XOFF;
2916 }
2917
2918 static inline bool
2919 netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
2920 {
2921 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
2922 }
2923
2924 static inline bool
2925 netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
2926 {
2927 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
2928 }
2929
2930 /**
2931 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
2932 * @dev_queue: pointer to transmit queue
2933 *
2934 * BQL enabled drivers might use this helper in their ndo_start_xmit(),
2935 * to give appropriate hint to the CPU.
2936 */
2937 static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
2938 {
2939 #ifdef CONFIG_BQL
2940 prefetchw(&dev_queue->dql.num_queued);
2941 #endif
2942 }
2943
2944 /**
2945 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
2946 * @dev_queue: pointer to transmit queue
2947 *
2948 * BQL enabled drivers might use this helper in their TX completion path,
2949 * to give appropriate hint to the CPU.
2950 */
2951 static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
2952 {
2953 #ifdef CONFIG_BQL
2954 prefetchw(&dev_queue->dql.limit);
2955 #endif
2956 }
2957
2958 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
2959 unsigned int bytes)
2960 {
2961 #ifdef CONFIG_BQL
2962 dql_queued(&dev_queue->dql, bytes);
2963
2964 if (likely(dql_avail(&dev_queue->dql) >= 0))
2965 return;
2966
2967 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
2968
2969 /*
2970 * The XOFF flag must be set before checking the dql_avail below,
2971 * because in netdev_tx_completed_queue we update the dql_completed
2972 * before checking the XOFF flag.
2973 */
2974 smp_mb();
2975
2976 /* check again in case another CPU has just made room avail */
2977 if (unlikely(dql_avail(&dev_queue->dql) >= 0))
2978 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
2979 #endif
2980 }
2981
2982 /**
2983 * netdev_sent_queue - report the number of bytes queued to hardware
2984 * @dev: network device
2985 * @bytes: number of bytes queued to the hardware device queue
2986 *
2987 * Report the number of bytes queued for sending/completion to the network
2988 * device hardware queue. @bytes should be a good approximation and should
2989 * exactly match netdev_completed_queue() @bytes
2990 */
2991 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
2992 {
2993 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
2994 }
2995
2996 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
2997 unsigned int pkts, unsigned int bytes)
2998 {
2999 #ifdef CONFIG_BQL
3000 if (unlikely(!bytes))
3001 return;
3002
3003 dql_completed(&dev_queue->dql, bytes);
3004
3005 /*
3006 * Without the memory barrier there is a small possibility that
3007 * netdev_tx_sent_queue will miss the update and cause the queue to
3008 * be stopped forever
3009 */
3010 smp_mb();
3011
3012 if (dql_avail(&dev_queue->dql) < 0)
3013 return;
3014
3015 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
3016 netif_schedule_queue(dev_queue);
3017 #endif
3018 }
3019
3020 /**
3021 * netdev_completed_queue - report bytes and packets completed by device
3022 * @dev: network device
3023 * @pkts: actual number of packets sent over the medium
3024 * @bytes: actual number of bytes sent over the medium
3025 *
3026 * Report the number of bytes and packets transmitted by the network device
3027 * hardware queue over the physical medium, @bytes must exactly match the
3028 * @bytes amount passed to netdev_sent_queue()
3029 */
3030 static inline void netdev_completed_queue(struct net_device *dev,
3031 unsigned int pkts, unsigned int bytes)
3032 {
3033 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
3034 }
3035
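/*
 * Illustrative sketch, not part of the traced header: how a BQL-aware driver
 * would typically pair the two helpers above. The byte count reported to
 * netdev_tx_completed_queue() must match what was reported via
 * netdev_tx_sent_queue() at transmit time. The my_bql_* names are assumptions
 * made up for this example.
 */
static netdev_tx_t my_bql_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	/* post the skb to hardware (details omitted) */
	netdev_tx_sent_queue(txq, skb->len);

	return NETDEV_TX_OK;
}

static void my_bql_tx_clean(struct net_device *dev, unsigned int queue,
			    unsigned int pkts, unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);

	/* free the completed skbs first, then report the totals */
	netdev_tx_completed_queue(txq, pkts, bytes);
}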
3036 static inline void netdev_tx_reset_queue(struct netdev_queue *q)
3037 {
3038 #ifdef CONFIG_BQL
3039 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
3040 dql_reset(&q->dql);
3041 #endif
3042 }
3043
3044 /**
3045 * netdev_reset_queue - reset the packets and bytes count of a network device
3046 * @dev_queue: network device
3047 *
3048 * Reset the bytes and packet count of a network device and clear the
3049 * software flow control OFF bit for this network device
3050 */
3051 static inline void netdev_reset_queue(struct net_device *dev_queue)
3052 {
3053 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
3054 }
3055
3056 /**
3057 * netdev_cap_txqueue - check if selected tx queue exceeds device queues
3058 * @dev: network device
3059 * @queue_index: given tx queue index
3060 *
3061 * Returns 0 if given tx queue index >= number of device tx queues,
3062 * otherwise returns the originally passed tx queue index.
3063 */
3064 static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
3065 {
3066 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
3067 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
3068 dev->name, queue_index,
3069 dev->real_num_tx_queues);
3070 return 0;
3071 }
3072
3073 return queue_index;
3074 }
3075
3076 /**
3077 * netif_running - test if up
3078 * @dev: network device
3079 *
3080 * Test if the device has been brought up.
3081 */
3082 static inline bool netif_running(const struct net_device *dev)
3083 {
3084 return test_bit(__LINK_STATE_START, &dev->state);
3085 }
3086
3087 /*
3088 * Routines to manage the subqueues on a device. We only need start,
3089 * stop, and a check if it's stopped. All other device management is
3090 * done at the overall netdevice level.
3091 * Also test the device if we're multiqueue.
3092 */
3093
3094 /**
3095 * netif_start_subqueue - allow sending packets on subqueue
3096 * @dev: network device
3097 * @queue_index: sub queue index
3098 *
3099 * Start individual transmit queue of a device with multiple transmit queues.
3100 */
3101 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
3102 {
3103 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3104
3105 netif_tx_start_queue(txq);
3106 }
3107
3108 /**
3109 * netif_stop_subqueue - stop sending packets on subqueue
3110 * @dev: network device
3111 * @queue_index: sub queue index
3112 *
3113 * Stop individual transmit queue of a device with multiple transmit queues.
3114 */
3115 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
3116 {
3117 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3118 netif_tx_stop_queue(txq);
3119 }
3120
3121 /**
3122 * netif_subqueue_stopped - test status of subqueue
3123 * @dev: network device
3124 * @queue_index: sub queue index
3125 *
3126 * Check individual transmit queue of a device with multiple transmit queues.
3127 */
3128 static inline bool __netif_subqueue_stopped(const struct net_device *dev,
3129 u16 queue_index)
3130 {
3131 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3132
3133 return netif_tx_queue_stopped(txq);
3134 }
3135
3136 static inline bool netif_subqueue_stopped(const struct net_device *dev,
3137 struct sk_buff *skb)
3138 {
3139 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
3140 }
3141
3142 void netif_wake_subqueue(struct net_device *dev, u16 queue_index);
3143
3144 #ifdef CONFIG_XPS
3145 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
3146 u16 index);
3147 #else
3148 static inline int netif_set_xps_queue(struct net_device *dev,
3149 const struct cpumask *mask,
3150 u16 index)
3151 {
3152 return 0;
3153 }
3154 #endif
3155
3156 u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
3157 unsigned int num_tx_queues);
3158
3159 /*
3160 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
3161 * as a distribution range limit for the returned value.
3162 */
3163 static inline u16 skb_tx_hash(const struct net_device *dev,
3164 struct sk_buff *skb)
3165 {
3166 return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
3167 }
3168
3169 /**
3170 * netif_is_multiqueue - test if device has multiple transmit queues
3171 * @dev: network device
3172 *
3173 * Check if device has multiple transmit queues
3174 */
3175 static inline bool netif_is_multiqueue(const struct net_device *dev)
3176 {
3177 return dev->num_tx_queues > 1;
3178 }
3179
3180 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
3181
3182 #ifdef CONFIG_SYSFS
3183 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
3184 #else
3185 static inline int netif_set_real_num_rx_queues(struct net_device *dev,
3186 unsigned int rxq)
3187 {
3188 return 0;
3189 }
3190 #endif
3191
3192 #ifdef CONFIG_SYSFS
3193 static inline unsigned int get_netdev_rx_queue_index(
3194 struct netdev_rx_queue *queue)
3195 {
3196 struct net_device *dev = queue->dev;
3197 int index = queue - dev->_rx;
3198
3199 BUG_ON(index >= dev->num_rx_queues);
3200 return index;
3201 }
3202 #endif
3203
3204 #define DEFAULT_MAX_NUM_RSS_QUEUES (8)
3205 int netif_get_num_default_rss_queues(void);
3206
3207 enum skb_free_reason {
3208 SKB_REASON_CONSUMED,
3209 SKB_REASON_DROPPED,
3210 };
3211
3212 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
3213 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
3214
3215 /*
3216 * It is not allowed to call kfree_skb() or consume_skb() from hardware
3217 * interrupt context or with hardware interrupts being disabled.
3218 * (in_irq() || irqs_disabled())
3219 *
3220 * We provide four helpers that can be used in following contexts :
3221 *
3222 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
3223 * replacing kfree_skb(skb)
3224 *
3225 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
3226 * Typically used in place of consume_skb(skb) in TX completion path
3227 *
3228 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
3229 * replacing kfree_skb(skb)
3230 *
3231 * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
3232 * and consumed a packet. Used in place of consume_skb(skb)
3233 */
3234 static inline void dev_kfree_skb_irq(struct sk_buff *skb)
3235 {
3236 __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
3237 }
3238
3239 static inline void dev_consume_skb_irq(struct sk_buff *skb)
3240 {
3241 __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
3242 }
3243
3244 static inline void dev_kfree_skb_any(struct sk_buff *skb)
3245 {
3246 __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
3247 }
3248
3249 static inline void dev_consume_skb_any(struct sk_buff *skb)
3250 {
3251 __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
3252 }
3253
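/*
 * Illustrative sketch, not part of the traced header: choosing between the
 * helpers above in a hypothetical TX-completion path running in hard
 * interrupt context, where kfree_skb()/consume_skb() must not be used.
 */
static void my_tx_irq_clean(struct sk_buff *skb, bool transmitted_ok)
{
	if (transmitted_ok)
		dev_consume_skb_irq(skb);	/* normal completion */
	else
		dev_kfree_skb_irq(skb);		/* counted as a drop */
}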
3254 int netif_rx(struct sk_buff *skb);
3255 int netif_rx_ni(struct sk_buff *skb);
3256 int netif_receive_skb(struct sk_buff *skb);
3257 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
3258 void napi_gro_flush(struct napi_struct *napi, bool flush_old);
3259 struct sk_buff *napi_get_frags(struct napi_struct *napi);
3260 gro_result_t napi_gro_frags(struct napi_struct *napi);
3261 struct packet_offload *gro_find_receive_by_type(__be16 type);
3262 struct packet_offload *gro_find_complete_by_type(__be16 type);
3263
3264 static inline void napi_free_frags(struct napi_struct *napi)
3265 {
3266 kfree_skb(napi->skb);
3267 napi->skb = NULL;
3268 }
3269
3270 int netdev_rx_handler_register(struct net_device *dev,
3271 rx_handler_func_t *rx_handler,
3272 void *rx_handler_data);
3273 void netdev_rx_handler_unregister(struct net_device *dev);
3274
3275 bool dev_valid_name(const char *name);
3276 int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
3277 int dev_ethtool(struct net *net, struct ifreq *);
3278 unsigned int dev_get_flags(const struct net_device *);
3279 int __dev_change_flags(struct net_device *, unsigned int flags);
3280 int dev_change_flags(struct net_device *, unsigned int);
3281 void __dev_notify_flags(struct net_device *, unsigned int old_flags,
3282 unsigned int gchanges);
3283 int dev_change_name(struct net_device *, const char *);
3284 int dev_set_alias(struct net_device *, const char *, size_t);
3285 int dev_change_net_namespace(struct net_device *, struct net *, const char *);
3286 int dev_set_mtu(struct net_device *, int);
3287 void dev_set_group(struct net_device *, int);
3288 int dev_set_mac_address(struct net_device *, struct sockaddr *);
3289 int dev_change_carrier(struct net_device *, bool new_carrier);
3290 int dev_get_phys_port_id(struct net_device *dev,
3291 struct netdev_phys_item_id *ppid);
3292 int dev_get_phys_port_name(struct net_device *dev,
3293 char *name, size_t len);
3294 int dev_change_proto_down(struct net_device *dev, bool proto_down);
3295 int dev_change_xdp_fd(struct net_device *dev, int fd);
3296 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
3297 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
3298 struct netdev_queue *txq, int *ret);
3299 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3300 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3301 bool is_skb_forwardable(const struct net_device *dev,
3302 const struct sk_buff *skb);
3303
3304 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
3305
3306 extern int netdev_budget;
3307
3308 /* Called by rtnetlink.c:rtnl_unlock() */
3309 void netdev_run_todo(void);
3310
3311 /**
3312 * dev_put - release reference to device
3313 * @dev: network device
3314 *
3315 * Release reference to device to allow it to be freed.
3316 */
3317 static inline void dev_put(struct net_device *dev)
3318 {
3319 this_cpu_dec(*dev->pcpu_refcnt);
3320 }
3321
3322 /**
3323 * dev_hold - get reference to device
3324 * @dev: network device
3325 *
3326 * Hold reference to device to keep it from being freed.
3327 */
3328 static inline void dev_hold(struct net_device *dev)
3329 {
3330 this_cpu_inc(*dev->pcpu_refcnt);
3331 }
3332
3333 /* Carrier loss detection, dial on demand. The functions netif_carrier_on
3334 * and _off may be called from IRQ context, but it is the caller
3335 * who is responsible for serialization of these calls.
3336 *
3337 * The name carrier is inappropriate; these functions should really be
3338 * called netif_lowerlayer_*() because they represent the state of any
3339 * kind of lower layer, not just hardware media.
3340 */
3341
3342 void linkwatch_init_dev(struct net_device *dev);
3343 void linkwatch_fire_event(struct net_device *dev);
3344 void linkwatch_forget_dev(struct net_device *dev);
3345
3346 /**
3347 * netif_carrier_ok - test if carrier present
3348 * @dev: network device
3349 *
3350 * Check if carrier is present on device
3351 */
3352 static inline bool netif_carrier_ok(const struct net_device *dev)
3353 {
3354 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
3355 }
3356
3357 unsigned long dev_trans_start(struct net_device *dev);
3358
3359 void __netdev_watchdog_up(struct net_device *dev);
3360
3361 void netif_carrier_on(struct net_device *dev);
3362
3363 void netif_carrier_off(struct net_device *dev);
3364
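/*
 * Illustrative sketch, not part of the traced header: a hypothetical link
 * status handler propagating PHY state through the carrier helpers declared
 * above. my_link_is_up() is an assumption made up for this example.
 */
static void my_link_change(struct net_device *dev)
{
	if (my_link_is_up(dev)) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
	}
}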
3365 /**
3366 * netif_dormant_on - mark device as dormant.
3367 * @dev: network device
3368 *
3369 * Mark device as dormant (as per RFC2863).
3370 *
3371 * The dormant state indicates that the relevant interface is not
3372 * actually in a condition to pass packets (i.e., it is not 'up') but is
3373 * in a "pending" state, waiting for some external event. For "on-
3374 * demand" interfaces, this new state identifies the situation where the
3375 * interface is waiting for events to place it in the up state.
3376 */
3377 static inline void netif_dormant_on(struct net_device *dev)
3378 {
3379 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
3380 linkwatch_fire_event(dev);
3381 }
3382
3383 /**
3384 * netif_dormant_off - set device as not dormant.
3385 * @dev: network device
3386 *
3387 * Device is not in dormant state.
3388 */
3389 static inline void netif_dormant_off(struct net_device *dev)
3390 {
3391 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
3392 linkwatch_fire_event(dev);
3393 }
3394
3395 /**
3396 * netif_dormant - test if device is dormant
3397 * @dev: network device
3398 *
3399 * Check if the device is in the dormant state.
3400 */
3401 static inline bool netif_dormant(const struct net_device *dev)
3402 {
3403 return test_bit(__LINK_STATE_DORMANT, &dev->state);
3404 }
3405
3406
3407 /**
3408 * netif_oper_up - test if device is operational
3409 * @dev: network device
3410 *
3411 * Check if carrier is operational
3412 */
3413 static inline bool netif_oper_up(const struct net_device *dev)
3414 {
3415 return (dev->operstate == IF_OPER_UP ||
3416 dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
3417 }
3418
3419 /**
3420 * netif_device_present - is device available or removed
3421 * @dev: network device
3422 *
3423 * Check if device has not been removed from system.
3424 */
3425 static inline bool netif_device_present(struct net_device *dev)
3426 {
3427 return test_bit(__LINK_STATE_PRESENT, &dev->state);
3428 }
3429
3430 void netif_device_detach(struct net_device *dev);
3431
3432 void netif_device_attach(struct net_device *dev);
3433
3434 /*
3435 * Network interface message level settings
3436 */
3437
3438 enum {
3439 NETIF_MSG_DRV = 0x0001,
3440 NETIF_MSG_PROBE = 0x0002,
3441 NETIF_MSG_LINK = 0x0004,
3442 NETIF_MSG_TIMER = 0x0008,
3443 NETIF_MSG_IFDOWN = 0x0010,
3444 NETIF_MSG_IFUP = 0x0020,
3445 NETIF_MSG_RX_ERR = 0x0040,
3446 NETIF_MSG_TX_ERR = 0x0080,
3447 NETIF_MSG_TX_QUEUED = 0x0100,
3448 NETIF_MSG_INTR = 0x0200,
3449 NETIF_MSG_TX_DONE = 0x0400,
3450 NETIF_MSG_RX_STATUS = 0x0800,
3451 NETIF_MSG_PKTDATA = 0x1000,
3452 NETIF_MSG_HW = 0x2000,
3453 NETIF_MSG_WOL = 0x4000,
3454 };
3455
3456 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
3457 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
3458 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
3459 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
3460 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
3461 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
3462 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
3463 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
3464 #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
3465 #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
3466 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
3467 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
3468 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
3469 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
3470 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
3471
3472 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
3473 {
3474 /* use default */
3475 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
3476 return default_msg_enable_bits;
3477 if (debug_value == 0) /* no output */
3478 return 0;
3479 /* set low N bits */
3480 return (1 << debug_value) - 1;
3481 }
3482
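/*
 * Illustrative sketch, not part of the traced header: typical use of
 * netif_msg_init() together with the netif_msg_*() macros above. The debug
 * variable and struct my_priv (assumed to carry msg_enable and dev fields)
 * are made up for this example.
 */
static int debug = -1;	/* -1 selects the driver's default message level */

static void my_msg_setup(struct my_priv *priv)
{
	priv->msg_enable = netif_msg_init(debug,
					  NETIF_MSG_DRV | NETIF_MSG_PROBE |
					  NETIF_MSG_LINK);

	if (netif_msg_drv(priv))
		netdev_info(priv->dev, "message level set to 0x%x\n",
			    priv->msg_enable);
}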
3483 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
3484 {
3485 spin_lock(&txq->_xmit_lock);
3486 txq->xmit_lock_owner = cpu;
3487 }
3488
3489 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
3490 {
3491 spin_lock_bh(&txq->_xmit_lock);
3492 txq->xmit_lock_owner = smp_processor_id();
3493 }
3494
3495 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
3496 {
3497 bool ok = spin_trylock(&txq->_xmit_lock);
3498 if (likely(ok))
3499 txq->xmit_lock_owner = smp_processor_id();
3500 return ok;
3501 }
3502
3503 static inline void __netif_tx_unlock(struct netdev_queue *txq)
3504 {
3505 txq->xmit_lock_owner = -1;
3506 spin_unlock(&txq->_xmit_lock);
3507 }
3508
3509 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
3510 {
3511 txq->xmit_lock_owner = -1;
3512 spin_unlock_bh(&txq->_xmit_lock);
3513 }
3514
3515 static inline void txq_trans_update(struct netdev_queue *txq)
3516 {
3517 if (txq->xmit_lock_owner != -1)
3518 txq->trans_start = jiffies;
3519 }
3520
3521 /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
3522 static inline void netif_trans_update(struct net_device *dev)
3523 {
3524 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
3525
3526 if (txq->trans_start != jiffies)
3527 txq->trans_start = jiffies;
3528 }
3529
3530 /**
3531 * netif_tx_lock - grab network device transmit lock
3532 * @dev: network device
3533 *
3534 * Get network device transmit lock
3535 */
3536 static inline void netif_tx_lock(struct net_device *dev)
3537 {
3538 unsigned int i;
3539 int cpu;
3540
3541 spin_lock(&dev->tx_global_lock);
3542 cpu = smp_processor_id();
3543 for (i = 0; i < dev->num_tx_queues; i++) {
3544 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3545
3546 /* We are the only thread of execution doing a
3547 * freeze, but we have to grab the _xmit_lock in
3548 * order to synchronize with threads which are in
3549 * the ->hard_start_xmit() handler and already
3550 * checked the frozen bit.
3551 */
3552 __netif_tx_lock(txq, cpu);
3553 set_bit(__QUEUE_STATE_FROZEN, &txq->state);
3554 __netif_tx_unlock(txq);
3555 }
3556 }
3557
3558 static inline void netif_tx_lock_bh(struct net_device *dev)
3559 {
3560 local_bh_disable();
3561 netif_tx_lock(dev);
3562 }
3563
3564 static inline void netif_tx_unlock(struct net_device *dev)
3565 {
3566 unsigned int i;
3567
3568 for (i = 0; i < dev->num_tx_queues; i++) {
3569 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3570
3571 /* No need to grab the _xmit_lock here. If the
3572 * queue is not stopped for another reason, we
3573 * force a schedule.
3574 */
3575 clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
3576 netif_schedule_queue(txq);
3577 }
3578 spin_unlock(&dev->tx_global_lock);
3579 }
3580
3581 static inline void netif_tx_unlock_bh(struct net_device *dev)
3582 {
3583 netif_tx_unlock(dev);
3584 local_bh_enable();
3585 }
3586
3587 #define HARD_TX_LOCK(dev, txq, cpu) { \
3588 if ((dev->features & NETIF_F_LLTX) == 0) { \
3589 __netif_tx_lock(txq, cpu); \
3590 } \
3591 }
3592
3593 #define HARD_TX_TRYLOCK(dev, txq) \
3594 (((dev->features & NETIF_F_LLTX) == 0) ? \
3595 __netif_tx_trylock(txq) : \
3596 true )
3597
3598 #define HARD_TX_UNLOCK(dev, txq) { \
3599 if ((dev->features & NETIF_F_LLTX) == 0) { \
3600 __netif_tx_unlock(txq); \
3601 } \
3602 }
3603
3604 static inline void netif_tx_disable(struct net_device *dev)
3605 {
3606 unsigned int i;
3607 int cpu;
3608
3609 local_bh_disable();
3610 cpu = smp_processor_id();
3611 for (i = 0; i < dev->num_tx_queues; i++) {
3612 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3613
3614 __netif_tx_lock(txq, cpu);
3615 netif_tx_stop_queue(txq);
3616 __netif_tx_unlock(txq);
3617 }
3618 local_bh_enable();
3619 }
3620
3621 static inline void netif_addr_lock(struct net_device *dev)
3622 {
3623 spin_lock(&dev->addr_list_lock);
3624 }
3625
3626 static inline void netif_addr_lock_nested(struct net_device *dev)
3627 {
3628 int subclass = SINGLE_DEPTH_NESTING;
3629
3630 if (dev->netdev_ops->ndo_get_lock_subclass)
3631 subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
3632
3633 spin_lock_nested(&dev->addr_list_lock, subclass);
3634 }
3635
3636 static inline void netif_addr_lock_bh(struct net_device *dev)
3637 {
3638 spin_lock_bh(&dev->addr_list_lock);
3639 }
3640
3641 static inline void netif_addr_unlock(struct net_device *dev)
3642 {
3643 spin_unlock(&dev->addr_list_lock);
3644 }
3645
3646 static inline void netif_addr_unlock_bh(struct net_device *dev)
3647 {
3648 spin_unlock_bh(&dev->addr_list_lock);
3649 }
3650
3651 /*
3652 * dev_addrs walker. Should be used only for read access. Call with
3653 * rcu_read_lock held.
3654 */
3655 #define for_each_dev_addr(dev, ha) \
3656 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
3657
3658 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
3659
3660 void ether_setup(struct net_device *dev);
3661
3662 /* Support for loadable net-drivers */
3663 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
3664 unsigned char name_assign_type,
3665 void (*setup)(struct net_device *),
3666 unsigned int txqs, unsigned int rxqs);
3667 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
3668 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
3669
3670 #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
3671 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
3672 count)
3673
3674 int register_netdev(struct net_device *dev);
3675 void unregister_netdev(struct net_device *dev);
3676
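/*
 * Illustrative sketch, not part of the traced header: allocating,
 * registering and tearing down a single-queue Ethernet-style device with
 * the helpers above. struct my_priv and my_setup() are assumptions made up
 * for this example.
 */
static void my_setup(struct net_device *dev)
{
	ether_setup(dev);	/* reasonable Ethernet defaults */
	/* dev->netdev_ops = &my_netdev_ops; (driver specific) */
}

static struct net_device *my_create(void)
{
	struct net_device *dev;

	dev = alloc_netdev(sizeof(struct my_priv), "my%d",
			   NET_NAME_UNKNOWN, my_setup);
	if (!dev)
		return NULL;

	if (register_netdev(dev)) {
		free_netdev(dev);
		return NULL;
	}
	return dev;
}

static void my_destroy(struct net_device *dev)
{
	unregister_netdev(dev);
	free_netdev(dev);
}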
3677 /* General hardware address lists handling functions */
3678 int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3679 struct netdev_hw_addr_list *from_list, int addr_len);
3680 void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3681 struct netdev_hw_addr_list *from_list, int addr_len);
3682 int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
3683 struct net_device *dev,
3684 int (*sync)(struct net_device *, const unsigned char *),
3685 int (*unsync)(struct net_device *,
3686 const unsigned char *));
3687 void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
3688 struct net_device *dev,
3689 int (*unsync)(struct net_device *,
3690 const unsigned char *));
3691 void __hw_addr_init(struct netdev_hw_addr_list *list);
3692
3693 /* Functions used for device addresses handling */
3694 int dev_addr_add(struct net_device *dev, const unsigned char *addr,
3695 unsigned char addr_type);
3696 int dev_addr_del(struct net_device *dev, const unsigned char *addr,
3697 unsigned char addr_type);
3698 void dev_addr_flush(struct net_device *dev);
3699 int dev_addr_init(struct net_device *dev);
3700
3701 /* Functions used for unicast addresses handling */
3702 int dev_uc_add(struct net_device *dev, const unsigned char *addr);
3703 int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
3704 int dev_uc_del(struct net_device *dev, const unsigned char *addr);
3705 int dev_uc_sync(struct net_device *to, struct net_device *from);
3706 int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
3707 void dev_uc_unsync(struct net_device *to, struct net_device *from);
3708 void dev_uc_flush(struct net_device *dev);
3709 void dev_uc_init(struct net_device *dev);
3710
3711 /**
3712 * __dev_uc_sync - Synchronize device's unicast list
3713 * @dev: device to sync
3714 * @sync: function to call if address should be added
3715 * @unsync: function to call if address should be removed
3716 *
3717 * Add newly added addresses to the interface, and release
3718 * addresses that have been deleted.
3719 */
3720 static inline int __dev_uc_sync(struct net_device *dev,
3721 int (*sync)(struct net_device *,
3722 const unsigned char *),
3723 int (*unsync)(struct net_device *,
3724 const unsigned char *))
3725 {
3726 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
3727 }
3728
3729 /**
3730 * __dev_uc_unsync - Remove synchronized addresses from device
3731 * @dev: device to sync
3732 * @unsync: function to call if address should be removed
3733 *
3734 * Remove all addresses that were added to the device by dev_uc_sync().
3735 */
3736 static inline void __dev_uc_unsync(struct net_device *dev,
3737 int (*unsync)(struct net_device *,
3738 const unsigned char *))
3739 {
3740 __hw_addr_unsync_dev(&dev->uc, dev, unsync);
3741 }
3742
3743 /* Functions used for multicast addresses handling */
3744 int dev_mc_add(struct net_device *dev, const unsigned char *addr);
3745 int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
3746 int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
3747 int dev_mc_del(struct net_device *dev, const unsigned char *addr);
3748 int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
3749 int dev_mc_sync(struct net_device *to, struct net_device *from);
3750 int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
3751 void dev_mc_unsync(struct net_device *to, struct net_device *from);
3752 void dev_mc_flush(struct net_device *dev);
3753 void dev_mc_init(struct net_device *dev);
3754
3755 /**
3756 * __dev_mc_sync - Synchronize device's multicast list
3757 * @dev: device to sync
3758 * @sync: function to call if address should be added
3759 * @unsync: function to call if address should be removed
3760 *
3761 * Add newly added addresses to the interface, and release
3762 * addresses that have been deleted.
3763 */
3764 static inline int __dev_mc_sync(struct net_device *dev,
3765 int (*sync)(struct net_device *,
3766 const unsigned char *),
3767 int (*unsync)(struct net_device *,
3768 const unsigned char *))
3769 {
3770 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
3771 }
3772
3773 /**
3774 * __dev_mc_unsync - Remove synchronized addresses from device
3775 * @dev: device to sync
3776 * @unsync: function to call if address should be removed
3777 *
3778 * Remove all addresses that were added to the device by dev_mc_sync().
3779 */
3780 static inline void __dev_mc_unsync(struct net_device *dev,
3781 int (*unsync)(struct net_device *,
3782 const unsigned char *))
3783 {
3784 __hw_addr_unsync_dev(&dev->mc, dev, unsync);
3785 }
3786
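/*
 * Illustrative sketch, not part of the traced header: a hypothetical
 * ndo_set_rx_mode() implementation driving the sync helpers above. The
 * my_add_filter()/my_del_filter() callbacks, which would program the
 * hardware address filter, are assumptions made up for this example.
 */
static int my_add_filter(struct net_device *dev, const unsigned char *addr)
{
	/* add addr to the hardware filter (details omitted) */
	return 0;
}

static int my_del_filter(struct net_device *dev, const unsigned char *addr)
{
	/* remove addr from the hardware filter (details omitted) */
	return 0;
}

static void my_set_rx_mode(struct net_device *dev)
{
	__dev_uc_sync(dev, my_add_filter, my_del_filter);
	__dev_mc_sync(dev, my_add_filter, my_del_filter);
}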
3787 /* Functions used for secondary unicast and multicast support */
3788 void dev_set_rx_mode(struct net_device *dev);
3789 void __dev_set_rx_mode(struct net_device *dev);
3790 int dev_set_promiscuity(struct net_device *dev, int inc);
3791 int dev_set_allmulti(struct net_device *dev, int inc);
3792 void netdev_state_change(struct net_device *dev);
3793 void netdev_notify_peers(struct net_device *dev);
3794 void netdev_features_change(struct net_device *dev);
3795 /* Load a device via the kmod */
3796 void dev_load(struct net *net, const char *name);
3797 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
3798 struct rtnl_link_stats64 *storage);
3799 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
3800 const struct net_device_stats *netdev_stats);
3801
3802 extern int netdev_max_backlog;
3803 extern int netdev_tstamp_prequeue;
3804 extern int weight_p;
3805
3806 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
3807 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
3808 struct list_head **iter);
3809 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
3810 struct list_head **iter);
3811
3812 /* iterate through upper list, must be called under RCU read lock */
3813 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
3814 for (iter = &(dev)->adj_list.upper, \
3815 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
3816 updev; \
3817 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
3818
3819 /* iterate through upper list, must be called under RCU read lock */
3820 #define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
3821 for (iter = &(dev)->all_adj_list.upper, \
3822 updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
3823 updev; \
3824 updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))
3825
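/*
 * Illustrative sketch, not part of the traced header: walking the immediate
 * upper devices with the iterator above, which must run under
 * rcu_read_lock().
 */
static void my_log_uppers(struct net_device *dev)
{
	struct net_device *upper;
	struct list_head *iter;

	rcu_read_lock();
	netdev_for_each_upper_dev_rcu(dev, upper, iter)
		netdev_info(dev, "upper device: %s\n", upper->name);
	rcu_read_unlock();
}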
3826 void *netdev_lower_get_next_private(struct net_device *dev,
3827 struct list_head **iter);
3828 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
3829 struct list_head **iter);
3830
3831 #define netdev_for_each_lower_private(dev, priv, iter) \
3832 for (iter = (dev)->adj_list.lower.next, \
3833 priv = netdev_lower_get_next_private(dev, &(iter)); \
3834 priv; \
3835 priv = netdev_lower_get_next_private(dev, &(iter)))
3836
3837 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \
3838 for (iter = &(dev)->adj_list.lower, \
3839 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
3840 priv; \
3841 priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
3842
3843 void *netdev_lower_get_next(struct net_device *dev,
3844 struct list_head **iter);
3845
3846 #define netdev_for_each_lower_dev(dev, ldev, iter) \
3847 for (iter = (dev)->adj_list.lower.next, \
3848 ldev = netdev_lower_get_next(dev, &(iter)); \
3849 ldev; \
3850 ldev = netdev_lower_get_next(dev, &(iter)))
3851
3852 struct net_device *netdev_all_lower_get_next(struct net_device *dev,
3853 struct list_head **iter);
3854 struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
3855 struct list_head **iter);
3856
3857 #define netdev_for_each_all_lower_dev(dev, ldev, iter) \
3858 for (iter = (dev)->all_adj_list.lower.next, \
3859 ldev = netdev_all_lower_get_next(dev, &(iter)); \
3860 ldev; \
3861 ldev = netdev_all_lower_get_next(dev, &(iter)))
3862
3863 #define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \
3864 for (iter = (dev)->all_adj_list.lower.next, \
3865 ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \
3866 ldev; \
3867 ldev = netdev_all_lower_get_next_rcu(dev, &(iter)))
3868
3869 void *netdev_adjacent_get_private(struct list_head *adj_list);
3870 void *netdev_lower_get_first_private_rcu(struct net_device *dev);
3871 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
3872 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
3873 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
3874 int netdev_master_upper_dev_link(struct net_device *dev,
3875 struct net_device *upper_dev,
3876 void *upper_priv, void *upper_info);
3877 void netdev_upper_dev_unlink(struct net_device *dev,
3878 struct net_device *upper_dev);
3879 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
3880 void *netdev_lower_dev_get_private(struct net_device *dev,
3881 struct net_device *lower_dev);
3882 void netdev_lower_state_changed(struct net_device *lower_dev,
3883 void *lower_state_info);
3884 int netdev_default_l2upper_neigh_construct(struct net_device *dev,
3885 struct neighbour *n);
3886 void netdev_default_l2upper_neigh_destroy(struct net_device *dev,
3887 struct neighbour *n);
3888
3889 /* RSS keys are 40 or 52 bytes long */
3890 #define NETDEV_RSS_KEY_LEN 52
3891 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
3892 void netdev_rss_key_fill(void *buffer, size_t len);
3893
3894 int dev_get_nest_level(struct net_device *dev,
3895 bool (*type_check)(const struct net_device *dev));
3896 int skb_checksum_help(struct sk_buff *skb);
3897 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3898 netdev_features_t features, bool tx_path);
3899 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
3900 netdev_features_t features);
3901
3902 struct netdev_bonding_info {
3903 ifslave slave;
3904 ifbond master;
3905 };
3906
3907 struct netdev_notifier_bonding_info {
3908 struct netdev_notifier_info info; /* must be first */
3909 struct netdev_bonding_info bonding_info;
3910 };
3911
3912 void netdev_bonding_info_change(struct net_device *dev,
3913 struct netdev_bonding_info *bonding_info);
3914
3915 static inline
3916 struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
3917 {
3918 return __skb_gso_segment(skb, features, true);
3919 }
3920 __be16 skb_network_protocol(struct sk_buff *skb, int *depth);
3921
3922 static inline bool can_checksum_protocol(netdev_features_t features,
3923 __be16 protocol)
3924 {
3925 if (protocol == htons(ETH_P_FCOE))
3926 return !!(features & NETIF_F_FCOE_CRC);
3927
3928 /* Assume this is an IP checksum (not SCTP CRC) */
3929
3930 if (features & NETIF_F_HW_CSUM) {
3931 /* Can checksum everything */
3932 return true;
3933 }
3934
3935 switch (protocol) {
3936 case htons(ETH_P_IP):
3937 return !!(features & NETIF_F_IP_CSUM);
3938 case htons(ETH_P_IPV6):
3939 return !!(features & NETIF_F_IPV6_CSUM);
3940 default:
3941 return false;
3942 }
3943 }
3944
3945 /* Map an ethertype into IP protocol if possible */
3946 static inline int eproto_to_ipproto(int eproto)
3947 {
3948 switch (eproto) {
3949 case htons(ETH_P_IP):
3950 return IPPROTO_IP;
3951 case htons(ETH_P_IPV6):
3952 return IPPROTO_IPV6;
3953 default:
3954 return -1;
3955 }
3956 }
3957
3958 #ifdef CONFIG_BUG
3959 void netdev_rx_csum_fault(struct net_device *dev);
3960 #else
3961 static inline void netdev_rx_csum_fault(struct net_device *dev)
3962 {
3963 }
3964 #endif
3965 /* rx skb timestamps */
3966 void net_enable_timestamp(void);
3967 void net_disable_timestamp(void);
3968
3969 #ifdef CONFIG_PROC_FS
3970 int __init dev_proc_init(void);
3971 #else
3972 #define dev_proc_init() 0
3973 #endif
3974
3975 static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
3976 struct sk_buff *skb, struct net_device *dev,
3977 bool more)
3978 {
3979 skb->xmit_more = more ? 1 : 0;
3980 return ops->ndo_start_xmit(skb, dev);
3981 }
3982
3983 static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
3984 struct netdev_queue *txq, bool more)
3985 {
3986 const struct net_device_ops *ops = dev->netdev_ops;
3987 int rc;
3988
3989 rc = __netdev_start_xmit(ops, skb, dev, more);
3990 if (rc == NETDEV_TX_OK)
3991 txq_trans_update(txq);
3992
3993 return rc;
3994 }
3995
3996 int netdev_class_create_file_ns(struct class_attribute *class_attr,
3997 const void *ns);
3998 void netdev_class_remove_file_ns(struct class_attribute *class_attr,
3999 const void *ns);
4000
4001 static inline int netdev_class_create_file(struct class_attribute *class_attr)
4002 {
4003 return netdev_class_create_file_ns(class_attr, NULL);
4004 }
4005
4006 static inline void netdev_class_remove_file(struct class_attribute *class_attr)
4007 {
4008 netdev_class_remove_file_ns(class_attr, NULL);
4009 }
4010
4011 extern struct kobj_ns_type_operations net_ns_type_operations;
4012
4013 const char *netdev_drivername(const struct net_device *dev);
4014
4015 void linkwatch_run_queue(void);
4016
4017 static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
4018 netdev_features_t f2)
4019 {
4020 if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
4021 if (f1 & NETIF_F_HW_CSUM)
4022 f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4023 else
4024 f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4025 }
4026
4027 return f1 & f2;
4028 }
4029
4030 static inline netdev_features_t netdev_get_wanted_features(
4031 struct net_device *dev)
4032 {
4033 return (dev->features & ~dev->hw_features) | dev->wanted_features;
4034 }
4035 netdev_features_t netdev_increment_features(netdev_features_t all,
4036 netdev_features_t one, netdev_features_t mask);
4037
4038 /* Allow TSO to be used on stacked devices:
4039 * performing the GSO segmentation before the last device
4040 * is a performance improvement.
4041 */
4042 static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
4043 netdev_features_t mask)
4044 {
4045 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
4046 }
4047
4048 int __netdev_update_features(struct net_device *dev);
4049 void netdev_update_features(struct net_device *dev);
4050 void netdev_change_features(struct net_device *dev);
4051
4052 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
4053 struct net_device *dev);
4054
4055 netdev_features_t passthru_features_check(struct sk_buff *skb,
4056 struct net_device *dev,
4057 netdev_features_t features);
4058 netdev_features_t netif_skb_features(struct sk_buff *skb);
4059
4060 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
4061 {
4062 netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
4063
4064 /* check flags correspondence */
4065 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
4066 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
4067 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
4068 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
4069 BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
4070 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
4071 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
4072 BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
4073 BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
4074 BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
4075 BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
4076 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
4077 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
4078 BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
4079 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
4080 BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
4081
4082 return (features & feature) == feature;
4083 }
4084
4085 static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
4086 {
4087 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
4088 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
4089 }
4090
4091 static inline bool netif_needs_gso(struct sk_buff *skb,
4092 netdev_features_t features)
4093 {
4094 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
4095 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
4096 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
4097 }
4098
4099 static inline void netif_set_gso_max_size(struct net_device *dev,
4100 unsigned int size)
4101 {
4102 dev->gso_max_size = size;
4103 }
4104
4105 static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
4106 int pulled_hlen, u16 mac_offset,
4107 int mac_len)
4108 {
4109 skb->protocol = protocol;
4110 skb->encapsulation = 1;
4111 skb_push(skb, pulled_hlen);
4112 skb_reset_transport_header(skb);
4113 skb->mac_header = mac_offset;
4114 skb->network_header = skb->mac_header + mac_len;
4115 skb->mac_len = mac_len;
4116 }
4117
4118 static inline bool netif_is_macsec(const struct net_device *dev)
4119 {
4120 return dev->priv_flags & IFF_MACSEC;
4121 }
4122
4123 static inline bool netif_is_macvlan(const struct net_device *dev)
4124 {
4125 return dev->priv_flags & IFF_MACVLAN;
4126 }
4127
4128 static inline bool netif_is_macvlan_port(const struct net_device *dev)
4129 {
4130 return dev->priv_flags & IFF_MACVLAN_PORT;
4131 }
4132
4133 static inline bool netif_is_ipvlan(const struct net_device *dev)
4134 {
4135 return dev->priv_flags & IFF_IPVLAN_SLAVE;
4136 }
4137
4138 static inline bool netif_is_ipvlan_port(const struct net_device *dev)
4139 {
4140 return dev->priv_flags & IFF_IPVLAN_MASTER;
4141 }
4142
4143 static inline bool netif_is_bond_master(const struct net_device *dev)
4144 {
4145 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
4146 }
4147
4148 static inline bool netif_is_bond_slave(const struct net_device *dev)
4149 {
4150 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
4151 }
4152
4153 static inline bool netif_supports_nofcs(struct net_device *dev)
4154 {
4155 return dev->priv_flags & IFF_SUPP_NOFCS;
4156 }
4157
4158 static inline bool netif_is_l3_master(const struct net_device *dev)
4159 {
4160 return dev->priv_flags & IFF_L3MDEV_MASTER;
4161 }
4162
4163 static inline bool netif_is_l3_slave(const struct net_device *dev)
4164 {
4165 return dev->priv_flags & IFF_L3MDEV_SLAVE;
4166 }
4167
4168 static inline bool netif_is_bridge_master(const struct net_device *dev)
4169 {
4170 return dev->priv_flags & IFF_EBRIDGE;
4171 }
4172
4173 static inline bool netif_is_bridge_port(const struct net_device *dev)
4174 {
4175 return dev->priv_flags & IFF_BRIDGE_PORT;
4176 }
4177
4178 static inline bool netif_is_ovs_master(const struct net_device *dev)
4179 {
4180 return dev->priv_flags & IFF_OPENVSWITCH;
4181 }
4182
4183 static inline bool netif_is_team_master(const struct net_device *dev)
4184 {
4185 return dev->priv_flags & IFF_TEAM;
4186 }
4187
4188 static inline bool netif_is_team_port(const struct net_device *dev)
4189 {
4190 return dev->priv_flags & IFF_TEAM_PORT;
4191 }
4192
4193 static inline bool netif_is_lag_master(const struct net_device *dev)
4194 {
4195 return netif_is_bond_master(dev) || netif_is_team_master(dev);
4196 }
4197
4198 static inline bool netif_is_lag_port(const struct net_device *dev)
4199 {
4200 return netif_is_bond_slave(dev) || netif_is_team_port(dev);
4201 }
4202
4203 static inline bool netif_is_rxfh_configured(const struct net_device *dev)
4204 {
4205 return dev->priv_flags & IFF_RXFH_CONFIGURED;
4206 }
4207
4208 /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
4209 static inline void netif_keep_dst(struct net_device *dev)
4210 {
4211 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
4212 }
4213
4214 /* return true if dev can't cope with mtu frames that need vlan tag insertion */
4215 static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
4216 {
4217 /* TODO: reserve and use an additional IFF bit, if we get more users */
4218 return dev->priv_flags & IFF_MACSEC;
4219 }
4220
4221 extern struct pernet_operations __net_initdata loopback_net_ops;
4222
4223 /* Logging, debugging and troubleshooting/diagnostic helpers. */
4224
4225 /* netdev_printk helpers, similar to dev_printk */
4226
4227 static inline const char *netdev_name(const struct net_device *dev)
4228 {
4229 if (!dev->name[0] || strchr(dev->name, '%'))
4230 return "(unnamed net_device)";
4231 return dev->name;
4232 }
4233
4234 static inline const char *netdev_reg_state(const struct net_device *dev)
4235 {
4236 switch (dev->reg_state) {
4237 case NETREG_UNINITIALIZED: return " (uninitialized)";
4238 case NETREG_REGISTERED: return "";
4239 case NETREG_UNREGISTERING: return " (unregistering)";
4240 case NETREG_UNREGISTERED: return " (unregistered)";
4241 case NETREG_RELEASED: return " (released)";
4242 case NETREG_DUMMY: return " (dummy)";
4243 }
4244
4245 WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
4246 return " (unknown)";
4247 }
4248
4249 __printf(3, 4)
4250 void netdev_printk(const char *level, const struct net_device *dev,
4251 const char *format, ...);
4252 __printf(2, 3)
4253 void netdev_emerg(const struct net_device *dev, const char *format, ...);
4254 __printf(2, 3)
4255 void netdev_alert(const struct net_device *dev, const char *format, ...);
4256 __printf(2, 3)
4257 void netdev_crit(const struct net_device *dev, const char *format, ...);
4258 __printf(2, 3)
4259 void netdev_err(const struct net_device *dev, const char *format, ...);
4260 __printf(2, 3)
4261 void netdev_warn(const struct net_device *dev, const char *format, ...);
4262 __printf(2, 3)
4263 void netdev_notice(const struct net_device *dev, const char *format, ...);
4264 __printf(2, 3)
4265 void netdev_info(const struct net_device *dev, const char *format, ...);
4266
4267 #define MODULE_ALIAS_NETDEV(device) \
4268 MODULE_ALIAS("netdev-" device)
4269
4270 #if defined(CONFIG_DYNAMIC_DEBUG)
4271 #define netdev_dbg(__dev, format, args...) \
4272 do { \
4273 dynamic_netdev_dbg(__dev, format, ##args); \
4274 } while (0)
4275 #elif defined(DEBUG)
4276 #define netdev_dbg(__dev, format, args...) \
4277 netdev_printk(KERN_DEBUG, __dev, format, ##args)
4278 #else
4279 #define netdev_dbg(__dev, format, args...) \
4280 ({ \
4281 if (0) \
4282 netdev_printk(KERN_DEBUG, __dev, format, ##args); \
4283 })
4284 #endif
4285
4286 #if defined(VERBOSE_DEBUG)
4287 #define netdev_vdbg netdev_dbg
4288 #else
4289
4290 #define netdev_vdbg(dev, format, args...) \
4291 ({ \
4292 if (0) \
4293 netdev_printk(KERN_DEBUG, dev, format, ##args); \
4294 0; \
4295 })
4296 #endif
4297
4298 /*
4299 * netdev_WARN() acts like dev_printk(), but with the key difference
4300 * of using a WARN/WARN_ON to get the message out, including the
4301 * file/line information and a backtrace.
4302 */
4303 #define netdev_WARN(dev, format, args...) \
4304 WARN(1, "netdevice: %s%s\n" format, netdev_name(dev), \
4305 netdev_reg_state(dev), ##args)
4306
4307 /* netif printk helpers, similar to netdev_printk */
4308
4309 #define netif_printk(priv, type, level, dev, fmt, args...) \
4310 do { \
4311 if (netif_msg_##type(priv)) \
4312 netdev_printk(level, (dev), fmt, ##args); \
4313 } while (0)
4314
4315 #define netif_level(level, priv, type, dev, fmt, args...) \
4316 do { \
4317 if (netif_msg_##type(priv)) \
4318 netdev_##level(dev, fmt, ##args); \
4319 } while (0)
4320
4321 #define netif_emerg(priv, type, dev, fmt, args...) \
4322 netif_level(emerg, priv, type, dev, fmt, ##args)
4323 #define netif_alert(priv, type, dev, fmt, args...) \
4324 netif_level(alert, priv, type, dev, fmt, ##args)
4325 #define netif_crit(priv, type, dev, fmt, args...) \
4326 netif_level(crit, priv, type, dev, fmt, ##args)
4327 #define netif_err(priv, type, dev, fmt, args...) \
4328 netif_level(err, priv, type, dev, fmt, ##args)
4329 #define netif_warn(priv, type, dev, fmt, args...) \
4330 netif_level(warn, priv, type, dev, fmt, ##args)
4331 #define netif_notice(priv, type, dev, fmt, args...) \
4332 netif_level(notice, priv, type, dev, fmt, ##args)
4333 #define netif_info(priv, type, dev, fmt, args...) \
4334 netif_level(info, priv, type, dev, fmt, ##args)
4335
4336 #if defined(CONFIG_DYNAMIC_DEBUG)
4337 #define netif_dbg(priv, type, netdev, format, args...) \
4338 do { \
4339 if (netif_msg_##type(priv)) \
4340 dynamic_netdev_dbg(netdev, format, ##args); \
4341 } while (0)
4342 #elif defined(DEBUG)
4343 #define netif_dbg(priv, type, dev, format, args...) \
4344 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
4345 #else
4346 #define netif_dbg(priv, type, dev, format, args...) \
4347 ({ \
4348 if (0) \
4349 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
4350 0; \
4351 })
4352 #endif
4353
4354 #if defined(VERBOSE_DEBUG)
4355 #define netif_vdbg netif_dbg
4356 #else
4357 #define netif_vdbg(priv, type, dev, format, args...) \
4358 ({ \
4359 if (0) \
4360 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
4361 0; \
4362 })
4363 #endif
4364
4365 /*
4366 * The list of packet types we will receive (as opposed to discard)
4367 * and the routines to invoke.
4368 *
4369 * Why 16. Because with 16 the only overlap we get on a hash of the
4370 * low nibble of the protocol value is RARP/SNAP/X.25.
4371 *
4372 * NOTE: That is no longer true with the addition of VLAN tags. Not
4373 * sure which should go first, but I bet it won't make much
4374 * difference if we are running VLANs. The good news is that
4375 * this protocol won't be in the list unless compiled in, so
4376 * the average user (w/out VLANs) will not be adversely affected.
4377 * --BLG
4378 *
4379 * 0800 IP
4380 * 8100 802.1Q VLAN
4381 * 0001 802.3
4382 * 0002 AX.25
4383 * 0004 802.2
4384 * 8035 RARP
4385 * 0005 SNAP
4386 * 0805 X.25
4387 * 0806 ARP
4388 * 8137 IPX
4389 * 0009 Localtalk
4390 * 86DD IPv6
4391 */
4392 #define PTYPE_HASH_SIZE (16)
4393 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
4394
4395 #endif /* _LINUX_NETDEVICE_H */
1 /*
2 * pci.h
3 *
4 * PCI defines and function prototypes
5 * Copyright 1994, Drew Eckhardt
6 * Copyright 1997--1999 Martin Mares <mj@ucw.cz>
7 *
8 * For more information, please consult the following manuals (look at
9 * http://www.pcisig.com/ for how to get them):
10 *
11 * PCI BIOS Specification
12 * PCI Local Bus Specification
13 * PCI to PCI Bridge Specification
14 * PCI System Design Guide
15 */
16 #ifndef LINUX_PCI_H
17 #define LINUX_PCI_H
18
19
20 #include <linux/mod_devicetable.h>
21
22 #include <linux/types.h>
23 #include <linux/init.h>
24 #include <linux/ioport.h>
25 #include <linux/list.h>
26 #include <linux/compiler.h>
27 #include <linux/errno.h>
28 #include <linux/kobject.h>
29 #include <linux/atomic.h>
30 #include <linux/device.h>
31 #include <linux/io.h>
32 #include <linux/resource_ext.h>
33 #include <uapi/linux/pci.h>
34
35 #include <linux/pci_ids.h>
36
37 /*
38 * The PCI interface treats multi-function devices as independent
39 * devices. The slot/function address of each device is encoded
40 * in a single byte as follows:
41 *
42 * 7:3 = slot
43 * 2:0 = function
44 *
45 * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
46 * In the interest of not exposing interfaces to user-space unnecessarily,
47 * the following kernel-only defines are being added here.
48 */
49 #define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
50 /* return bus from PCI devid = (((u16)bus_number) << 8) | devfn */
51 #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
52
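/*
 * Illustrative sketch, not part of the traced header: composing and
 * decomposing the bus/devfn encoding with the macros above. PCI_SLOT() and
 * PCI_FUNC() come from uapi/linux/pci.h, as the comment notes.
 */
static void my_show_devid(u8 bus, u8 devfn)
{
	u16 devid = PCI_DEVID(bus, devfn);

	pr_info("devid %#x -> bus %u slot %u func %u\n",
		devid, PCI_BUS_NUM(devid), PCI_SLOT(devfn), PCI_FUNC(devfn));
}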
53 /* pci_slot represents a physical slot */
54 struct pci_slot {
55 struct pci_bus *bus; /* The bus this slot is on */
56 struct list_head list; /* node in list of slots on this bus */
57 struct hotplug_slot *hotplug; /* Hotplug info (migrate over time) */
58 unsigned char number; /* PCI_SLOT(pci_dev->devfn) */
59 struct kobject kobj;
60 };
61
62 static inline const char *pci_slot_name(const struct pci_slot *slot)
63 {
64 return kobject_name(&slot->kobj);
65 }
66
67 /* File state for mmap()s on /proc/bus/pci/X/Y */
68 enum pci_mmap_state {
69 pci_mmap_io,
70 pci_mmap_mem
71 };
72
73 /*
74 * For PCI devices, the region numbers are assigned this way:
75 */
76 enum {
77 /* #0-5: standard PCI resources */
78 PCI_STD_RESOURCES,
79 PCI_STD_RESOURCE_END = 5,
80
81 /* #6: expansion ROM resource */
82 PCI_ROM_RESOURCE,
83
84 /* device specific resources */
85 #ifdef CONFIG_PCI_IOV
86 PCI_IOV_RESOURCES,
87 PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
88 #endif
89
90 /* resources assigned to buses behind the bridge */
91 #define PCI_BRIDGE_RESOURCE_NUM 4
92
93 PCI_BRIDGE_RESOURCES,
94 PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
95 PCI_BRIDGE_RESOURCE_NUM - 1,
96
97 /* total resources associated with a PCI device */
98 PCI_NUM_RESOURCES,
99
100 /* preserve this for compatibility */
101 DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
102 };
103
104 /*
105 * pci_power_t values must match the bits in the Capabilities PME_Support
106 * and Control/Status PowerState fields in the Power Management capability.
107 */
108 typedef int __bitwise pci_power_t;
109
110 #define PCI_D0 ((pci_power_t __force) 0)
111 #define PCI_D1 ((pci_power_t __force) 1)
112 #define PCI_D2 ((pci_power_t __force) 2)
113 #define PCI_D3hot ((pci_power_t __force) 3)
114 #define PCI_D3cold ((pci_power_t __force) 4)
115 #define PCI_UNKNOWN ((pci_power_t __force) 5)
116 #define PCI_POWER_ERROR ((pci_power_t __force) -1)
117
118 /* Remember to update this when the list above changes! */
119 extern const char *pci_power_names[];
120
121 static inline const char *pci_power_name(pci_power_t state)
122 {
123 return pci_power_names[1 + (__force int) state];
124 }
125
126 #define PCI_PM_D2_DELAY 200
127 #define PCI_PM_D3_WAIT 10
128 #define PCI_PM_D3COLD_WAIT 100
129 #define PCI_PM_BUS_WAIT 50
130
131 /** The pci_channel state describes connectivity between the CPU and
132 * the pci device. If some PCI bus between here and the pci device
133 * has crashed or locked up, this info is reflected here.
134 */
135 typedef unsigned int __bitwise pci_channel_state_t;
136
137 enum pci_channel_state {
138 /* I/O channel is in normal state */
139 pci_channel_io_normal = (__force pci_channel_state_t) 1,
140
141 /* I/O to channel is blocked */
142 pci_channel_io_frozen = (__force pci_channel_state_t) 2,
143
144 /* PCI card is dead */
145 pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
146 };
147
148 typedef unsigned int __bitwise pcie_reset_state_t;
149
150 enum pcie_reset_state {
151 /* Reset is NOT asserted (Use to deassert reset) */
152 pcie_deassert_reset = (__force pcie_reset_state_t) 1,
153
154 /* Use #PERST to reset PCIe device */
155 pcie_warm_reset = (__force pcie_reset_state_t) 2,
156
157 /* Use PCIe Hot Reset to reset device */
158 pcie_hot_reset = (__force pcie_reset_state_t) 3
159 };
160
161 typedef unsigned short __bitwise pci_dev_flags_t;
162 enum pci_dev_flags {
163 /* INTX_DISABLE in PCI_COMMAND register disables MSI
164 * generation too.
165 */
166 PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
167 /* Device configuration is irrevocably lost if disabled into D3 */
168 PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
169 /* Provide indication device is assigned by a Virtual Machine Manager */
170 PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
171 /* Flag for quirk use to store if quirk-specific ACS is enabled */
172 PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
173 /* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
174 PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
175 /* Do not use bus resets for device */
176 PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
177 /* Do not use PM reset even if device advertises NoSoftRst- */
178 PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
179 /* Get VPD from function 0 VPD */
180 PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
181 };
182
183 enum pci_irq_reroute_variant {
184 INTEL_IRQ_REROUTE_VARIANT = 1,
185 MAX_IRQ_REROUTE_VARIANTS = 3
186 };
187
188 typedef unsigned short __bitwise pci_bus_flags_t;
189 enum pci_bus_flags {
190 PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
191 PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
192 };
193
194 /* These values come from the PCI Express Spec */
195 enum pcie_link_width {
196 PCIE_LNK_WIDTH_RESRV = 0x00,
197 PCIE_LNK_X1 = 0x01,
198 PCIE_LNK_X2 = 0x02,
199 PCIE_LNK_X4 = 0x04,
200 PCIE_LNK_X8 = 0x08,
201 PCIE_LNK_X12 = 0x0C,
202 PCIE_LNK_X16 = 0x10,
203 PCIE_LNK_X32 = 0x20,
204 PCIE_LNK_WIDTH_UNKNOWN = 0xFF,
205 };
206
207 /* Based on the PCI Hotplug Spec, but some values are made up by us */
208 enum pci_bus_speed {
209 PCI_SPEED_33MHz = 0x00,
210 PCI_SPEED_66MHz = 0x01,
211 PCI_SPEED_66MHz_PCIX = 0x02,
212 PCI_SPEED_100MHz_PCIX = 0x03,
213 PCI_SPEED_133MHz_PCIX = 0x04,
214 PCI_SPEED_66MHz_PCIX_ECC = 0x05,
215 PCI_SPEED_100MHz_PCIX_ECC = 0x06,
216 PCI_SPEED_133MHz_PCIX_ECC = 0x07,
217 PCI_SPEED_66MHz_PCIX_266 = 0x09,
218 PCI_SPEED_100MHz_PCIX_266 = 0x0a,
219 PCI_SPEED_133MHz_PCIX_266 = 0x0b,
220 AGP_UNKNOWN = 0x0c,
221 AGP_1X = 0x0d,
222 AGP_2X = 0x0e,
223 AGP_4X = 0x0f,
224 AGP_8X = 0x10,
225 PCI_SPEED_66MHz_PCIX_533 = 0x11,
226 PCI_SPEED_100MHz_PCIX_533 = 0x12,
227 PCI_SPEED_133MHz_PCIX_533 = 0x13,
228 PCIE_SPEED_2_5GT = 0x14,
229 PCIE_SPEED_5_0GT = 0x15,
230 PCIE_SPEED_8_0GT = 0x16,
231 PCI_SPEED_UNKNOWN = 0xff,
232 };
233
234 struct pci_cap_saved_data {
235 u16 cap_nr;
236 bool cap_extended;
237 unsigned int size;
238 u32 data[0];
239 };
240
241 struct pci_cap_saved_state {
242 struct hlist_node next;
243 struct pci_cap_saved_data cap;
244 };
245
246 struct pcie_link_state;
247 struct pci_vpd;
248 struct pci_sriov;
249 struct pci_ats;
250
251 /*
252 * The pci_dev structure is used to describe PCI devices.
253 */
254 struct pci_dev {
255 struct list_head bus_list; /* node in per-bus list */
256 struct pci_bus *bus; /* bus this device is on */
257 struct pci_bus *subordinate; /* bus this device bridges to */
258
259 void *sysdata; /* hook for sys-specific extension */
260 struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */
261 struct pci_slot *slot; /* Physical slot this device is in */
262
263 unsigned int devfn; /* encoded device & function index */
264 unsigned short vendor;
265 unsigned short device;
266 unsigned short subsystem_vendor;
267 unsigned short subsystem_device;
268 unsigned int class; /* 3 bytes: (base,sub,prog-if) */
269 u8 revision; /* PCI revision, low byte of class word */
270 u8 hdr_type; /* PCI header type (`multi' flag masked out) */
271 u8 pcie_cap; /* PCIe capability offset */
272 u8 msi_cap; /* MSI capability offset */
273 u8 msix_cap; /* MSI-X capability offset */
274 u8 pcie_mpss:3; /* PCIe Max Payload Size Supported */
275 u8 rom_base_reg; /* which config register controls the ROM */
276 u8 pin; /* which interrupt pin this device uses */
277 u16 pcie_flags_reg; /* cached PCIe Capabilities Register */
278 unsigned long *dma_alias_mask;/* mask of enabled devfn aliases */
279
280 struct pci_driver *driver; /* which driver has allocated this device */
281 u64 dma_mask; /* Mask of the bits of bus address this
282 device implements. Normally this is
283 0xffffffff. You only need to change
284 this if your device has broken DMA
285 or supports 64-bit transfers. */
286
287 struct device_dma_parameters dma_parms;
288
289 pci_power_t current_state; /* Current operating state. In ACPI-speak,
290 this is D0-D3, D0 being fully functional,
291 and D3 being off. */
292 u8 pm_cap; /* PM capability offset */
293 unsigned int pme_support:5; /* Bitmask of states from which PME#
294 can be generated */
295 unsigned int pme_interrupt:1;
296 unsigned int pme_poll:1; /* Poll device's PME status bit */
297 unsigned int d1_support:1; /* Low power state D1 is supported */
298 unsigned int d2_support:1; /* Low power state D2 is supported */
299 unsigned int no_d1d2:1; /* D1 and D2 are forbidden */
300 unsigned int no_d3cold:1; /* D3cold is forbidden */
301 unsigned int bridge_d3:1; /* Allow D3 for bridge */
302 unsigned int d3cold_allowed:1; /* D3cold is allowed by user */
303 unsigned int mmio_always_on:1; /* disallow turning off io/mem
304 decoding during bar sizing */
305 unsigned int wakeup_prepared:1;
306 unsigned int runtime_d3cold:1; /* whether go through runtime
307 D3cold, not set for devices
308 powered on/off by the
309 corresponding bridge */
310 unsigned int ignore_hotplug:1; /* Ignore hotplug events */
311 unsigned int d3_delay; /* D3->D0 transition time in ms */
312 unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
313
314 #ifdef CONFIG_PCIEASPM
315 struct pcie_link_state *link_state; /* ASPM link state */
316 #endif
317
318 pci_channel_state_t error_state; /* current connectivity state */
319 struct device dev; /* Generic device interface */
320
321 int cfg_size; /* Size of configuration space */
322
323 /*
324 * Instead of touching interrupt line and base address registers
325 * directly, use the values stored here. They might be different!
326 */
327 unsigned int irq;
328 struct cpumask *irq_affinity;
329 struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
330
331 bool match_driver; /* Skip attaching driver */
332 /* These fields are used by common fixups */
333 unsigned int transparent:1; /* Subtractive decode PCI bridge */
334 unsigned int multifunction:1;/* Part of multi-function device */
335 /* keep track of device state */
336 unsigned int is_added:1;
337 unsigned int is_busmaster:1; /* device is busmaster */
338 unsigned int no_msi:1; /* device may not use msi */
339 unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */
340 unsigned int block_cfg_access:1; /* config space access is blocked */
341 unsigned int broken_parity_status:1; /* Device generates false positive parity */
342 unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
343 unsigned int msi_enabled:1;
344 unsigned int msix_enabled:1;
345 unsigned int ari_enabled:1; /* ARI forwarding */
346 unsigned int ats_enabled:1; /* Address Translation Service */
347 unsigned int is_managed:1;
348 unsigned int needs_freset:1; /* Dev requires fundamental reset */
349 unsigned int state_saved:1;
350 unsigned int is_physfn:1;
351 unsigned int is_virtfn:1;
352 unsigned int reset_fn:1;
353 unsigned int is_hotplug_bridge:1;
354 unsigned int __aer_firmware_first_valid:1;
355 unsigned int __aer_firmware_first:1;
356 unsigned int broken_intx_masking:1;
357 unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
358 unsigned int irq_managed:1;
359 unsigned int has_secondary_link:1;
360 unsigned int non_compliant_bars:1; /* broken BARs; ignore them */
361 pci_dev_flags_t dev_flags;
362 atomic_t enable_cnt; /* pci_enable_device has been called */
363
364 u32 saved_config_space[16]; /* config space saved at suspend time */
365 struct hlist_head saved_cap_space;
366 struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */
367 int rom_attr_enabled; /* has display of the rom attribute been enabled? */
368 struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
369 struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
370 #ifdef CONFIG_PCI_MSI
371 const struct attribute_group **msi_irq_groups;
372 #endif
373 struct pci_vpd *vpd;
374 #ifdef CONFIG_PCI_ATS
375 union {
376 struct pci_sriov *sriov; /* SR-IOV capability related */
377 struct pci_dev *physfn; /* the PF this VF is associated with */
378 };
379 u16 ats_cap; /* ATS Capability offset */
380 u8 ats_stu; /* ATS Smallest Translation Unit */
381 atomic_t ats_ref_cnt; /* number of VFs with ATS enabled */
382 #endif
383 phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */
384 size_t romlen; /* Length of ROM if it's not from the BAR */
385 char *driver_override; /* Driver name to force a match */
386 };
387
388 static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
389 {
390 #ifdef CONFIG_PCI_IOV
391 if (dev->is_virtfn)
392 dev = dev->physfn;
393 #endif
394 return dev;
395 }
396
397 struct pci_dev *pci_alloc_dev(struct pci_bus *bus);
398
399 #define to_pci_dev(n) container_of(n, struct pci_dev, dev)
400 #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
401
402 static inline int pci_channel_offline(struct pci_dev *pdev)
403 {
404 return (pdev->error_state != pci_channel_io_normal);
405 }
406
407 struct pci_host_bridge {
408 struct device dev;
409 struct pci_bus *bus; /* root bus */
410 struct list_head windows; /* resource_entry */
411 void (*release_fn)(struct pci_host_bridge *);
412 void *release_data;
413 unsigned int ignore_reset_delay:1; /* for entire hierarchy */
414 /* Resource alignment requirements */
415 resource_size_t (*align_resource)(struct pci_dev *dev,
416 const struct resource *res,
417 resource_size_t start,
418 resource_size_t size,
419 resource_size_t align);
420 };
421
422 #define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
423
424 struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
425
426 void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
427 void (*release_fn)(struct pci_host_bridge *),
428 void *release_data);
429
430 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
431
432 /*
433 * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond
434 * to P2P or CardBus bridge windows) go in a table. Additional ones (for
435 * buses below host bridges or subtractive decode bridges) go in the list.
436 * Use pci_bus_for_each_resource() to iterate through all the resources.
437 */
438
439 /*
440 * PCI_SUBTRACTIVE_DECODE means the bridge forwards the window implicitly
441 * and there's no way to program the bridge with the details of the window.
442 * This does not apply to ACPI _CRS windows, even with the _DEC subtractive-
443 * decode bit set, because they are explicit and can be programmed with _SRS.
444 */
445 #define PCI_SUBTRACTIVE_DECODE 0x1
446
447 struct pci_bus_resource {
448 struct list_head list;
449 struct resource *res;
450 unsigned int flags;
451 };
452
453 #define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */
454
455 struct pci_bus {
456 struct list_head node; /* node in list of buses */
457 struct pci_bus *parent; /* parent bus this bridge is on */
458 struct list_head children; /* list of child buses */
459 struct list_head devices; /* list of devices on this bus */
460 struct pci_dev *self; /* bridge device as seen by parent */
461 struct list_head slots; /* list of slots on this bus;
462 protected by pci_slot_mutex */
463 struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
464 struct list_head resources; /* address space routed to this bus */
465 struct resource busn_res; /* bus numbers routed to this bus */
466
467 struct pci_ops *ops; /* configuration access functions */
468 struct msi_controller *msi; /* MSI controller */
469 void *sysdata; /* hook for sys-specific extension */
470 struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */
471
472 unsigned char number; /* bus number */
473 unsigned char primary; /* number of primary bridge */
474 unsigned char max_bus_speed; /* enum pci_bus_speed */
475 unsigned char cur_bus_speed; /* enum pci_bus_speed */
476 #ifdef CONFIG_PCI_DOMAINS_GENERIC
477 int domain_nr;
478 #endif
479
480 char name[48];
481
482 unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */
483 pci_bus_flags_t bus_flags; /* inherited by child buses */
484 struct device *bridge;
485 struct device dev;
486 struct bin_attribute *legacy_io; /* legacy I/O for this bus */
487 struct bin_attribute *legacy_mem; /* legacy mem */
488 unsigned int is_added:1;
489 };
490
491 #define to_pci_bus(n) container_of(n, struct pci_bus, dev)
492
493 /*
494 * Returns true if the PCI bus is root (behind host-PCI bridge),
495 * false otherwise
496 *
497 * Some code assumes that "bus->self == NULL" means that bus is a root bus.
498 * This is incorrect because "virtual" buses added for SR-IOV (via
499 * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
500 */
501 static inline bool pci_is_root_bus(struct pci_bus *pbus)
502 {
503 return !(pbus->parent);
504 }
505
506 /**
507 * pci_is_bridge - check if the PCI device is a bridge
508 * @dev: PCI device
509 *
510 * Return true if the PCI device is a bridge, whether or not it has a
511 * subordinate bus.
512 */
513 static inline bool pci_is_bridge(struct pci_dev *dev)
514 {
515 return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
516 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
517 }
518
519 static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
520 {
521 dev = pci_physfn(dev);
522 if (pci_is_root_bus(dev->bus))
523 return NULL;
524
525 return dev->bus->self;
526 }
527
528 struct device *pci_get_host_bridge_device(struct pci_dev *dev);
529 void pci_put_host_bridge_device(struct device *dev);
530
531 #ifdef CONFIG_PCI_MSI
532 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
533 {
534 return pci_dev->msi_enabled || pci_dev->msix_enabled;
535 }
536 #else
537 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
538 #endif
539
540 /*
541 * Error values that may be returned by PCI functions.
542 */
543 #define PCIBIOS_SUCCESSFUL 0x00
544 #define PCIBIOS_FUNC_NOT_SUPPORTED 0x81
545 #define PCIBIOS_BAD_VENDOR_ID 0x83
546 #define PCIBIOS_DEVICE_NOT_FOUND 0x86
547 #define PCIBIOS_BAD_REGISTER_NUMBER 0x87
548 #define PCIBIOS_SET_FAILED 0x88
549 #define PCIBIOS_BUFFER_TOO_SMALL 0x89
550
551 /*
552 * Translate above to generic errno for passing back through non-PCI code.
553 */
554 static inline int pcibios_err_to_errno(int err)
555 {
556 if (err <= PCIBIOS_SUCCESSFUL)
557 return err; /* Assume already errno */
558
559 switch (err) {
560 case PCIBIOS_FUNC_NOT_SUPPORTED:
561 return -ENOENT;
562 case PCIBIOS_BAD_VENDOR_ID:
563 return -ENOTTY;
564 case PCIBIOS_DEVICE_NOT_FOUND:
565 return -ENODEV;
566 case PCIBIOS_BAD_REGISTER_NUMBER:
567 return -EFAULT;
568 case PCIBIOS_SET_FAILED:
569 return -EIO;
570 case PCIBIOS_BUFFER_TOO_SMALL:
571 return -ENOSPC;
572 }
573
574 return -ERANGE;
575 }
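/*
 * Editor's note: a hedged sketch, not part of the traced source, of the usual
 * pattern for turning a PCIBIOS_* return code into a negative errno before it
 * leaves PCI code. "foo_check_vendor" and the all-ones check are illustrative.
 */
static int foo_check_vendor(struct pci_dev *pdev)
{
	u16 vendor;
	int ret;

	ret = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
	if (ret)
		return pcibios_err_to_errno(ret);	/* PCIBIOS_* -> -errno */

	return vendor == 0xffff ? -ENODEV : 0;		/* all-ones reads mean the device is gone */
}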
576
577 /* Low-level architecture-dependent routines */
578
579 struct pci_ops {
580 int (*add_bus)(struct pci_bus *bus);
581 void (*remove_bus)(struct pci_bus *bus);
582 void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
583 int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
584 int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
585 };
586
587 /*
588 * ACPI needs to be able to access PCI config space before we've done a
589 * PCI bus scan and created pci_bus structures.
590 */
591 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
592 int reg, int len, u32 *val);
593 int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
594 int reg, int len, u32 val);
595
596 #ifdef CONFIG_PCI_BUS_ADDR_T_64BIT
597 typedef u64 pci_bus_addr_t;
598 #else
599 typedef u32 pci_bus_addr_t;
600 #endif
601
602 struct pci_bus_region {
603 pci_bus_addr_t start;
604 pci_bus_addr_t end;
605 };
606
607 struct pci_dynids {
608 spinlock_t lock; /* protects list, index */
609 struct list_head list; /* for IDs added at runtime */
610 };
611
612
613 /*
614 * PCI Error Recovery System (PCI-ERS). If a PCI device driver provides
615 * a set of callbacks in struct pci_error_handlers, that device driver
616 * will be notified of PCI bus errors, and will be driven to recovery
617 * when an error occurs.
618 */
619
620 typedef unsigned int __bitwise pci_ers_result_t;
621
622 enum pci_ers_result {
623 /* no result/none/not supported in device driver */
624 PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
625
626 /* Device driver can recover without slot reset */
627 PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
628
629 /* Device driver wants slot to be reset. */
630 PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
631
632 /* Device has completely failed, is unrecoverable */
633 PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,
634
635 /* Device driver is fully recovered and operational */
636 PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,
637
638 /* No AER capabilities registered for the driver */
639 PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6,
640 };
641
642 /* PCI bus error event callbacks */
643 struct pci_error_handlers {
644 /* PCI bus error detected on this device */
645 pci_ers_result_t (*error_detected)(struct pci_dev *dev,
646 enum pci_channel_state error);
647
648 /* MMIO has been re-enabled, but not DMA */
649 pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
650
651 /* PCI Express link has been reset */
652 pci_ers_result_t (*link_reset)(struct pci_dev *dev);
653
654 /* PCI slot has been reset */
655 pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
656
657 /* PCI function reset prepare or completed */
658 void (*reset_notify)(struct pci_dev *dev, bool prepare);
659
660 /* Device driver may resume normal operations */
661 void (*resume)(struct pci_dev *dev);
662 };
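/*
 * Editor's note: a minimal, hypothetical error-handler table, not part of the
 * traced source. The "foo_*" callbacks are assumed driver functions; they only
 * illustrate returning the pci_ers_result_t values defined above.
 */
static pci_ers_result_t foo_error_detected(struct pci_dev *dev,
					   enum pci_channel_state state)
{
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;	/* device is dead, give up */
	return PCI_ERS_RESULT_NEED_RESET;		/* ask the core for a slot reset */
}

static pci_ers_result_t foo_slot_reset(struct pci_dev *dev)
{
	pci_restore_state(dev);				/* config space saved earlier by the driver */
	return PCI_ERS_RESULT_RECOVERED;
}

static const struct pci_error_handlers foo_err_handler = {
	.error_detected	= foo_error_detected,
	.slot_reset	= foo_slot_reset,
};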
663
664
665 struct module;
666 struct pci_driver {
667 struct list_head node;
668 const char *name;
669 const struct pci_device_id *id_table; /* must be non-NULL for probe to be called */
670 int (*probe) (struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
671 void (*remove) (struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
672 int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */
673 int (*suspend_late) (struct pci_dev *dev, pm_message_t state);
674 int (*resume_early) (struct pci_dev *dev);
675 int (*resume) (struct pci_dev *dev); /* Device woken up */
676 void (*shutdown) (struct pci_dev *dev);
677 int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* PF pdev */
678 const struct pci_error_handlers *err_handler;
679 struct device_driver driver;
680 struct pci_dynids dynids;
681 };
682
683 #define to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
684
685 /**
686 * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table
687 * @_table: device table name
688 *
689 * This macro is deprecated and should not be used in new code.
690 */
691 #define DEFINE_PCI_DEVICE_TABLE(_table) \
692 const struct pci_device_id _table[]
693
694 /**
695 * PCI_DEVICE - macro used to describe a specific pci device
696 * @vend: the 16 bit PCI Vendor ID
697 * @dev: the 16 bit PCI Device ID
698 *
699 * This macro is used to create a struct pci_device_id that matches a
700 * specific device. The subvendor and subdevice fields will be set to
701 * PCI_ANY_ID.
702 */
703 #define PCI_DEVICE(vend,dev) \
704 .vendor = (vend), .device = (dev), \
705 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
706
707 /**
708 * PCI_DEVICE_SUB - macro used to describe a specific pci device with subsystem
709 * @vend: the 16 bit PCI Vendor ID
710 * @dev: the 16 bit PCI Device ID
711 * @subvend: the 16 bit PCI Subvendor ID
712 * @subdev: the 16 bit PCI Subdevice ID
713 *
714 * This macro is used to create a struct pci_device_id that matches a
715 * specific device with subsystem information.
716 */
717 #define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
718 .vendor = (vend), .device = (dev), \
719 .subvendor = (subvend), .subdevice = (subdev)
720
721 /**
722 * PCI_DEVICE_CLASS - macro used to describe a specific pci device class
723 * @dev_class: the class, subclass, prog-if triple for this device
724 * @dev_class_mask: the class mask for this device
725 *
726 * This macro is used to create a struct pci_device_id that matches a
727 * specific PCI class. The vendor, device, subvendor, and subdevice
728 * fields will be set to PCI_ANY_ID.
729 */
730 #define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
731 .class = (dev_class), .class_mask = (dev_class_mask), \
732 .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
733 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
734
735 /**
736 * PCI_VDEVICE - macro used to describe a specific pci device in short form
737 * @vend: the vendor name
738 * @dev: the 16 bit PCI Device ID
739 *
740 * This macro is used to create a struct pci_device_id that matches a
741 * specific PCI device. The subvendor, and subdevice fields will be set
742 * to PCI_ANY_ID. The macro allows the next field to follow as the device
743 * private data.
744 */
745
746 #define PCI_VDEVICE(vend, dev) \
747 .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
748 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
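/*
 * Editor's note: a hypothetical ID table, not part of the traced source,
 * showing the PCI_DEVICE()/PCI_VDEVICE()/PCI_DEVICE_CLASS() helpers together.
 * The IDs 0x1234/0xabcd and the "foo_ids" name are placeholders.
 */
static const struct pci_device_id foo_ids[] = {
	{ PCI_DEVICE(0x1234, 0xabcd) },				/* exact vendor/device match */
	{ PCI_VDEVICE(INTEL, 0xabcd) },				/* PCI_VENDOR_ID_INTEL shorthand */
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, /* match by class code */
	{ }							/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, foo_ids);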
749
750 enum {
751 PCI_REASSIGN_ALL_RSRC = 0x00000001, /* ignore firmware setup */
752 PCI_REASSIGN_ALL_BUS = 0x00000002, /* reassign all bus numbers */
753 PCI_PROBE_ONLY = 0x00000004, /* use existing setup */
754 PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* don't do ISA alignment */
755 PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* enable domains in /proc */
756 PCI_COMPAT_DOMAIN_0 = 0x00000020, /* ... except domain 0 */
757 PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* scan all, not just dev 0 */
758 };
759
760 /* these external functions are only available when PCI support is enabled */
761 #ifdef CONFIG_PCI
762
763 extern unsigned int pci_flags;
764
765 static inline void pci_set_flags(int flags) { pci_flags = flags; }
766 static inline void pci_add_flags(int flags) { pci_flags |= flags; }
767 static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
768 static inline int pci_has_flag(int flag) { return pci_flags & flag; }
769
770 void pcie_bus_configure_settings(struct pci_bus *bus);
771
772 enum pcie_bus_config_types {
773 PCIE_BUS_TUNE_OFF, /* don't touch MPS at all */
774 PCIE_BUS_DEFAULT, /* ensure MPS matches upstream bridge */
775 PCIE_BUS_SAFE, /* use largest MPS boot-time devices support */
776 PCIE_BUS_PERFORMANCE, /* use MPS and MRRS for best performance */
777 PCIE_BUS_PEER2PEER, /* set MPS = 128 for all devices */
778 };
779
780 extern enum pcie_bus_config_types pcie_bus_config;
781
782 extern struct bus_type pci_bus_type;
783
784 /* Do NOT directly access these two variables, unless you are arch-specific PCI
785 * code, or PCI core code. */
786 extern struct list_head pci_root_buses; /* list of all known PCI buses */
787 /* Some device drivers need to know if PCI is initialized */
788 int no_pci_devices(void);
789
790 void pcibios_resource_survey_bus(struct pci_bus *bus);
791 void pcibios_bus_add_device(struct pci_dev *pdev);
792 void pcibios_add_bus(struct pci_bus *bus);
793 void pcibios_remove_bus(struct pci_bus *bus);
794 void pcibios_fixup_bus(struct pci_bus *);
795 int __must_check pcibios_enable_device(struct pci_dev *, int mask);
796 /* Architecture-specific versions may override this (weak) */
797 char *pcibios_setup(char *str);
798
799 /* Used only when drivers/pci/setup.c is used */
800 resource_size_t pcibios_align_resource(void *, const struct resource *,
801 resource_size_t,
802 resource_size_t);
803 void pcibios_update_irq(struct pci_dev *, int irq);
804
805 /* Weak but can be overridden by arch */
806 void pci_fixup_cardbus(struct pci_bus *);
807
808 /* Generic PCI functions used internally */
809
810 void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
811 struct resource *res);
812 void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
813 struct pci_bus_region *region);
814 void pcibios_scan_specific_bus(int busn);
815 struct pci_bus *pci_find_bus(int domain, int busnr);
816 void pci_bus_add_devices(const struct pci_bus *bus);
817 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
818 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
819 struct pci_ops *ops, void *sysdata,
820 struct list_head *resources);
821 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
822 int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
823 void pci_bus_release_busn_res(struct pci_bus *b);
824 struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
825 struct pci_ops *ops, void *sysdata,
826 struct list_head *resources,
827 struct msi_controller *msi);
828 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
829 struct pci_ops *ops, void *sysdata,
830 struct list_head *resources);
831 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
832 int busnr);
833 void pcie_update_link_speed(struct pci_bus *bus, u16 link_status);
834 struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
835 const char *name,
836 struct hotplug_slot *hotplug);
837 void pci_destroy_slot(struct pci_slot *slot);
838 #ifdef CONFIG_SYSFS
839 void pci_dev_assign_slot(struct pci_dev *dev);
840 #else
841 static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
842 #endif
843 int pci_scan_slot(struct pci_bus *bus, int devfn);
844 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
845 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
846 unsigned int pci_scan_child_bus(struct pci_bus *bus);
847 void pci_bus_add_device(struct pci_dev *dev);
848 void pci_read_bridge_bases(struct pci_bus *child);
849 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
850 struct resource *res);
851 struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev);
852 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
853 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
854 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
855 struct pci_dev *pci_dev_get(struct pci_dev *dev);
856 void pci_dev_put(struct pci_dev *dev);
857 void pci_remove_bus(struct pci_bus *b);
858 void pci_stop_and_remove_bus_device(struct pci_dev *dev);
859 void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
860 void pci_stop_root_bus(struct pci_bus *bus);
861 void pci_remove_root_bus(struct pci_bus *bus);
862 void pci_setup_cardbus(struct pci_bus *bus);
863 void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
864 void pci_sort_breadthfirst(void);
865 #define dev_is_pci(d) ((d)->bus == &pci_bus_type)
866 #define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
867 #define dev_num_vf(d) ((dev_is_pci(d) ? pci_num_vf(to_pci_dev(d)) : 0))
868
869 /* Generic PCI functions exported to card drivers */
870
871 enum pci_lost_interrupt_reason {
872 PCI_LOST_IRQ_NO_INFORMATION = 0,
873 PCI_LOST_IRQ_DISABLE_MSI,
874 PCI_LOST_IRQ_DISABLE_MSIX,
875 PCI_LOST_IRQ_DISABLE_ACPI,
876 };
877 enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *dev);
878 int pci_find_capability(struct pci_dev *dev, int cap);
879 int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
880 int pci_find_ext_capability(struct pci_dev *dev, int cap);
881 int pci_find_next_ext_capability(struct pci_dev *dev, int pos, int cap);
882 int pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
883 int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap);
884 struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
885
886 struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
887 struct pci_dev *from);
888 struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
889 unsigned int ss_vendor, unsigned int ss_device,
890 struct pci_dev *from);
891 struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
892 struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
893 unsigned int devfn);
894 static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
895 unsigned int devfn)
896 {
897 return pci_get_domain_bus_and_slot(0, bus, devfn);
898 }
899 struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
900 int pci_dev_present(const struct pci_device_id *ids);
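/*
 * Editor's note: illustrative search loop, not part of the traced source.
 * pci_get_device() drops the reference on the previously returned device on
 * each call, so only an early break would need an explicit pci_dev_put();
 * the vendor ID 0x1234 is a placeholder.
 */
static void foo_scan_vendor(void)
{
	struct pci_dev *pdev = NULL;

	while ((pdev = pci_get_device(0x1234, PCI_ANY_ID, pdev)) != NULL)
		dev_info(&pdev->dev, "found %04x:%04x\n",
			 pdev->vendor, pdev->device);
	/* loop exits with pdev == NULL, so no reference is leaked */
}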
901
902 int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
903 int where, u8 *val);
904 int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn,
905 int where, u16 *val);
906 int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn,
907 int where, u32 *val);
908 int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn,
909 int where, u8 val);
910 int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
911 int where, u16 val);
912 int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
913 int where, u32 val);
914
915 int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
916 int where, int size, u32 *val);
917 int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
918 int where, int size, u32 val);
919 int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
920 int where, int size, u32 *val);
921 int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
922 int where, int size, u32 val);
923
924 struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
925
926 static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
927 {
928 return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
929 }
930 static inline int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
931 {
932 return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
933 }
934 static inline int pci_read_config_dword(const struct pci_dev *dev, int where,
935 u32 *val)
936 {
937 return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
938 }
939 static inline int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
940 {
941 return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
942 }
943 static inline int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
944 {
945 return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
946 }
947 static inline int pci_write_config_dword(const struct pci_dev *dev, int where,
948 u32 val)
949 {
950 return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
951 }
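/*
 * Editor's note: a small sketch, not part of the traced source, of typical
 * config-space access: read-modify-write of the COMMAND register.  PCI_COMMAND
 * and PCI_COMMAND_MEMORY come from uapi/linux/pci_regs.h; "foo_" is assumed.
 */
static void foo_enable_mem_decode(struct pci_dev *pdev)
{
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	cmd |= PCI_COMMAND_MEMORY;		/* enable memory-space decoding */
	pci_write_config_word(pdev, PCI_COMMAND, cmd);
}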
952
953 int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
954 int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
955 int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
956 int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
957 int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
958 u16 clear, u16 set);
959 int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
960 u32 clear, u32 set);
961
962 static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
963 u16 set)
964 {
965 return pcie_capability_clear_and_set_word(dev, pos, 0, set);
966 }
967
968 static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos,
969 u32 set)
970 {
971 return pcie_capability_clear_and_set_dword(dev, pos, 0, set);
972 }
973
974 static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
975 u16 clear)
976 {
977 return pcie_capability_clear_and_set_word(dev, pos, clear, 0);
978 }
979
980 static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
981 u32 clear)
982 {
983 return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
984 }
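/*
 * Editor's note: illustrative only, not part of the traced source.  The
 * pcie_capability_*() helpers hide the PCIe capability offset; here Relaxed
 * Ordering is cleared in the Device Control register of an assumed "pdev".
 */
static void foo_disable_relaxed_ordering(struct pci_dev *pdev)
{
	pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL,
				   PCI_EXP_DEVCTL_RELAX_EN);
}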
985
986 /* user-space driven config access */
987 int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
988 int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
989 int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
990 int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
991 int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
992 int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
993
994 int __must_check pci_enable_device(struct pci_dev *dev);
995 int __must_check pci_enable_device_io(struct pci_dev *dev);
996 int __must_check pci_enable_device_mem(struct pci_dev *dev);
997 int __must_check pci_reenable_device(struct pci_dev *);
998 int __must_check pcim_enable_device(struct pci_dev *pdev);
999 void pcim_pin_device(struct pci_dev *pdev);
1000
1001 static inline int pci_is_enabled(struct pci_dev *pdev)
1002 {
1003 return (atomic_read(&pdev->enable_cnt) > 0);
1004 }
1005
1006 static inline int pci_is_managed(struct pci_dev *pdev)
1007 {
1008 return pdev->is_managed;
1009 }
1010
1011 void pci_disable_device(struct pci_dev *dev);
1012
1013 extern unsigned int pcibios_max_latency;
1014 void pci_set_master(struct pci_dev *dev);
1015 void pci_clear_master(struct pci_dev *dev);
1016
1017 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
1018 int pci_set_cacheline_size(struct pci_dev *dev);
1019 #define HAVE_PCI_SET_MWI
1020 int __must_check pci_set_mwi(struct pci_dev *dev);
1021 int pci_try_set_mwi(struct pci_dev *dev);
1022 void pci_clear_mwi(struct pci_dev *dev);
1023 void pci_intx(struct pci_dev *dev, int enable);
1024 bool pci_intx_mask_supported(struct pci_dev *dev);
1025 bool pci_check_and_mask_intx(struct pci_dev *dev);
1026 bool pci_check_and_unmask_intx(struct pci_dev *dev);
1027 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
1028 int pci_wait_for_pending_transaction(struct pci_dev *dev);
1029 int pcix_get_max_mmrbc(struct pci_dev *dev);
1030 int pcix_get_mmrbc(struct pci_dev *dev);
1031 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
1032 int pcie_get_readrq(struct pci_dev *dev);
1033 int pcie_set_readrq(struct pci_dev *dev, int rq);
1034 int pcie_get_mps(struct pci_dev *dev);
1035 int pcie_set_mps(struct pci_dev *dev, int mps);
1036 int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
1037 enum pcie_link_width *width);
1038 int __pci_reset_function(struct pci_dev *dev);
1039 int __pci_reset_function_locked(struct pci_dev *dev);
1040 int pci_reset_function(struct pci_dev *dev);
1041 int pci_try_reset_function(struct pci_dev *dev);
1042 int pci_probe_reset_slot(struct pci_slot *slot);
1043 int pci_reset_slot(struct pci_slot *slot);
1044 int pci_try_reset_slot(struct pci_slot *slot);
1045 int pci_probe_reset_bus(struct pci_bus *bus);
1046 int pci_reset_bus(struct pci_bus *bus);
1047 int pci_try_reset_bus(struct pci_bus *bus);
1048 void pci_reset_secondary_bus(struct pci_dev *dev);
1049 void pcibios_reset_secondary_bus(struct pci_dev *dev);
1050 void pci_reset_bridge_secondary_bus(struct pci_dev *dev);
1051 void pci_update_resource(struct pci_dev *dev, int resno);
1052 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
1053 int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
1054 int pci_select_bars(struct pci_dev *dev, unsigned long flags);
1055 bool pci_device_is_present(struct pci_dev *pdev);
1056 void pci_ignore_hotplug(struct pci_dev *dev);
1057
1058 /* ROM control related routines */
1059 int pci_enable_rom(struct pci_dev *pdev);
1060 void pci_disable_rom(struct pci_dev *pdev);
1061 void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
1062 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
1063 size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size);
1064 void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);
1065
1066 /* Power management related routines */
1067 int pci_save_state(struct pci_dev *dev);
1068 void pci_restore_state(struct pci_dev *dev);
1069 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
1070 int pci_load_saved_state(struct pci_dev *dev,
1071 struct pci_saved_state *state);
1072 int pci_load_and_free_saved_state(struct pci_dev *dev,
1073 struct pci_saved_state **state);
1074 struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap);
1075 struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
1076 u16 cap);
1077 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size);
1078 int pci_add_ext_cap_save_buffer(struct pci_dev *dev,
1079 u16 cap, unsigned int size);
1080 int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state);
1081 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
1082 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
1083 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
1084 void pci_pme_active(struct pci_dev *dev, bool enable);
1085 int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1086 bool runtime, bool enable);
1087 int pci_wake_from_d3(struct pci_dev *dev, bool enable);
1088 int pci_prepare_to_sleep(struct pci_dev *dev);
1089 int pci_back_from_sleep(struct pci_dev *dev);
1090 bool pci_dev_run_wake(struct pci_dev *dev);
1091 bool pci_check_pme_status(struct pci_dev *dev);
1092 void pci_pme_wakeup_bus(struct pci_bus *bus);
1093 void pci_d3cold_enable(struct pci_dev *dev);
1094 void pci_d3cold_disable(struct pci_dev *dev);
1095
1096 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1097 bool enable)
1098 {
1099 return __pci_enable_wake(dev, state, false, enable);
1100 }
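/*
 * Editor's note: a hedged sketch, not part of the traced source, of the
 * classic legacy suspend sequence built from the PM helpers above; error
 * handling is omitted and "foo_suspend" is a placeholder callback.
 */
static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);			/* snapshot config space */
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}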
1101
1102 /* PCI Virtual Channel */
1103 int pci_save_vc_state(struct pci_dev *dev);
1104 void pci_restore_vc_state(struct pci_dev *dev);
1105 void pci_allocate_vc_save_buffers(struct pci_dev *dev);
1106
1107 /* For use by arch with custom probe code */
1108 void set_pcie_port_type(struct pci_dev *pdev);
1109 void set_pcie_hotplug_bridge(struct pci_dev *pdev);
1110
1111 /* Functions for PCI Hotplug drivers to use */
1112 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
1113 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
1114 unsigned int pci_rescan_bus(struct pci_bus *bus);
1115 void pci_lock_rescan_remove(void);
1116 void pci_unlock_rescan_remove(void);
1117
1118 /* Vital product data routines */
1119 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1120 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1121 int pci_set_vpd_size(struct pci_dev *dev, size_t len);
1122
1123 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
1124 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
1125 void pci_bus_assign_resources(const struct pci_bus *bus);
1126 void pci_bus_claim_resources(struct pci_bus *bus);
1127 void pci_bus_size_bridges(struct pci_bus *bus);
1128 int pci_claim_resource(struct pci_dev *, int);
1129 int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
1130 void pci_assign_unassigned_resources(void);
1131 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
1132 void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
1133 void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
1134 void pdev_enable_device(struct pci_dev *);
1135 int pci_enable_resources(struct pci_dev *, int mask);
1136 void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *),
1137 int (*)(const struct pci_dev *, u8, u8));
1138 #define HAVE_PCI_REQ_REGIONS 2
1139 int __must_check pci_request_regions(struct pci_dev *, const char *);
1140 int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
1141 void pci_release_regions(struct pci_dev *);
1142 int __must_check pci_request_region(struct pci_dev *, int, const char *);
1143 int __must_check pci_request_region_exclusive(struct pci_dev *, int, const char *);
1144 void pci_release_region(struct pci_dev *, int);
1145 int pci_request_selected_regions(struct pci_dev *, int, const char *);
1146 int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
1147 void pci_release_selected_regions(struct pci_dev *, int);
1148
1149 /* drivers/pci/bus.c */
1150 struct pci_bus *pci_bus_get(struct pci_bus *bus);
1151 void pci_bus_put(struct pci_bus *bus);
1152 void pci_add_resource(struct list_head *resources, struct resource *res);
1153 void pci_add_resource_offset(struct list_head *resources, struct resource *res,
1154 resource_size_t offset);
1155 void pci_free_resource_list(struct list_head *resources);
1156 void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
1157 unsigned int flags);
1158 struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
1159 void pci_bus_remove_resources(struct pci_bus *bus);
1160 int devm_request_pci_bus_resources(struct device *dev,
1161 struct list_head *resources);
1162
1163 #define pci_bus_for_each_resource(bus, res, i) \
1164 for (i = 0; \
1165 (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \
1166 i++)
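/*
 * Editor's note: sketch, not part of the traced source, of walking every
 * window routed to a bus.  Skipping NULL entries is needed because the
 * iterator also visits unset bridge-window slots; "foo_" is assumed.
 */
static void foo_dump_bus_windows(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	pci_bus_for_each_resource(bus, res, i) {
		if (!res)
			continue;	/* unset bridge-window slot */
		dev_info(&bus->dev, "window %d: %pR\n", i, res);
	}
}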
1167
1168 int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
1169 struct resource *res, resource_size_t size,
1170 resource_size_t align, resource_size_t min,
1171 unsigned long type_mask,
1172 resource_size_t (*alignf)(void *,
1173 const struct resource *,
1174 resource_size_t,
1175 resource_size_t),
1176 void *alignf_data);
1177
1178
1179 int pci_register_io_range(phys_addr_t addr, resource_size_t size);
1180 unsigned long pci_address_to_pio(phys_addr_t addr);
1181 phys_addr_t pci_pio_to_address(unsigned long pio);
1182 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
1183 void pci_unmap_iospace(struct resource *res);
1184
1185 static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
1186 {
1187 struct pci_bus_region region;
1188
1189 pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]);
1190 return region.start;
1191 }
1192
1193 /* Proper probing supporting hot-pluggable devices */
1194 int __must_check __pci_register_driver(struct pci_driver *, struct module *,
1195 const char *mod_name);
1196
1197 /*
1198 * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded
1199 */
1200 #define pci_register_driver(driver) \
1201 __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
1202
1203 void pci_unregister_driver(struct pci_driver *dev);
1204
1205 /**
1206 * module_pci_driver() - Helper macro for registering a PCI driver
1207 * @__pci_driver: pci_driver struct
1208 *
1209 * Helper macro for PCI drivers which do not do anything special in module
1210 * init/exit. This eliminates a lot of boilerplate. Each module may only
1211 * use this macro once, and calling it replaces module_init() and module_exit()
1212 */
1213 #define module_pci_driver(__pci_driver) \
1214 module_driver(__pci_driver, pci_register_driver, \
1215 pci_unregister_driver)
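/*
 * Editor's note: a hypothetical minimal driver skeleton, not part of the
 * traced source, tying together an ID table, probe/remove callbacks and
 * module_pci_driver().  All "foo_*" names are placeholders; "foo_ids" refers
 * to the illustrative table sketched earlier.
 */
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret = pcim_enable_device(pdev);	/* managed enable: undone automatically */
	if (ret)
		return ret;

	pci_set_master(pdev);
	return 0;
}

static void foo_remove(struct pci_dev *pdev)
{
	pci_clear_master(pdev);
}

static struct pci_driver foo_driver = {
	.name		= "foo",
	.id_table	= foo_ids,
	.probe		= foo_probe,
	.remove		= foo_remove,
};
module_pci_driver(foo_driver);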
1216
1217 /**
1218 * builtin_pci_driver() - Helper macro for registering a PCI driver
1219 * @__pci_driver: pci_driver struct
1220 *
1221 * Helper macro for PCI drivers which do not do anything special in their
1222 * init code. This eliminates a lot of boilerplate. Each driver may only
1223 * use this macro once, and calling it replaces device_initcall(...)
1224 */
1225 #define builtin_pci_driver(__pci_driver) \
1226 builtin_driver(__pci_driver, pci_register_driver)
1227
1228 struct pci_driver *pci_dev_driver(const struct pci_dev *dev);
1229 int pci_add_dynid(struct pci_driver *drv,
1230 unsigned int vendor, unsigned int device,
1231 unsigned int subvendor, unsigned int subdevice,
1232 unsigned int class, unsigned int class_mask,
1233 unsigned long driver_data);
1234 const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
1235 struct pci_dev *dev);
1236 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
1237 int pass);
1238
1239 void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
1240 void *userdata);
1241 int pci_cfg_space_size(struct pci_dev *dev);
1242 unsigned char pci_bus_max_busnr(struct pci_bus *bus);
1243 void pci_setup_bridge(struct pci_bus *bus);
1244 resource_size_t pcibios_window_alignment(struct pci_bus *bus,
1245 unsigned long type);
1246 resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
1247
1248 #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
1249 #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
1250
1251 int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1252 unsigned int command_bits, u32 flags);
1253
1254 #define PCI_IRQ_NOLEGACY (1 << 0) /* don't use legacy interrupts */
1255 #define PCI_IRQ_NOMSI (1 << 1) /* don't use MSI interrupts */
1256 #define PCI_IRQ_NOMSIX (1 << 2) /* don't use MSI-X interrupts */
1257 #define PCI_IRQ_NOAFFINITY (1 << 3) /* don't auto-assign affinity */
1258
1259 /* kmem_cache style wrapper around pci_alloc_consistent() */
1260
1261 #include <linux/pci-dma.h>
1262 #include <linux/dmapool.h>
1263
1264 #define pci_pool dma_pool
1265 #define pci_pool_create(name, pdev, size, align, allocation) \
1266 dma_pool_create(name, &pdev->dev, size, align, allocation)
1267 #define pci_pool_destroy(pool) dma_pool_destroy(pool)
1268 #define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
1269 #define pci_pool_zalloc(pool, flags, handle) \
1270 dma_pool_zalloc(pool, flags, handle)
1271 #define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
1272
1273 struct msix_entry {
1274 u32 vector; /* kernel uses to write allocated vector */
1275 u16 entry; /* driver uses to specify entry, OS writes */
1276 };
1277
1278 #ifdef CONFIG_PCI_MSI
1279 int pci_msi_vec_count(struct pci_dev *dev);
1280 void pci_msi_shutdown(struct pci_dev *dev);
1281 void pci_disable_msi(struct pci_dev *dev);
1282 int pci_msix_vec_count(struct pci_dev *dev);
1283 int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec);
1284 void pci_msix_shutdown(struct pci_dev *dev);
1285 void pci_disable_msix(struct pci_dev *dev);
1286 void pci_restore_msi_state(struct pci_dev *dev);
1287 int pci_msi_enabled(void);
1288 int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec);
1289 static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
1290 {
1291 int rc = pci_enable_msi_range(dev, nvec, nvec);
1292 if (rc < 0)
1293 return rc;
1294 return 0;
1295 }
1296 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
1297 int minvec, int maxvec);
1298 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1299 struct msix_entry *entries, int nvec)
1300 {
1301 int rc = pci_enable_msix_range(dev, entries, nvec, nvec);
1302 if (rc < 0)
1303 return rc;
1304 return 0;
1305 }
1306 int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1307 unsigned int max_vecs, unsigned int flags);
1308 void pci_free_irq_vectors(struct pci_dev *dev);
1309 int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
1310
1311 #else
1312 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
1313 static inline void pci_msi_shutdown(struct pci_dev *dev) { }
1314 static inline void pci_disable_msi(struct pci_dev *dev) { }
1315 static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
1316 static inline int pci_enable_msix(struct pci_dev *dev,
1317 struct msix_entry *entries, int nvec)
1318 { return -ENOSYS; }
1319 static inline void pci_msix_shutdown(struct pci_dev *dev) { }
1320 static inline void pci_disable_msix(struct pci_dev *dev) { }
1321 static inline void pci_restore_msi_state(struct pci_dev *dev) { }
1322 static inline int pci_msi_enabled(void) { return 0; }
1323 static inline int pci_enable_msi_range(struct pci_dev *dev, int minvec,
1324 int maxvec)
1325 { return -ENOSYS; }
1326 static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
1327 { return -ENOSYS; }
1328 static inline int pci_enable_msix_range(struct pci_dev *dev,
1329 struct msix_entry *entries, int minvec, int maxvec)
1330 { return -ENOSYS; }
1331 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1332 struct msix_entry *entries, int nvec)
1333 { return -ENOSYS; }
1334 static inline int pci_alloc_irq_vectors(struct pci_dev *dev,
1335 unsigned int min_vecs, unsigned int max_vecs,
1336 unsigned int flags)
1337 {
1338 if (min_vecs > 1)
1339 return -EINVAL;
1340 return 1;
1341 }
1342 static inline void pci_free_irq_vectors(struct pci_dev *dev)
1343 {
1344 }
1345
1346 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
1347 {
1348 if (WARN_ON_ONCE(nr > 0))
1349 return -EINVAL;
1350 return dev->irq;
1351 }
1352 #endif
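/*
 * Editor's note: illustrative interrupt setup, not part of the traced source.
 * With flags == 0, pci_alloc_irq_vectors() may use MSI-X, MSI or a legacy IRQ;
 * "foo_irq_handler" is an assumed handler and "foo_setup_irq" a placeholder.
 */
static int foo_setup_irq(struct pci_dev *pdev)
{
	int nvec, ret;

	nvec = pci_alloc_irq_vectors(pdev, 1, 4, 0);	/* 1..4 vectors, any type allowed */
	if (nvec < 0)
		return nvec;

	ret = request_irq(pci_irq_vector(pdev, 0), foo_irq_handler, 0,
			  "foo", pdev);
	if (ret)
		pci_free_irq_vectors(pdev);
	return ret;
}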
1353
1354 #ifdef CONFIG_PCIEPORTBUS
1355 extern bool pcie_ports_disabled;
1356 extern bool pcie_ports_auto;
1357 #else
1358 #define pcie_ports_disabled true
1359 #define pcie_ports_auto false
1360 #endif
1361
1362 #ifdef CONFIG_PCIEASPM
1363 bool pcie_aspm_support_enabled(void);
1364 #else
1365 static inline bool pcie_aspm_support_enabled(void) { return false; }
1366 #endif
1367
1368 #ifdef CONFIG_PCIEAER
1369 void pci_no_aer(void);
1370 bool pci_aer_available(void);
1371 #else
1372 static inline void pci_no_aer(void) { }
1373 static inline bool pci_aer_available(void) { return false; }
1374 #endif
1375
1376 #ifdef CONFIG_PCIE_ECRC
1377 void pcie_set_ecrc_checking(struct pci_dev *dev);
1378 void pcie_ecrc_get_policy(char *str);
1379 #else
1380 static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
1381 static inline void pcie_ecrc_get_policy(char *str) { }
1382 #endif
1383
1384 #define pci_enable_msi(pdev) pci_enable_msi_exact(pdev, 1)
1385
1386 #ifdef CONFIG_HT_IRQ
1387 /* The functions a driver should call */
1388 int ht_create_irq(struct pci_dev *dev, int idx);
1389 void ht_destroy_irq(unsigned int irq);
1390 #endif /* CONFIG_HT_IRQ */
1391
1392 #ifdef CONFIG_PCI_ATS
1393 /* Address Translation Service */
1394 void pci_ats_init(struct pci_dev *dev);
1395 int pci_enable_ats(struct pci_dev *dev, int ps);
1396 void pci_disable_ats(struct pci_dev *dev);
1397 int pci_ats_queue_depth(struct pci_dev *dev);
1398 #else
1399 static inline void pci_ats_init(struct pci_dev *d) { }
1400 static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; }
1401 static inline void pci_disable_ats(struct pci_dev *d) { }
1402 static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
1403 #endif
1404
1405 void pci_cfg_access_lock(struct pci_dev *dev);
1406 bool pci_cfg_access_trylock(struct pci_dev *dev);
1407 void pci_cfg_access_unlock(struct pci_dev *dev);
1408
1409 /*
1410 * PCI domain support. Sometimes called PCI segment (eg by ACPI),
1411 * a PCI domain is defined to be a set of PCI buses which share
1412 * configuration space.
1413 */
1414 #ifdef CONFIG_PCI_DOMAINS
1415 extern int pci_domains_supported;
1416 int pci_get_new_domain_nr(void);
1417 #else
1418 enum { pci_domains_supported = 0 };
1419 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1420 static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
1421 static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
1422 #endif /* CONFIG_PCI_DOMAINS */
1423
1424 /*
1425 * Generic implementation for PCI domain support. If your
1426 * architecture does not need custom management of PCI
1427 * domains then this implementation will be used
1428 */
1429 #ifdef CONFIG_PCI_DOMAINS_GENERIC
1430 static inline int pci_domain_nr(struct pci_bus *bus)
1431 {
1432 return bus->domain_nr;
1433 }
1434 #ifdef CONFIG_ACPI
1435 int acpi_pci_bus_find_domain_nr(struct pci_bus *bus);
1436 #else
1437 static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
1438 { return 0; }
1439 #endif
1440 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
1441 #endif
1442
1443 /* some architectures require additional setup to direct VGA traffic */
1444 typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
1445 unsigned int command_bits, u32 flags);
1446 void pci_register_set_vga_state(arch_set_vga_state_t func);
1447
1448 static inline int
1449 pci_request_io_regions(struct pci_dev *pdev, const char *name)
1450 {
1451 return pci_request_selected_regions(pdev,
1452 pci_select_bars(pdev, IORESOURCE_IO), name);
1453 }
1454
1455 static inline void
1456 pci_release_io_regions(struct pci_dev *pdev)
1457 {
1458 return pci_release_selected_regions(pdev,
1459 pci_select_bars(pdev, IORESOURCE_IO));
1460 }
1461
1462 static inline int
1463 pci_request_mem_regions(struct pci_dev *pdev, const char *name)
1464 {
1465 return pci_request_selected_regions(pdev,
1466 pci_select_bars(pdev, IORESOURCE_MEM), name);
1467 }
1468
1469 static inline void
1470 pci_release_mem_regions(struct pci_dev *pdev)
1471 {
1472 return pci_release_selected_regions(pdev,
1473 pci_select_bars(pdev, IORESOURCE_MEM));
1474 }
1475
1476 #else /* CONFIG_PCI is not enabled */
1477
1478 static inline void pci_set_flags(int flags) { }
1479 static inline void pci_add_flags(int flags) { }
1480 static inline void pci_clear_flags(int flags) { }
1481 static inline int pci_has_flag(int flag) { return 0; }
1482
1483 /*
1484 * If the system does not have PCI, clearly these return errors. Define
1485 * these as simple inline functions to avoid hair in drivers.
1486 */
1487
1488 #define _PCI_NOP(o, s, t) \
1489 static inline int pci_##o##_config_##s(struct pci_dev *dev, \
1490 int where, t val) \
1491 { return PCIBIOS_FUNC_NOT_SUPPORTED; }
1492
1493 #define _PCI_NOP_ALL(o, x) _PCI_NOP(o, byte, u8 x) \
1494 _PCI_NOP(o, word, u16 x) \
1495 _PCI_NOP(o, dword, u32 x)
1496 _PCI_NOP_ALL(read, *)
1497 _PCI_NOP_ALL(write,)
1498
1499 static inline struct pci_dev *pci_get_device(unsigned int vendor,
1500 unsigned int device,
1501 struct pci_dev *from)
1502 { return NULL; }
1503
1504 static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
1505 unsigned int device,
1506 unsigned int ss_vendor,
1507 unsigned int ss_device,
1508 struct pci_dev *from)
1509 { return NULL; }
1510
1511 static inline struct pci_dev *pci_get_class(unsigned int class,
1512 struct pci_dev *from)
1513 { return NULL; }
1514
1515 #define pci_dev_present(ids) (0)
1516 #define no_pci_devices() (1)
1517 #define pci_dev_put(dev) do { } while (0)
1518
1519 static inline void pci_set_master(struct pci_dev *dev) { }
1520 static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
1521 static inline void pci_disable_device(struct pci_dev *dev) { }
1522 static inline int pci_assign_resource(struct pci_dev *dev, int i)
1523 { return -EBUSY; }
1524 static inline int __pci_register_driver(struct pci_driver *drv,
1525 struct module *owner)
1526 { return 0; }
1527 static inline int pci_register_driver(struct pci_driver *drv)
1528 { return 0; }
1529 static inline void pci_unregister_driver(struct pci_driver *drv) { }
1530 static inline int pci_find_capability(struct pci_dev *dev, int cap)
1531 { return 0; }
1532 static inline int pci_find_next_capability(struct pci_dev *dev, u8 post,
1533 int cap)
1534 { return 0; }
1535 static inline int pci_find_ext_capability(struct pci_dev *dev, int cap)
1536 { return 0; }
1537
1538 /* Power management related routines */
1539 static inline int pci_save_state(struct pci_dev *dev) { return 0; }
1540 static inline void pci_restore_state(struct pci_dev *dev) { }
1541 static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
1542 { return 0; }
1543 static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1544 { return 0; }
1545 static inline pci_power_t pci_choose_state(struct pci_dev *dev,
1546 pm_message_t state)
1547 { return PCI_D0; }
1548 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1549 int enable)
1550 { return 0; }
1551
1552 static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
1553 { return -EIO; }
1554 static inline void pci_release_regions(struct pci_dev *dev) { }
1555
1556 static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
1557
1558 static inline void pci_block_cfg_access(struct pci_dev *dev) { }
1559 static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev)
1560 { return 0; }
1561 static inline void pci_unblock_cfg_access(struct pci_dev *dev) { }
1562
1563 static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
1564 { return NULL; }
1565 static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
1566 unsigned int devfn)
1567 { return NULL; }
1568 static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
1569 unsigned int devfn)
1570 { return NULL; }
1571
1572 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1573 static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
1574 static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
1575
1576 #define dev_is_pci(d) (false)
1577 #define dev_is_pf(d) (false)
1578 #define dev_num_vf(d) (0)
1579 #endif /* CONFIG_PCI */
1580
1581 /* Include architecture-dependent settings and functions */
1582
1583 #include <asm/pci.h>
1584
1585 #ifndef pci_root_bus_fwnode
1586 #define pci_root_bus_fwnode(bus) NULL
1587 #endif
1588
1589 /* these helpers provide future and backwards compatibility
1590 * for accessing popular PCI BAR info */
1591 #define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
1592 #define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end)
1593 #define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags)
1594 #define pci_resource_len(dev,bar) \
1595 ((pci_resource_start((dev), (bar)) == 0 && \
1596 pci_resource_end((dev), (bar)) == \
1597 pci_resource_start((dev), (bar))) ? 0 : \
1598 \
1599 (pci_resource_end((dev), (bar)) - \
1600 pci_resource_start((dev), (bar)) + 1))
1601
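As a purely illustrative sketch (the BAR index and the helper name are assumptions, not part of this header), a driver typically combines these accessors like this when mapping a memory BAR:

static void __iomem *example_map_bar0(struct pci_dev *pdev)
{
	/* Only memory BARs can be ioremap()ed in this sketch. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM))
		return NULL;

	/* pci_resource_len() evaluates to 0 for an unimplemented BAR. */
	if (!pci_resource_len(pdev, 0))
		return NULL;

	return ioremap(pci_resource_start(pdev, 0),
		       pci_resource_len(pdev, 0));
}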
1602 /* Similar to the helpers above, these manipulate per-pci_dev
1603 * driver-specific data. They are really just a wrapper around
1604 * the generic device structure functions of these calls.
1605 */
1606 static inline void *pci_get_drvdata(struct pci_dev *pdev)
1607 {
1608 return dev_get_drvdata(&pdev->dev);
1609 }
1610
1611 static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
1612 {
1613 dev_set_drvdata(&pdev->dev, data);
1614 }
1615
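A hedged sketch of how these two wrappers usually pair up across a driver's probe and remove callbacks; struct example_priv and the kzalloc-based allocation are illustrative assumptions:

/* Assumes <linux/slab.h> for kzalloc()/kfree(). */
struct example_priv {
	void __iomem *regs;
};

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct example_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;

	/* Stash the private data so other callbacks can retrieve it. */
	pci_set_drvdata(pdev, priv);
	return 0;
}

static void example_remove(struct pci_dev *pdev)
{
	struct example_priv *priv = pci_get_drvdata(pdev);

	kfree(priv);
}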
1616 /* If you want to know what to call your pci_dev, ask this function.
1617 * Again, it's a wrapper around the generic device.
1618 */
1619 static inline const char *pci_name(const struct pci_dev *pdev)
1620 {
1621 return dev_name(&pdev->dev);
1622 }
1623
1624
1625 /* Some archs don't want to expose struct resource to userland as-is
1626 * in sysfs and /proc
1627 */
1628 #ifdef HAVE_ARCH_PCI_RESOURCE_TO_USER
1629 void pci_resource_to_user(const struct pci_dev *dev, int bar,
1630 const struct resource *rsrc,
1631 resource_size_t *start, resource_size_t *end);
1632 #else
1633 static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
1634 const struct resource *rsrc, resource_size_t *start,
1635 resource_size_t *end)
1636 {
1637 *start = rsrc->start;
1638 *end = rsrc->end;
1639 }
1640 #endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */
1641
1642
1643 /*
1644 * The world is not perfect and supplies us with broken PCI devices.
1645 * For at least a part of these bugs we need a work-around, so both
1646 * generic (drivers/pci/quirks.c) and per-architecture code can define
1647 * fixup hooks to be called for particular buggy devices.
1648 */
1649
1650 struct pci_fixup {
1651 u16 vendor; /* You can use PCI_ANY_ID here of course */
1652 u16 device; /* You can use PCI_ANY_ID here of course */
1653 u32 class; /* You can use PCI_ANY_ID here too */
1654 unsigned int class_shift; /* should be 0, 8, 16 */
1655 void (*hook)(struct pci_dev *dev);
1656 };
1657
1658 enum pci_fixup_pass {
1659 pci_fixup_early, /* Before probing BARs */
1660 pci_fixup_header, /* After reading configuration header */
1661 pci_fixup_final, /* Final phase of device fixups */
1662 pci_fixup_enable, /* pci_enable_device() time */
1663 pci_fixup_resume, /* pci_device_resume() */
1664 pci_fixup_suspend, /* pci_device_suspend() */
1665 pci_fixup_resume_early, /* pci_device_resume_early() */
1666 pci_fixup_suspend_late, /* pci_device_suspend_late() */
1667 };
1668
1669 /* Anonymous variables would be nice... */
1670 #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \
1671 class_shift, hook) \
1672 static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \
1673 __attribute__((__section__(#section), aligned((sizeof(void *))))) \
1674 = { vendor, device, class, class_shift, hook };
1675
1676 #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \
1677 class_shift, hook) \
1678 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
1679 hook, vendor, device, class, class_shift, hook)
1680 #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \
1681 class_shift, hook) \
1682 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
1683 hook, vendor, device, class, class_shift, hook)
1684 #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \
1685 class_shift, hook) \
1686 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
1687 hook, vendor, device, class, class_shift, hook)
1688 #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \
1689 class_shift, hook) \
1690 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
1691 hook, vendor, device, class, class_shift, hook)
1692 #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \
1693 class_shift, hook) \
1694 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
1695 resume##hook, vendor, device, class, \
1696 class_shift, hook)
1697 #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \
1698 class_shift, hook) \
1699 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
1700 resume_early##hook, vendor, device, \
1701 class, class_shift, hook)
1702 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \
1703 class_shift, hook) \
1704 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
1705 suspend##hook, vendor, device, class, \
1706 class_shift, hook)
1707 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \
1708 class_shift, hook) \
1709 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
1710 suspend_late##hook, vendor, device, \
1711 class, class_shift, hook)
1712
1713 #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
1714 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
1715 hook, vendor, device, PCI_ANY_ID, 0, hook)
1716 #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \
1717 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
1718 hook, vendor, device, PCI_ANY_ID, 0, hook)
1719 #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \
1720 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
1721 hook, vendor, device, PCI_ANY_ID, 0, hook)
1722 #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \
1723 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
1724 hook, vendor, device, PCI_ANY_ID, 0, hook)
1725 #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
1726 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
1727 resume##hook, vendor, device, \
1728 PCI_ANY_ID, 0, hook)
1729 #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
1730 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
1731 resume_early##hook, vendor, device, \
1732 PCI_ANY_ID, 0, hook)
1733 #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
1734 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
1735 suspend##hook, vendor, device, \
1736 PCI_ANY_ID, 0, hook)
1737 #define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \
1738 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
1739 suspend_late##hook, vendor, device, \
1740 PCI_ANY_ID, 0, hook)
1741
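Sketch of how a quirk is typically declared with one of these macros; the device ID 0x1234 and the quirk body are placeholders, not a real quirk:

static void quirk_example_no_msi(struct pci_dev *dev)
{
	dev_info(&dev->dev, "example quirk: disabling MSI\n");
	dev->no_msi = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1234, quirk_example_no_msi);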
1742 #ifdef CONFIG_PCI_QUIRKS
1743 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
1744 int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
1745 int pci_dev_specific_enable_acs(struct pci_dev *dev);
1746 #else
1747 static inline void pci_fixup_device(enum pci_fixup_pass pass,
1748 struct pci_dev *dev) { }
1749 static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
1750 u16 acs_flags)
1751 {
1752 return -ENOTTY;
1753 }
1754 static inline int pci_dev_specific_enable_acs(struct pci_dev *dev)
1755 {
1756 return -ENOTTY;
1757 }
1758 #endif
1759
1760 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
1761 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
1762 void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
1763 int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
1764 int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
1765 const char *name);
1766 void pcim_iounmap_regions(struct pci_dev *pdev, int mask);
1767
1768 extern int pci_pci_problems;
1769 #define PCIPCI_FAIL 1 /* No PCI PCI DMA */
1770 #define PCIPCI_TRITON 2
1771 #define PCIPCI_NATOMA 4
1772 #define PCIPCI_VIAETBF 8
1773 #define PCIPCI_VSFX 16
1774 #define PCIPCI_ALIMAGIK 32 /* Need low latency setting */
1775 #define PCIAGP_FAIL 64 /* No PCI to AGP DMA */
1776
1777 extern unsigned long pci_cardbus_io_size;
1778 extern unsigned long pci_cardbus_mem_size;
1779 extern u8 pci_dfl_cache_line_size;
1780 extern u8 pci_cache_line_size;
1781
1782 extern unsigned long pci_hotplug_io_size;
1783 extern unsigned long pci_hotplug_mem_size;
1784 extern unsigned long pci_hotplug_bus_size;
1785
1786 /* Architecture-specific versions may override these (weak) */
1787 void pcibios_disable_device(struct pci_dev *dev);
1788 void pcibios_set_master(struct pci_dev *dev);
1789 int pcibios_set_pcie_reset_state(struct pci_dev *dev,
1790 enum pcie_reset_state state);
1791 int pcibios_add_device(struct pci_dev *dev);
1792 void pcibios_release_device(struct pci_dev *dev);
1793 void pcibios_penalize_isa_irq(int irq, int active);
1794 int pcibios_alloc_irq(struct pci_dev *dev);
1795 void pcibios_free_irq(struct pci_dev *dev);
1796
1797 #ifdef CONFIG_HIBERNATE_CALLBACKS
1798 extern struct dev_pm_ops pcibios_pm_ops;
1799 #endif
1800
1801 #if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
1802 void __init pci_mmcfg_early_init(void);
1803 void __init pci_mmcfg_late_init(void);
1804 #else
1805 static inline void pci_mmcfg_early_init(void) { }
1806 static inline void pci_mmcfg_late_init(void) { }
1807 #endif
1808
1809 int pci_ext_cfg_avail(void);
1810
1811 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
1812 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
1813
1814 #ifdef CONFIG_PCI_IOV
1815 int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
1816 int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
1817
1818 int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
1819 void pci_disable_sriov(struct pci_dev *dev);
1820 int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset);
1821 void pci_iov_remove_virtfn(struct pci_dev *dev, int id, int reset);
1822 int pci_num_vf(struct pci_dev *dev);
1823 int pci_vfs_assigned(struct pci_dev *dev);
1824 int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
1825 int pci_sriov_get_totalvfs(struct pci_dev *dev);
1826 resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
1827 #else
1828 static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
1829 {
1830 return -ENOSYS;
1831 }
1832 static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
1833 {
1834 return -ENOSYS;
1835 }
1836 static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
1837 { return -ENODEV; }
1838 static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
1839 {
1840 return -ENOSYS;
1841 }
1842 static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
1843 int id, int reset) { }
1844 static inline void pci_disable_sriov(struct pci_dev *dev) { }
1845 static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
1846 static inline int pci_vfs_assigned(struct pci_dev *dev)
1847 { return 0; }
1848 static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
1849 { return 0; }
1850 static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
1851 { return 0; }
1852 static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
1853 { return 0; }
1854 #endif
1855
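The header gives no usage example, but a common driver pattern is a .sriov_configure callback built on these helpers; the callback name and its return convention are driver conventions, sketched here under that assumption:

static int example_sriov_configure(struct pci_dev *dev, int num_vfs)
{
	int ret;

	if (num_vfs == 0) {
		pci_disable_sriov(dev);
		return 0;
	}

	ret = pci_enable_sriov(dev, num_vfs);
	return ret ? ret : num_vfs;	/* convention: return the number of VFs enabled */
}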
1856 #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
1857 void pci_hp_create_module_link(struct pci_slot *pci_slot);
1858 void pci_hp_remove_module_link(struct pci_slot *pci_slot);
1859 #endif
1860
1861 /**
1862 * pci_pcie_cap - get the saved PCIe capability offset
1863 * @dev: PCI device
1864 *
1865 * PCIe capability offset is calculated at PCI device initialization
1866 * time and saved in the data structure. This function returns saved
1867 * PCIe capability offset. Using this instead of pci_find_capability()
1868 * reduces unnecessary search in the PCI configuration space. If you
1869 * need to calculate PCIe capability offset from raw device for some
1870 * reason, please use pci_find_capability() instead.
1871 */
1872 static inline int pci_pcie_cap(struct pci_dev *dev)
1873 {
1874 return dev->pcie_cap;
1875 }
1876
1877 /**
1878 * pci_is_pcie - check if the PCI device is PCI Express capable
1879 * @dev: PCI device
1880 *
1881 * Returns: true if the PCI device is PCI Express capable, false otherwise.
1882 */
1883 static inline bool pci_is_pcie(struct pci_dev *dev)
1884 {
1885 return pci_pcie_cap(dev);
1886 }
1887
1888 /**
1889 * pcie_caps_reg - get the PCIe Capabilities Register
1890 * @dev: PCI device
1891 */
1892 static inline u16 pcie_caps_reg(const struct pci_dev *dev)
1893 {
1894 return dev->pcie_flags_reg;
1895 }
1896
1897 /**
1898 * pci_pcie_type - get the PCIe device/port type
1899 * @dev: PCI device
1900 */
1901 static inline int pci_pcie_type(const struct pci_dev *dev)
1902 {
1903 return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
1904 }
1905
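A small sketch combining the helpers above; checking for a Root Port this way is a common pattern, and PCI_EXP_TYPE_ROOT_PORT comes from the PCI Express register definitions rather than from this header:

/* Sketch: true if @dev is a PCI Express root port. */
static bool example_is_root_port(struct pci_dev *dev)
{
	return pci_is_pcie(dev) &&
	       pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT;
}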
1906 void pci_request_acs(void);
1907 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
1908 bool pci_acs_path_enabled(struct pci_dev *start,
1909 struct pci_dev *end, u16 acs_flags);
1910
1911 #define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */
1912 #define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT)
1913
1914 /* Large Resource Data Type Tag Item Names */
1915 #define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */
1916 #define PCI_VPD_LTIN_RO_DATA 0x10 /* Read-Only Data */
1917 #define PCI_VPD_LTIN_RW_DATA 0x11 /* Read-Write Data */
1918
1919 #define PCI_VPD_LRDT_ID_STRING PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING)
1920 #define PCI_VPD_LRDT_RO_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
1921 #define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
1922
1923 /* Small Resource Data Type Tag Item Names */
1924 #define PCI_VPD_STIN_END 0x0f /* End */
1925
1926 #define PCI_VPD_SRDT_END (PCI_VPD_STIN_END << 3)
1927
1928 #define PCI_VPD_SRDT_TIN_MASK 0x78
1929 #define PCI_VPD_SRDT_LEN_MASK 0x07
1930 #define PCI_VPD_LRDT_TIN_MASK 0x7f
1931
1932 #define PCI_VPD_LRDT_TAG_SIZE 3
1933 #define PCI_VPD_SRDT_TAG_SIZE 1
1934
1935 #define PCI_VPD_INFO_FLD_HDR_SIZE 3
1936
1937 #define PCI_VPD_RO_KEYWORD_PARTNO "PN"
1938 #define PCI_VPD_RO_KEYWORD_MFR_ID "MN"
1939 #define PCI_VPD_RO_KEYWORD_VENDOR0 "V0"
1940 #define PCI_VPD_RO_KEYWORD_CHKSUM "RV"
1941
1942 /**
1943 * pci_vpd_lrdt_size - Extracts the Large Resource Data Type length
1944 * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
1945 *
1946 * Returns the extracted Large Resource Data Type length.
1947 */
1948 static inline u16 pci_vpd_lrdt_size(const u8 *lrdt)
1949 {
1950 return (u16)lrdt[1] + ((u16)lrdt[2] << 8);
1951 }
1952
1953 /**
1954 * pci_vpd_lrdt_tag - Extracts the Large Resource Data Type Tag Item
1955 * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
1956 *
1957 * Returns the extracted Large Resource Data Type Tag item.
1958 */
1959 static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt)
1960 {
1961 return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK);
1962 }
1963
1964 /**
1965 * pci_vpd_srdt_size - Extracts the Small Resource Data Type length
1966 * @srdt: Pointer to the beginning of the Small Resource Data Type tag
1967 *
1968 * Returns the extracted Small Resource Data Type length.
1969 */
1970 static inline u8 pci_vpd_srdt_size(const u8 *srdt)
1971 {
1972 return (*srdt) & PCI_VPD_SRDT_LEN_MASK;
1973 }
1974
1975 /**
1976 * pci_vpd_srdt_tag - Extracts the Small Resource Data Type Tag Item
1977 * @srdt: Pointer to the beginning of the Small Resource Data Type tag
1978 *
1979 * Returns the extracted Small Resource Data Type Tag Item.
1980 */
1981 static inline u8 pci_vpd_srdt_tag(const u8 *srdt)
1982 {
1983 return ((*srdt) & PCI_VPD_SRDT_TIN_MASK) >> 3;
1984 }
1985
1986 /**
1987 * pci_vpd_info_field_size - Extracts the information field length
1988 * @info_field: Pointer to the beginning of an information field header
1989 *
1990 * Returns the extracted information field length.
1991 */
1992 static inline u8 pci_vpd_info_field_size(const u8 *info_field)
1993 {
1994 return info_field[2];
1995 }
1996
1997 /**
1998 * pci_vpd_find_tag - Locates the Resource Data Type tag provided
1999 * @buf: Pointer to buffered vpd data
2000 * @off: The offset into the buffer at which to begin the search
2001 * @len: The length of the vpd buffer
2002 * @rdt: The Resource Data Type to search for
2003 *
2004 * Returns the index where the Resource Data Type was found or
2005 * -ENOENT otherwise.
2006 */
2007 int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt);
2008
2009 /**
2010 * pci_vpd_find_info_keyword - Locates an information field keyword in the VPD
2011 * @buf: Pointer to buffered vpd data
2012 * @off: The offset into the buffer at which to begin the search
2013 * @len: The length of the buffer area, relative to off, in which to search
2014 * @kw: The keyword to search for
2015 *
2016 * Returns the index where the information field keyword was found or
2017 * -ENOENT otherwise.
2018 */
2019 int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
2020 unsigned int len, const char *kw);
2021
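A hedged sketch of the usual two-step VPD lookup built on these declarations; buf/len are assumed to hold an already-read VPD image, and the function name is illustrative:

static int example_find_partno(const u8 *buf, unsigned int len)
{
	int ro, kw;

	/* Step 1: locate the read-only VPD section. */
	ro = pci_vpd_find_tag(buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (ro < 0)
		return ro;

	/* Step 2: search for the "PN" keyword inside that section. */
	kw = pci_vpd_find_info_keyword(buf, ro + PCI_VPD_LRDT_TAG_SIZE,
				       pci_vpd_lrdt_size(&buf[ro]),
				       PCI_VPD_RO_KEYWORD_PARTNO);
	if (kw < 0)
		return kw;

	/* The field's payload begins after its 3-byte header. */
	return kw + PCI_VPD_INFO_FLD_HDR_SIZE;
}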
2022 /* PCI <-> OF binding helpers */
2023 #ifdef CONFIG_OF
2024 struct device_node;
2025 struct irq_domain;
2026 void pci_set_of_node(struct pci_dev *dev);
2027 void pci_release_of_node(struct pci_dev *dev);
2028 void pci_set_bus_of_node(struct pci_bus *bus);
2029 void pci_release_bus_of_node(struct pci_bus *bus);
2030 struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
2031
2032 /* Arch may override this (weak) */
2033 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
2034
2035 static inline struct device_node *
2036 pci_device_to_OF_node(const struct pci_dev *pdev)
2037 {
2038 return pdev ? pdev->dev.of_node : NULL;
2039 }
2040
2041 static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
2042 {
2043 return bus ? bus->dev.of_node : NULL;
2044 }
2045
2046 #else /* CONFIG_OF */
2047 static inline void pci_set_of_node(struct pci_dev *dev) { }
2048 static inline void pci_release_of_node(struct pci_dev *dev) { }
2049 static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
2050 static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
2051 static inline struct device_node *
2052 pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; }
2053 static inline struct irq_domain *
2054 pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
2055 #endif /* CONFIG_OF */
2056
2057 #ifdef CONFIG_ACPI
2058 struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);
2059
2060 void
2061 pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *));
2062 #else
2063 static inline struct irq_domain *
2064 pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
2065 #endif
2066
2067 #ifdef CONFIG_EEH
2068 static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
2069 {
2070 return pdev->dev.archdata.edev;
2071 }
2072 #endif
2073
2074 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn);
2075 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
2076 int pci_for_each_dma_alias(struct pci_dev *pdev,
2077 int (*fn)(struct pci_dev *pdev,
2078 u16 alias, void *data), void *data);
2079
2080 /* helper functions for operation of device flag */
2081 static inline void pci_set_dev_assigned(struct pci_dev *pdev)
2082 {
2083 pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
2084 }
2085 static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
2086 {
2087 pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
2088 }
2089 static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
2090 {
2091 return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
2092 }
2093
2094 /**
2095 * pci_ari_enabled - query ARI forwarding status
2096 * @bus: the PCI bus
2097 *
2098 * Returns true if ARI forwarding is enabled.
2099 */
2100 static inline bool pci_ari_enabled(struct pci_bus *bus)
2101 {
2102 return bus->self && bus->self->ari_enabled;
2103 }
2104
2105 /* provide the legacy pci_dma_* API */
2106 #include <linux/pci-dma-compat.h>
2107
2108 #endif /* LINUX_PCI_H */
1 #ifndef __LINUX_SPINLOCK_H
2 #define __LINUX_SPINLOCK_H
3
4 /*
5 * include/linux/spinlock.h - generic spinlock/rwlock declarations
6 *
7 * here's the role of the various spinlock/rwlock related include files:
8 *
9 * on SMP builds:
10 *
11 * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
12 * initializers
13 *
14 * linux/spinlock_types.h:
15 * defines the generic type and initializers
16 *
17 * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel
18 * implementations, mostly inline assembly code
19 *
20 * (also included on UP-debug builds:)
21 *
22 * linux/spinlock_api_smp.h:
23 * contains the prototypes for the _spin_*() APIs.
24 *
25 * linux/spinlock.h: builds the final spin_*() APIs.
26 *
27 * on UP builds:
28 *
29 * linux/spinlock_type_up.h:
30 * contains the generic, simplified UP spinlock type.
31 * (which is an empty structure on non-debug builds)
32 *
33 * linux/spinlock_types.h:
34 * defines the generic type and initializers
35 *
36 * linux/spinlock_up.h:
37 * contains the arch_spin_*()/etc. version of UP
38 * builds. (which are NOPs on non-debug, non-preempt
39 * builds)
40 *
41 * (included on UP-non-debug builds:)
42 *
43 * linux/spinlock_api_up.h:
44 * builds the _spin_*() APIs.
45 *
46 * linux/spinlock.h: builds the final spin_*() APIs.
47 */
48
49 #include <linux/typecheck.h>
50 #include <linux/preempt.h>
51 #include <linux/linkage.h>
52 #include <linux/compiler.h>
53 #include <linux/irqflags.h>
54 #include <linux/thread_info.h>
55 #include <linux/kernel.h>
56 #include <linux/stringify.h>
57 #include <linux/bottom_half.h>
58 #include <asm/barrier.h>
59
60
61 /*
62 * Must define these before including other files, inline functions need them
63 */
64 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
65
66 #define LOCK_SECTION_START(extra) \
67 ".subsection 1\n\t" \
68 extra \
69 ".ifndef " LOCK_SECTION_NAME "\n\t" \
70 LOCK_SECTION_NAME ":\n\t" \
71 ".endif\n"
72
73 #define LOCK_SECTION_END \
74 ".previous\n\t"
75
76 #define __lockfunc __attribute__((section(".spinlock.text")))
77
78 /*
79 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
80 */
81 #include <linux/spinlock_types.h>
82
83 /*
84 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
85 */
86 #ifdef CONFIG_SMP
87 # include <asm/spinlock.h>
88 #else
89 # include <linux/spinlock_up.h>
90 #endif
91
92 #ifdef CONFIG_DEBUG_SPINLOCK
93 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
94 struct lock_class_key *key);
95 # define raw_spin_lock_init(lock) \
96 do { \
97 static struct lock_class_key __key; \
98 \
99 __raw_spin_lock_init((lock), #lock, &__key); \
100 } while (0)
101
102 #else
103 # define raw_spin_lock_init(lock) \
104 do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
105 #endif
106
107 #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
108
109 #ifdef CONFIG_GENERIC_LOCKBREAK
110 #define raw_spin_is_contended(lock) ((lock)->break_lock)
111 #else
112
113 #ifdef arch_spin_is_contended
114 #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
115 #else
116 #define raw_spin_is_contended(lock) (((void)(lock), 0))
117 #endif /*arch_spin_is_contended*/
118 #endif
119
120 /*
121 * Despite its name it doesn't necessarily have to be a full barrier.
122 * It should only guarantee that a STORE before the critical section
123 * can not be reordered with LOADs and STOREs inside this section.
124 * spin_lock() is the one-way barrier, this LOAD can not escape out
125 * of the region. So the default implementation simply ensures that
126 * a STORE can not move into the critical section, smp_wmb() should
127 * serialize it with another STORE done by spin_lock().
128 */
129 #ifndef smp_mb__before_spinlock
130 #define smp_mb__before_spinlock() smp_wmb()
131 #endif
132
133 /**
134 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
135 * @lock: the spinlock in question.
136 */
137 #define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
138
139 #ifdef CONFIG_DEBUG_SPINLOCK
140 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
141 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
142 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
143 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
144 #else
145 static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
146 {
147 __acquire(lock);
148 arch_spin_lock(&lock->raw_lock);
149 }
150
151 static inline void
152 do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
153 {
154 __acquire(lock);
155 arch_spin_lock_flags(&lock->raw_lock, *flags);
156 }
157
158 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
159 {
160 return arch_spin_trylock(&(lock)->raw_lock);
161 }
162
163 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
164 {
165 arch_spin_unlock(&lock->raw_lock);
166 __release(lock);
167 }
168 #endif
169
170 /*
171 * Define the various spin_lock methods. Note we define these
172 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
173 * various methods are defined as nops in the case they are not
174 * required.
175 */
176 #define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
177
178 #define raw_spin_lock(lock) _raw_spin_lock(lock)
179
180 #ifdef CONFIG_DEBUG_LOCK_ALLOC
181 # define raw_spin_lock_nested(lock, subclass) \
182 _raw_spin_lock_nested(lock, subclass)
183 # define raw_spin_lock_bh_nested(lock, subclass) \
184 _raw_spin_lock_bh_nested(lock, subclass)
185
186 # define raw_spin_lock_nest_lock(lock, nest_lock) \
187 do { \
188 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
189 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
190 } while (0)
191 #else
192 /*
193 * Always evaluate the 'subclass' argument to avoid that the compiler
194 * warns about set-but-not-used variables when building with
195 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
196 */
197 # define raw_spin_lock_nested(lock, subclass) \
198 _raw_spin_lock(((void)(subclass), (lock)))
199 # define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
200 # define raw_spin_lock_bh_nested(lock, subclass) _raw_spin_lock_bh(lock)
201 #endif
202
203 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
204
205 #define raw_spin_lock_irqsave(lock, flags) \
206 do { \
207 typecheck(unsigned long, flags); \
208 flags = _raw_spin_lock_irqsave(lock); \
209 } while (0)
210
211 #ifdef CONFIG_DEBUG_LOCK_ALLOC
212 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
213 do { \
214 typecheck(unsigned long, flags); \
215 flags = _raw_spin_lock_irqsave_nested(lock, subclass); \
216 } while (0)
217 #else
218 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
219 do { \
220 typecheck(unsigned long, flags); \
221 flags = _raw_spin_lock_irqsave(lock); \
222 } while (0)
223 #endif
224
225 #else
226
227 #define raw_spin_lock_irqsave(lock, flags) \
228 do { \
229 typecheck(unsigned long, flags); \
230 _raw_spin_lock_irqsave(lock, flags); \
231 } while (0)
232
233 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
234 raw_spin_lock_irqsave(lock, flags)
235
236 #endif
237
238 #define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
239 #define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)
240 #define raw_spin_unlock(lock) _raw_spin_unlock(lock)
241 #define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
242
243 #define raw_spin_unlock_irqrestore(lock, flags) \
244 do { \
245 typecheck(unsigned long, flags); \
246 _raw_spin_unlock_irqrestore(lock, flags); \
247 } while (0)
248 #define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
249
250 #define raw_spin_trylock_bh(lock) \
251 __cond_lock(lock, _raw_spin_trylock_bh(lock))
252
253 #define raw_spin_trylock_irq(lock) \
254 ({ \
255 local_irq_disable(); \
256 raw_spin_trylock(lock) ? \
257 1 : ({ local_irq_enable(); 0; }); \
258 })
259
260 #define raw_spin_trylock_irqsave(lock, flags) \
261 ({ \
262 local_irq_save(flags); \
263 raw_spin_trylock(lock) ? \
264 1 : ({ local_irq_restore(flags); 0; }); \
265 })
266
267 /**
268 * raw_spin_can_lock - would raw_spin_trylock() succeed?
269 * @lock: the spinlock in question.
270 */
271 #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
272
273 /* Include rwlock functions */
274 #include <linux/rwlock.h>
275
276 /*
277 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
278 */
279 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
280 # include <linux/spinlock_api_smp.h>
281 #else
282 # include <linux/spinlock_api_up.h>
283 #endif
284
285 /*
286 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
287 */
288
289 static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
290 {
291 return &lock->rlock;
292 }
293
294 #define spin_lock_init(_lock) \
295 do { \
296 spinlock_check(_lock); \
297 raw_spin_lock_init(&(_lock)->rlock); \
298 } while (0)
299
300 static __always_inline void spin_lock(spinlock_t *lock)
301 {
302 raw_spin_lock(&lock->rlock);
303 }
304
305 static __always_inline void spin_lock_bh(spinlock_t *lock)
306 {
307 raw_spin_lock_bh(&lock->rlock);
308 }
309
310 static __always_inline int spin_trylock(spinlock_t *lock)
311 {
312 return raw_spin_trylock(&lock->rlock);
313 }
314
315 #define spin_lock_nested(lock, subclass) \
316 do { \
317 raw_spin_lock_nested(spinlock_check(lock), subclass); \
318 } while (0)
319
320 #define spin_lock_bh_nested(lock, subclass) \
321 do { \
322 raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
323 } while (0)
324
325 #define spin_lock_nest_lock(lock, nest_lock) \
326 do { \
327 raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
328 } while (0)
329
330 static __always_inline void spin_lock_irq(spinlock_t *lock)
331 {
332 raw_spin_lock_irq(&lock->rlock);
333 }
334
335 #define spin_lock_irqsave(lock, flags) \
336 do { \
337 raw_spin_lock_irqsave(spinlock_check(lock), flags); \
338 } while (0)
339
340 #define spin_lock_irqsave_nested(lock, flags, subclass) \
341 do { \
342 raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
343 } while (0)
344
345 static __always_inline void spin_unlock(spinlock_t *lock)
346 {
347 raw_spin_unlock(&lock->rlock);
348 }
349
350 static __always_inline void spin_unlock_bh(spinlock_t *lock)
351 {
352 raw_spin_unlock_bh(&lock->rlock);
353 }
354
355 static __always_inline void spin_unlock_irq(spinlock_t *lock)
356 {
357 raw_spin_unlock_irq(&lock->rlock);
358 }
359
360 static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
361 {
362 raw_spin_unlock_irqrestore(&lock->rlock, flags);
363 }
364
365 static __always_inline int spin_trylock_bh(spinlock_t *lock)
366 {
367 return raw_spin_trylock_bh(&lock->rlock);
368 }
369
370 static __always_inline int spin_trylock_irq(spinlock_t *lock)
371 {
372 return raw_spin_trylock_irq(&lock->rlock);
373 }
374
375 #define spin_trylock_irqsave(lock, flags) \
376 ({ \
377 raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
378 })
379
380 static __always_inline void spin_unlock_wait(spinlock_t *lock)
381 {
382 raw_spin_unlock_wait(&lock->rlock);
383 }
384
385 static __always_inline int spin_is_locked(spinlock_t *lock)
386 {
387 return raw_spin_is_locked(&lock->rlock);
388 }
389
390 static __always_inline int spin_is_contended(spinlock_t *lock)
391 {
392 return raw_spin_is_contended(&lock->rlock);
393 }
394
395 static __always_inline int spin_can_lock(spinlock_t *lock)
396 {
397 return raw_spin_can_lock(&lock->rlock);
398 }
399
400 #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
401
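A minimal usage sketch of the wrappers above; the lock and the counter it protects are illustrative, and the irqsave variant is chosen on the assumption that the data may also be touched from interrupt context:

static DEFINE_SPINLOCK(example_lock);
static unsigned long example_events;

/* Safe from any context because interrupt state is saved and restored. */
static void example_record_event(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	example_events++;
	spin_unlock_irqrestore(&example_lock, flags);
}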
402 /*
403 * Pull the atomic_t declaration:
404 * (asm-mips/atomic.h needs above definitions)
405 */
406 #include <linux/atomic.h>
407 /**
408 * atomic_dec_and_lock - lock on reaching reference count zero
409 * @atomic: the atomic counter
410 * @lock: the spinlock in question
411 *
412 * Decrements @atomic by 1. If the result is 0, returns true and locks
413 * @lock. Returns false for all other cases.
414 */
415 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
416 #define atomic_dec_and_lock(atomic, lock) \
417 __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
418
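Sketch of the classic refcount-drop pattern this helper enables; the object type, the list, and its lock are illustrative assumptions:

struct example_obj {
	atomic_t refcount;
	struct list_head node;
};
static LIST_HEAD(example_list);
static DEFINE_SPINLOCK(example_list_lock);

static void example_put(struct example_obj *obj)
{
	/* Returns true (with the lock held) only when the count hits 0. */
	if (!atomic_dec_and_lock(&obj->refcount, &example_list_lock))
		return;

	list_del(&obj->node);
	spin_unlock(&example_list_lock);
	kfree(obj);	/* assumes <linux/slab.h> */
}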
419 #endif /* __LINUX_SPINLOCK_H */
Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to decide whether your driver actually contains an error.
The Error trace column shows the path along which the given rule is violated. You can expand or collapse whole classes of entities via the checkboxes in the main menu or in the advanced Others menu, and expand or collapse each particular entity by clicking +/-. Hovering over some entities shows hints. The error trace is also linked to the related source code: line numbers may appear as links on the left, and clicking them opens the corresponding lines in the source code.
The Source code column shows the content of the files related to the error trace: the source code of your driver (note the LDV modifications at the end), kernel headers, and the rule model. Tabs list the currently opened file and the other available files; hovering over a tab shows the full file name, and clicking it displays that file's content.
Kernel | Module | Rule | Verifier | Verdict | Status | Timestamp | Bug report |
linux-4.8-rc1.tar.xz | drivers/net/vmxnet3/vmxnet3.ko | 331_1a | CPAchecker | Bug | Fixed | 2016-10-07 18:24:37 | L0252 |
Comment
Reported: 7 Oct 2016